git.karo-electronics.de Git - mv-sheeva.git/commitdiff
Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
author	Linus Torvalds <torvalds@linux-foundation.org>
Fri, 10 Sep 2010 14:26:27 +0000 (07:26 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
Fri, 10 Sep 2010 14:26:27 +0000 (07:26 -0700)
* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  block: Range check cpu in blk_cpu_to_group
  scatterlist: prevent invalid free when alloc fails
  writeback: Fix lost wake-up shutting down writeback thread
  writeback: do not lose wakeup events when forking bdi threads
  cciss: fix reporting of max queue depth since init
  block: switch s390 tape_block and mg_disk to elevator_change()
  block: add function call to switch the IO scheduler from a driver
  fs/bio-integrity.c: return -ENOMEM on kmalloc failure
  bio-integrity.c: remove dependency on __GFP_NOFAIL
  BLOCK: fix bio.bi_rw handling
  block: put dev->kobj in blk_register_queue fail path
  cciss: handle allocation failure
  cfq-iosched: Documentation help for new tunables
  cfq-iosched: blktrace print per slice sector stats
  cfq-iosched: Implement tunable group_idle
  cfq-iosched: Do group share accounting in IOPS when slice_idle=0
  cfq-iosched: Do not idle if slice_idle=0
  cciss: disable doorbell reset on reset_devices
  blkio: Fix return code for mkdir calls
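
For context, the "block: add function call to switch the IO scheduler from a driver" entry above introduces elevator_change(). The fragment below is a minimal sketch of how a block driver might call it; the function name example_setup_queue and the choice of the "noop" scheduler are illustrative and not taken from any of the merged patches.

        #include <linux/blkdev.h>
        #include <linux/elevator.h>

        /* Hypothetical driver-init fragment: request the "noop" scheduler on a
         * queue whose device already reorders requests internally.  On failure
         * the queue simply keeps whatever elevator is already attached. */
        static int example_setup_queue(struct request_queue *q)
        {
                int err;

                err = elevator_change(q, "noop");   /* helper added in this merge */
                if (err)
                        printk(KERN_WARNING "example: keeping default elevator (%d)\n",
                               err);
                return err;
        }

The s390 tape_block and mg_disk patches listed above switch to this call instead of manipulating elevator internals directly.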

729 files changed:
Documentation/DocBook/kernel-locking.tmpl
Documentation/DocBook/tracepoint.tmpl
Documentation/gpio.txt
Documentation/kernel-parameters.txt
Documentation/lguest/Makefile
Documentation/lguest/lguest.c
Documentation/mutex-design.txt
Documentation/sound/alsa/HD-Audio-Models.txt
MAINTAINERS
Makefile
arch/alpha/include/asm/cache.h
arch/alpha/kernel/err_marvel.c
arch/alpha/kernel/osf_sys.c
arch/alpha/kernel/perf_event.c
arch/alpha/kernel/proto.h
arch/alpha/kernel/sys_cabriolet.c
arch/alpha/kernel/sys_takara.c
arch/arm/Kconfig
arch/arm/boot/Makefile
arch/arm/boot/compressed/Makefile
arch/arm/boot/compressed/head.S
arch/arm/common/it8152.c
arch/arm/configs/omap_4430sdp_defconfig
arch/arm/include/asm/dma-mapping.h
arch/arm/include/asm/perf_event.h
arch/arm/include/asm/unistd.h
arch/arm/kernel/calls.S
arch/arm/kernel/etm.c
arch/arm/kernel/perf_event.c
arch/arm/mach-at91/at91sam9g45.c
arch/arm/mach-at91/at91sam9g45_devices.c
arch/arm/mach-at91/board-sam9261ek.c
arch/arm/mach-at91/clock.c
arch/arm/mach-ep93xx/clock.c
arch/arm/mach-imx/mach-cpuimx27.c
arch/arm/mach-imx/mach-pca100.c
arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c
arch/arm/mach-mx25/mach-cpuimx25.c
arch/arm/mach-mx3/clock-imx35.c
arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c
arch/arm/mach-mx3/mach-cpuimx35.c
arch/arm/mach-mx5/clock-mx51.c
arch/arm/mach-omap2/Makefile
arch/arm/mach-omap2/clock3xxx_data.c
arch/arm/mach-omap2/id.c
arch/arm/mach-omap2/include/mach/entry-macro.S
arch/arm/mach-omap2/omap-smp.c
arch/arm/mach-omap2/pm34xx.c
arch/arm/mach-pxa/cpufreq-pxa2xx.c
arch/arm/mach-pxa/cpufreq-pxa3xx.c
arch/arm/mach-pxa/include/mach/mfp-pxa300.h
arch/arm/mach-s3c2410/include/mach/vmalloc.h
arch/arm/mach-s3c64xx/include/mach/vmalloc.h
arch/arm/mach-s5p6440/include/mach/vmalloc.h
arch/arm/mach-s5p6442/include/mach/vmalloc.h
arch/arm/mach-s5pv210/include/mach/vmalloc.h
arch/arm/mach-s5pv310/clock.c
arch/arm/mach-s5pv310/cpu.c
arch/arm/mach-s5pv310/include/mach/irqs.h
arch/arm/mach-s5pv310/include/mach/map.h
arch/arm/mach-s5pv310/include/mach/regs-clock.h
arch/arm/mach-s5pv310/include/mach/vmalloc.h
arch/arm/mach-s5pv310/platsmp.c
arch/arm/mach-shmobile/Makefile
arch/arm/mach-shmobile/board-ap4evb.c
arch/arm/mach-shmobile/clock-sh7372.c
arch/arm/mach-shmobile/clock.c
arch/arm/mach-shmobile/pm_runtime.c [new file with mode: 0644]
arch/arm/mach-tegra/board-harmony.c
arch/arm/mach-tegra/include/mach/vmalloc.h
arch/arm/mm/Kconfig
arch/arm/mm/dma-mapping.c
arch/arm/plat-mxc/Kconfig
arch/arm/plat-mxc/include/mach/eukrea-baseboards.h
arch/arm/plat-mxc/tzic.c
arch/arm/plat-omap/include/plat/smp.h
arch/arm/plat-pxa/pwm.c
arch/arm/plat-s5p/include/plat/map-s5p.h
arch/arm/tools/mach-types
arch/blackfin/include/asm/bfin_sport.h
arch/blackfin/include/asm/bitops.h
arch/blackfin/include/asm/unistd.h
arch/blackfin/mach-bf518/include/mach/defBF51x_base.h
arch/blackfin/mach-bf527/boards/cm_bf527.c
arch/blackfin/mach-bf527/boards/ezbrd.c
arch/blackfin/mach-bf527/boards/ezkit.c
arch/blackfin/mach-bf527/include/mach/defBF52x_base.h
arch/blackfin/mach-bf533/include/mach/defBF532.h
arch/blackfin/mach-bf537/include/mach/defBF534.h
arch/blackfin/mach-bf538/include/mach/defBF539.h
arch/blackfin/mach-bf548/boards/cm_bf548.c
arch/blackfin/mach-bf548/boards/ezkit.c
arch/blackfin/mach-bf548/include/mach/defBF54x_base.h
arch/blackfin/mach-bf561/include/mach/defBF561.h
arch/blackfin/mach-common/entry.S
arch/h8300/include/asm/atomic.h
arch/h8300/include/asm/system.h
arch/h8300/kernel/sys_h8300.c
arch/h8300/kernel/traps.c
arch/ia64/hp/sim/simserial.c
arch/m68knommu/kernel/vmlinux.lds.S
arch/mn10300/mm/dma-alloc.c
arch/powerpc/Makefile
arch/powerpc/boot/dts/canyonlands.dts
arch/powerpc/include/asm/fsldma.h
arch/powerpc/include/asm/mmu-hash64.h
arch/powerpc/include/asm/reg.h
arch/powerpc/include/asm/rwsem.h
arch/powerpc/include/asm/systbl.h
arch/powerpc/include/asm/unistd.h
arch/powerpc/kernel/cputable.c
arch/powerpc/kernel/crash.c
arch/powerpc/kernel/head_44x.S
arch/powerpc/kernel/head_64.S
arch/powerpc/kernel/idle.c
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/misc_32.S
arch/powerpc/kernel/pci_of_scan.c
arch/powerpc/kernel/process.c
arch/powerpc/kernel/setup_32.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/kernel/smp.c
arch/powerpc/kernel/sys_ppc32.c
arch/powerpc/kernel/time.c
arch/powerpc/kernel/vio.c
arch/powerpc/mm/init_64.c
arch/powerpc/mm/tlb_nohash_low.S
arch/powerpc/platforms/83xx/mpc837x_mds.c
arch/powerpc/platforms/85xx/mpc85xx_mds.c
arch/powerpc/platforms/85xx/p1022_ds.c
arch/powerpc/platforms/Kconfig
arch/powerpc/platforms/cell/iommu.c
arch/powerpc/platforms/iseries/iommu.c
arch/powerpc/platforms/powermac/feature.c
arch/powerpc/platforms/powermac/pci.c
arch/powerpc/platforms/pseries/dlpar.c
arch/powerpc/platforms/pseries/iommu.c
arch/powerpc/platforms/pseries/smp.c
arch/powerpc/platforms/pseries/xics.c
arch/powerpc/sysdev/fsl_pci.c
arch/powerpc/sysdev/fsl_rio.c
arch/powerpc/sysdev/qe_lib/qe.c
arch/powerpc/xmon/xmon.c
arch/s390/include/asm/hugetlb.h
arch/s390/include/asm/mmu.h
arch/s390/include/asm/mmu_context.h
arch/s390/include/asm/pgtable.h
arch/s390/include/asm/tlb.h
arch/s390/include/asm/tlbflush.h
arch/s390/kernel/entry.h
arch/s390/kernel/smp.c
arch/s390/mm/init.c
arch/sparc/include/asm/atomic_64.h
arch/sparc/include/asm/backoff.h
arch/sparc/include/asm/oplib_64.h
arch/sparc/include/asm/rwsem-const.h [deleted file]
arch/sparc/include/asm/rwsem.h
arch/sparc/include/asm/system_64.h
arch/sparc/kernel/process_64.c
arch/sparc/lib/Makefile
arch/sparc/lib/atomic_64.S
arch/sparc/lib/bitops.S
arch/sparc/lib/rwsem_64.S [deleted file]
arch/sparc/prom/cif.S
arch/sparc/prom/console_64.c
arch/sparc/prom/devops_64.c
arch/sparc/prom/misc_64.c
arch/sparc/prom/p1275.c
arch/sparc/prom/tree_64.c
arch/um/drivers/mconsole_kern.c
arch/x86/include/asm/iomap.h
arch/x86/include/asm/pci.h
arch/x86/include/asm/tsc.h
arch/x86/kernel/cpu/mcheck/mce_amd.c
arch/x86/kernel/cpu/mcheck/therm_throt.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_p4.c
arch/x86/kernel/trampoline.c
arch/x86/kernel/tsc.c
arch/x86/mm/iomap_32.c
arch/x86/oprofile/nmi_int.c
arch/x86/power/cpu.c
arch/x86/xen/platform-pci-unplug.c
crypto/Kconfig
crypto/ahash.c
crypto/algboss.c
crypto/testmgr.c
drivers/acpi/pci_root.c
drivers/ata/Kconfig
drivers/ata/Makefile
drivers/ata/ahci.c
drivers/ata/ahci.h
drivers/ata/ata_piix.c
drivers/ata/libahci.c
drivers/ata/libata-core.c
drivers/ata/libata-eh.c
drivers/ata/libata-sff.c
drivers/ata/pata_artop.c
drivers/ata/pata_cmd64x.c
drivers/ata/pata_legacy.c
drivers/ata/pata_via.c
drivers/ata/pata_winbond.c [deleted file]
drivers/ata/sata_dwc_460ex.c
drivers/ata/sata_mv.c
drivers/base/firmware_class.c
drivers/block/xen-blkfront.c
drivers/char/agp/intel-agp.c
drivers/char/agp/intel-agp.h
drivers/char/agp/intel-gtt.c
drivers/char/hangcheck-timer.c
drivers/char/hvc_console.c
drivers/char/hvsi.c
drivers/char/hw_random/n2-drv.c
drivers/char/ip2/ip2main.c
drivers/char/rocket.c
drivers/char/synclink_gt.c
drivers/char/sysrq.c
drivers/char/tty_io.c
drivers/char/vt.c
drivers/char/vt_ioctl.c
drivers/edac/amd64_edac.c
drivers/edac/edac_mce_amd.c
drivers/firewire/core-transaction.c
drivers/firewire/net.c
drivers/firewire/ohci.c
drivers/firewire/sbp2.c
drivers/gpio/sx150x.c
drivers/gpu/drm/drm_crtc_helper.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_fops.c
drivers/gpu/drm/drm_lock.c
drivers/gpu/drm/drm_mm.c
drivers/gpu/drm/drm_modes.c
drivers/gpu/drm/drm_vm.c
drivers/gpu/drm/i810/i810_dma.c
drivers/gpu/drm/i830/i830_dma.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/mga/mga_state.c
drivers/gpu/drm/nouveau/nouveau_bios.c
drivers/gpu/drm/nouveau/nouveau_bios.h
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_channel.c
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nouveau_drv.h
drivers/gpu/drm/nouveau/nouveau_fence.c
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/nouveau/nouveau_i2c.c
drivers/gpu/drm/nouveau/nouveau_sgdma.c
drivers/gpu/drm/nouveau/nv04_dfp.c
drivers/gpu/drm/nouveau/nv17_tv.c
drivers/gpu/drm/nouveau/nv50_instmem.c
drivers/gpu/drm/nouveau/nvc0_instmem.c
drivers/gpu/drm/r128/r128_state.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_agp.c
drivers/gpu/drm/radeon/radeon_asic.c
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_clocks.c
drivers/gpu/drm/radeon/radeon_combios.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_encoders.c
drivers/gpu/drm/radeon/radeon_fb.c
drivers/gpu/drm/radeon/radeon_i2c.c
drivers/gpu/drm/radeon/radeon_irq_kms.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_legacy_crtc.c
drivers/gpu/drm/radeon/radeon_legacy_encoders.c
drivers/gpu/drm/radeon/radeon_mode.h
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/radeon_state.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/savage/savage_bci.c
drivers/gpu/drm/sis/sis_mm.c
drivers/gpu/drm/via/via_dma.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/hwmon/ads7871.c
drivers/hwmon/coretemp.c
drivers/hwmon/hp_accel.c
drivers/hwmon/k8temp.c
drivers/ieee1394/ohci1394.c
drivers/infiniband/hw/cxgb3/cxio_hal.h
drivers/infiniband/hw/nes/nes_cm.c
drivers/infiniband/hw/nes/nes_hw.c
drivers/infiniband/hw/nes/nes_hw.h
drivers/infiniband/hw/nes/nes_nic.c
drivers/input/input.c
drivers/input/keyboard/hil_kbd.c
drivers/input/keyboard/pxa27x_keypad.c
drivers/input/misc/uinput.c
drivers/input/mouse/bcm5974.c
drivers/input/mousedev.c
drivers/input/serio/i8042.c
drivers/input/tablet/wacom_wac.c
drivers/isdn/hardware/avm/Kconfig
drivers/macintosh/via-pmu.c
drivers/md/.gitignore [deleted file]
drivers/md/bitmap.c
drivers/md/md.c
drivers/md/md.h
drivers/media/dvb/mantis/Kconfig
drivers/mmc/core/sdio.c
drivers/mmc/host/at91_mci.c
drivers/mmc/host/imxmmc.c
drivers/mmc/host/omap_hsmmc.c
drivers/mmc/host/s3cmci.c
drivers/mmc/host/tmio_mmc.c
drivers/mmc/host/tmio_mmc.h
drivers/mtd/ubi/Kconfig.debug
drivers/mtd/ubi/cdev.c
drivers/mtd/ubi/scan.c
drivers/mtd/ubi/wl.c
drivers/net/3c59x.c
drivers/net/Kconfig
drivers/net/Makefile
drivers/net/bnx2x/bnx2x.h
drivers/net/bnx2x/bnx2x_main.c
drivers/net/caif/Kconfig
drivers/net/e1000e/82571.c
drivers/net/e1000e/defines.h
drivers/net/e1000e/lib.c
drivers/net/ehea/ehea.h
drivers/net/ehea/ehea_main.c
drivers/net/ibm_newemac/debug.c
drivers/net/ibmveth.c
drivers/net/ll_temac_main.c
drivers/net/netxen/netxen_nic.h
drivers/net/netxen/netxen_nic_init.c
drivers/net/netxen/netxen_nic_main.c
drivers/net/pcmcia/pcnet_cs.c
drivers/net/phy/phy_device.c
drivers/net/pxa168_eth.c [new file with mode: 0644]
drivers/net/qlcnic/qlcnic_main.c
drivers/net/qlge/qlge_main.c
drivers/net/sh_eth.c
drivers/net/usb/ipheth.c
drivers/net/wireless/adm8211.c
drivers/net/wireless/at76c50x-usb.c
drivers/net/wireless/ath/ar9170/main.c
drivers/net/wireless/ath/ath5k/base.c
drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
drivers/net/wireless/ath/ath9k/eeprom.h
drivers/net/wireless/ath/regd.h
drivers/net/wireless/ipw2x00/ipw2100.c
drivers/net/wireless/iwlwifi/iwl-1000.c
drivers/net/wireless/iwlwifi/iwl-3945.c
drivers/net/wireless/iwlwifi/iwl-4965.c
drivers/net/wireless/iwlwifi/iwl-5000.c
drivers/net/wireless/iwlwifi/iwl-6000.c
drivers/net/wireless/iwlwifi/iwl-agn.c
drivers/net/wireless/iwlwifi/iwl-core.c
drivers/net/wireless/iwlwifi/iwl-core.h
drivers/net/wireless/iwlwifi/iwl-dev.h
drivers/net/wireless/iwlwifi/iwl3945-base.c
drivers/net/wireless/libertas/if_sdio.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/mwl8k.c
drivers/net/wireless/p54/eeprom.c
drivers/net/wireless/p54/fwio.c
drivers/net/wireless/p54/led.c
drivers/net/wireless/p54/p54pci.c
drivers/net/wireless/p54/txrx.c
drivers/net/wireless/rtl818x/rtl8180_dev.c
drivers/net/wireless/rtl818x/rtl8187_dev.c
drivers/net/wireless/rtl818x/rtl8187_rtl8225.c
drivers/oprofile/buffer_sync.c
drivers/oprofile/cpu_buffer.c
drivers/pci/hotplug/acpi_pcihp.c
drivers/pci/hotplug/pciehp.h
drivers/pci/hotplug/pciehp_acpi.c
drivers/pci/hotplug/pciehp_core.c
drivers/pci/pci.h
drivers/pci/pcie/Makefile
drivers/pci/pcie/aer/aerdrv.c
drivers/pci/pcie/aer/aerdrv_acpi.c
drivers/pci/pcie/aer/aerdrv_core.c
drivers/pci/pcie/pme.c [moved from drivers/pci/pcie/pme/pcie_pme.c with 83% similarity]
drivers/pci/pcie/pme/Makefile [deleted file]
drivers/pci/pcie/pme/pcie_pme.h [deleted file]
drivers/pci/pcie/pme/pcie_pme_acpi.c [deleted file]
drivers/pci/pcie/portdrv.h
drivers/pci/pcie/portdrv_acpi.c [new file with mode: 0644]
drivers/pci/pcie/portdrv_core.c
drivers/pci/pcie/portdrv_pci.c
drivers/pci/slot.c
drivers/platform/x86/Kconfig
drivers/platform/x86/hp-wmi.c
drivers/platform/x86/intel_rar_register.c
drivers/platform/x86/intel_scu_ipc.c
drivers/rtc/rtc-bfin.c
drivers/rtc/rtc-m41t80.c
drivers/rtc/rtc-pl031.c
drivers/s390/char/ctrlchar.c
drivers/s390/char/keyboard.c
drivers/serial/68328serial.c
drivers/serial/8250_early.c
drivers/serial/bfin_sport_uart.c
drivers/serial/sn_console.c
drivers/staging/Kconfig
drivers/staging/Makefile
drivers/staging/batman-adv/bat_sysfs.c
drivers/staging/batman-adv/hard-interface.c
drivers/staging/batman-adv/icmp_socket.c
drivers/staging/batman-adv/main.c
drivers/staging/batman-adv/originator.c
drivers/staging/batman-adv/routing.c
drivers/staging/batman-adv/types.h
drivers/staging/comedi/drivers/das08_cs.c
drivers/staging/hv/netvsc_drv.c
drivers/staging/hv/ring_buffer.c
drivers/staging/hv/storvsc_api.h
drivers/staging/hv/storvsc_drv.c
drivers/staging/octeon/Kconfig
drivers/staging/rt2860/usb_main_dev.c
drivers/staging/sep/Kconfig [deleted file]
drivers/staging/sep/Makefile [deleted file]
drivers/staging/sep/TODO [deleted file]
drivers/staging/sep/sep_dev.h [deleted file]
drivers/staging/sep/sep_driver.c [deleted file]
drivers/staging/sep/sep_driver_api.h [deleted file]
drivers/staging/sep/sep_driver_config.h [deleted file]
drivers/staging/sep/sep_driver_hw_defs.h [deleted file]
drivers/staging/spectra/Kconfig
drivers/staging/spectra/ffsport.c
drivers/staging/spectra/flash.c
drivers/staging/wlan-ng/cfg80211.c
drivers/staging/zram/zram_drv.c
drivers/usb/atm/cxacru.c
drivers/usb/class/cdc-acm.c
drivers/usb/core/message.c
drivers/usb/gadget/composite.c
drivers/usb/gadget/m66592-udc.c
drivers/usb/gadget/r8a66597-udc.c
drivers/usb/gadget/rndis.c
drivers/usb/gadget/rndis.h
drivers/usb/gadget/s3c-hsotg.c
drivers/usb/gadget/uvc_v4l2.c
drivers/usb/host/ehci-ppc-of.c
drivers/usb/host/isp1760-hcd.c
drivers/usb/host/xhci-ring.c
drivers/usb/misc/adutux.c
drivers/usb/misc/iowarrior.c
drivers/usb/otg/twl4030-usb.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/generic.c
drivers/usb/serial/io_ti.c
drivers/usb/serial/mos7840.c
drivers/usb/serial/navman.c
drivers/usb/serial/option.c
drivers/usb/serial/pl2303.c
drivers/usb/serial/pl2303.h
drivers/usb/serial/ssu100.c
drivers/usb/serial/usb-serial.c
drivers/vhost/vhost.c
drivers/video/pxa168fb.c
drivers/xen/events.c
drivers/xen/manage.c
firmware/Makefile
fs/9p/fid.c
fs/binfmt_misc.c
fs/ceph/addr.c
fs/ceph/auth_x.c
fs/ceph/caps.c
fs/ceph/debugfs.c
fs/ceph/dir.c
fs/ceph/inode.c
fs/ceph/locks.c
fs/ceph/mds_client.c
fs/ceph/mds_client.h
fs/ceph/osd_client.c
fs/ceph/snap.c
fs/ceph/super.h
fs/ceph/xattr.c
fs/cifs/Kconfig
fs/cifs/asn1.c
fs/cifs/cifs_unicode.h
fs/cifs/cifs_uniupr.h
fs/cifs/cifsencrypt.c
fs/cifs/cifsglob.h
fs/cifs/cifspdu.h
fs/cifs/cifsproto.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/dir.c
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/ntlmssp.h
fs/cifs/sess.c
fs/cifs/transport.c
fs/direct-io.c
fs/ecryptfs/crypto.c
fs/ecryptfs/inode.c
fs/ecryptfs/keystore.c
fs/ecryptfs/kthread.c
fs/ecryptfs/messaging.c
fs/ecryptfs/miscdev.c
fs/fcntl.c
fs/fuse/dev.c
fs/fuse/file.c
fs/minix/namei.c
fs/namespace.c
fs/nfsd/nfs4state.c
fs/nfsd/state.h
fs/nfsd/vfs.c
fs/nilfs2/the_nilfs.c
fs/notify/fanotify/fanotify.c
fs/notify/fanotify/fanotify_user.c
fs/notify/fsnotify.c
fs/ocfs2/alloc.c
fs/ocfs2/blockcheck.c
fs/ocfs2/file.c
fs/ocfs2/inode.c
fs/ocfs2/mmap.c
fs/ocfs2/namei.c
fs/ocfs2/refcounttree.c
fs/ocfs2/suballoc.c
fs/ocfs2/suballoc.h
fs/proc/page.c
fs/proc/task_mmu.c
fs/sysfs/file.c
fs/xfs/linux-2.6/xfs_aops.c
fs/xfs/linux-2.6/xfs_buf.c
fs/xfs/linux-2.6/xfs_buf.h
fs/xfs/linux-2.6/xfs_ioctl.c
fs/xfs/linux-2.6/xfs_iops.c
fs/xfs/linux-2.6/xfs_super.c
fs/xfs/linux-2.6/xfs_sync.c
fs/xfs/xfs_bmap.c
fs/xfs/xfs_fs.h
fs/xfs/xfs_fsops.c
fs/xfs/xfs_fsops.h
fs/xfs/xfs_ialloc.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_log.c
fs/xfs/xfs_log_cil.c
fs/xfs/xfs_log_priv.h
fs/xfs/xfs_trans.c
fs/xfs/xfs_trans_priv.h
fs/xfs/xfs_vnodeops.c
include/acpi/acpi_bus.h
include/asm-generic/gpio.h
include/asm-generic/percpu.h
include/drm/drmP.h
include/drm/i830_drm.h
include/drm/i915_drm.h
include/drm/mga_drm.h
include/drm/nouveau_drm.h
include/drm/radeon_drm.h
include/drm/savage_drm.h
include/linux/acpi.h
include/linux/cgroup.h
include/linux/fanotify.h
include/linux/fsnotify_backend.h
include/linux/i2c/sx150x.h
include/linux/if_ether.h
include/linux/if_fddi.h
include/linux/if_hippi.h
include/linux/if_pppox.h
include/linux/intel-gtt.h [new file with mode: 0644]
include/linux/io-mapping.h
include/linux/ipv6.h
include/linux/kfifo.h
include/linux/kobject.h
include/linux/kobject_ns.h [new file with mode: 0644]
include/linux/ksm.h
include/linux/lglock.h
include/linux/libata.h
include/linux/miscdevice.h
include/linux/mm.h
include/linux/mmc/sdio.h
include/linux/mmzone.h
include/linux/mutex.h
include/linux/nbd.h
include/linux/ncp.h
include/linux/netfilter/xt_IDLETIMER.h
include/linux/netfilter/xt_ipvs.h
include/linux/pci.h
include/linux/pci_ids.h
include/linux/percpu.h
include/linux/phonet.h
include/linux/pxa168_eth.h [new file with mode: 0644]
include/linux/rfkill.h
include/linux/semaphore.h
include/linux/serial.h
include/linux/serial_core.h
include/linux/swap.h
include/linux/sysfs.h
include/linux/sysrq.h
include/linux/uinput.h
include/linux/usb/composite.h
include/linux/usb/serial.h
include/linux/vgaarb.h
include/linux/vmstat.h
include/linux/workqueue.h
include/net/tcp.h
include/trace/events/timer.h
include/xen/platform_pci.h
kernel/cgroup.c
kernel/debug/debug_core.c
kernel/debug/kdb/kdb_bp.c
kernel/debug/kdb/kdb_main.c
kernel/gcov/fs.c
kernel/groups.c
kernel/hrtimer.c
kernel/mutex.c
kernel/perf_event.c
kernel/pm_qos_params.c
kernel/power/hibernate.c
kernel/power/poweroff.c
kernel/power/snapshot.c
kernel/power/swap.c
kernel/sched.c
kernel/sched_fair.c
kernel/sys.c
kernel/sysctl.c
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace_stack.c
kernel/watchdog.c
kernel/workqueue.c
lib/kobject_uevent.c
lib/raid6/.gitignore [new file with mode: 0644]
mm/Kconfig
mm/bounce.c
mm/compaction.c
mm/ksm.c
mm/memory.c
mm/memory_hotplug.c
mm/mlock.c
mm/mmap.c
mm/mmzone.c
mm/page-writeback.c
mm/page_alloc.c
mm/percpu.c
mm/percpu_up.c
mm/rmap.c
mm/swapfile.c
mm/vmstat.c
net/8021q/vlan_dev.c
net/ax25/ax25_ds_timer.c
net/bridge/br_netfilter.c
net/caif/cfrfml.c
net/core/gen_estimator.c
net/core/skbuff.c
net/ipv4/Kconfig
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/tcp.c
net/ipv4/tcp_cong.c
net/ipv4/tcp_timer.c
net/ipv6/netfilter/ip6_tables.c
net/irda/af_irda.c
net/irda/irlan/irlan_eth.c
net/l2tp/l2tp_eth.c
net/mac80211/main.c
net/netfilter/ipvs/ip_vs_ftp.c
net/netlink/af_netlink.c
net/rds/recv.c
net/sched/act_police.c
net/sched/sch_hfsc.c
net/wireless/core.c
net/wireless/wext-compat.c
net/wireless/wext-core.c
net/xfrm/xfrm_user.c
scripts/kconfig/confdata.c
scripts/kconfig/symbol.c
scripts/mkmakefile
scripts/setlocalversion
security/apparmor/include/resource.h
security/apparmor/lib.c
security/apparmor/lsm.c
security/apparmor/path.c
security/apparmor/policy.c
security/apparmor/resource.c
security/integrity/ima/ima.h
security/integrity/ima/ima_iint.c
security/integrity/ima/ima_main.c
sound/core/pcm.c
sound/core/rawmidi.c
sound/core/seq/oss/seq_oss_init.c
sound/isa/msnd/msnd_pinnacle.c
sound/oss/sound_timer.c
sound/pci/asihpi/hpi6205.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_codec.h
sound/pci/hda/hda_eld.c
sound/pci/hda/patch_cirrus.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_intelhdmi.c
sound/pci/hda/patch_nvhdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/pci/intel8x0.c
sound/pci/oxygen/oxygen.h
sound/pci/oxygen/oxygen_lib.c
sound/pci/oxygen/virtuoso.c
sound/pci/oxygen/xonar_wm87x6.c
sound/soc/imx/imx-ssi.c
sound/soc/soc-core.c
sound/usb/card.c
sound/usb/clock.c
sound/usb/endpoint.c
sound/usb/format.c
sound/usb/mixer.c
sound/usb/pcm.c
tools/perf/util/callchain.h

index 0b1a3f97f285361a4075c8e267d42b2053747d9a..a0d479d1e1dd872bd1ae7b4d17d4582df03a384c 100644 (file)
@@ -1961,6 +1961,12 @@ machines due to caching.
    </sect1>
   </chapter>
 
+  <chapter id="apiref">
+   <title>Mutex API reference</title>
+!Iinclude/linux/mutex.h
+!Ekernel/mutex.c
+  </chapter>
+
   <chapter id="references">
    <title>Further reading</title>
 
index e8473eae2a2064ecef2afadf2e92551dc327bea3..b57a9ede32249bf51b69988740e455da06b7687c 100644 (file)
    <title>Block IO</title>
 !Iinclude/trace/events/block.h
   </chapter>
+
+  <chapter id="workqueue">
+   <title>Workqueue</title>
+!Iinclude/trace/events/workqueue.h
+  </chapter>
 </book>
index d96a6dba57489bc6bbf3e747d82cd450084e5609..9633da01ff46afb008566ccb53aa381606654e85 100644 (file)
@@ -109,17 +109,19 @@ use numbers 2000-2063 to identify GPIOs in a bank of I2C GPIO expanders.
 
 If you want to initialize a structure with an invalid GPIO number, use
 some negative number (perhaps "-EINVAL"); that will never be valid.  To
-test if a number could reference a GPIO, you may use this predicate:
+test if such number from such a structure could reference a GPIO, you
+may use this predicate:
 
        int gpio_is_valid(int number);
 
 A number that's not valid will be rejected by calls which may request
 or free GPIOs (see below).  Other numbers may also be rejected; for
-example, a number might be valid but unused on a given board.
-
-Whether a platform supports multiple GPIO controllers is currently a
-platform-specific implementation issue.
+example, a number might be valid but temporarily unused on a given board.
 
+Whether a platform supports multiple GPIO controllers is a platform-specific
+implementation issue, as are whether that support can leave "holes" in the space
+of GPIO numbers, and whether new controllers can be added at runtime.  Such issues
+can affect things including whether adjacent GPIO numbers are both valid.
 
 Using GPIOs
 -----------
@@ -480,12 +482,16 @@ To support this framework, a platform's Kconfig will "select" either
 ARCH_REQUIRE_GPIOLIB or ARCH_WANT_OPTIONAL_GPIOLIB
 and arrange that its <asm/gpio.h> includes <asm-generic/gpio.h> and defines
 three functions: gpio_get_value(), gpio_set_value(), and gpio_cansleep().
-They may also want to provide a custom value for ARCH_NR_GPIOS.
 
-ARCH_REQUIRE_GPIOLIB means that the gpio-lib code will always get compiled
+It may also provide a custom value for ARCH_NR_GPIOS, so that it better
+reflects the number of GPIOs in actual use on that platform, without
+wasting static table space.  (It should count both built-in/SoC GPIOs and
+also ones on GPIO expanders.
+
+ARCH_REQUIRE_GPIOLIB means that the gpiolib code will always get compiled
 into the kernel on that architecture.
 
-ARCH_WANT_OPTIONAL_GPIOLIB means the gpio-lib code defaults to off and the user
+ARCH_WANT_OPTIONAL_GPIOLIB means the gpiolib code defaults to off and the user
 can enable it and build it into the kernel optionally.
 
 If neither of these options are selected, the platform does not support
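
The gpio.txt hunks above tighten the wording around gpio_is_valid(); as a hedged illustration of the check they describe, board-support code might look like the sketch below (example_claim_button and the "example-button" label are made up for illustration).

        #include <linux/gpio.h>
        #include <linux/errno.h>

        /* The GPIO number typically comes from platform data and may be set to a
         * negative value such as -EINVAL when the line is not wired on a board. */
        static int example_claim_button(int gpio)
        {
                int err;

                if (!gpio_is_valid(gpio))
                        return -ENODEV;         /* never a usable GPIO number */

                err = gpio_request(gpio, "example-button");
                if (err)
                        return err;             /* valid number, but e.g. already claimed */

                return gpio_direction_input(gpio);
        }
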
index 2c85c0692b015cc099710b0ba4b9a4f0c30a745d..8dd7248508a9e12ac8908c24fb4a3649f157697a 100644 (file)
@@ -1974,15 +1974,18 @@ and is between 256 and 4096 characters. It is defined in the file
                force   Enable ASPM even on devices that claim not to support it.
                        WARNING: Forcing ASPM on may cause system lockups.
 
+       pcie_ports=     [PCIE] PCIe ports handling:
+               auto    Ask the BIOS whether or not to use native PCIe services
+                       associated with PCIe ports (PME, hot-plug, AER).  Use
+                       them only if that is allowed by the BIOS.
+               native  Use native PCIe services associated with PCIe ports
+                       unconditionally.
+               compat  Treat PCIe ports as PCI-to-PCI bridges, disable the PCIe
+                       ports driver.
+
        pcie_pme=       [PCIE,PM] Native PCIe PME signaling options:
-                       Format: {auto|force}[,nomsi]
-               auto    Use native PCIe PME signaling if the BIOS allows the
-                       kernel to control PCIe config registers of root ports.
-               force   Use native PCIe PME signaling even if the BIOS refuses
-                       to allow the kernel to control the relevant PCIe config
-                       registers.
                nomsi   Do not use MSI for native PCIe PME signaling (this makes
-                       all PCIe root ports use INTx for everything).
+                       all PCIe root ports use INTx for all services).
 
        pcmv=           [HW,PCMCIA] BadgePAD 4
 
@@ -2629,8 +2632,10 @@ and is between 256 and 4096 characters. It is defined in the file
                        aux-ide-disks -- unplug non-primary-master IDE devices
                        nics -- unplug network devices
                        all -- unplug all emulated devices (NICs and IDE disks)
-                       ignore -- continue loading the Xen platform PCI driver even
-                               if the version check failed
+                       unnecessary -- unplugging emulated devices is
+                               unnecessary even if the host did not respond to
+                               the unplug protocol
+                       never -- do not unplug even if version check succeeds
 
        xirc2ps_cs=     [NET,PCMCIA]
                        Format:
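
Putting the kernel-parameters.txt changes above together, a hypothetical boot command line exercising the new pcie_ports= handling alongside the reduced pcie_pme= option could look like the following (the image name and root device are placeholders):

        vmlinuz-2.6.36-rc3 root=/dev/sda1 pcie_ports=native pcie_pme=nomsi
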
index 28c8cdfcafd8693898ae4c4a4d9bc5f4f9f77d62..bebac6b4f332cc117287f58c0cf5df11bf5531f8 100644 (file)
@@ -1,5 +1,6 @@
 # This creates the demonstration utility "lguest" which runs a Linux guest.
-CFLAGS:=-m32 -Wall -Wmissing-declarations -Wmissing-prototypes -O3 -I../../include -I../../arch/x86/include -U_FORTIFY_SOURCE
+# Missing headers?  Add "-I../../include -I../../arch/x86/include"
+CFLAGS:=-m32 -Wall -Wmissing-declarations -Wmissing-prototypes -O3 -U_FORTIFY_SOURCE
 
 all: lguest
 
index e9ce3c5545145f9b2c683c26690c0159b6c437fc..8a6a8c6d498043dc00184906d71a55e680844ef2 100644 (file)
 #include <limits.h>
 #include <stddef.h>
 #include <signal.h>
-#include "linux/lguest_launcher.h"
-#include "linux/virtio_config.h"
-#include "linux/virtio_net.h"
-#include "linux/virtio_blk.h"
-#include "linux/virtio_console.h"
-#include "linux/virtio_rng.h"
-#include "linux/virtio_ring.h"
-#include "asm/bootparam.h"
+#include <linux/virtio_config.h>
+#include <linux/virtio_net.h>
+#include <linux/virtio_blk.h>
+#include <linux/virtio_console.h>
+#include <linux/virtio_rng.h>
+#include <linux/virtio_ring.h>
+#include <asm/bootparam.h>
+#include "../../include/linux/lguest_launcher.h"
 /*L:110
  * We can ignore the 42 include files we need for this program, but I do want
  * to draw attention to the use of kernel-style types.
@@ -1447,14 +1447,15 @@ static void add_to_bridge(int fd, const char *if_name, const char *br_name)
 static void configure_device(int fd, const char *tapif, u32 ipaddr)
 {
        struct ifreq ifr;
-       struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;
+       struct sockaddr_in sin;
 
        memset(&ifr, 0, sizeof(ifr));
        strcpy(ifr.ifr_name, tapif);
 
        /* Don't read these incantations.  Just cut & paste them like I did! */
-       sin->sin_family = AF_INET;
-       sin->sin_addr.s_addr = htonl(ipaddr);
+       sin.sin_family = AF_INET;
+       sin.sin_addr.s_addr = htonl(ipaddr);
+       memcpy(&ifr.ifr_addr, &sin, sizeof(sin));
        if (ioctl(fd, SIOCSIFADDR, &ifr) != 0)
                err(1, "Setting %s interface address", tapif);
        ifr.ifr_flags = IFF_UP;
index c91ccc0720fa97f42a1a616fd83e23628ae85ab1..38c10fd7f4110448facd7089b985c4776d264d85 100644 (file)
@@ -9,7 +9,7 @@ firstly, there's nothing wrong with semaphores. But if the simpler
 mutex semantics are sufficient for your code, then there are a couple
 of advantages of mutexes:
 
- - 'struct mutex' is smaller on most architectures: .e.g on x86,
+ - 'struct mutex' is smaller on most architectures: E.g. on x86,
    'struct semaphore' is 20 bytes, 'struct mutex' is 16 bytes.
    A smaller structure size means less RAM footprint, and better
    CPU-cache utilization.
@@ -136,3 +136,4 @@ the APIs of 'struct mutex' have been streamlined:
  void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
  int  mutex_lock_interruptible_nested(struct mutex *lock,
                                       unsigned int subclass);
+ int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
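
The mutex-design.txt hunk above adds atomic_dec_and_mutex_lock() to the streamlined API list; a minimal sketch of the pattern it targets follows (example_put and the refcount/list names are illustrative, not from the merged code).

        #include <linux/mutex.h>
        #include <linux/list.h>
        #include <asm/atomic.h>

        /* Drop a reference; only when the last reference goes away is the lock
         * taken, the object unlinked, and the lock released again.
         * atomic_dec_and_mutex_lock() returns nonzero with the mutex held iff
         * the counter reached zero. */
        static void example_put(atomic_t *refcount, struct mutex *list_lock,
                                struct list_head *node)
        {
                if (atomic_dec_and_mutex_lock(refcount, list_lock)) {
                        list_del(node);
                        mutex_unlock(list_lock);
                }
        }
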
index ce46fa1e643e876344071d480e1e6d6d54b89856..37c6aad5e590ac1375944b8edf0656aa7b106607 100644 (file)
@@ -296,6 +296,7 @@ Conexant 5051
 Conexant 5066
 =============
   laptop       Basic Laptop config (default)
+  hp-laptop    HP laptops, e g G60
   dell-laptop  Dell laptops
   dell-vostro  Dell Vostro
   olpc-xo-1_5  OLPC XO 1.5
index 433f353857563f596df86cf95bc4b8114c45d3ea..9800de5ec22285375123299c79759a1111290ced 100644 (file)
@@ -454,6 +454,17 @@ L: linux-rdma@vger.kernel.org
 S:     Maintained
 F:     drivers/infiniband/hw/amso1100/
 
+ANALOG DEVICES INC ASOC DRIVERS
+L:     uclinux-dist-devel@blackfin.uclinux.org
+L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
+W:     http://blackfin.uclinux.org/
+S:     Supported
+F:     sound/soc/blackfin/*
+F:     sound/soc/codecs/ad1*
+F:     sound/soc/codecs/adau*
+F:     sound/soc/codecs/adav*
+F:     sound/soc/codecs/ssm*
+
 AOA (Apple Onboard Audio) ALSA DRIVER
 M:     Johannes Berg <johannes@sipsolutions.net>
 L:     linuxppc-dev@lists.ozlabs.org
@@ -1665,8 +1676,7 @@ F:        kernel/cgroup*
 F:     mm/*cgroup*
 
 CORETEMP HARDWARE MONITORING DRIVER
-M:     Rudolf Marek <r.marek@assembler.cz>
-M:     Huaxu Wan <huaxu.wan@intel.com>
+M:     Fenghua Yu <fenghua.yu@intel.com>
 L:     lm-sensors@lm-sensors.org
 S:     Maintained
 F:     Documentation/hwmon/coretemp
@@ -2191,6 +2201,12 @@ L:       linux-rdma@vger.kernel.org
 S:     Supported
 F:     drivers/infiniband/hw/ehca/
 
+EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER
+M:     Breno Leitao <leitao@linux.vnet.ibm.com>
+L:     netdev@vger.kernel.org
+S:     Maintained
+F:     drivers/net/ehea/
+
 EMBEDDED LINUX
 M:     Paul Gortmaker <paul.gortmaker@windriver.com>
 M:     Matt Mackall <mpm@selenic.com>
@@ -2286,6 +2302,12 @@ S:       Maintained
 F:     Documentation/hwmon/f71805f
 F:     drivers/hwmon/f71805f.c
 
+FANOTIFY
+M:     Eric Paris <eparis@redhat.com>
+S:     Maintained
+F:     fs/notify/fanotify/
+F:     include/linux/fanotify.h
+
 FARSYNC SYNCHRONOUS DRIVER
 M:     Kevin Curtis <kevin.curtis@farsite.co.uk>
 W:     http://www.farsite.co.uk/
@@ -2765,11 +2787,6 @@ S:       Maintained
 F:     arch/x86/kernel/hpet.c
 F:     arch/x86/include/asm/hpet.h
 
-HPET:  ACPI
-M:     Bob Picco <bob.picco@hp.com>
-S:     Maintained
-F:     drivers/char/hpet.c
-
 HPFS FILESYSTEM
 M:     Mikulas Patocka <mikulas@artax.karlin.mff.cuni.cz>
 W:     http://artax.karlin.mff.cuni.cz/~mikulas/vyplody/hpfs/index-e.cgi
@@ -3382,7 +3399,7 @@ F:        drivers/s390/kvm/
 
 KEXEC
 M:     Eric Biederman <ebiederm@xmission.com>
-W:     http://ftp.kernel.org/pub/linux/kernel/people/horms/kexec-tools/
+W:     http://kernel.org/pub/linux/utils/kernel/kexec/
 L:     kexec@lists.infradead.org
 S:     Maintained
 F:     include/linux/kexec.h
@@ -3478,7 +3495,7 @@ LGUEST
 M:     Rusty Russell <rusty@rustcorp.com.au>
 L:     lguest@lists.ozlabs.org
 W:     http://lguest.ozlabs.org/
-S:     Maintained
+S:     Odd Fixes
 F:     Documentation/lguest/
 F:     arch/x86/lguest/
 F:     drivers/lguest/
@@ -3907,8 +3924,7 @@ F:        Documentation/sound/oss/MultiSound
 F:     sound/oss/msnd*
 
 MULTITECH MULTIPORT CARD (ISICOM)
-M:     Jiri Slaby <jirislaby@gmail.com>
-S:     Maintained
+S:     Orphan
 F:     drivers/char/isicom.c
 F:     include/linux/isicom.h
 
@@ -4588,7 +4604,7 @@ F:        include/linux/preempt.h
 PRISM54 WIRELESS DRIVER
 M:     "Luis R. Rodriguez" <mcgrof@gmail.com>
 L:     linux-wireless@vger.kernel.org
-W:     http://prism54.org
+W:     http://wireless.kernel.org/en/users/Drivers/p54
 S:     Obsolete
 F:     drivers/net/wireless/prism54/
 
@@ -4789,6 +4805,7 @@ RCUTORTURE MODULE
 M:     Josh Triplett <josh@freedesktop.org>
 M:     "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
 S:     Supported
+T:     git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
 F:     Documentation/RCU/torture.txt
 F:     kernel/rcutorture.c
 
@@ -4813,6 +4830,7 @@ M:        Dipankar Sarma <dipankar@in.ibm.com>
 M:     "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
 W:     http://www.rdrop.com/users/paulmck/rclock/
 S:     Supported
+T:     git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
 F:     Documentation/RCU/
 F:     include/linux/rcu*
 F:     include/linux/srcu*
@@ -4820,12 +4838,10 @@ F:      kernel/rcu*
 F:     kernel/srcu*
 X:     kernel/rcutorture.c
 
-REAL TIME CLOCK DRIVER
+REAL TIME CLOCK DRIVER (LEGACY)
 M:     Paul Gortmaker <p_gortmaker@yahoo.com>
 S:     Maintained
-F:     Documentation/rtc.txt
-F:     drivers/rtc/
-F:     include/linux/rtc.h
+F:     drivers/char/rtc.c
 
 REAL TIME CLOCK (RTC) SUBSYSTEM
 M:     Alessandro Zummo <a.zummo@towertech.it>
index 2c0299f18d71f934af5cbc03fc047b99ce3d800c..4df9873f83b275a48037e86e058208fa9c6baffc 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 36
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Sheep on Meth
 
 # *DOCUMENTATION*
@@ -1408,8 +1408,8 @@ checkstack:
        $(OBJDUMP) -d vmlinux $$(find . -name '*.ko') | \
        $(PERL) $(src)/scripts/checkstack.pl $(CHECKSTACK_ARCH)
 
-kernelrelease: include/config/kernel.release
-       @echo $(KERNELRELEASE)
+kernelrelease:
+       @echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion $(srctree))"
 
 kernelversion:
        @echo $(KERNELVERSION)
index f199e69a5d0b0a83fd39e7d5464a5fa439c79265..ad368a93a46a6c5c7e1bf0d25ab17f08ebc19fd8 100644 (file)
@@ -17,7 +17,6 @@
 # define L1_CACHE_SHIFT     5
 #endif
 
-#define L1_CACHE_ALIGN(x)  (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
 #define SMP_CACHE_BYTES    L1_CACHE_BYTES
 
 #endif
index 52a79dfc13c6774ec1f88f2e288af1a42f774f51..5c905aaaeccd82861ea62d9186517475f0d7019c 100644 (file)
@@ -109,7 +109,7 @@ marvel_print_err_cyc(u64 err_cyc)
 #define IO7__ERR_CYC__CYCLE__M (0x7)
 
        printk("%s        Packet In Error: %s\n"
-              "%s        Error in %s, cycle %ld%s%s\n",
+              "%s        Error in %s, cycle %lld%s%s\n",
               err_print_prefix, 
               packet_desc[EXTRACT(err_cyc, IO7__ERR_CYC__PACKET)],
               err_print_prefix,
@@ -313,7 +313,7 @@ marvel_print_po7_ugbge_sym(u64 ugbge_sym)
        }
 
        printk("%s      Up Hose Garbage Symptom:\n"
-              "%s        Source Port: %ld - Dest PID: %ld - OpCode: %s\n", 
+              "%s        Source Port: %lld - Dest PID: %lld - OpCode: %s\n",
               err_print_prefix,
               err_print_prefix, 
               EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_SRC_PORT),
@@ -552,7 +552,7 @@ marvel_print_pox_spl_cmplt(u64 spl_cmplt)
 #define IO7__POX_SPLCMPLT__REM_BYTE_COUNT__M   (0xfff)
 
        printk("%s      Split Completion Error:\n"      
-              "%s         Source (Bus:Dev:Func): %ld:%ld:%ld\n",
+              "%s         Source (Bus:Dev:Func): %lld:%lld:%lld\n",
               err_print_prefix,
               err_print_prefix,
               EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__SOURCE_BUS),
index fb58150a7e8f4fb667f10ac7ae0a772b021f9b53..5d1e6d6ce6843b136fb7810e74c86df16d86daf4 100644 (file)
@@ -252,7 +252,7 @@ SYSCALL_DEFINE3(osf_statfs, const char __user *, pathname,
 
        retval = user_path(pathname, &path);
        if (!retval) {
-               retval = do_osf_statfs(&path buffer, bufsiz);
+               retval = do_osf_statfs(&path, buffer, bufsiz);
                path_put(&path);
        }
        return retval;
index 51c39fa41693bcbcbc880130a8af6e0ce411c2dc..85d8e4f58c83ce612269162b9635bd49059c39dd 100644 (file)
@@ -241,20 +241,20 @@ static inline unsigned long alpha_read_pmc(int idx)
 static int alpha_perf_event_set_period(struct perf_event *event,
                                struct hw_perf_event *hwc, int idx)
 {
-       long left = atomic64_read(&hwc->period_left);
+       long left = local64_read(&hwc->period_left);
        long period = hwc->sample_period;
        int ret = 0;
 
        if (unlikely(left <= -period)) {
                left = period;
-               atomic64_set(&hwc->period_left, left);
+               local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }
 
        if (unlikely(left <= 0)) {
                left += period;
-               atomic64_set(&hwc->period_left, left);
+               local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }
@@ -269,7 +269,7 @@ static int alpha_perf_event_set_period(struct perf_event *event,
        if (left > (long)alpha_pmu->pmc_max_period[idx])
                left = alpha_pmu->pmc_max_period[idx];
 
-       atomic64_set(&hwc->prev_count, (unsigned long)(-left));
+       local64_set(&hwc->prev_count, (unsigned long)(-left));
 
        alpha_write_pmc(idx, (unsigned long)(-left));
 
@@ -300,10 +300,10 @@ static unsigned long alpha_perf_event_update(struct perf_event *event,
        long delta;
 
 again:
-       prev_raw_count = atomic64_read(&hwc->prev_count);
+       prev_raw_count = local64_read(&hwc->prev_count);
        new_raw_count = alpha_read_pmc(idx);
 
-       if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
+       if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                             new_raw_count) != prev_raw_count)
                goto again;
 
@@ -316,8 +316,8 @@ again:
                delta += alpha_pmu->pmc_max_period[idx] + 1;
        }
 
-       atomic64_add(delta, &event->count);
-       atomic64_sub(delta, &hwc->period_left);
+       local64_add(delta, &event->count);
+       local64_sub(delta, &hwc->period_left);
 
        return new_raw_count;
 }
@@ -636,7 +636,7 @@ static int __hw_perf_event_init(struct perf_event *event)
        if (!hwc->sample_period) {
                hwc->sample_period = alpha_pmu->pmc_max_period[0];
                hwc->last_period = hwc->sample_period;
-               atomic64_set(&hwc->period_left, hwc->sample_period);
+               local64_set(&hwc->period_left, hwc->sample_period);
        }
 
        return 0;
index 3d2627ec986006e956f161b2bedcfa1c2ec32272..d3e52d3fd59299771ed3e90d0fc13f0aca9faa6f 100644 (file)
@@ -156,9 +156,6 @@ extern void SMC669_Init(int);
 /* es1888.c */
 extern void es1888_init(void);
 
-/* ns87312.c */
-extern void ns87312_enable_ide(long ide_base);
-
 /* ../lib/fpreg.c */
 extern void alpha_write_fp_reg (unsigned long reg, unsigned long val);
 extern unsigned long alpha_read_fp_reg (unsigned long reg);
index affd0f3f25df09b147c4a1e06146ff2f57e550ce..14c8898d19ec86fb8d4273dc527e7d860bcb8132 100644 (file)
@@ -33,7 +33,7 @@
 #include "irq_impl.h"
 #include "pci_impl.h"
 #include "machvec_impl.h"
-
+#include "pc873xx.h"
 
 /* Note mask bit is true for DISABLED irqs.  */
 static unsigned long cached_irq_mask = ~0UL;
@@ -235,18 +235,31 @@ cabriolet_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
        return COMMON_TABLE_LOOKUP;
 }
 
+static inline void __init
+cabriolet_enable_ide(void)
+{
+       if (pc873xx_probe() == -1) {
+               printk(KERN_ERR "Probing for PC873xx Super IO chip failed.\n");
+        } else {
+               printk(KERN_INFO "Found %s Super IO chip at 0x%x\n",
+                       pc873xx_get_model(), pc873xx_get_base());
+
+               pc873xx_enable_ide();
+       }
+}
+
 static inline void __init
 cabriolet_init_pci(void)
 {
        common_init_pci();
-       ns87312_enable_ide(0x398);
+       cabriolet_enable_ide();
 }
 
 static inline void __init
 cia_cab_init_pci(void)
 {
        cia_init_pci();
-       ns87312_enable_ide(0x398);
+       cabriolet_enable_ide();
 }
 
 /*
index 230464885b5cbe1a7683a2c76acdf1e46fbb9bac..4da596b6adbbb408ee9bb1e918b8891d3b148a29 100644 (file)
@@ -29,7 +29,7 @@
 #include "irq_impl.h"
 #include "pci_impl.h"
 #include "machvec_impl.h"
-
+#include "pc873xx.h"
 
 /* Note mask bit is true for DISABLED irqs.  */
 static unsigned long cached_irq_mask[2] = { -1, -1 };
@@ -264,7 +264,14 @@ takara_init_pci(void)
                alpha_mv.pci_map_irq = takara_map_irq_srm;
 
        cia_init_pci();
-       ns87312_enable_ide(0x26e);
+
+       if (pc873xx_probe() == -1) {
+               printk(KERN_ERR "Probing for PC873xx Super IO chip failed.\n");
+       } else {
+               printk(KERN_INFO "Found %s Super IO chip at 0x%x\n",
+                       pc873xx_get_model(), pc873xx_get_base());
+               pc873xx_enable_ide();
+       }
 }
 
 
index 92951103255a1dff3e30d237a676f8d8bfa137e3..553b7cf17bfb0bac057eaedf0402a0bca2aa47e8 100644 (file)
@@ -1576,95 +1576,6 @@ config AUTO_ZRELADDR
          0xf8000000. This assumes the zImage being placed in the first 128MB
          from start of memory.
 
-config ZRELADDR
-       hex "Physical address of the decompressed kernel image"
-       depends on !AUTO_ZRELADDR
-       default 0x00008000 if ARCH_BCMRING ||\
-               ARCH_CNS3XXX ||\
-               ARCH_DOVE ||\
-               ARCH_EBSA110 ||\
-               ARCH_FOOTBRIDGE ||\
-               ARCH_INTEGRATOR ||\
-               ARCH_IOP13XX ||\
-               ARCH_IOP33X ||\
-               ARCH_IXP2000 ||\
-               ARCH_IXP23XX ||\
-               ARCH_IXP4XX ||\
-               ARCH_KIRKWOOD ||\
-               ARCH_KS8695 ||\
-               ARCH_LOKI ||\
-               ARCH_MMP ||\
-               ARCH_MV78XX0 ||\
-               ARCH_NOMADIK ||\
-               ARCH_NUC93X ||\
-               ARCH_NS9XXX ||\
-               ARCH_ORION5X ||\
-               ARCH_SPEAR3XX ||\
-               ARCH_SPEAR6XX ||\
-               ARCH_U8500 ||\
-               ARCH_VERSATILE ||\
-               ARCH_W90X900
-       default 0x08008000 if ARCH_MX1 ||\
-               ARCH_SHARK
-       default 0x10008000 if ARCH_MSM ||\
-               ARCH_OMAP1 ||\
-               ARCH_RPC
-       default 0x20008000 if ARCH_S5P6440 ||\
-               ARCH_S5P6442 ||\
-               ARCH_S5PC100 ||\
-               ARCH_S5PV210
-       default 0x30008000 if ARCH_S3C2410 ||\
-               ARCH_S3C2400 ||\
-               ARCH_S3C2412 ||\
-               ARCH_S3C2416 ||\
-               ARCH_S3C2440 ||\
-               ARCH_S3C2443
-       default 0x40008000 if ARCH_STMP378X ||\
-               ARCH_STMP37XX ||\
-               ARCH_SH7372 ||\
-               ARCH_SH7377
-       default 0x50008000 if ARCH_S3C64XX ||\
-               ARCH_SH7367
-       default 0x60008000 if ARCH_VEXPRESS
-       default 0x80008000 if ARCH_MX25 ||\
-               ARCH_MX3 ||\
-               ARCH_NETX ||\
-               ARCH_OMAP2PLUS ||\
-               ARCH_PNX4008
-       default 0x90008000 if ARCH_MX5 ||\
-               ARCH_MX91231
-       default 0xa0008000 if ARCH_IOP32X ||\
-               ARCH_PXA ||\
-               MACH_MX27
-       default 0xc0008000 if ARCH_LH7A40X ||\
-               MACH_MX21
-       default 0xf0008000 if ARCH_AAEC2000 ||\
-               ARCH_L7200
-       default 0xc0028000 if ARCH_CLPS711X
-       default 0x70008000 if ARCH_AT91 && (ARCH_AT91CAP9 || ARCH_AT91SAM9G45)
-       default 0x20008000 if ARCH_AT91 && !(ARCH_AT91CAP9 || ARCH_AT91SAM9G45)
-       default 0xc0008000 if ARCH_DAVINCI && ARCH_DAVINCI_DA8XX
-       default 0x80008000 if ARCH_DAVINCI && !ARCH_DAVINCI_DA8XX
-       default 0x00008000 if ARCH_EP93XX && EP93XX_SDCE3_SYNC_PHYS_OFFSET
-       default 0xc0008000 if ARCH_EP93XX && EP93XX_SDCE0_PHYS_OFFSET
-       default 0xd0008000 if ARCH_EP93XX && EP93XX_SDCE1_PHYS_OFFSET
-       default 0xe0008000 if ARCH_EP93XX && EP93XX_SDCE2_PHYS_OFFSET
-       default 0xf0008000 if ARCH_EP93XX && EP93XX_SDCE3_ASYNC_PHYS_OFFSET
-       default 0x00008000 if ARCH_GEMINI && GEMINI_MEM_SWAP
-       default 0x10008000 if ARCH_GEMINI && !GEMINI_MEM_SWAP
-       default 0x70008000 if ARCH_REALVIEW && REALVIEW_HIGH_PHYS_OFFSET
-       default 0x00008000 if ARCH_REALVIEW && !REALVIEW_HIGH_PHYS_OFFSET
-       default 0xc0208000 if ARCH_SA1100 && SA1111
-       default 0xc0008000 if ARCH_SA1100 && !SA1111
-       default 0x30108000 if ARCH_S3C2410 && PM_H1940
-       default 0x28E08000 if ARCH_U300 && MACH_U300_SINGLE_RAM
-       default 0x48008000 if ARCH_U300 && !MACH_U300_SINGLE_RAM
-       help
-         ZRELADDR is the physical address where the decompressed kernel
-         image will be placed. ZRELADDR has to be specified when the
-         assumption of AUTO_ZRELADDR is not valid, or when ZBOOT_ROM is
-         selected.
-
 endmenu
 
 menu "CPU Power Management"
index f705213caa881af9c07e181c0d2a3a1a26a5d98a..4a590f4113e2af044ea1764aeb0681ad7f50a74c 100644 (file)
 MKIMAGE         := $(srctree)/scripts/mkuboot.sh
 
 ifneq ($(MACHINE),)
--include $(srctree)/$(MACHINE)/Makefile.boot
+include $(srctree)/$(MACHINE)/Makefile.boot
 endif
 
 # Note: the following conditions must always be true:
+#   ZRELADDR == virt_to_phys(PAGE_OFFSET + TEXT_OFFSET)
 #   PARAMS_PHYS must be within 4MB of ZRELADDR
 #   INITRD_PHYS must be in RAM
+ZRELADDR    := $(zreladdr-y)
 PARAMS_PHYS := $(params_phys-y)
 INITRD_PHYS := $(initrd_phys-y)
 
-export INITRD_PHYS PARAMS_PHYS
+export ZRELADDR INITRD_PHYS PARAMS_PHYS
 
 targets := Image zImage xipImage bootpImage uImage
 
@@ -65,7 +67,7 @@ quiet_cmd_uimage = UIMAGE  $@
 ifeq ($(CONFIG_ZBOOT_ROM),y)
 $(obj)/uImage: LOADADDR=$(CONFIG_ZBOOT_ROM_TEXT)
 else
-$(obj)/uImage: LOADADDR=$(CONFIG_ZRELADDR)
+$(obj)/uImage: LOADADDR=$(ZRELADDR)
 endif
 
 ifeq ($(CONFIG_THUMB2_KERNEL),y)
index 68775e33476c2fafb4c20d88f7f676c836a8edc1..b23f6bc46cfa1dd1029bb53dc7009c3c790f1088 100644 (file)
@@ -79,6 +79,10 @@ endif
 EXTRA_CFLAGS  := -fpic -fno-builtin
 EXTRA_AFLAGS  := -Wa,-march=all
 
+# Supply ZRELADDR to the decompressor via a linker symbol.
+ifneq ($(CONFIG_AUTO_ZRELADDR),y)
+LDFLAGS_vmlinux := --defsym zreladdr=$(ZRELADDR)
+endif
 ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
 LDFLAGS_vmlinux += --be8
 endif
index 6af9907c3b5ccad2ae2d73e37f5470c5b2f6b897..6825c34646d4e02f24b0eefe7ab4e012bca05208 100644 (file)
@@ -177,7 +177,7 @@ not_angel:
                and     r4, pc, #0xf8000000
                add     r4, r4, #TEXT_OFFSET
 #else
-               ldr     r4, =CONFIG_ZRELADDR
+               ldr     r4, =zreladdr
 #endif
                subs    r0, r0, r1              @ calculate the delta offset
 
index 6c091356245593b87860d2ccb6221650fc62855b..7974baacafcea74ec055a46ee0f6cea496f24e6f 100644 (file)
@@ -263,6 +263,14 @@ static int it8152_pci_platform_notify_remove(struct device *dev)
        return 0;
 }
 
+int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
+{
+       dev_dbg(dev, "%s: dma_addr %08x, size %08x\n",
+               __func__, dma_addr, size);
+       return (dev->bus == &pci_bus_type) &&
+               ((dma_addr + size - PHYS_OFFSET) >= SZ_64M);
+}
+
 int __init it8152_pci_setup(int nr, struct pci_sys_data *sys)
 {
        it8152_io.start = IT8152_IO_BASE + 0x12000;
index 63e0c2d50f324c6eb7ed3c1ff84ded9930f12538..14c1e18c648f7340c243bd821645df57f955f950 100644 (file)
@@ -13,6 +13,9 @@ CONFIG_MODULE_SRCVERSION_ALL=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_ARCH_OMAP=y
 CONFIG_ARCH_OMAP4=y
+# CONFIG_ARCH_OMAP2PLUS_TYPICAL is not set
+# CONFIG_ARCH_OMAP2 is not set
+# CONFIG_ARCH_OMAP3 is not set
 # CONFIG_OMAP_MUX is not set
 CONFIG_OMAP_32K_TIMER=y
 CONFIG_OMAP_DM_TIMER=y
index c226fe10553e2952ec982ef3ec5fcaf8538586e0..c568da7dcae45e60e8630e2b3060599f561d6555 100644 (file)
@@ -288,15 +288,7 @@ extern void dmabounce_unregister_dev(struct device *);
  * DMA access and 1 if the buffer needs to be bounced.
  *
  */
-#ifdef CONFIG_SA1111
 extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
-#else
-static inline int dma_needs_bounce(struct device *dev, dma_addr_t addr,
-                                  size_t size)
-{
-       return 0;
-}
-#endif
 
 /*
  * The DMA API, implemented by dmabounce.c.  See below for descriptions.
index 48837e6d888722dc96f594247a80026bf9b75e29..b5799a3b7117d5480eec6456008f79e01d1064ae 100644 (file)
@@ -17,7 +17,7 @@
  * counter interrupts are regular interrupts and not an NMI. This
  * means that when we receive the interrupt we can call
  * perf_event_do_pending() that handles all of the work with
- * interrupts enabled.
+ * interrupts disabled.
  */
 static inline void
 set_perf_event_pending(void)
index d02cfb683487eeafea4ef407a1a4e6f2d4ce4112..c891eb76c0e313406847e7b9fbe968bb1b8fa459 100644 (file)
 #define __NR_perf_event_open           (__NR_SYSCALL_BASE+364)
 #define __NR_recvmmsg                  (__NR_SYSCALL_BASE+365)
 #define __NR_accept4                   (__NR_SYSCALL_BASE+366)
+#define __NR_fanotify_init             (__NR_SYSCALL_BASE+367)
+#define __NR_fanotify_mark             (__NR_SYSCALL_BASE+368)
+#define __NR_prlimit64                 (__NR_SYSCALL_BASE+369)
 
 /*
  * The following SWIs are ARM private.
index afeb71fa72cb81fc0e2fb5652c653ef34e7258bb..5c26eccef9982665b1e1672416b9bc996f3b2dae 100644 (file)
                CALL(sys_perf_event_open)
 /* 365 */      CALL(sys_recvmmsg)
                CALL(sys_accept4)
+               CALL(sys_fanotify_init)
+               CALL(sys_fanotify_mark)
+               CALL(sys_prlimit64)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
index 56418f98cd016e9e3668e15c4473c9ba43bf1ca6..33c7077174db118175a1818228b900f56226b45f 100644 (file)
@@ -230,7 +230,7 @@ static void etm_dump(void)
        etb_lock(t);
 }
 
-static void sysrq_etm_dump(int key, struct tty_struct *tty)
+static void sysrq_etm_dump(int key)
 {
        dev_dbg(tracer.dev, "Dumping ETB buffer\n");
        etm_dump();
index 417c392ddf1cb55066fa5f99e83e77514bd89901..ecbb0288e5dd95c80b420635dffee6ef600f86a8 100644 (file)
@@ -319,8 +319,8 @@ validate_event(struct cpu_hw_events *cpuc,
 {
        struct hw_perf_event fake_event = event->hw;
 
-       if (event->pmu && event->pmu != &pmu)
-               return 0;
+       if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
+               return 1;
 
        return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
 }
@@ -1041,8 +1041,8 @@ armv6pmu_handle_irq(int irq_num,
        /*
         * Handle the pending perf events.
         *
-        * Note: this call *must* be run with interrupts enabled. For
-        * platforms that can have the PMU interrupts raised as a PMI, this
+        * Note: this call *must* be run with interrupts disabled. For
+        * platforms that can have the PMU interrupts raised as an NMI, this
         * will not work.
         */
        perf_event_do_pending();
@@ -2017,8 +2017,8 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
        /*
         * Handle the pending perf events.
         *
-        * Note: this call *must* be run with interrupts enabled. For
-        * platforms that can have the PMU interrupts raised as a PMI, this
+        * Note: this call *must* be run with interrupts disabled. For
+        * platforms that can have the PMU interrupts raised as an NMI, this
         * will not work.
         */
        perf_event_do_pending();
index 753c0d31a3d3f0b407cfe20b68f5ba66189cd1a7..c67b47f1c0fd805751cf226a65315337a2507942 100644 (file)
@@ -121,8 +121,8 @@ static struct clk ssc1_clk = {
        .pmc_mask       = 1 << AT91SAM9G45_ID_SSC1,
        .type           = CLK_TYPE_PERIPHERAL,
 };
-static struct clk tcb_clk = {
-       .name           = "tcb_clk",
+static struct clk tcb0_clk = {
+       .name           = "tcb0_clk",
        .pmc_mask       = 1 << AT91SAM9G45_ID_TCB,
        .type           = CLK_TYPE_PERIPHERAL,
 };
@@ -192,6 +192,14 @@ static struct clk ohci_clk = {
        .parent         = &uhphs_clk,
 };
 
+/* One additional fake clock for second TC block */
+static struct clk tcb1_clk = {
+       .name           = "tcb1_clk",
+       .pmc_mask       = 0,
+       .type           = CLK_TYPE_PERIPHERAL,
+       .parent         = &tcb0_clk,
+};
+
 static struct clk *periph_clocks[] __initdata = {
        &pioA_clk,
        &pioB_clk,
@@ -208,7 +216,7 @@ static struct clk *periph_clocks[] __initdata = {
        &spi1_clk,
        &ssc0_clk,
        &ssc1_clk,
-       &tcb_clk,
+       &tcb0_clk,
        &pwm_clk,
        &tsc_clk,
        &dma_clk,
@@ -221,6 +229,7 @@ static struct clk *periph_clocks[] __initdata = {
        &mmc1_clk,
        // irq0
        &ohci_clk,
+       &tcb1_clk,
 };
 
 /*
index 809114d5a5a6690ec3c4f510bb6cb88fc7fede99..5e71ccd5e7d381ca211d1f73653351bdca814c6b 100644 (file)
@@ -46,7 +46,7 @@ static struct resource hdmac_resources[] = {
                .end    = AT91_BASE_SYS + AT91_DMA + SZ_512 - 1,
                .flags  = IORESOURCE_MEM,
        },
-       [2] = {
+       [1] = {
                .start  = AT91SAM9G45_ID_DMA,
                .end    = AT91SAM9G45_ID_DMA,
                .flags  = IORESOURCE_IRQ,
@@ -835,9 +835,9 @@ static struct platform_device at91sam9g45_tcb1_device = {
 static void __init at91_add_device_tc(void)
 {
        /* this chip has one clock and irq for all six TC channels */
-       at91_clock_associate("tcb_clk", &at91sam9g45_tcb0_device.dev, "t0_clk");
+       at91_clock_associate("tcb0_clk", &at91sam9g45_tcb0_device.dev, "t0_clk");
        platform_device_register(&at91sam9g45_tcb0_device);
-       at91_clock_associate("tcb_clk", &at91sam9g45_tcb1_device.dev, "t0_clk");
+       at91_clock_associate("tcb1_clk", &at91sam9g45_tcb1_device.dev, "t0_clk");
        platform_device_register(&at91sam9g45_tcb1_device);
 }
 #else
index c4c8865d52d7bfd84f5a22416ef7fe6bc1c472aa..65eb0943194f4b0ea4ea65613435ceef4d875c75 100644 (file)
@@ -93,11 +93,12 @@ static struct resource dm9000_resource[] = {
                .start  = AT91_PIN_PC11,
                .end    = AT91_PIN_PC11,
                .flags  = IORESOURCE_IRQ
+                       | IORESOURCE_IRQ_LOWEDGE | IORESOURCE_IRQ_HIGHEDGE,
        }
 };
 
 static struct dm9000_plat_data dm9000_platdata = {
-       .flags          = DM9000_PLATF_16BITONLY,
+       .flags          = DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM,
 };
 
 static struct platform_device dm9000_device = {
@@ -167,17 +168,6 @@ static struct at91_udc_data __initdata ek_udc_data = {
 };
 
 
-/*
- * MCI (SD/MMC)
- */
-static struct at91_mmc_data __initdata ek_mmc_data = {
-       .wire4          = 1,
-//     .det_pin        = ... not connected
-//     .wp_pin         = ... not connected
-//     .vcc_pin        = ... not connected
-};
-
-
 /*
  * NAND flash
  */
@@ -246,6 +236,10 @@ static void __init ek_add_device_nand(void)
        at91_add_device_nand(&ek_nand_data);
 }
 
+/*
+ * SPI related devices
+ */
+#if defined(CONFIG_SPI_ATMEL) || defined(CONFIG_SPI_ATMEL_MODULE)
 
 /*
  * ADS7846 Touchscreen
@@ -356,6 +350,19 @@ static struct spi_board_info ek_spi_devices[] = {
 #endif
 };
 
+#else /* CONFIG_SPI_ATMEL_* */
+/* spi0 and mmc/sd share the same PIO pins: cannot be used at the same time */
+
+/*
+ * MCI (SD/MMC)
+ * det_pin, wp_pin and vcc_pin are not connected
+ */
+static struct at91_mmc_data __initdata ek_mmc_data = {
+       .wire4          = 1,
+};
+
+#endif /* CONFIG_SPI_ATMEL_* */
+
 
 /*
  * LCD Controller
index 7f7da439341fabc4e85b6febeb8b1f3e2cd6f4ee..7525cee3983f7252fac0542be5955a490ca6869b 100644 (file)
@@ -501,7 +501,8 @@ postcore_initcall(at91_clk_debugfs_init);
 int __init clk_register(struct clk *clk)
 {
        if (clk_is_peripheral(clk)) {
-               clk->parent = &mck;
+               if (!clk->parent)
+                       clk->parent = &mck;
                clk->mode = pmc_periph_mode;
                list_add_tail(&clk->node, &clocks);
        }
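
The clk_register() change above only falls back to the master clock (mck) when a peripheral clock arrives without a parent, which is what lets the "fake" tcb1_clk earlier in this commit inherit tcb0_clk. A minimal user-space model of that fallback, with illustrative names only:

#include <stdio.h>

struct clk {
	const char *name;
	struct clk *parent;
};

static struct clk mck      = { .name = "mck" };
static struct clk tcb0_clk = { .name = "tcb0_clk" };
/* the fake second TC block clock arrives with its parent preset */
static struct clk tcb1_clk = { .name = "tcb1_clk", .parent = &tcb0_clk };

static void register_peripheral(struct clk *clk)
{
	if (!clk->parent)		/* same test as the patched clk_register() */
		clk->parent = &mck;
	printf("%-9s -> parent %s\n", clk->name, clk->parent->name);
}

int main(void)
{
	register_peripheral(&tcb0_clk);	/* no parent given: falls back to mck */
	register_peripheral(&tcb1_clk);	/* keeps tcb0_clk */
	return 0;
}
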
index 8bf3cec98cfadba46d8ca1816c7aff5b8a871bbf..4566bd1c8660b3fe7ac0cff28473746fd3d34c82 100644 (file)
@@ -560,4 +560,4 @@ static int __init ep93xx_clock_init(void)
        clkdev_add_table(clocks, ARRAY_SIZE(clocks));
        return 0;
 }
-arch_initcall(ep93xx_clock_init);
+postcore_initcall(ep93xx_clock_init);
index 575ff1ae85a738f84cf48720d19e850b96caa0e5..339150ab0ea5d63e13f0520e06612fc827f37f49 100644 (file)
@@ -279,13 +279,13 @@ static void __init eukrea_cpuimx27_init(void)
 #if defined(CONFIG_USB_ULPI)
        if (otg_mode_host) {
                otg_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
-                               USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
+                               ULPI_OTG_DRVVBUS | ULPI_OTG_DRVVBUS_EXT);
 
                mxc_register_device(&mxc_otg_host, &otg_pdata);
        }
 
        usbh2_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
-                               USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
+                               ULPI_OTG_DRVVBUS | ULPI_OTG_DRVVBUS_EXT);
 
        mxc_register_device(&mxc_usbh2, &usbh2_pdata);
 #endif
index a389d1148f18c641cd1c997f4798b6641e084bdc..23c9e1f37b9c022bee669908e7aeca791de3b4b4 100644 (file)
@@ -419,13 +419,13 @@ static void __init pca100_init(void)
 #if defined(CONFIG_USB_ULPI)
        if (otg_mode_host) {
                otg_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
-                               USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
+                               ULPI_OTG_DRVVBUS | ULPI_OTG_DRVVBUS_EXT);
 
                mxc_register_device(&mxc_otg_host, &otg_pdata);
        }
 
        usbh2_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
-                               USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
+                               ULPI_OTG_DRVVBUS | ULPI_OTG_DRVVBUS_EXT);
 
        mxc_register_device(&mxc_usbh2, &usbh2_pdata);
 #endif
index 91931dcb068997d540dd2a133001eed458cbc81b..4aaadc753d3e6ff4e60b88c17e62e5b3c0cfb812 100644 (file)
@@ -215,7 +215,7 @@ struct imx_ssi_platform_data eukrea_mbimxsd_ssi_pdata = {
  * Add platform devices present on this baseboard and init
  * them from CPU side as far as required to use them later on
  */
-void __init eukrea_mbimxsd_baseboard_init(void)
+void __init eukrea_mbimxsd25_baseboard_init(void)
 {
        if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads,
                        ARRAY_SIZE(eukrea_mbimxsd_pads)))
index 56b2e26d23b4eda6a178e6ee633eae2526bd3186..e064bb3d69197b8ddee286eda06dee980052712d 100644 (file)
@@ -138,7 +138,7 @@ static void __init eukrea_cpuimx25_init(void)
 #if defined(CONFIG_USB_ULPI)
        if (otg_mode_host) {
                otg_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
-                               USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
+                               ULPI_OTG_DRVVBUS | ULPI_OTG_DRVVBUS_EXT);
 
                mxc_register_device(&mxc_otg, &otg_pdata);
        }
@@ -147,8 +147,8 @@ static void __init eukrea_cpuimx25_init(void)
        if (!otg_mode_host)
                mxc_register_device(&otg_udc_device, &otg_device_pdata);
 
-#ifdef CONFIG_MACH_EUKREA_MBIMXSD_BASEBOARD
-       eukrea_mbimxsd_baseboard_init();
+#ifdef CONFIG_MACH_EUKREA_MBIMXSD25_BASEBOARD
+       eukrea_mbimxsd25_baseboard_init();
 #endif
 }
 
index d3af0fdf8475f7ef0d67b3afbb080df739c36431..7a62e744a8b0fcbc5eef1da9645bf663199608f8 100644 (file)
@@ -155,7 +155,7 @@ static unsigned long get_rate_arm(void)
 
        aad = &clk_consumer[(pdr0 >> 16) & 0xf];
        if (aad->sel)
-               fref = fref * 2 / 3;
+               fref = fref * 3 / 4;
 
        return fref / aad->arm;
 }
@@ -164,7 +164,7 @@ static unsigned long get_rate_ahb(struct clk *clk)
 {
        unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0);
        struct arm_ahb_div *aad;
-       unsigned long fref = get_rate_mpll();
+       unsigned long fref = get_rate_arm();
 
        aad = &clk_consumer[(pdr0 >> 16) & 0xf];
 
@@ -176,16 +176,11 @@ static unsigned long get_rate_ipg(struct clk *clk)
        return get_rate_ahb(NULL) >> 1;
 }
 
-static unsigned long get_3_3_div(unsigned long in)
-{
-       return (((in >> 3) & 0x7) + 1) * ((in & 0x7) + 1);
-}
-
 static unsigned long get_rate_uart(struct clk *clk)
 {
        unsigned long pdr3 = __raw_readl(CCM_BASE + CCM_PDR3);
        unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4);
-       unsigned long div = get_3_3_div(pdr4 >> 10);
+       unsigned long div = ((pdr4 >> 10) & 0x3f) + 1;
 
        if (pdr3 & (1 << 14))
                return get_rate_arm() / div;
@@ -216,7 +211,7 @@ static unsigned long get_rate_sdhc(struct clk *clk)
                break;
        }
 
-       return rate / get_3_3_div(div);
+       return rate / (div + 1);
 }
 
 static unsigned long get_rate_mshc(struct clk *clk)
@@ -270,7 +265,7 @@ static unsigned long get_rate_csi(struct clk *clk)
        else
                rate = get_rate_ppll();
 
-       return rate / get_3_3_div((pdr2 >> 16) & 0x3f);
+       return rate / (((pdr2 >> 16) & 0x3f) + 1);
 }
 
 static unsigned long get_rate_otg(struct clk *clk)
@@ -283,25 +278,51 @@ static unsigned long get_rate_otg(struct clk *clk)
        else
                rate = get_rate_ppll();
 
-       return rate / get_3_3_div((pdr4 >> 22) & 0x3f);
+       return rate / (((pdr4 >> 22) & 0x3f) + 1);
 }
 
 static unsigned long get_rate_ipg_per(struct clk *clk)
 {
        unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0);
        unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4);
-       unsigned long div1, div2;
+       unsigned long div;
 
        if (pdr0 & (1 << 26)) {
-               div1 = (pdr4 >> 19) & 0x7;
-               div2 = (pdr4 >> 16) & 0x7;
-               return get_rate_arm() / ((div1 + 1) * (div2 + 1));
+               div = (pdr4 >> 16) & 0x3f;
+               return get_rate_arm() / (div + 1);
        } else {
-               div1 = (pdr0 >> 12) & 0x7;
-               return get_rate_ahb(NULL) / div1;
+               div = (pdr0 >> 12) & 0x7;
+               return get_rate_ahb(NULL) / (div + 1);
        }
 }
 
+static unsigned long get_rate_hsp(struct clk *clk)
+{
+       unsigned long hsp_podf = (__raw_readl(CCM_BASE + CCM_PDR0) >> 20) & 0x03;
+       unsigned long fref = get_rate_mpll();
+
+       if (fref > 400 * 1000 * 1000) {
+               switch (hsp_podf) {
+               case 0:
+                       return fref >> 2;
+               case 1:
+                       return fref >> 3;
+               case 2:
+                       return fref / 3;
+               }
+       } else {
+               switch (hsp_podf) {
+               case 0:
+               case 2:
+                       return fref / 3;
+               case 1:
+                       return fref / 6;
+               }
+       }
+
+       return 0;
+}
+
 static int clk_cgr_enable(struct clk *clk)
 {
        u32 reg;
@@ -359,7 +380,7 @@ DEFINE_CLOCK(i2c1_clk,   0, CCM_CGR1, 10, get_rate_ipg_per, NULL);
 DEFINE_CLOCK(i2c2_clk,   1, CCM_CGR1, 12, get_rate_ipg_per, NULL);
 DEFINE_CLOCK(i2c3_clk,   2, CCM_CGR1, 14, get_rate_ipg_per, NULL);
 DEFINE_CLOCK(iomuxc_clk, 0, CCM_CGR1, 16, NULL, NULL);
-DEFINE_CLOCK(ipu_clk,    0, CCM_CGR1, 18, get_rate_ahb, NULL);
+DEFINE_CLOCK(ipu_clk,    0, CCM_CGR1, 18, get_rate_hsp, NULL);
 DEFINE_CLOCK(kpp_clk,    0, CCM_CGR1, 20, get_rate_ipg, NULL);
 DEFINE_CLOCK(mlb_clk,    0, CCM_CGR1, 22, get_rate_ahb, NULL);
 DEFINE_CLOCK(mshc_clk,   0, CCM_CGR1, 24, get_rate_mshc, NULL);
@@ -485,10 +506,10 @@ static struct clk_lookup lookups[] = {
 
 int __init mx35_clocks_init()
 {
-       unsigned int ll = 0;
+       unsigned int cgr2 = 3 << 26, cgr3 = 0;
 
 #if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
-       ll = (3 << 16);
+       cgr2 |= 3 << 16;
 #endif
 
        clkdev_add_table(lookups, ARRAY_SIZE(lookups));
@@ -499,8 +520,20 @@ int __init mx35_clocks_init()
        __raw_writel((3 << 18), CCM_BASE + CCM_CGR0);
        __raw_writel((3 << 2) | (3 << 4) | (3 << 6) | (3 << 8) | (3 << 16),
                        CCM_BASE + CCM_CGR1);
-       __raw_writel((3 << 26) | ll, CCM_BASE + CCM_CGR2);
-       __raw_writel(0, CCM_BASE + CCM_CGR3);
+
+       /*
+        * Check if we came up in internal boot mode. If yes, we need some
+        * extra clocks turned on, otherwise the MX35 boot ROM code will
+        * hang after a watchdog reset.
+        */
+       if (!(__raw_readl(CCM_BASE + CCM_RCSR) & (3 << 10))) {
+               /* Additionally turn on UART1, SCC, and IIM clocks */
+               cgr2 |= 3 << 16 | 3 << 4;
+               cgr3 |= 3 << 2;
+       }
+
+       __raw_writel(cgr2, CCM_BASE + CCM_CGR2);
+       __raw_writel(cgr3, CCM_BASE + CCM_CGR3);
 
        mxc_timer_init(&gpt_clk,
                        MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR), MX35_INT_GPT);
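
A stand-alone comparison of the divider handling replaced above: the removed get_3_3_div() treated the PDR field as two 3-bit dividers multiplied together, while the new code reads the same bits as a single 6-bit divider plus one. The field value below is made up for illustration:

#include <stdio.h>

/* old interpretation: two 3-bit dividers, each biased by one */
static unsigned long old_3_3_div(unsigned long in)
{
	return (((in >> 3) & 0x7) + 1) * ((in & 0x7) + 1);
}

/* new interpretation: one 6-bit divider, biased by one */
static unsigned long new_div(unsigned long in)
{
	return (in & 0x3f) + 1;
}

int main(void)
{
	unsigned long field = 0x0b;	/* hypothetical PDR divider field */

	printf("field 0x%02lx: old /%lu, new /%lu\n",
	       field, old_3_3_div(field), new_div(field));
	return 0;
}
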
index 1dc5004df866d4e9df7d817aca27e658573f8c6c..f8f15e3ac7a0e82a6cde9f6bd021c85a546bbae7 100644 (file)
@@ -216,7 +216,7 @@ struct imx_ssi_platform_data eukrea_mbimxsd_ssi_pdata = {
  * Add platform devices present on this baseboard and init
  * them from CPU side as far as required to use them later on
  */
-void __init eukrea_mbimxsd_baseboard_init(void)
+void __init eukrea_mbimxsd35_baseboard_init(void)
 {
        if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads,
                        ARRAY_SIZE(eukrea_mbimxsd_pads)))
index 63f970f340a2cc28482ea1484a248fade05098e8..2a4f8b781ba4c60a4d66f554d860953fcafe1cf0 100644 (file)
@@ -192,7 +192,7 @@ static void __init mxc_board_init(void)
 #if defined(CONFIG_USB_ULPI)
        if (otg_mode_host) {
                otg_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
-                               USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
+                               ULPI_OTG_DRVVBUS | ULPI_OTG_DRVVBUS_EXT);
 
                mxc_register_device(&mxc_otg_host, &otg_pdata);
        }
@@ -201,8 +201,8 @@ static void __init mxc_board_init(void)
        if (!otg_mode_host)
                mxc_register_device(&mxc_otg_udc_device, &otg_device_pdata);
 
-#ifdef CONFIG_MACH_EUKREA_MBIMXSD_BASEBOARD
-       eukrea_mbimxsd_baseboard_init();
+#ifdef CONFIG_MACH_EUKREA_MBIMXSD35_BASEBOARD
+       eukrea_mbimxsd35_baseboard_init();
 #endif
 }
 
index 6af69def357f92d2f177d19d8fc7bce330ff5666..57c10a9926cc5056668645ff39fe3ac07fed9969 100644 (file)
@@ -56,7 +56,7 @@ static void _clk_ccgr_disable(struct clk *clk)
 {
        u32 reg;
        reg = __raw_readl(clk->enable_reg);
-       reg &= ~(MXC_CCM_CCGRx_MOD_OFF << clk->enable_shift);
+       reg &= ~(MXC_CCM_CCGRx_CG_MASK << clk->enable_shift);
        __raw_writel(reg, clk->enable_reg);
 
 }
index 63b2d8859c3c291af8e8af729f63d8302ad4915e..88d3a1e920f583110a88aa8c90bbe7903b098f8f 100644 (file)
@@ -25,6 +25,7 @@ obj-$(CONFIG_LOCAL_TIMERS)            += timer-mpu.o
 obj-$(CONFIG_HOTPLUG_CPU)              += omap-hotplug.o
 obj-$(CONFIG_ARCH_OMAP4)               += omap44xx-smc.o omap4-common.o
 
+AFLAGS_omap-headsmp.o                  :=-Wa,-march=armv7-a
 AFLAGS_omap44xx-smc.o                  :=-Wa,-march=armv7-a
 
 # Functions loaded to SRAM
index 138646deac8932210cc6168ab647444908c92b85..dfdce2d8277992b3e6ccb9fcfbc247e3b8df3708 100644 (file)
@@ -3417,7 +3417,13 @@ int __init omap3xxx_clk_init(void)
        struct omap_clk *c;
        u32 cpu_clkflg = CK_3XXX;
 
-       if (cpu_is_omap34xx()) {
+       if (cpu_is_omap3517()) {
+               cpu_mask = RATE_IN_3XXX | RATE_IN_3430ES2PLUS;
+               cpu_clkflg |= CK_3517;
+       } else if (cpu_is_omap3505()) {
+               cpu_mask = RATE_IN_3XXX | RATE_IN_3430ES2PLUS;
+               cpu_clkflg |= CK_3505;
+       } else if (cpu_is_omap34xx()) {
                cpu_mask = RATE_IN_3XXX;
                cpu_clkflg |= CK_343X;
 
@@ -3432,12 +3438,6 @@ int __init omap3xxx_clk_init(void)
                        cpu_mask |= RATE_IN_3430ES2PLUS;
                        cpu_clkflg |= CK_3430ES2;
                }
-       } else if (cpu_is_omap3517()) {
-               cpu_mask = RATE_IN_3XXX | RATE_IN_3430ES2PLUS;
-               cpu_clkflg |= CK_3517;
-       } else if (cpu_is_omap3505()) {
-               cpu_mask = RATE_IN_3XXX | RATE_IN_3430ES2PLUS;
-               cpu_clkflg |= CK_3505;
        }
 
        if (omap3_has_192mhz_clk())
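
The reordering above appears to matter because the 3517/3505 tests are more specific than cpu_is_omap34xx(); if the generic 34xx branch runs first it swallows those parts and CK_3517/CK_3505 are never set (an assumption based on the shape of the change). A toy model of that specific-before-general ordering, with invented helpers:

#include <stdio.h>
#include <stdbool.h>

struct chip { bool is_3517; };

/* invented stand-ins for the cpu_is_*() tests */
static bool cpu_is_omap3517(const struct chip *c) { return c->is_3517; }
static bool cpu_is_omap34xx(const struct chip *c) { (void)c; return true; }

static const char *clkflg(const struct chip *c)
{
	if (cpu_is_omap3517(c))		/* specific check first... */
		return "CK_3517";
	else if (cpu_is_omap34xx(c))	/* ...so the generic one cannot shadow it */
		return "CK_343X";
	return "CK_3XXX";
}

int main(void)
{
	struct chip am3517 = { .is_3517 = true };

	printf("am3517 -> %s\n", clkflg(&am3517));	/* CK_3517, not CK_343X */
	return 0;
}
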
index e8256a2ed8e782dd56a7bfa674b98886a9f63e71..9a879f9595098dd5e3fcb303b8d205af744d3a28 100644 (file)
@@ -284,8 +284,8 @@ static void __init omap3_check_revision(void)
                default:
                        omap_revision =  OMAP3630_REV_ES1_2;
                        omap_chip.oc |= CHIP_IS_OMAP3630ES1_2;
-                       break;
                }
+               break;
        default:
                /* Unknown default to latest silicon rev as default*/
                omap_revision =  OMAP3630_REV_ES1_2;
index 50fd749166433f7ebadfa492a4ea136718660e7e..06e64e1fc28a7e18957ff0bb9fe620e7487f7284 100644 (file)
@@ -177,7 +177,10 @@ omap_irq_base:     .word   0
                cmpne   \irqnr, \tmp
                cmpcs   \irqnr, \irqnr
                .endm
+#endif
+#endif /* MULTI_OMAP2 */
 
+#ifdef CONFIG_SMP
                /* We assume that irqstat (the raw value of the IRQ acknowledge
                 * register) is preserved from the macro above.
                 * If there is an IPI, we immediately signal end of interrupt
@@ -205,8 +208,7 @@ omap_irq_base:      .word   0
                streq   \irqstat, [\base, #GIC_CPU_EOI]
                cmp     \tmp, #0
                .endm
-#endif
-#endif /* MULTI_OMAP2 */
+#endif /* CONFIG_SMP */
 
                .macro  irq_prio_table
                .endm
index af3c20c8d3f9202e742068f8ba1e57c911e3c265..9e9f70e18e3c95436cf6b957e2791175a3cdc90a 100644 (file)
@@ -102,8 +102,7 @@ static void __init wakeup_secondary(void)
         * Send a 'sev' to wake the secondary core from WFE.
         * Drain the outstanding writes to memory
         */
-       dsb();
-       set_event();
+       dsb_sev();
        mb();
 }
 
index fb4994ad622ec469aaec34665a98af6250d29dc8..7b03426c72a317307db1df3931a5018443117b33 100644 (file)
@@ -480,7 +480,9 @@ void omap_sram_idle(void)
        }
 
        /* Disable IO-PAD and IO-CHAIN wakeup */
-       if (omap3_has_io_wakeup() && core_next_state < PWRDM_POWER_ON) {
+       if (omap3_has_io_wakeup() &&
+           (per_next_state < PWRDM_POWER_ON ||
+            core_next_state < PWRDM_POWER_ON)) {
                prm_clear_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN);
                omap3_disable_io_chain();
        }
index 268a9bc6be8a22a4ca0ac4fed742d035753581a3..50d5939a78f1bdc8f318f08360243626a14a6105 100644 (file)
@@ -398,7 +398,7 @@ static int pxa_set_target(struct cpufreq_policy *policy,
        return 0;
 }
 
-static __init int pxa_cpufreq_init(struct cpufreq_policy *policy)
+static int pxa_cpufreq_init(struct cpufreq_policy *policy)
 {
        int i;
        unsigned int freq;
index 27fa329d9a8b7a5677c2cf75e25900797130eb46..0a0d0fe99220d7f450e61dc04495d8dfe4492be3 100644 (file)
@@ -204,7 +204,7 @@ static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy,
        return 0;
 }
 
-static __init int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
+static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
 {
        int ret = -EINVAL;
 
index 7139e0dc26d16062304bd72beca1f339e61899e7..4e1287070d219c32235aebadfbf2d97df3d059fb 100644 (file)
 #define GPIO46_CI_DD_7         MFP_CFG_DRV(GPIO46, AF0, DS04X)
 #define GPIO47_CI_DD_8         MFP_CFG_DRV(GPIO47, AF1, DS04X)
 #define GPIO48_CI_DD_9         MFP_CFG_DRV(GPIO48, AF1, DS04X)
-#define GPIO52_CI_HSYNC                MFP_CFG_DRV(GPIO52, AF0, DS04X)
-#define GPIO51_CI_VSYNC                MFP_CFG_DRV(GPIO51, AF0, DS04X)
 #define GPIO49_CI_MCLK         MFP_CFG_DRV(GPIO49, AF0, DS04X)
 #define GPIO50_CI_PCLK         MFP_CFG_DRV(GPIO50, AF0, DS04X)
+#define GPIO51_CI_HSYNC                MFP_CFG_DRV(GPIO51, AF0, DS04X)
+#define GPIO52_CI_VSYNC                MFP_CFG_DRV(GPIO52, AF0, DS04X)
 
 /* KEYPAD */
 #define GPIO3_KP_DKIN_6                MFP_CFG_LPM(GPIO3,   AF2, FLOAT)
index 315b0078a34d68e9f020157721cf26b836817993..54297eb0bf5ea437103cd104106a8b6340a895f9 100644 (file)
@@ -15,6 +15,6 @@
 #ifndef __ASM_ARCH_VMALLOC_H
 #define __ASM_ARCH_VMALLOC_H
 
-#define VMALLOC_END      (0xE0000000)
+#define VMALLOC_END    0xE0000000UL
 
 #endif /* __ASM_ARCH_VMALLOC_H */
index 7411ef3711a6e047ad37ba8a2fe617f8381de646..bc0e913898642ef3f374188f4d928cc56a83f0a5 100644 (file)
@@ -15,6 +15,6 @@
 #ifndef __ASM_ARCH_VMALLOC_H
 #define __ASM_ARCH_VMALLOC_H
 
-#define VMALLOC_END      (0xE0000000)
+#define VMALLOC_END    0xE0000000UL
 
 #endif /* __ASM_ARCH_VMALLOC_H */
index 16df257b1dce7a21610afe84b8be1f4b9011dc0c..e3f0eebf5205b592724f9de4b5cd5fda2670396c 100644 (file)
@@ -12,6 +12,6 @@
 #ifndef __ASM_ARCH_VMALLOC_H
 #define __ASM_ARCH_VMALLOC_H
 
-#define VMALLOC_END      (0xE0000000)
+#define VMALLOC_END    0xE0000000UL
 
 #endif /* __ASM_ARCH_VMALLOC_H */
index be3333688c20d98015eba94704c1ad84502d7b6c..f5c83f02c18efe57aed0b021779d160469dcc86f 100644 (file)
@@ -12,6 +12,6 @@
 #ifndef __ASM_ARCH_VMALLOC_H
 #define __ASM_ARCH_VMALLOC_H
 
-#define VMALLOC_END      (0xE0000000)
+#define VMALLOC_END    0xE0000000UL
 
 #endif /* __ASM_ARCH_VMALLOC_H */
index 58f515e0747ef8069eeca119707b30fe0efa18c1..df9a28808323e44efed82fe7d67b880180dfa6b6 100644 (file)
@@ -17,6 +17,6 @@
 #ifndef __ASM_ARCH_VMALLOC_H
 #define __ASM_ARCH_VMALLOC_H __FILE__
 
-#define VMALLOC_END      (0xE0000000)
+#define VMALLOC_END    (0xE0000000UL)
 
 #endif /* __ASM_ARCH_VMALLOC_H */
index 77f2b4d85e6bb09a91bcdcb0ef32e16afda16f84..26a0f03df8eaf2ae85323cda81e3b1044ec66d97 100644 (file)
@@ -30,6 +30,16 @@ static struct clk clk_sclk_hdmi27m = {
        .rate           = 27000000,
 };
 
+static int s5pv310_clksrc_mask_peril0_ctrl(struct clk *clk, int enable)
+{
+       return s5p_gatectrl(S5P_CLKSRC_MASK_PERIL0, clk, enable);
+}
+
+static int s5pv310_clk_ip_peril_ctrl(struct clk *clk, int enable)
+{
+       return s5p_gatectrl(S5P_CLKGATE_IP_PERIL, clk, enable);
+}
+
 /* Core list of CMU_CPU side */
 
 static struct clksrc_clk clk_mout_apll = {
@@ -39,6 +49,14 @@ static struct clksrc_clk clk_mout_apll = {
        },
        .sources        = &clk_src_apll,
        .reg_src        = { .reg = S5P_CLKSRC_CPU, .shift = 0, .size = 1 },
+};
+
+static struct clksrc_clk clk_sclk_apll = {
+       .clk    = {
+               .name           = "sclk_apll",
+               .id             = -1,
+               .parent         = &clk_mout_apll.clk,
+       },
        .reg_div        = { .reg = S5P_CLKDIV_CPU, .shift = 24, .size = 3 },
 };
 
@@ -61,7 +79,7 @@ static struct clksrc_clk clk_mout_mpll = {
 };
 
 static struct clk *clkset_moutcore_list[] = {
-       [0] = &clk_mout_apll.clk,
+       [0] = &clk_sclk_apll.clk,
        [1] = &clk_mout_mpll.clk,
 };
 
@@ -154,7 +172,7 @@ static struct clksrc_clk clk_pclk_dbg = {
 
 static struct clk *clkset_corebus_list[] = {
        [0] = &clk_mout_mpll.clk,
-       [1] = &clk_mout_apll.clk,
+       [1] = &clk_sclk_apll.clk,
 };
 
 static struct clksrc_sources clkset_mout_corebus = {
@@ -220,7 +238,7 @@ static struct clksrc_clk clk_pclk_acp = {
 
 static struct clk *clkset_aclk_top_list[] = {
        [0] = &clk_mout_mpll.clk,
-       [1] = &clk_mout_apll.clk,
+       [1] = &clk_sclk_apll.clk,
 };
 
 static struct clksrc_sources clkset_aclk_200 = {
@@ -321,11 +339,6 @@ static struct clksrc_clk clk_sclk_vpll = {
        .reg_src        = { .reg = S5P_CLKSRC_TOP0, .shift = 8, .size = 1 },
 };
 
-static int s5pv310_clk_ip_peril_ctrl(struct clk *clk, int enable)
-{
-       return s5p_gatectrl(S5P_CLKGATE_IP_PERIL, clk, enable);
-}
-
 static struct clk init_clocks_disable[] = {
        {
                .name           = "timers",
@@ -337,7 +350,37 @@ static struct clk init_clocks_disable[] = {
 };
 
 static struct clk init_clocks[] = {
-       /* Nothing here yet */
+       {
+               .name           = "uart",
+               .id             = 0,
+               .enable         = s5pv310_clk_ip_peril_ctrl,
+               .ctrlbit        = (1 << 0),
+       }, {
+               .name           = "uart",
+               .id             = 1,
+               .enable         = s5pv310_clk_ip_peril_ctrl,
+               .ctrlbit        = (1 << 1),
+       }, {
+               .name           = "uart",
+               .id             = 2,
+               .enable         = s5pv310_clk_ip_peril_ctrl,
+               .ctrlbit        = (1 << 2),
+       }, {
+               .name           = "uart",
+               .id             = 3,
+               .enable         = s5pv310_clk_ip_peril_ctrl,
+               .ctrlbit        = (1 << 3),
+       }, {
+               .name           = "uart",
+               .id             = 4,
+               .enable         = s5pv310_clk_ip_peril_ctrl,
+               .ctrlbit        = (1 << 4),
+       }, {
+               .name           = "uart",
+               .id             = 5,
+               .enable         = s5pv310_clk_ip_peril_ctrl,
+               .ctrlbit        = (1 << 5),
+       }
 };
 
 static struct clk *clkset_group_list[] = {
@@ -359,8 +402,8 @@ static struct clksrc_clk clksrcs[] = {
                .clk    = {
                        .name           = "uclk1",
                        .id             = 0,
+                       .enable         = s5pv310_clksrc_mask_peril0_ctrl,
                        .ctrlbit        = (1 << 0),
-                       .enable         = s5pv310_clk_ip_peril_ctrl,
                },
                .sources = &clkset_group,
                .reg_src = { .reg = S5P_CLKSRC_PERIL0, .shift = 0, .size = 4 },
@@ -369,8 +412,8 @@ static struct clksrc_clk clksrcs[] = {
                .clk            = {
                        .name           = "uclk1",
                        .id             = 1,
-                       .enable         = s5pv310_clk_ip_peril_ctrl,
-                       .ctrlbit        = (1 << 1),
+                       .enable         = s5pv310_clksrc_mask_peril0_ctrl,
+                       .ctrlbit        = (1 << 4),
                },
                .sources = &clkset_group,
                .reg_src = { .reg = S5P_CLKSRC_PERIL0, .shift = 4, .size = 4 },
@@ -379,8 +422,8 @@ static struct clksrc_clk clksrcs[] = {
                .clk            = {
                        .name           = "uclk1",
                        .id             = 2,
-                       .enable         = s5pv310_clk_ip_peril_ctrl,
-                       .ctrlbit        = (1 << 2),
+                       .enable         = s5pv310_clksrc_mask_peril0_ctrl,
+                       .ctrlbit        = (1 << 8),
                },
                .sources = &clkset_group,
                .reg_src = { .reg = S5P_CLKSRC_PERIL0, .shift = 8, .size = 4 },
@@ -389,8 +432,8 @@ static struct clksrc_clk clksrcs[] = {
                .clk            = {
                        .name           = "uclk1",
                        .id             = 3,
-                       .enable         = s5pv310_clk_ip_peril_ctrl,
-                       .ctrlbit        = (1 << 3),
+                       .enable         = s5pv310_clksrc_mask_peril0_ctrl,
+                       .ctrlbit        = (1 << 12),
                },
                .sources = &clkset_group,
                .reg_src = { .reg = S5P_CLKSRC_PERIL0, .shift = 12, .size = 4 },
@@ -399,7 +442,7 @@ static struct clksrc_clk clksrcs[] = {
                .clk            = {
                        .name           = "sclk_pwm",
                        .id             = -1,
-                       .enable         = s5pv310_clk_ip_peril_ctrl,
+                       .enable         = s5pv310_clksrc_mask_peril0_ctrl,
                        .ctrlbit        = (1 << 24),
                },
                .sources = &clkset_group,
@@ -411,6 +454,7 @@ static struct clksrc_clk clksrcs[] = {
 /* Clock initialization code */
 static struct clksrc_clk *sysclks[] = {
        &clk_mout_apll,
+       &clk_sclk_apll,
        &clk_mout_epll,
        &clk_mout_mpll,
        &clk_moutcore,
@@ -470,11 +514,11 @@ void __init_or_cpufreq s5pv310_setup_clocks(void)
        apll = s5p_get_pll45xx(xtal, __raw_readl(S5P_APLL_CON0), pll_4508);
        mpll = s5p_get_pll45xx(xtal, __raw_readl(S5P_MPLL_CON0), pll_4508);
        epll = s5p_get_pll46xx(xtal, __raw_readl(S5P_EPLL_CON0),
-                               __raw_readl(S5P_EPLL_CON1), pll_4500);
+                               __raw_readl(S5P_EPLL_CON1), pll_4600);
 
        vpllsrc = clk_get_rate(&clk_vpllsrc.clk);
        vpll = s5p_get_pll46xx(vpllsrc, __raw_readl(S5P_VPLL_CON0),
-                               __raw_readl(S5P_VPLL_CON1), pll_4502);
+                               __raw_readl(S5P_VPLL_CON1), pll_4650);
 
        clk_fout_apll.rate = apll;
        clk_fout_mpll.rate = mpll;
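
The uclk1 baud clocks above now gate through S5P_CLKSRC_MASK_PERIL0 instead of the IP gate register; each UART owns a 4-bit mux field in that register, so the control bit lines up with the mux field's shift and moves from bit id to bit id * 4. A quick check of that pattern:

#include <stdio.h>

int main(void)
{
	for (int id = 0; id < 4; id++)
		printf("uclk1.%d: IP gate bit %2d, CLKSRC mask bit %2d\n",
		       id, id, id * 4);
	return 0;
}
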
index 196c9f12ed85719be8bf757bc19b2ac6b092f68a..e5b261a99ab2678e5860d9c0055d493137ef1e42 100644 (file)
@@ -45,6 +45,16 @@ static struct map_desc s5pv310_iodesc[] __initdata = {
                .pfn            = __phys_to_pfn(S5PV310_PA_L2CC),
                .length         = SZ_4K,
                .type           = MT_DEVICE,
+       }, {
+               .virtual        = (unsigned long)S5P_VA_SYSRAM,
+               .pfn            = __phys_to_pfn(S5PV310_PA_SYSRAM),
+               .length         = SZ_4K,
+               .type           = MT_DEVICE,
+       }, {
+               .virtual        = (unsigned long)S5P_VA_CMU,
+               .pfn            = __phys_to_pfn(S5PV310_PA_CMU),
+               .length         = SZ_128K,
+               .type           = MT_DEVICE,
        },
 };
 
index 56885ca3773cb2c85d275ecb11e6bce284de2888..4cdedda6e652b61297c877ac2c0205b81b9112ad 100644 (file)
 
 #include <plat/irqs.h>
 
-/* Private Peripheral Interrupt */
+/* PPI: Private Peripheral Interrupt */
+
 #define IRQ_PPI(x)             S5P_IRQ(x+16)
 
 #define IRQ_LOCALTIMER         IRQ_PPI(13)
 
-/* Shared Peripheral Interrupt */
+/* SPI: Shared Peripheral Interrupt */
+
 #define IRQ_SPI(x)             S5P_IRQ(x+32)
 
 #define IRQ_EINT0              IRQ_SPI(40)
@@ -36,7 +38,7 @@
 #define IRQ_PCIE               IRQ_SPI(50)
 #define IRQ_SYSTEM_TIMER       IRQ_SPI(51)
 #define IRQ_MFC                        IRQ_SPI(52)
-#define IRQ_WTD                        IRQ_SPI(53)
+#define IRQ_WDT                        IRQ_SPI(53)
 #define IRQ_AUDIO_SS           IRQ_SPI(54)
 #define IRQ_AC97               IRQ_SPI(55)
 #define IRQ_SPDIF              IRQ_SPI(56)
@@ -67,8 +69,9 @@
 #define IRQ_IIC                        COMBINER_IRQ(27, 0)
 
 /* Set the default NR_IRQS */
+
 #define NR_IRQS                        COMBINER_IRQ(MAX_COMBINER_NR, 0)
 
 #define MAX_COMBINER_NR                39
 
-#endif /* ASM_ARCH_IRQS_H */
+#endif /* __ASM_ARCH_IRQS_H */
index 87697c9fca5b47757ddcd24c1510f2783c5ca8f1..213e1101a3b325b60ada7c5bc5014035c1d8e74b 100644 (file)
 
 #include <plat/map-s5p.h>
 
+#define S5PV310_PA_SYSRAM              (0x02025000)
+
 #define S5PV310_PA_CHIPID              (0x10000000)
 #define S5P_PA_CHIPID                  S5PV310_PA_CHIPID
 
 #define S5PV310_PA_SYSCON              (0x10020000)
 #define S5P_PA_SYSCON                  S5PV310_PA_SYSCON
 
+#define S5PV310_PA_CMU                 (0x10030000)
+
 #define S5PV310_PA_WATCHDOG            (0x10060000)
 
 #define S5PV310_PA_COMBINER            (0x10448000)
 #define S5PV310_PA_GIC_DIST            (0x10501000)
 #define S5PV310_PA_L2CC                        (0x10502000)
 
-#define S5PV310_PA_GPIO                        (0x11000000)
-#define S5P_PA_GPIO                    S5PV310_PA_GPIO
+#define S5PV310_PA_GPIO1               (0x11400000)
+#define S5PV310_PA_GPIO2               (0x11000000)
+#define S5PV310_PA_GPIO3               (0x03860000)
+#define S5P_PA_GPIO                    S5PV310_PA_GPIO1
+
+#define S5PV310_PA_HSMMC(x)            (0x12510000 + ((x) * 0x10000))
 
 #define S5PV310_PA_UART                        (0x13800000)
 
 
 /* compatibiltiy defines. */
 #define S3C_PA_UART                    S5PV310_PA_UART
+#define S3C_PA_HSMMC0                  S5PV310_PA_HSMMC(0)
+#define S3C_PA_HSMMC1                  S5PV310_PA_HSMMC(1)
+#define S3C_PA_HSMMC2                  S5PV310_PA_HSMMC(2)
+#define S3C_PA_HSMMC3                  S5PV310_PA_HSMMC(3)
 #define S3C_PA_IIC                     S5PV310_PA_IIC0
 #define S3C_PA_WDT                     S5PV310_PA_WATCHDOG
 
index 59e3a7e94d801c86e99dc6683466876d5b948567..4013553cd9be8de219ffb3cfbd51905b35f517c7 100644 (file)
 
 #include <mach/map.h>
 
-#define S5P_CLKREG(x)                  (S3C_VA_SYS + (x))
+#define S5P_CLKREG(x)                  (S5P_VA_CMU + (x))
 
 #define S5P_INFORM0                    S5P_CLKREG(0x800)
 
-#define S5P_EPLL_CON0                  S5P_CLKREG(0x1C110)
-#define S5P_EPLL_CON1                  S5P_CLKREG(0x1C114)
-#define S5P_VPLL_CON0                  S5P_CLKREG(0x1C120)
-#define S5P_VPLL_CON1                  S5P_CLKREG(0x1C124)
+#define S5P_EPLL_CON0                  S5P_CLKREG(0x0C110)
+#define S5P_EPLL_CON1                  S5P_CLKREG(0x0C114)
+#define S5P_VPLL_CON0                  S5P_CLKREG(0x0C120)
+#define S5P_VPLL_CON1                  S5P_CLKREG(0x0C124)
 
-#define S5P_CLKSRC_TOP0                        S5P_CLKREG(0x1C210)
-#define S5P_CLKSRC_TOP1                        S5P_CLKREG(0x1C214)
+#define S5P_CLKSRC_TOP0                        S5P_CLKREG(0x0C210)
+#define S5P_CLKSRC_TOP1                        S5P_CLKREG(0x0C214)
 
-#define S5P_CLKSRC_PERIL0              S5P_CLKREG(0x1C250)
+#define S5P_CLKSRC_PERIL0              S5P_CLKREG(0x0C250)
 
-#define S5P_CLKDIV_TOP                 S5P_CLKREG(0x1C510)
+#define S5P_CLKDIV_TOP                 S5P_CLKREG(0x0C510)
 
-#define S5P_CLKDIV_PERIL0              S5P_CLKREG(0x1C550)
-#define S5P_CLKDIV_PERIL1              S5P_CLKREG(0x1C554)
-#define S5P_CLKDIV_PERIL2              S5P_CLKREG(0x1C558)
-#define S5P_CLKDIV_PERIL3              S5P_CLKREG(0x1C55C)
-#define S5P_CLKDIV_PERIL4              S5P_CLKREG(0x1C560)
-#define S5P_CLKDIV_PERIL5              S5P_CLKREG(0x1C564)
+#define S5P_CLKDIV_PERIL0              S5P_CLKREG(0x0C550)
+#define S5P_CLKDIV_PERIL1              S5P_CLKREG(0x0C554)
+#define S5P_CLKDIV_PERIL2              S5P_CLKREG(0x0C558)
+#define S5P_CLKDIV_PERIL3              S5P_CLKREG(0x0C55C)
+#define S5P_CLKDIV_PERIL4              S5P_CLKREG(0x0C560)
+#define S5P_CLKDIV_PERIL5              S5P_CLKREG(0x0C564)
 
-#define S5P_CLKGATE_IP_PERIL           S5P_CLKREG(0x1C950)
+#define S5P_CLKSRC_MASK_PERIL0         S5P_CLKREG(0x0C350)
 
-#define S5P_CLKSRC_CORE                        S5P_CLKREG(0x20200)
+#define S5P_CLKGATE_IP_PERIL           S5P_CLKREG(0x0C950)
 
-#define S5P_CLKDIV_CORE0               S5P_CLKREG(0x20500)
+#define S5P_CLKSRC_CORE                        S5P_CLKREG(0x10200)
+#define S5P_CLKDIV_CORE0               S5P_CLKREG(0x10500)
 
-#define S5P_APLL_LOCK                  S5P_CLKREG(0x24000)
-#define S5P_MPLL_LOCK                  S5P_CLKREG(0x24004)
-#define S5P_APLL_CON0                  S5P_CLKREG(0x24100)
-#define S5P_APLL_CON1                  S5P_CLKREG(0x24104)
-#define S5P_MPLL_CON0                  S5P_CLKREG(0x24108)
-#define S5P_MPLL_CON1                  S5P_CLKREG(0x2410C)
+#define S5P_APLL_LOCK                  S5P_CLKREG(0x14000)
+#define S5P_MPLL_LOCK                  S5P_CLKREG(0x14004)
+#define S5P_APLL_CON0                  S5P_CLKREG(0x14100)
+#define S5P_APLL_CON1                  S5P_CLKREG(0x14104)
+#define S5P_MPLL_CON0                  S5P_CLKREG(0x14108)
+#define S5P_MPLL_CON1                  S5P_CLKREG(0x1410C)
 
-#define S5P_CLKSRC_CPU                 S5P_CLKREG(0x24200)
-#define S5P_CLKMUX_STATCPU             S5P_CLKREG(0x24400)
+#define S5P_CLKSRC_CPU                 S5P_CLKREG(0x14200)
+#define S5P_CLKMUX_STATCPU             S5P_CLKREG(0x14400)
 
-#define S5P_CLKDIV_CPU                 S5P_CLKREG(0x24500)
-#define S5P_CLKDIV_STATCPU             S5P_CLKREG(0x24600)
+#define S5P_CLKDIV_CPU                 S5P_CLKREG(0x14500)
+#define S5P_CLKDIV_STATCPU             S5P_CLKREG(0x14600)
 
-#define S5P_CLKGATE_SCLKCPU            S5P_CLKREG(0x24800)
+#define S5P_CLKGATE_SCLKCPU            S5P_CLKREG(0x14800)
 
 #endif /* __ASM_ARCH_REGS_CLOCK_H */
index 3f565ebb7daa940dea8d199e51d7d1eda43d7fb2..256f221edf3aca654d59000d3016cdbabe1f80c1 100644 (file)
@@ -17,6 +17,6 @@
 #ifndef __ASM_ARCH_VMALLOC_H
 #define __ASM_ARCH_VMALLOC_H __FILE__
 
-#define VMALLOC_END      (0xF0000000)
+#define VMALLOC_END    (0xF0000000UL)
 
 #endif /* __ASM_ARCH_VMALLOC_H */
index fe9469abd0066190286d1e6dd382bb18073e24a7..d357c198edee308112bc3633256376ca69720abb 100644 (file)
@@ -187,6 +187,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
                 * until it receives a soft interrupt, and then the
                 * secondary CPU branches to this address.
                 */
-       __raw_writel(BSYM(virt_to_phys(s5pv310_secondary_startup)), S5P_INFORM0);
+       __raw_writel(BSYM(virt_to_phys(s5pv310_secondary_startup)), S5P_VA_SYSRAM);
        }
 }
index 5e16b4c692222a4a45d5728cec5b4970cae495e0..ae416fe7daf2e61b09f683c9be36ebc57c105c62 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 # Common objects
-obj-y                          := timer.o console.o clock.o
+obj-y                          := timer.o console.o clock.o pm_runtime.o
 
 # CPU objects
 obj-$(CONFIG_ARCH_SH7367)      += setup-sh7367.o clock-sh7367.o intc-sh7367.o
index 23d472f9525e6a160c97cbf8adc21c505819fb05..95935c83c30654ee94d301e94086680475a6ed67 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/platform_device.h>
 #include <linux/delay.h>
 #include <linux/mfd/sh_mobile_sdhi.h>
+#include <linux/mfd/tmio.h>
 #include <linux/mmc/host.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
@@ -39,6 +40,7 @@
 #include <linux/sh_clk.h>
 #include <linux/gpio.h>
 #include <linux/input.h>
+#include <linux/leds.h>
 #include <linux/input/sh_keysc.h>
 #include <linux/usb/r8a66597.h>
 
@@ -307,6 +309,7 @@ static struct sh_mobile_sdhi_info sdhi1_info = {
        .dma_slave_tx   = SHDMA_SLAVE_SDHI1_TX,
        .dma_slave_rx   = SHDMA_SLAVE_SDHI1_RX,
        .tmio_ocr_mask  = MMC_VDD_165_195,
+       .tmio_flags     = TMIO_MMC_WRPROTECT_DISABLE,
 };
 
 static struct resource sdhi1_resources[] = {
@@ -558,7 +561,7 @@ static struct resource fsi_resources[] = {
 
 static struct platform_device fsi_device = {
        .name           = "sh_fsi2",
-       .id             = 0,
+       .id             = -1,
        .num_resources  = ARRAY_SIZE(fsi_resources),
        .resource       = fsi_resources,
        .dev    = {
@@ -650,7 +653,44 @@ static struct platform_device hdmi_device = {
        },
 };
 
+static struct gpio_led ap4evb_leds[] = {
+       {
+               .name                   = "led4",
+               .gpio                   = GPIO_PORT185,
+               .default_state  = LEDS_GPIO_DEFSTATE_ON,
+       },
+       {
+               .name                   = "led2",
+               .gpio                   = GPIO_PORT186,
+               .default_state  = LEDS_GPIO_DEFSTATE_ON,
+       },
+       {
+               .name                   = "led3",
+               .gpio                   = GPIO_PORT187,
+               .default_state  = LEDS_GPIO_DEFSTATE_ON,
+       },
+       {
+               .name                   = "led1",
+               .gpio                   = GPIO_PORT188,
+               .default_state  = LEDS_GPIO_DEFSTATE_ON,
+       }
+};
+
+static struct gpio_led_platform_data ap4evb_leds_pdata = {
+       .num_leds = ARRAY_SIZE(ap4evb_leds),
+       .leds = ap4evb_leds,
+};
+
+static struct platform_device leds_device = {
+       .name = "leds-gpio",
+       .id = 0,
+       .dev = {
+               .platform_data  = &ap4evb_leds_pdata,
+       },
+};
+
 static struct platform_device *ap4evb_devices[] __initdata = {
+       &leds_device,
        &nor_flash_device,
        &smc911x_device,
        &sdhi0_device,
@@ -840,20 +880,6 @@ static void __init ap4evb_init(void)
        gpio_request(GPIO_FN_CS5A,      NULL);
        gpio_request(GPIO_FN_IRQ6_39,   NULL);
 
-       /* enable LED 1 - 4 */
-       gpio_request(GPIO_PORT185, NULL);
-       gpio_request(GPIO_PORT186, NULL);
-       gpio_request(GPIO_PORT187, NULL);
-       gpio_request(GPIO_PORT188, NULL);
-       gpio_direction_output(GPIO_PORT185, 1);
-       gpio_direction_output(GPIO_PORT186, 1);
-       gpio_direction_output(GPIO_PORT187, 1);
-       gpio_direction_output(GPIO_PORT188, 1);
-       gpio_export(GPIO_PORT185, 0);
-       gpio_export(GPIO_PORT186, 0);
-       gpio_export(GPIO_PORT187, 0);
-       gpio_export(GPIO_PORT188, 0);
-
        /* enable Debug switch (S6) */
        gpio_request(GPIO_PORT32, NULL);
        gpio_request(GPIO_PORT33, NULL);
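
With the manual gpio_request()/gpio_export() calls replaced by the leds-gpio device above, each LED shows up under /sys/class/leds/<name>/ and can be driven from user space. A small illustration, assuming the default sysfs mount point:

#include <stdio.h>

int main(void)
{
	/* "led1" matches one of the gpio_led names registered above */
	FILE *f = fopen("/sys/class/leds/led1/brightness", "w");

	if (!f) {
		perror("led1");
		return 1;
	}
	fputs("0\n", f);	/* 0 = off, any non-zero value = on */
	fclose(f);
	return 0;
}
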
index fb4e9b1d788e464922ba2345d60fb43b8e1173d2..759468992ad287ff3f40b2f2e92e19d99734c94a 100644 (file)
@@ -286,7 +286,6 @@ static struct clk_ops pllc2_clk_ops = {
 
 struct clk pllc2_clk = {
        .ops            = &pllc2_clk_ops,
-       .flags          = CLK_ENABLE_ON_INIT,
        .parent         = &extal1_div2_clk,
        .freq_table     = pllc2_freq_table,
        .parent_table   = pllc2_parent,
@@ -395,7 +394,7 @@ static struct clk div6_reparent_clks[DIV6_REPARENT_NR] = {
 
 enum { MSTP001,
        MSTP131, MSTP130,
-       MSTP129, MSTP128,
+       MSTP129, MSTP128, MSTP127, MSTP126,
        MSTP118, MSTP117, MSTP116,
        MSTP106, MSTP101, MSTP100,
        MSTP223,
@@ -413,6 +412,8 @@ static struct clk mstp_clks[MSTP_NR] = {
        [MSTP130] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 30, 0), /* VEU2 */
        [MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* VEU1 */
        [MSTP128] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 28, 0), /* VEU0 */
+       [MSTP127] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 27, 0), /* CEU */
+       [MSTP126] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 26, 0), /* CSI2 */
        [MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX */
        [MSTP117] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 17, 0), /* LCDC1 */
        [MSTP116] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 16, 0), /* IIC0 */
@@ -428,7 +429,7 @@ static struct clk mstp_clks[MSTP_NR] = {
        [MSTP201] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 1, 0), /* SCIFA3 */
        [MSTP200] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */
        [MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */
-       [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, CLK_ENABLE_ON_INIT), /* FSIA */
+       [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, 0), /* FSIA */
        [MSTP323] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 23, 0), /* IIC1 */
        [MSTP322] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 22, 0), /* USB0 */
        [MSTP314] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 14, 0), /* SDHI0 */
@@ -498,6 +499,8 @@ static struct clk_lookup lookups[] = {
        CLKDEV_DEV_ID("uio_pdrv_genirq.3", &mstp_clks[MSTP130]), /* VEU2 */
        CLKDEV_DEV_ID("uio_pdrv_genirq.2", &mstp_clks[MSTP129]), /* VEU1 */
        CLKDEV_DEV_ID("uio_pdrv_genirq.1", &mstp_clks[MSTP128]), /* VEU0 */
+       CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[MSTP127]), /* CEU */
+       CLKDEV_DEV_ID("sh-mobile-csi2.0", &mstp_clks[MSTP126]), /* CSI2 */
        CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */
        CLKDEV_DEV_ID("sh_mobile_lcdc_fb.1", &mstp_clks[MSTP117]), /* LCDC1 */
        CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* IIC0 */
index b7c705a213a2a1400e180df83d9e5a6f67649db4..6b7c7c42bc8fc529678fe7e77d01878299d049c4 100644 (file)
@@ -1,8 +1,10 @@
 /*
- * SH-Mobile Timer
+ * SH-Mobile Clock Framework
  *
  * Copyright (C) 2010  Magnus Damm
  *
+ * Used together with arch/arm/common/clkdev.c and drivers/sh/clk.c.
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; version 2 of the License.
diff --git a/arch/arm/mach-shmobile/pm_runtime.c b/arch/arm/mach-shmobile/pm_runtime.c
new file mode 100644 (file)
index 0000000..94912d3
--- /dev/null
@@ -0,0 +1,169 @@
+/*
+ * arch/arm/mach-shmobile/pm_runtime.c
+ *
+ * Runtime PM support code for SuperH Mobile ARM
+ *
+ *  Copyright (C) 2009-2010 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/sh_clk.h>
+#include <linux/bitmap.h>
+
+#ifdef CONFIG_PM_RUNTIME
+#define BIT_ONCE 0
+#define BIT_ACTIVE 1
+#define BIT_CLK_ENABLED 2
+
+struct pm_runtime_data {
+       unsigned long flags;
+       struct clk *clk;
+};
+
+static void __devres_release(struct device *dev, void *res)
+{
+       struct pm_runtime_data *prd = res;
+
+       dev_dbg(dev, "__devres_release()\n");
+
+       if (test_bit(BIT_CLK_ENABLED, &prd->flags))
+               clk_disable(prd->clk);
+
+       if (test_bit(BIT_ACTIVE, &prd->flags))
+               clk_put(prd->clk);
+}
+
+static struct pm_runtime_data *__to_prd(struct device *dev)
+{
+       return devres_find(dev, __devres_release, NULL, NULL);
+}
+
+static void platform_pm_runtime_init(struct device *dev,
+                                    struct pm_runtime_data *prd)
+{
+       if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags)) {
+               prd->clk = clk_get(dev, NULL);
+               if (!IS_ERR(prd->clk)) {
+                       set_bit(BIT_ACTIVE, &prd->flags);
+                       dev_info(dev, "clocks managed by runtime pm\n");
+               }
+       }
+}
+
+static void platform_pm_runtime_bug(struct device *dev,
+                                   struct pm_runtime_data *prd)
+{
+       if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags))
+               dev_err(dev, "runtime pm suspend before resume\n");
+}
+
+int platform_pm_runtime_suspend(struct device *dev)
+{
+       struct pm_runtime_data *prd = __to_prd(dev);
+
+       dev_dbg(dev, "platform_pm_runtime_suspend()\n");
+
+       platform_pm_runtime_bug(dev, prd);
+
+       if (prd && test_bit(BIT_ACTIVE, &prd->flags)) {
+               clk_disable(prd->clk);
+               clear_bit(BIT_CLK_ENABLED, &prd->flags);
+       }
+
+       return 0;
+}
+
+int platform_pm_runtime_resume(struct device *dev)
+{
+       struct pm_runtime_data *prd = __to_prd(dev);
+
+       dev_dbg(dev, "platform_pm_runtime_resume()\n");
+
+       platform_pm_runtime_init(dev, prd);
+
+       if (prd && test_bit(BIT_ACTIVE, &prd->flags)) {
+               clk_enable(prd->clk);
+               set_bit(BIT_CLK_ENABLED, &prd->flags);
+       }
+
+       return 0;
+}
+
+int platform_pm_runtime_idle(struct device *dev)
+{
+       /* suspend synchronously to disable clocks immediately */
+       return pm_runtime_suspend(dev);
+}
+
+static int platform_bus_notify(struct notifier_block *nb,
+                              unsigned long action, void *data)
+{
+       struct device *dev = data;
+       struct pm_runtime_data *prd;
+
+       dev_dbg(dev, "platform_bus_notify() %ld !\n", action);
+
+       if (action == BUS_NOTIFY_BIND_DRIVER) {
+               prd = devres_alloc(__devres_release, sizeof(*prd), GFP_KERNEL);
+               if (prd)
+                       devres_add(dev, prd);
+               else
+                       dev_err(dev, "unable to alloc memory for runtime pm\n");
+       }
+
+       return 0;
+}
+
+#else /* CONFIG_PM_RUNTIME */
+
+static int platform_bus_notify(struct notifier_block *nb,
+                              unsigned long action, void *data)
+{
+       struct device *dev = data;
+       struct clk *clk;
+
+       dev_dbg(dev, "platform_bus_notify() %ld !\n", action);
+
+       switch (action) {
+       case BUS_NOTIFY_BIND_DRIVER:
+               clk = clk_get(dev, NULL);
+               if (!IS_ERR(clk)) {
+                       clk_enable(clk);
+                       clk_put(clk);
+                       dev_info(dev, "runtime pm disabled, clock forced on\n");
+               }
+               break;
+       case BUS_NOTIFY_UNBOUND_DRIVER:
+               clk = clk_get(dev, NULL);
+               if (!IS_ERR(clk)) {
+                       clk_disable(clk);
+                       clk_put(clk);
+                       dev_info(dev, "runtime pm disabled, clock forced off\n");
+               }
+               break;
+       }
+
+       return 0;
+}
+
+#endif /* CONFIG_PM_RUNTIME */
+
+static struct notifier_block platform_bus_notifier = {
+       .notifier_call = platform_bus_notify
+};
+
+static int __init sh_pm_runtime_init(void)
+{
+       bus_register_notifier(&platform_bus_type, &platform_bus_notifier);
+       return 0;
+}
+core_initcall(sh_pm_runtime_init);
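
With the bus notifier above installed, a platform driver on these SoCs does not touch its MSTP clock directly: standard runtime PM calls are enough, and the clock found via clk_get(dev, NULL) is enabled and disabled for it. A hedged usage sketch for a 2.6.36-era driver; the driver name and structure are invented for illustration:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int __devinit sketch_probe(struct platform_device *pdev)
{
	pm_runtime_enable(&pdev->dev);

	pm_runtime_get_sync(&pdev->dev);	/* clock is enabled behind this call */
	/* ... program the hardware ... */
	pm_runtime_put(&pdev->dev);		/* clock may be gated again here */

	return 0;
}

static int __devexit sketch_remove(struct platform_device *pdev)
{
	pm_runtime_disable(&pdev->dev);
	return 0;
}

static struct platform_driver sketch_driver = {
	.probe	= sketch_probe,
	.remove	= __devexit_p(sketch_remove),
	.driver	= {
		.name	= "sketch-device",	/* hypothetical device name */
		.owner	= THIS_MODULE,
	},
};

static int __init sketch_init(void)
{
	return platform_driver_register(&sketch_driver);
}
module_init(sketch_init);

MODULE_LICENSE("GPL");
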
index 05e78dd9b50ca22ef7b20e91f8a21e6e92c8021d..9e305de56be9ac28ab023dbfc4f748c5f260bec1 100644 (file)
@@ -91,10 +91,8 @@ static void __init tegra_harmony_fixup(struct machine_desc *desc,
 {
        mi->nr_banks = 2;
        mi->bank[0].start = PHYS_OFFSET;
-       mi->bank[0].node = PHYS_TO_NID(PHYS_OFFSET);
        mi->bank[0].size = 448 * SZ_1M;
        mi->bank[1].start = SZ_512M;
-       mi->bank[1].node = PHYS_TO_NID(SZ_512M);
        mi->bank[1].size = SZ_512M;
 }
 
index 267a141730d9c94fa59a7c0acbd5bbc6426e79d2..fd6aa65b2dc6ab433cc7a3f2ca45cf9128294f38 100644 (file)
@@ -23,6 +23,6 @@
 
 #include <asm/sizes.h>
 
-#define VMALLOC_END        0xFE000000
+#define VMALLOC_END        0xFE000000UL
 
 #endif
index 33c3f570aaa06c2a56f6a6d70eb7f558a883b0b1..a0a2928ae4dd7670a1342863040791833b57dbab 100644 (file)
@@ -398,7 +398,7 @@ config CPU_V6
 # ARMv6k
 config CPU_32v6K
        bool "Support ARM V6K processor extensions" if !SMP
-       depends on CPU_V6
+       depends on CPU_V6 || CPU_V7
        default y if SMP && !(ARCH_MX3 || ARCH_OMAP2)
        help
          Say Y here if your ARMv6 processor supports the 'K' extension.
index c704eed63c5ddba4c5f849f7ab7b6420008cef21..4bc43e535d3baadc657df9af504078ff1ae00570 100644 (file)
@@ -229,6 +229,8 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
                        }
                } while (size -= PAGE_SIZE);
 
+               dsb();
+
                return (void *)c->vm_start;
        }
        return NULL;
index 0527e65318f4a647b5b00192ce08ba47368ce5f8..6785db4179b84ccd925f9112cd48e9be71668e7c 100644 (file)
@@ -43,6 +43,7 @@ config ARCH_MXC91231
 config ARCH_MX5
        bool "MX5-based"
        select CPU_V7
+       select ARM_L1_CACHE_SHIFT_6
        help
          This enables support for systems based on the Freescale i.MX51 family
 
index 634e3f4c454df222728aa678bdcd657967286dbc..656acb45d434b7333e0864798fb6b69538387701 100644 (file)
@@ -37,9 +37,9 @@
  * mach-mx5/eukrea_mbimx51-baseboard.c for cpuimx51
  */
 
-extern void eukrea_mbimx25_baseboard_init(void);
+extern void eukrea_mbimxsd25_baseboard_init(void);
 extern void eukrea_mbimx27_baseboard_init(void);
-extern void eukrea_mbimx35_baseboard_init(void);
+extern void eukrea_mbimxsd35_baseboard_init(void);
 extern void eukrea_mbimx51_baseboard_init(void);
 
 #endif
index b3da9aad4295704ef9ea8a4a43c95127d74e6d9e..3703ab28257fbbb55d3db89bee56877eebb38345 100644 (file)
@@ -164,8 +164,9 @@ int tzic_enable_wake(int is_idle)
                return -EAGAIN;
 
        for (i = 0; i < 4; i++) {
-               v = is_idle ? __raw_readl(TZIC_ENSET0(i)) : wakeup_intr[i];
-               __raw_writel(v, TZIC_WAKEUP0(i));
+               v = is_idle ? __raw_readl(tzic_base + TZIC_ENSET0(i)) :
+                       wakeup_intr[i];
+               __raw_writel(v, tzic_base + TZIC_WAKEUP0(i));
        }
 
        return 0;
index 6a3ff65c030350e121649f5bed7cb9d9f8d76724..5177a9c5a25acb62966f14763cf6aff842ee5a11 100644 (file)
 
 #include <asm/hardware/gic.h>
 
-/*
- * set_event() is used to wake up secondary core from wfe using sev. ROM
- * code puts the second core into wfe(standby).
- *
- */
-#define set_event()    __asm__ __volatile__ ("sev" : : : "memory")
-
 /* Needed for secondary core boot */
 extern void omap_secondary_startup(void);
 extern u32 omap_modify_auxcoreboot0(u32 set_mask, u32 clear_mask);
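
The set_event() macro removed above issued a bare SEV; the dsb_sev() call that replaces it earlier in this commit folds the barrier and the wake-up event into one assembly sequence, so the event cannot be issued before the outstanding writes have drained. Roughly what that helper amounts to on an ARMv7 SMP build (a sketch only; the real macro lives in the asm headers and varies with architecture level):

static inline void dsb_sev_sketch(void)
{
	__asm__ __volatile__(
		"dsb\n\t"	/* drain outstanding writes to memory */
		"sev"		/* then signal cores parked in WFE */
		: : : "memory");
}
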
index 0732c6c8d511979e354cced2cd6987889702a1e7..ef32686feef9431ab00f42e4a2a0e2d7656af783 100644 (file)
@@ -176,7 +176,7 @@ static inline void __add_pwm(struct pwm_device *pwm)
 
 static int __devinit pwm_probe(struct platform_device *pdev)
 {
-       struct platform_device_id *id = platform_get_device_id(pdev);
+       const struct platform_device_id *id = platform_get_device_id(pdev);
        struct pwm_device *pwm, *secondary = NULL;
        struct resource *r;
        int ret = 0;
index 54e9fb9d315e949129b4754b8b99a195a7771aee..c4ff88bf6477d2797b6ba578c768b0edadaba073 100644 (file)
@@ -17,6 +17,7 @@
 #define S5P_VA_GPIO            S3C_ADDR(0x00500000)
 #define S5P_VA_SYSTIMER                S3C_ADDR(0x01200000)
 #define S5P_VA_SROMC           S3C_ADDR(0x01100000)
+#define S5P_VA_SYSRAM          S3C_ADDR(0x01180000)
 
 #define S5P_VA_COMBINER_BASE   S3C_ADDR(0x00600000)
 #define S5P_VA_COMBINER(x)     (S5P_VA_COMBINER_BASE + ((x) >> 2) * 0x10)
@@ -29,6 +30,7 @@
 #define S5P_VA_GIC_DIST                S5P_VA_COREPERI(0x1000)
 
 #define S5P_VA_L2CC            S3C_ADDR(0x00900000)
+#define S5P_VA_CMU             S3C_ADDR(0x00920000)
 
 #define S5P_VA_UART(x)         (S3C_VA_UART + ((x) * S3C_UART_OFFSET))
 #define S5P_VA_UART0           S5P_VA_UART(0)
index 48cbdcb6bbd4288929f31bef94da3691b160a489..55590a4d87c932984404d1df13ca4c296c9d7117 100644 (file)
@@ -12,7 +12,7 @@
 #
 #   http://www.arm.linux.org.uk/developer/machines/?action=new
 #
-# Last update: Mon Jul 12 21:10:14 2010
+# Last update: Thu Sep 9 22:43:01 2010
 #
 # machine_is_xxx       CONFIG_xxxx             MACH_TYPE_xxx           number
 #
@@ -2622,7 +2622,7 @@ kraken                    MACH_KRAKEN             KRAKEN                  2634
 gw2388                 MACH_GW2388             GW2388                  2635
 jadecpu                        MACH_JADECPU            JADECPU                 2636
 carlisle               MACH_CARLISLE           CARLISLE                2637
-lux_sf9                        MACH_LUX_SFT9           LUX_SFT9                2638
+lux_sf9                        MACH_LUX_SF9            LUX_SF9                 2638
 nemid_tb               MACH_NEMID_TB           NEMID_TB                2639
 terrier                        MACH_TERRIER            TERRIER                 2640
 turbot                 MACH_TURBOT             TURBOT                  2641
@@ -2950,3 +2950,97 @@ davinci_dm365_dvr        MACH_DAVINCI_DM365_DVR  DAVINCI_DM365_DVR       2963
 netviz                 MACH_NETVIZ             NETVIZ                  2964
 flexibity              MACH_FLEXIBITY          FLEXIBITY               2965
 wlan_computer          MACH_WLAN_COMPUTER      WLAN_COMPUTER           2966
+lpc24xx                        MACH_LPC24XX            LPC24XX                 2967
+spica                  MACH_SPICA              SPICA                   2968
+gpsdisplay             MACH_GPSDISPLAY         GPSDISPLAY              2969
+bipnet                 MACH_BIPNET             BIPNET                  2970
+overo_ctu_inertial     MACH_OVERO_CTU_INERTIAL OVERO_CTU_INERTIAL      2971
+davinci_dm355_mmm      MACH_DAVINCI_DM355_MMM  DAVINCI_DM355_MMM       2972
+pc9260_v2              MACH_PC9260_V2          PC9260_V2               2973
+ptx7545                        MACH_PTX7545            PTX7545                 2974
+tm_efdc                        MACH_TM_EFDC            TM_EFDC                 2975
+omap3_waldo1           MACH_OMAP3_WALDO1       OMAP3_WALDO1            2977
+flyer                  MACH_FLYER              FLYER                   2978
+tornado3240            MACH_TORNADO3240        TORNADO3240             2979
+soli_01                        MACH_SOLI_01            SOLI_01                 2980
+omapl138_europalc      MACH_OMAPL138_EUROPALC  OMAPL138_EUROPALC       2981
+helios_v1              MACH_HELIOS_V1          HELIOS_V1               2982
+netspace_lite_v2       MACH_NETSPACE_LITE_V2   NETSPACE_LITE_V2        2983
+ssc                    MACH_SSC                SSC                     2984
+premierwave_en         MACH_PREMIERWAVE_EN     PREMIERWAVE_EN          2985
+wasabi                 MACH_WASABI             WASABI                  2986
+vivow                  MACH_VIVOW              VIVOW                   2987
+mx50_rdp               MACH_MX50_RDP           MX50_RDP                2988
+universal              MACH_UNIVERSAL          UNIVERSAL               2989
+real6410               MACH_REAL6410           REAL6410                2990
+spx_sakura             MACH_SPX_SAKURA         SPX_SAKURA              2991
+ij3k_2440              MACH_IJ3K_2440          IJ3K_2440               2992
+omap3_bc10             MACH_OMAP3_BC10         OMAP3_BC10              2993
+thebe                  MACH_THEBE              THEBE                   2994
+rv082                  MACH_RV082              RV082                   2995
+armlguest              MACH_ARMLGUEST          ARMLGUEST               2996
+tjinc1000              MACH_TJINC1000          TJINC1000               2997
+dockstar               MACH_DOCKSTAR           DOCKSTAR                2998
+ax8008                 MACH_AX8008             AX8008                  2999
+gnet_sgce              MACH_GNET_SGCE          GNET_SGCE               3000
+pxwnas_500_1000                MACH_PXWNAS_500_1000    PXWNAS_500_1000         3001
+ea20                   MACH_EA20               EA20                    3002
+awm2                   MACH_AWM2               AWM2                    3003
+ti8148evm              MACH_TI8148EVM          TI8148EVM               3004
+tegra_seaboard         MACH_TEGRA_SEABOARD     TEGRA_SEABOARD          3005
+linkstation_chlv2      MACH_LINKSTATION_CHLV2  LINKSTATION_CHLV2       3006
+tera_pro2_rack         MACH_TERA_PRO2_RACK     TERA_PRO2_RACK          3007
+rubys                  MACH_RUBYS              RUBYS                   3008
+aquarius               MACH_AQUARIUS           AQUARIUS                3009
+mx53_ard               MACH_MX53_ARD           MX53_ARD                3010
+mx53_smd               MACH_MX53_SMD           MX53_SMD                3011
+lswxl                  MACH_LSWXL              LSWXL                   3012
+dove_avng_v3           MACH_DOVE_AVNG_V3       DOVE_AVNG_V3            3013
+sdi_ess_9263           MACH_SDI_ESS_9263       SDI_ESS_9263            3014
+jocpu550               MACH_JOCPU550           JOCPU550                3015
+msm8x60_rumi3          MACH_MSM8X60_RUMI3      MSM8X60_RUMI3           3016
+msm8x60_ffa            MACH_MSM8X60_FFA        MSM8X60_FFA             3017
+yanomami               MACH_YANOMAMI           YANOMAMI                3018
+gta04                  MACH_GTA04              GTA04                   3019
+cm_a510                        MACH_CM_A510            CM_A510                 3020
+omap3_rfs200           MACH_OMAP3_RFS200       OMAP3_RFS200            3021
+kx33xx                 MACH_KX33XX             KX33XX                  3022
+ptx7510                        MACH_PTX7510            PTX7510                 3023
+top9000                        MACH_TOP9000            TOP9000                 3024
+teenote                        MACH_TEENOTE            TEENOTE                 3025
+ts3                    MACH_TS3                TS3                     3026
+a0                     MACH_A0                 A0                      3027
+fsm9xxx_surf           MACH_FSM9XXX_SURF       FSM9XXX_SURF            3028
+fsm9xxx_ffa            MACH_FSM9XXX_FFA        FSM9XXX_FFA             3029
+frrhwcdma60w           MACH_FRRHWCDMA60W       FRRHWCDMA60W            3030
+remus                  MACH_REMUS              REMUS                   3031
+at91cap7xdk            MACH_AT91CAP7XDK        AT91CAP7XDK             3032
+at91cap7stk            MACH_AT91CAP7STK        AT91CAP7STK             3033
+kt_sbc_sam9_1          MACH_KT_SBC_SAM9_1      KT_SBC_SAM9_1           3034
+oratisrouter           MACH_ORATISROUTER       ORATISROUTER            3035
+armada_xp_db           MACH_ARMADA_XP_DB       ARMADA_XP_DB            3036
+spdm                   MACH_SPDM               SPDM                    3037
+gtib                   MACH_GTIB               GTIB                    3038
+dgm3240                        MACH_DGM3240            DGM3240                 3039
+atlas_i_lpe            MACH_ATLAS_I_LPE        ATLAS_I_LPE             3040
+htcmega                        MACH_HTCMEGA            HTCMEGA                 3041
+tricorder              MACH_TRICORDER          TRICORDER               3042
+tx28                   MACH_TX28               TX28                    3043
+bstbrd                 MACH_BSTBRD             BSTBRD                  3044
+pwb3090                        MACH_PWB3090            PWB3090                 3045
+idea6410               MACH_IDEA6410           IDEA6410                3046
+qbc9263                        MACH_QBC9263            QBC9263                 3047
+borabora               MACH_BORABORA           BORABORA                3048
+valdez                 MACH_VALDEZ             VALDEZ                  3049
+ls9g20                 MACH_LS9G20             LS9G20                  3050
+mios_v1                        MACH_MIOS_V1            MIOS_V1                 3051
+s5pc110_crespo         MACH_S5PC110_CRESPO     S5PC110_CRESPO          3052
+controltek9g20         MACH_CONTROLTEK9G20     CONTROLTEK9G20          3053
+tin307                 MACH_TIN307             TIN307                  3054
+tin510                 MACH_TIN510             TIN510                  3055
+bluecheese             MACH_BLUECHEESE         BLUECHEESE              3057
+tem3x30                        MACH_TEM3X30            TEM3X30                 3058
+harvest_desoto         MACH_HARVEST_DESOTO     HARVEST_DESOTO          3059
+msm8x60_qrdc           MACH_MSM8X60_QRDC       MSM8X60_QRDC            3060
+spear900               MACH_SPEAR900           SPEAR900                3061
+pcontrol_g20           MACH_PCONTROL_G20       PCONTROL_G20            3062
index 9626cf7e4251d7c220534acc7343d4ffc16f3194..d27600c262c2b18427b0d28feed430440cbd6452 100644 (file)
@@ -115,12 +115,6 @@ struct sport_register {
 
 #endif
 
-/* Workaround defBF*.h SPORT MMRs till they get cleansed */
-#undef DTYPE_NORM
-#undef SLEN
-#undef SP_WOFF
-#undef SP_WSIZE
-
 /* SPORT_TCR1 Masks */
 #define TSPEN          0x0001  /* TX enable */
 #define ITCLK          0x0002  /* Internal TX Clock Select */
index d5872cd967ab3e793fd6f6a55e0f296b138444e3..3f7ef4d97791514c9d4eab13d17baffc78efb5f3 100644 (file)
@@ -22,7 +22,9 @@
 
 #include <asm-generic/bitops/sched.h>
 #include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/const_hweight.h>
 #include <asm-generic/bitops/lock.h>
+
 #include <asm-generic/bitops/ext2-non-atomic.h>
 #include <asm-generic/bitops/ext2-atomic.h>
 #include <asm-generic/bitops/minix.h>
@@ -115,7 +117,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
  * of bits set) of a N-bit word
  */
 
-static inline unsigned int hweight32(unsigned int w)
+static inline unsigned int __arch_hweight32(unsigned int w)
 {
        unsigned int res;
 
@@ -125,19 +127,20 @@ static inline unsigned int hweight32(unsigned int w)
        return res;
 }
 
-static inline unsigned int hweight64(__u64 w)
+static inline unsigned int __arch_hweight64(__u64 w)
 {
-       return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w);
+       return __arch_hweight32((unsigned int)(w >> 32)) +
+              __arch_hweight32((unsigned int)w);
 }
 
-static inline unsigned int hweight16(unsigned int w)
+static inline unsigned int __arch_hweight16(unsigned int w)
 {
-       return hweight32(w & 0xffff);
+       return __arch_hweight32(w & 0xffff);
 }
 
-static inline unsigned int hweight8(unsigned int w)
+static inline unsigned int __arch_hweight8(unsigned int w)
 {
-       return hweight32(w & 0xff);
+       return __arch_hweight32(w & 0xff);
 }
 
 #endif                         /* _BLACKFIN_BITOPS_H */
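The rename in the hunk above moves Blackfin's popcount helpers to the __arch_hweight*() names so that <asm-generic/bitops/const_hweight.h>, now pulled in near the top of the header, can supply the public hweight*() entry points. As a rough sketch of how that generic wrapper is expected to dispatch (the exact macro body lives in the asm-generic header and may differ in detail, so treat this as an approximation), constant arguments are folded at compile time while runtime values fall through to the arch routine:

/* illustrative sketch only, not the verbatim asm-generic macro */
#define hweight32(w)                             \
	(__builtin_constant_p(w)                 \
		? __const_hweight32(w)           \
		: __arch_hweight32(w))

The effect is that hweight32() applied to a compile-time constant costs nothing at runtime, while variable arguments still reach the Blackfin __arch_hweight32() shown above.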
index 22886cbdae7aa2b7c615bd800259600bc1148bf6..14fcd254b185a2706bd53c5bad024b4b749273af 100644 (file)
 #define __NR_rt_tgsigqueueinfo 368
 #define __NR_perf_event_open   369
 #define __NR_recvmmsg          370
+#define __NR_fanotify_init     371
+#define __NR_fanotify_mark     372
+#define __NR_prlimit64         373
 
-#define __NR_syscall           371
+#define __NR_syscall           374
 #define NR_syscalls            __NR_syscall
 
 /* Old optional stuff no one actually uses */
index 2bc8f4f980113b6fb0272d2cec839b1d84159ce9..037a51fd8e9382cca213271f289fcde90fc66644 100644 (file)
 #define PH6            0x0040
 #define PH7            0x0080
 
-
-/* *******************  SERIAL PORT MASKS  **************************************/
-/* SPORTx_TCR1 Masks                                                                                                                   */
-#define TSPEN          0x0001          /* Transmit Enable                                                              */
-#define ITCLK          0x0002          /* Internal Transmit Clock Select                               */
-#define DTYPE_NORM     0x0004          /* Data Format Normal                                                   */
-#define DTYPE_ULAW     0x0008          /* Compand Using u-Law                                                  */
-#define DTYPE_ALAW     0x000C          /* Compand Using A-Law                                                  */
-#define TLSBIT         0x0010          /* Transmit Bit Order                                                   */
-#define ITFS           0x0200          /* Internal Transmit Frame Sync Select                  */
-#define TFSR           0x0400          /* Transmit Frame Sync Required Select                  */
-#define DITFS          0x0800          /* Data-Independent Transmit Frame Sync Select  */
-#define LTFS           0x1000          /* Low Transmit Frame Sync Select                               */
-#define LATFS          0x2000          /* Late Transmit Frame Sync Select                              */
-#define TCKFE          0x4000          /* Clock Falling Edge Select                                    */
-
-/* SPORTx_TCR2 Masks and Macro                                                                                                 */
-#define SLEN(x)                ((x)&0x1F)      /* SPORT TX Word Length (2 - 31)                                */
-#define TXSE           0x0100          /* TX Secondary Enable                                                  */
-#define TSFSE          0x0200          /* Transmit Stereo Frame Sync Enable                    */
-#define TRFST          0x0400          /* Left/Right Order (1 = Right Channel 1st)             */
-
-/* SPORTx_RCR1 Masks                                                                                                                   */
-#define RSPEN          0x0001          /* Receive Enable                                                               */
-#define IRCLK          0x0002          /* Internal Receive Clock Select                                */
-#define DTYPE_NORM     0x0004          /* Data Format Normal                                                   */
-#define DTYPE_ULAW     0x0008          /* Compand Using u-Law                                                  */
-#define DTYPE_ALAW     0x000C          /* Compand Using A-Law                                                  */
-#define RLSBIT         0x0010          /* Receive Bit Order                                                    */
-#define IRFS           0x0200          /* Internal Receive Frame Sync Select                   */
-#define RFSR           0x0400          /* Receive Frame Sync Required Select                   */
-#define LRFS           0x1000          /* Low Receive Frame Sync Select                                */
-#define LARFS          0x2000          /* Late Receive Frame Sync Select                               */
-#define RCKFE          0x4000          /* Clock Falling Edge Select                                    */
-
-/* SPORTx_RCR2 Masks                                                                                                                   */
-#define SLEN(x)                ((x)&0x1F)      /* SPORT RX Word Length (2 - 31)                                */
-#define RXSE           0x0100          /* RX Secondary Enable                                                  */
-#define RSFSE          0x0200          /* RX Stereo Frame Sync Enable                                  */
-#define RRFST          0x0400          /* Right-First Data Order                                               */
-
-/* SPORTx_STAT Masks                                                                                                                   */
-#define RXNE           0x0001          /* Receive FIFO Not Empty Status                                */
-#define RUVF           0x0002          /* Sticky Receive Underflow Status                              */
-#define ROVF           0x0004          /* Sticky Receive Overflow Status                               */
-#define TXF                    0x0008          /* Transmit FIFO Full Status                                    */
-#define TUVF           0x0010          /* Sticky Transmit Underflow Status                             */
-#define TOVF           0x0020          /* Sticky Transmit Overflow Status                              */
-#define TXHRE          0x0040          /* Transmit Hold Register Empty                                 */
-
-/* SPORTx_MCMC1 Macros                                                                                                                 */
-#define SP_WOFF(x)     ((x) & 0x3FF)   /* Multichannel Window Offset Field                     */
-
-/* Only use WSIZE Macro With Logic OR While Setting Lower Order Bits                                           */
-#define SP_WSIZE(x)    (((((x)>>0x3)-1)&0xF) << 0xC)   /* Multichannel Window Size = (x/8)-1   */
-
-/* SPORTx_MCMC2 Masks                                                                                                                  */
-#define REC_BYPASS     0x0000          /* Bypass Mode (No Clock Recovery)                              */
-#define REC_2FROM4     0x0002          /* Recover 2 MHz Clock from 4 MHz Clock                 */
-#define REC_8FROM16    0x0003          /* Recover 8 MHz Clock from 16 MHz Clock                */
-#define MCDTXPE                0x0004          /* Multichannel DMA Transmit Packing                    */
-#define MCDRXPE                0x0008          /* Multichannel DMA Receive Packing                             */
-#define MCMEN          0x0010          /* Multichannel Frame Mode Enable                               */
-#define FSDR           0x0080          /* Multichannel Frame Sync to Data Relationship */
-#define MFD_0          0x0000          /* Multichannel Frame Delay = 0                                 */
-#define MFD_1          0x1000          /* Multichannel Frame Delay = 1                                 */
-#define MFD_2          0x2000          /* Multichannel Frame Delay = 2                                 */
-#define MFD_3          0x3000          /* Multichannel Frame Delay = 3                                 */
-#define MFD_4          0x4000          /* Multichannel Frame Delay = 4                                 */
-#define MFD_5          0x5000          /* Multichannel Frame Delay = 5                                 */
-#define MFD_6          0x6000          /* Multichannel Frame Delay = 6                                 */
-#define MFD_7          0x7000          /* Multichannel Frame Delay = 7                                 */
-#define MFD_8          0x8000          /* Multichannel Frame Delay = 8                                 */
-#define MFD_9          0x9000          /* Multichannel Frame Delay = 9                                 */
-#define MFD_10         0xA000          /* Multichannel Frame Delay = 10                                */
-#define MFD_11         0xB000          /* Multichannel Frame Delay = 11                                */
-#define MFD_12         0xC000          /* Multichannel Frame Delay = 12                                */
-#define MFD_13         0xD000          /* Multichannel Frame Delay = 13                                */
-#define MFD_14         0xE000          /* Multichannel Frame Delay = 14                                */
-#define MFD_15         0xF000          /* Multichannel Frame Delay = 15                                */
-
-
 /* *********************  ASYNCHRONOUS MEMORY CONTROLLER MASKS  *************************/
 /* EBIU_AMGCTL Masks                                                                                                                                   */
 #define AMCKEN                 0x0001          /* Enable CLKOUT                                                                        */
index f392af641657d78f5be0294cc162c4c294c4f5d2..645ba5c8077b108672818de20b143054a386aec3 100644 (file)
@@ -145,7 +145,6 @@ static struct mtd_partition partition_info[] = {
 };
 
 static struct bf5xx_nand_platform bf5xx_nand_platform = {
-       .page_size = NFC_PG_SIZE_256,
        .data_width = NFC_NWIDTH_8,
        .partitions = partition_info,
        .nr_partitions = ARRAY_SIZE(partition_info),
index 606eb36b9d6ea2ce31d9d9082d0c8de9ac5512d4..c975fe88eba387738ee1fb2dd838371d69f2a5c0 100644 (file)
@@ -149,7 +149,6 @@ static struct mtd_partition partition_info[] = {
 };
 
 static struct bf5xx_nand_platform bf5xx_nand_platform = {
-       .page_size = NFC_PG_SIZE_256,
        .data_width = NFC_NWIDTH_8,
        .partitions = partition_info,
        .nr_partitions = ARRAY_SIZE(partition_info),
index a05c967a24cfc22046fde2709da087a6c30bfef1..87b41e994ba31d24cce7fdb2528d254a9834e8f6 100644 (file)
@@ -234,7 +234,6 @@ static struct mtd_partition partition_info[] = {
 };
 
 static struct bf5xx_nand_platform bf5xx_nand_platform = {
-       .page_size = NFC_PG_SIZE_256,
        .data_width = NFC_NWIDTH_8,
        .partitions = partition_info,
        .nr_partitions = ARRAY_SIZE(partition_info),
index 5f97f01fcda61cf14d91f011606a2a4de57563c6..3e000756aacd0bcf3eb5ffd0c3ec78f75f55d857 100644 (file)
 #define PH14   0x4000
 #define PH15   0x8000
 
-
-/* *******************  SERIAL PORT MASKS  **************************************/
-/* SPORTx_TCR1 Masks                                                                                                                   */
-#define TSPEN          0x0001          /* Transmit Enable                                                              */
-#define ITCLK          0x0002          /* Internal Transmit Clock Select                               */
-#define DTYPE_NORM     0x0004          /* Data Format Normal                                                   */
-#define DTYPE_ULAW     0x0008          /* Compand Using u-Law                                                  */
-#define DTYPE_ALAW     0x000C          /* Compand Using A-Law                                                  */
-#define TLSBIT         0x0010          /* Transmit Bit Order                                                   */
-#define ITFS           0x0200          /* Internal Transmit Frame Sync Select                  */
-#define TFSR           0x0400          /* Transmit Frame Sync Required Select                  */
-#define DITFS          0x0800          /* Data-Independent Transmit Frame Sync Select  */
-#define LTFS           0x1000          /* Low Transmit Frame Sync Select                               */
-#define LATFS          0x2000          /* Late Transmit Frame Sync Select                              */
-#define TCKFE          0x4000          /* Clock Falling Edge Select                                    */
-
-/* SPORTx_TCR2 Masks and Macro                                                                                                 */
-#define SLEN(x)                ((x)&0x1F)      /* SPORT TX Word Length (2 - 31)                                */
-#define TXSE           0x0100          /* TX Secondary Enable                                                  */
-#define TSFSE          0x0200          /* Transmit Stereo Frame Sync Enable                    */
-#define TRFST          0x0400          /* Left/Right Order (1 = Right Channel 1st)             */
-
-/* SPORTx_RCR1 Masks                                                                                                                   */
-#define RSPEN          0x0001          /* Receive Enable                                                               */
-#define IRCLK          0x0002          /* Internal Receive Clock Select                                */
-#define DTYPE_NORM     0x0004          /* Data Format Normal                                                   */
-#define DTYPE_ULAW     0x0008          /* Compand Using u-Law                                                  */
-#define DTYPE_ALAW     0x000C          /* Compand Using A-Law                                                  */
-#define RLSBIT         0x0010          /* Receive Bit Order                                                    */
-#define IRFS           0x0200          /* Internal Receive Frame Sync Select                   */
-#define RFSR           0x0400          /* Receive Frame Sync Required Select                   */
-#define LRFS           0x1000          /* Low Receive Frame Sync Select                                */
-#define LARFS          0x2000          /* Late Receive Frame Sync Select                               */
-#define RCKFE          0x4000          /* Clock Falling Edge Select                                    */
-
-/* SPORTx_RCR2 Masks                                                                                                                   */
-#define SLEN(x)                ((x)&0x1F)      /* SPORT RX Word Length (2 - 31)                                */
-#define RXSE           0x0100          /* RX Secondary Enable                                                  */
-#define RSFSE          0x0200          /* RX Stereo Frame Sync Enable                                  */
-#define RRFST          0x0400          /* Right-First Data Order                                               */
-
-/* SPORTx_STAT Masks                                                                                                                   */
-#define RXNE           0x0001          /* Receive FIFO Not Empty Status                                */
-#define RUVF           0x0002          /* Sticky Receive Underflow Status                              */
-#define ROVF           0x0004          /* Sticky Receive Overflow Status                               */
-#define TXF                    0x0008          /* Transmit FIFO Full Status                                    */
-#define TUVF           0x0010          /* Sticky Transmit Underflow Status                             */
-#define TOVF           0x0020          /* Sticky Transmit Overflow Status                              */
-#define TXHRE          0x0040          /* Transmit Hold Register Empty                                 */
-
-/* SPORTx_MCMC1 Macros                                                                                                                 */
-#define SP_WOFF(x)     ((x) & 0x3FF)   /* Multichannel Window Offset Field                     */
-
-/* Only use WSIZE Macro With Logic OR While Setting Lower Order Bits                                           */
-#define SP_WSIZE(x)    (((((x)>>0x3)-1)&0xF) << 0xC)   /* Multichannel Window Size = (x/8)-1   */
-
-/* SPORTx_MCMC2 Masks                                                                                                                  */
-#define REC_BYPASS     0x0000          /* Bypass Mode (No Clock Recovery)                              */
-#define REC_2FROM4     0x0002          /* Recover 2 MHz Clock from 4 MHz Clock                 */
-#define REC_8FROM16    0x0003          /* Recover 8 MHz Clock from 16 MHz Clock                */
-#define MCDTXPE                0x0004          /* Multichannel DMA Transmit Packing                    */
-#define MCDRXPE                0x0008          /* Multichannel DMA Receive Packing                             */
-#define MCMEN          0x0010          /* Multichannel Frame Mode Enable                               */
-#define FSDR           0x0080          /* Multichannel Frame Sync to Data Relationship */
-#define MFD_0          0x0000          /* Multichannel Frame Delay = 0                                 */
-#define MFD_1          0x1000          /* Multichannel Frame Delay = 1                                 */
-#define MFD_2          0x2000          /* Multichannel Frame Delay = 2                                 */
-#define MFD_3          0x3000          /* Multichannel Frame Delay = 3                                 */
-#define MFD_4          0x4000          /* Multichannel Frame Delay = 4                                 */
-#define MFD_5          0x5000          /* Multichannel Frame Delay = 5                                 */
-#define MFD_6          0x6000          /* Multichannel Frame Delay = 6                                 */
-#define MFD_7          0x7000          /* Multichannel Frame Delay = 7                                 */
-#define MFD_8          0x8000          /* Multichannel Frame Delay = 8                                 */
-#define MFD_9          0x9000          /* Multichannel Frame Delay = 9                                 */
-#define MFD_10         0xA000          /* Multichannel Frame Delay = 10                                */
-#define MFD_11         0xB000          /* Multichannel Frame Delay = 11                                */
-#define MFD_12         0xC000          /* Multichannel Frame Delay = 12                                */
-#define MFD_13         0xD000          /* Multichannel Frame Delay = 13                                */
-#define MFD_14         0xE000          /* Multichannel Frame Delay = 14                                */
-#define MFD_15         0xF000          /* Multichannel Frame Delay = 15                                */
-
-
 /* *********************  ASYNCHRONOUS MEMORY CONTROLLER MASKS  *************************/
 /* EBIU_AMGCTL Masks                                                                                                                                   */
 #define AMCKEN                 0x0001          /* Enable CLKOUT                                                                        */
index e9ff491c095380396706add9fd80ed0789c7b3d4..04acf1ed10f96ccd1cad3d530b7b98610d11c0fe 100644 (file)
 #define IREN_P 0x01
 #define UCEN_P 0x00
 
-/* **********  SERIAL PORT MASKS  ********************** */
-
-/* SPORTx_TCR1 Masks */
-#define TSPEN    0x0001                /* TX enable  */
-#define ITCLK    0x0002                /* Internal TX Clock Select  */
-#define TDTYPE   0x000C                /* TX Data Formatting Select */
-#define DTYPE_NORM     0x0000          /* Data Format Normal                                                   */
-#define DTYPE_ULAW     0x0008          /* Compand Using u-Law                                                  */
-#define DTYPE_ALAW     0x000C          /* Compand Using A-Law                                                  */
-#define TLSBIT   0x0010                /* TX Bit Order */
-#define ITFS     0x0200                /* Internal TX Frame Sync Select  */
-#define TFSR     0x0400                /* TX Frame Sync Required Select  */
-#define DITFS    0x0800                /* Data Independent TX Frame Sync Select  */
-#define LTFS     0x1000                /* Low TX Frame Sync Select  */
-#define LATFS    0x2000                /* Late TX Frame Sync Select  */
-#define TCKFE    0x4000                /* TX Clock Falling Edge Select  */
-
-/* SPORTx_TCR2 Masks */
-#if defined(__ADSPBF531__) || defined(__ADSPBF532__) || \
-    defined(__ADSPBF533__)
-# define SLEN      0x001F      /*TX Word Length  */
-#else
-# define SLEN(x)               ((x)&0x1F)      /* SPORT TX Word Length (2 - 31)                                */
-#endif
-#define TXSE        0x0100     /*TX Secondary Enable */
-#define TSFSE       0x0200     /*TX Stereo Frame Sync Enable */
-#define TRFST       0x0400     /*TX Right-First Data Order  */
-
-/* SPORTx_RCR1 Masks */
-#define RSPEN    0x0001                /* RX enable  */
-#define IRCLK    0x0002                /* Internal RX Clock Select  */
-#define RDTYPE   0x000C                /* RX Data Formatting Select */
-#define DTYPE_NORM     0x0000          /* no companding                                                        */
-#define DTYPE_ULAW     0x0008          /* Compand Using u-Law                                                  */
-#define DTYPE_ALAW     0x000C          /* Compand Using A-Law                                                  */
-#define RLSBIT   0x0010                /* RX Bit Order */
-#define IRFS     0x0200                /* Internal RX Frame Sync Select  */
-#define RFSR     0x0400                /* RX Frame Sync Required Select  */
-#define LRFS     0x1000                /* Low RX Frame Sync Select  */
-#define LARFS    0x2000                /* Late RX Frame Sync Select  */
-#define RCKFE    0x4000                /* RX Clock Falling Edge Select  */
-
-/* SPORTx_RCR2 Masks */
-/* SLEN defined above */
-#define RXSE        0x0100     /*RX Secondary Enable */
-#define RSFSE       0x0200     /*RX Stereo Frame Sync Enable */
-#define RRFST       0x0400     /*Right-First Data Order  */
-
-/*SPORTx_STAT Masks */
-#define RXNE           0x0001  /*RX FIFO Not Empty Status */
-#define RUVF           0x0002  /*RX Underflow Status */
-#define ROVF           0x0004  /*RX Overflow Status */
-#define TXF            0x0008  /*TX FIFO Full Status */
-#define TUVF           0x0010  /*TX Underflow Status */
-#define TOVF           0x0020  /*TX Overflow Status */
-#define TXHRE          0x0040  /*TX Hold Register Empty */
-
-/*SPORTx_MCMC1 Masks */
-#define SP_WSIZE               0x0000F000      /*Multichannel Window Size Field */
-#define SP_WOFF                0x000003FF      /*Multichannel Window Offset Field */
-/* SPORTx_MCMC1 Macros                                                                                                                 */
-#define SET_SP_WOFF(x) ((x) & 0x3FF)   /* Multichannel Window Offset Field                     */
-/* Only use SET_WSIZE Macro With Logic OR While Setting Lower Order Bits                                               */
-#define SET_SP_WSIZE(x)        (((((x)>>0x3)-1)&0xF) << 0xC)   /* Multichannel Window Size = (x/8)-1   */
-
-/*SPORTx_MCMC2 Masks */
-#define MCCRM          0x00000003      /*Multichannel Clock Recovery Mode */
-#define REC_BYPASS     0x0000          /* Bypass Mode (No Clock Recovery)                              */
-#define REC_2FROM4     0x0002          /* Recover 2 MHz Clock from 4 MHz Clock                 */
-#define REC_8FROM16    0x0003          /* Recover 8 MHz Clock from 16 MHz Clock                */
-#define MCDTXPE                0x00000004      /*Multichannel DMA Transmit Packing */
-#define MCDRXPE                0x00000008      /*Multichannel DMA Receive Packing */
-#define MCMEN          0x00000010      /*Multichannel Frame Mode Enable */
-#define FSDR           0x00000080      /*Multichannel Frame Sync to Data Relationship */
-#define MFD            0x0000F000      /*Multichannel Frame Delay    */
-#define MFD_0          0x0000          /* Multichannel Frame Delay = 0                                 */
-#define MFD_1          0x1000          /* Multichannel Frame Delay = 1                                 */
-#define MFD_2          0x2000          /* Multichannel Frame Delay = 2                                 */
-#define MFD_3          0x3000          /* Multichannel Frame Delay = 3                                 */
-#define MFD_4          0x4000          /* Multichannel Frame Delay = 4                                 */
-#define MFD_5          0x5000          /* Multichannel Frame Delay = 5                                 */
-#define MFD_6          0x6000          /* Multichannel Frame Delay = 6                                 */
-#define MFD_7          0x7000          /* Multichannel Frame Delay = 7                                 */
-#define MFD_8          0x8000          /* Multichannel Frame Delay = 8                                 */
-#define MFD_9          0x9000          /* Multichannel Frame Delay = 9                                 */
-#define MFD_10         0xA000          /* Multichannel Frame Delay = 10                                */
-#define MFD_11         0xB000          /* Multichannel Frame Delay = 11                                */
-#define MFD_12         0xC000          /* Multichannel Frame Delay = 12                                */
-#define MFD_13         0xD000          /* Multichannel Frame Delay = 13                                */
-#define MFD_14         0xE000          /* Multichannel Frame Delay = 14                                */
-#define MFD_15         0xF000          /* Multichannel Frame Delay = 15                                */
-
 /*  *********  PARALLEL PERIPHERAL INTERFACE (PPI) MASKS ****************   */
 
 /*  PPI_CONTROL Masks         */
index aad61b8873731f5ed6877e189ab61bf4e2a85dfa..6f56907a18c027c903491f52e1c4bcfeaca36f72 100644 (file)
 #define PH14   0x4000
 #define PH15   0x8000
 
-/* *******************  SERIAL PORT MASKS  **************************************/
-/* SPORTx_TCR1 Masks                                                                                                                   */
-#define TSPEN          0x0001  /* Transmit Enable                                                              */
-#define ITCLK          0x0002  /* Internal Transmit Clock Select                               */
-#define DTYPE_NORM     0x0004  /* Data Format Normal                                                   */
-#define DTYPE_ULAW     0x0008  /* Compand Using u-Law                                                  */
-#define DTYPE_ALAW     0x000C  /* Compand Using A-Law                                                  */
-#define TLSBIT         0x0010  /* Transmit Bit Order                                                   */
-#define ITFS           0x0200  /* Internal Transmit Frame Sync Select                  */
-#define TFSR           0x0400  /* Transmit Frame Sync Required Select                  */
-#define DITFS          0x0800  /* Data-Independent Transmit Frame Sync Select  */
-#define LTFS           0x1000  /* Low Transmit Frame Sync Select                               */
-#define LATFS          0x2000  /* Late Transmit Frame Sync Select                              */
-#define TCKFE          0x4000  /* Clock Falling Edge Select                                    */
-
-/* SPORTx_TCR2 Masks and Macro                                                                                                 */
-#define SLEN(x)                ((x)&0x1F)      /* SPORT TX Word Length (2 - 31)                                */
-#define TXSE           0x0100  /* TX Secondary Enable                                                  */
-#define TSFSE          0x0200  /* Transmit Stereo Frame Sync Enable                    */
-#define TRFST          0x0400  /* Left/Right Order (1 = Right Channel 1st)             */
-
-/* SPORTx_RCR1 Masks                                                                                                                   */
-#define RSPEN          0x0001  /* Receive Enable                                                               */
-#define IRCLK          0x0002  /* Internal Receive Clock Select                                */
-#define DTYPE_NORM     0x0004  /* Data Format Normal                                                   */
-#define DTYPE_ULAW     0x0008  /* Compand Using u-Law                                                  */
-#define DTYPE_ALAW     0x000C  /* Compand Using A-Law                                                  */
-#define RLSBIT         0x0010  /* Receive Bit Order                                                    */
-#define IRFS           0x0200  /* Internal Receive Frame Sync Select                   */
-#define RFSR           0x0400  /* Receive Frame Sync Required Select                   */
-#define LRFS           0x1000  /* Low Receive Frame Sync Select                                */
-#define LARFS          0x2000  /* Late Receive Frame Sync Select                               */
-#define RCKFE          0x4000  /* Clock Falling Edge Select                                    */
-
-/* SPORTx_RCR2 Masks                                                                                                                   */
-#define SLEN(x)                ((x)&0x1F)      /* SPORT RX Word Length (2 - 31)                                */
-#define RXSE           0x0100  /* RX Secondary Enable                                                  */
-#define RSFSE          0x0200  /* RX Stereo Frame Sync Enable                                  */
-#define RRFST          0x0400  /* Right-First Data Order                                               */
-
-/* SPORTx_STAT Masks                                                                                                                   */
-#define RXNE           0x0001  /* Receive FIFO Not Empty Status                                */
-#define RUVF           0x0002  /* Sticky Receive Underflow Status                              */
-#define ROVF           0x0004  /* Sticky Receive Overflow Status                               */
-#define TXF                    0x0008  /* Transmit FIFO Full Status                                    */
-#define TUVF           0x0010  /* Sticky Transmit Underflow Status                             */
-#define TOVF           0x0020  /* Sticky Transmit Overflow Status                              */
-#define TXHRE          0x0040  /* Transmit Hold Register Empty                                 */
-
-/* SPORTx_MCMC1 Macros                                                                                                                 */
-#define SP_WOFF(x)             ((x) & 0x3FF)   /* Multichannel Window Offset Field                     */
-
-/* Only use WSIZE Macro With Logic OR While Setting Lower Order Bits                                           */
-#define SP_WSIZE(x)    (((((x)>>0x3)-1)&0xF) << 0xC)   /* Multichannel Window Size = (x/8)-1   */
-
-/* SPORTx_MCMC2 Masks                                                                                                                  */
-#define REC_BYPASS     0x0000  /* Bypass Mode (No Clock Recovery)                              */
-#define REC_2FROM4     0x0002  /* Recover 2 MHz Clock from 4 MHz Clock                 */
-#define REC_8FROM16    0x0003  /* Recover 8 MHz Clock from 16 MHz Clock                */
-#define MCDTXPE                0x0004  /* Multichannel DMA Transmit Packing                    */
-#define MCDRXPE                0x0008  /* Multichannel DMA Receive Packing                             */
-#define MCMEN          0x0010  /* Multichannel Frame Mode Enable                               */
-#define FSDR           0x0080  /* Multichannel Frame Sync to Data Relationship */
-#define MFD_0          0x0000  /* Multichannel Frame Delay = 0                                 */
-#define MFD_1          0x1000  /* Multichannel Frame Delay = 1                                 */
-#define MFD_2          0x2000  /* Multichannel Frame Delay = 2                                 */
-#define MFD_3          0x3000  /* Multichannel Frame Delay = 3                                 */
-#define MFD_4          0x4000  /* Multichannel Frame Delay = 4                                 */
-#define MFD_5          0x5000  /* Multichannel Frame Delay = 5                                 */
-#define MFD_6          0x6000  /* Multichannel Frame Delay = 6                                 */
-#define MFD_7          0x7000  /* Multichannel Frame Delay = 7                                 */
-#define MFD_8          0x8000  /* Multichannel Frame Delay = 8                                 */
-#define MFD_9          0x9000  /* Multichannel Frame Delay = 9                                 */
-#define MFD_10         0xA000  /* Multichannel Frame Delay = 10                                */
-#define MFD_11         0xB000  /* Multichannel Frame Delay = 11                                */
-#define MFD_12         0xC000  /* Multichannel Frame Delay = 12                                */
-#define MFD_13         0xD000  /* Multichannel Frame Delay = 13                                */
-#define MFD_14         0xE000  /* Multichannel Frame Delay = 14                                */
-#define MFD_15         0xF000  /* Multichannel Frame Delay = 15                                */
-
 /* *********************  ASYNCHRONOUS MEMORY CONTROLLER MASKS  *************************/
 /* EBIU_AMGCTL Masks                                                                                                                                   */
 #define AMCKEN                 0x0001  /* Enable CLKOUT                                                                        */
index b674a1c4aef1efdbe30eb6c1cb5ca76d2bd260f6..fe43062b4975a5548f259a487bc83a8330842797 100644 (file)
 #define        UCEN_P          0x00
 
 
-/* **********  SERIAL PORT MASKS  ********************** */
-/* SPORTx_TCR1 Masks */
-#define        TSPEN           0x0001  /* TX enable  */
-#define        ITCLK           0x0002  /* Internal TX Clock Select  */
-#define        TDTYPE          0x000C  /* TX Data Formatting Select */
-#define        DTYPE_NORM      0x0000          /* Data Format Normal */
-#define        DTYPE_ULAW      0x0008          /* Compand Using u-Law */
-#define        DTYPE_ALAW      0x000C          /* Compand Using A-Law */
-#define        TLSBIT          0x0010  /* TX Bit Order */
-#define        ITFS            0x0200  /* Internal TX Frame Sync Select  */
-#define        TFSR            0x0400  /* TX Frame Sync Required Select  */
-#define        DITFS           0x0800  /* Data Independent TX Frame Sync Select  */
-#define        LTFS            0x1000  /* Low TX Frame Sync Select  */
-#define        LATFS           0x2000  /* Late TX Frame Sync Select  */
-#define        TCKFE           0x4000  /* TX Clock Falling Edge Select */
-/* SPORTx_RCR1 Deprecated Masks                                                                 */
-#define        TULAW           DTYPE_ULAW              /* Compand Using u-Law */
-#define        TALAW           DTYPE_ALAW                      /* Compand Using A-Law */
-
-/* SPORTx_TCR2 Masks */
-#ifdef _MISRA_RULES
-#define        SLEN(x)         ((x)&0x1Fu)     /* SPORT TX Word Length (2 - 31) */
-#else
-#define        SLEN(x)         ((x)&0x1F)      /* SPORT TX Word Length (2 - 31) */
-#endif /* _MISRA_RULES */
-#define        TXSE            0x0100  /*TX Secondary Enable */
-#define        TSFSE           0x0200  /*TX Stereo Frame Sync Enable */
-#define        TRFST           0x0400  /*TX Right-First Data Order  */
-
-/* SPORTx_RCR1 Masks */
-#define        RSPEN           0x0001  /* RX enable  */
-#define        IRCLK           0x0002  /* Internal RX Clock Select  */
-#define        RDTYPE          0x000C  /* RX Data Formatting Select */
-#define        DTYPE_NORM      0x0000          /* no companding */
-#define        DTYPE_ULAW      0x0008          /* Compand Using u-Law */
-#define        DTYPE_ALAW      0x000C          /* Compand Using A-Law */
-#define        RLSBIT          0x0010  /* RX Bit Order */
-#define        IRFS            0x0200  /* Internal RX Frame Sync Select  */
-#define        RFSR            0x0400  /* RX Frame Sync Required Select  */
-#define        LRFS            0x1000  /* Low RX Frame Sync Select  */
-#define        LARFS           0x2000  /* Late RX Frame Sync Select  */
-#define        RCKFE           0x4000  /* RX Clock Falling Edge Select */
-/* SPORTx_RCR1 Deprecated Masks                                                                 */
-#define        RULAW           DTYPE_ULAW              /* Compand Using u-Law */
-#define        RALAW           DTYPE_ALAW                      /* Compand Using A-Law */
-
-/* SPORTx_RCR2 Masks */
-#ifdef _MISRA_RULES
-#define        SLEN(x)         ((x)&0x1Fu)     /* SPORT RX Word Length (2 - 31) */
-#else
-#define        SLEN(x)         ((x)&0x1F)      /* SPORT RX Word Length (2 - 31) */
-#endif /* _MISRA_RULES */
-#define        RXSE            0x0100  /*RX Secondary Enable */
-#define        RSFSE           0x0200  /*RX Stereo Frame Sync Enable */
-#define        RRFST           0x0400  /*Right-First Data Order  */
-
-/*SPORTx_STAT Masks */
-#define        RXNE            0x0001          /*RX FIFO Not Empty Status */
-#define        RUVF            0x0002          /*RX Underflow Status */
-#define        ROVF            0x0004          /*RX Overflow Status */
-#define        TXF                     0x0008          /*TX FIFO Full Status */
-#define        TUVF            0x0010          /*TX Underflow Status */
-#define        TOVF            0x0020          /*TX Overflow Status */
-#define        TXHRE           0x0040          /*TX Hold Register Empty */
-
-/*SPORTx_MCMC1 Masks */
-#define        WOFF                    0x000003FF      /*Multichannel Window Offset Field */
-/* SPORTx_MCMC1        Macros                                                           */
-#ifdef _MISRA_RULES
-#define        SET_WOFF(x)             ((x) & 0x3FFu)  /* Multichannel Window Offset Field */
-/* Only        use SET_WSIZE Macro With Logic OR While Setting Lower Order Bits */
-#define        SET_WSIZE(x)    (((((x)>>0x3)-1u)&0xFu) << 0xC) /* Multichannel Window Size = (x/8)-1 */
-#else
-#define        SET_WOFF(x)             ((x) & 0x3FF)   /* Multichannel Window Offset Field */
-/* Only        use SET_WSIZE Macro With Logic OR While Setting Lower Order Bits */
-#define        SET_WSIZE(x)    (((((x)>>0x3)-1)&0xF) << 0xC)   /* Multichannel Window Size = (x/8)-1 */
-#endif /* _MISRA_RULES */
-
-
-/*SPORTx_MCMC2 Masks */
-#define        MCCRM           0x0003  /*Multichannel Clock Recovery Mode */
-#define        REC_BYPASS      0x0000          /* Bypass Mode (No Clock Recovery) */
-#define        REC_2FROM4      0x0002          /* Recover 2 MHz Clock from 4 MHz Clock */
-#define        REC_8FROM16     0x0003          /* Recover 8 MHz Clock from 16 MHz Clock */
-#define        MCDTXPE         0x0004  /*Multichannel DMA Transmit Packing */
-#define        MCDRXPE         0x0008  /*Multichannel DMA Receive Packing */
-#define        MCMEN           0x0010  /*Multichannel Frame Mode Enable */
-#define        FSDR            0x0080  /*Multichannel Frame Sync to Data Relationship */
-#define        MFD                     0xF000  /*Multichannel Frame Delay    */
-#define        MFD_0           0x0000          /* Multichannel Frame Delay = 0 */
-#define        MFD_1           0x1000          /* Multichannel Frame Delay = 1 */
-#define        MFD_2           0x2000          /* Multichannel Frame Delay = 2 */
-#define        MFD_3           0x3000          /* Multichannel Frame Delay = 3 */
-#define        MFD_4           0x4000          /* Multichannel Frame Delay = 4 */
-#define        MFD_5           0x5000          /* Multichannel Frame Delay = 5 */
-#define        MFD_6           0x6000          /* Multichannel Frame Delay = 6 */
-#define        MFD_7           0x7000          /* Multichannel Frame Delay = 7 */
-#define        MFD_8           0x8000          /* Multichannel Frame Delay = 8 */
-#define        MFD_9           0x9000          /* Multichannel Frame Delay = 9 */
-#define        MFD_10          0xA000          /* Multichannel Frame Delay = 10 */
-#define        MFD_11          0xB000          /* Multichannel Frame Delay = 11 */
-#define        MFD_12          0xC000          /* Multichannel Frame Delay = 12 */
-#define        MFD_13          0xD000          /* Multichannel Frame Delay = 13 */
-#define        MFD_14          0xE000          /* Multichannel Frame Delay = 14 */
-#define        MFD_15          0xF000          /* Multichannel Frame Delay = 15 */
-
-
 /*  *********  PARALLEL        PERIPHERAL INTERFACE (PPI) MASKS ****************   */
 /*  PPI_CONTROL        Masks         */
 #define        PORT_EN         0x0001  /* PPI Port Enable  */
index dbb6b1d83f6dc7a2d0dc1861f23d0856a1170bd0..0c38eec9ade11f77f7422dc4dcf42a29e6dc3be0 100644 (file)
@@ -706,7 +706,6 @@ static struct mtd_partition partition_info[] = {
 };
 
 static struct bf5xx_nand_platform bf5xx_nand_platform = {
-       .page_size = NFC_PG_SIZE_256,
        .data_width = NFC_NWIDTH_8,
        .partitions = partition_info,
        .nr_partitions = ARRAY_SIZE(partition_info),
index 6fcfb9187c357bcf1b69f5ae821fec5386f6dc6b..56682a36e42df0bf7be8825c7e4789d315a5de90 100644 (file)
@@ -849,7 +849,6 @@ static struct mtd_partition partition_info[] = {
 };
 
 static struct bf5xx_nand_platform bf5xx_nand_platform = {
-       .page_size = NFC_PG_SIZE_256,
        .data_width = NFC_NWIDTH_8,
        .partitions = partition_info,
        .nr_partitions = ARRAY_SIZE(partition_info),
index 95ff44601fd1e7401ac4955cfe7d0348cd8cb77b..7866197f5485f063229f7e5909d9693251a28d89 100644 (file)
 
 #define                 RCVDATA16  0xffff     /* Receive FIFO 16-Bit Data */
 
-/* Bit masks for SPORTx_TCR1 */
-
-#define                     TCKFE  0x4000     /* Clock Falling Edge Select */
-#define                     LATFS  0x2000     /* Late Transmit Frame Sync */
-#define                      LTFS  0x1000     /* Low Transmit Frame Sync Select */
-#define                     DITFS  0x800      /* Data-Independent Transmit Frame Sync Select */
-#define                      TFSR  0x400      /* Transmit Frame Sync Required Select */
-#define                      ITFS  0x200      /* Internal Transmit Frame Sync Select */
-#define                    TLSBIT  0x10       /* Transmit Bit Order */
-#define                    TDTYPE  0xc        /* Data Formatting Type Select */
-#define                     ITCLK  0x2        /* Internal Transmit Clock Select */
-#define                     TSPEN  0x1        /* Transmit Enable */
-
-/* Bit masks for SPORTx_TCR2 */
-
-#define                     TRFST  0x400      /* Left/Right Order */
-#define                     TSFSE  0x200      /* Transmit Stereo Frame Sync Enable */
-#define                      TXSE  0x100      /* TxSEC Enable */
-#define                    SLEN_T  0x1f       /* SPORT Word Length */
-
-/* Bit masks for SPORTx_RCR1 */
-
-#define                     RCKFE  0x4000     /* Clock Falling Edge Select */
-#define                     LARFS  0x2000     /* Late Receive Frame Sync */
-#define                      LRFS  0x1000     /* Low Receive Frame Sync Select */
-#define                      RFSR  0x400      /* Receive Frame Sync Required Select */
-#define                      IRFS  0x200      /* Internal Receive Frame Sync Select */
-#define                    RLSBIT  0x10       /* Receive Bit Order */
-#define                    RDTYPE  0xc        /* Data Formatting Type Select */
-#define                     IRCLK  0x2        /* Internal Receive Clock Select */
-#define                     RSPEN  0x1        /* Receive Enable */
-
-/* Bit masks for SPORTx_RCR2 */
-
-#define                     RRFST  0x400      /* Left/Right Order */
-#define                     RSFSE  0x200      /* Receive Stereo Frame Sync Enable */
-#define                      RXSE  0x100      /* RxSEC Enable */
-#define                    SLEN_R  0x1f       /* SPORT Word Length */
-
-/* Bit masks for SPORTx_STAT */
-
-#define                     TXHRE  0x40       /* Transmit Hold Register Empty */
-#define                      TOVF  0x20       /* Sticky Transmit Overflow Status */
-#define                      TUVF  0x10       /* Sticky Transmit Underflow Status */
-#define                       TXF  0x8        /* Transmit FIFO Full Status */
-#define                      ROVF  0x4        /* Sticky Receive Overflow Status */
-#define                      RUVF  0x2        /* Sticky Receive Underflow Status */
-#define                      RXNE  0x1        /* Receive FIFO Not Empty Status */
-
-/* Bit masks for SPORTx_MCMC1 */
-
-#define                  SP_WSIZE  0xf000     /* Window Size */
-#define                   SP_WOFF  0x3ff      /* Windows Offset */
-
-/* Bit masks for SPORTx_MCMC2 */
-
-#define                       MFD  0xf000     /* Multi channel Frame Delay */
-#define                      FSDR  0x80       /* Frame Sync to Data Relationship */
-#define                  MCMEN  0x10       /* Multi channel Frame Mode Enable */
-#define                   MCDRXPE  0x8        /* Multi channel DMA Receive Packing */
-#define                   MCDTXPE  0x4        /* Multi channel DMA Transmit Packing */
-#define                     MCCRM  0x3        /* 2X Clock Recovery Mode */
-
-/* Bit masks for SPORTx_CHNL */
-
-#define                  CUR_CHNL  0x3ff      /* Current Channel Indicator */
-
 /* Bit masks for UARTx_LCR */
 
 #if 0
index 4c8e36b7fb33752144d616fe7f406d1b968f8906..2674f0097576d138d2c4f4c1ba8940e103a9e393 100644 (file)
 #define IREN_P 0x01
 #define UCEN_P 0x00
 
-/* **********  SERIAL PORT MASKS  ********************** */
-
-/* SPORTx_TCR1 Masks */
-#define TSPEN    0x0001                /* TX enable  */
-#define ITCLK    0x0002                /* Internal TX Clock Select  */
-#define TDTYPE   0x000C                /* TX Data Formatting Select */
-#define TLSBIT   0x0010                /* TX Bit Order */
-#define ITFS     0x0200                /* Internal TX Frame Sync Select  */
-#define TFSR     0x0400                /* TX Frame Sync Required Select  */
-#define DITFS    0x0800                /* Data Independent TX Frame Sync Select  */
-#define LTFS     0x1000                /* Low TX Frame Sync Select  */
-#define LATFS    0x2000                /* Late TX Frame Sync Select  */
-#define TCKFE    0x4000                /* TX Clock Falling Edge Select  */
-
-/* SPORTx_TCR2 Masks */
-#define SLEN       0x001F      /*TX Word Length  */
-#define TXSE        0x0100     /*TX Secondary Enable */
-#define TSFSE       0x0200     /*TX Stereo Frame Sync Enable */
-#define TRFST       0x0400     /*TX Right-First Data Order  */
-
-/* SPORTx_RCR1 Masks */
-#define RSPEN    0x0001                /* RX enable  */
-#define IRCLK    0x0002                /* Internal RX Clock Select  */
-#define RDTYPE   0x000C                /* RX Data Formatting Select */
-#define RULAW    0x0008                /* u-Law enable  */
-#define RALAW    0x000C                /* A-Law enable  */
-#define RLSBIT   0x0010                /* RX Bit Order */
-#define IRFS     0x0200                /* Internal RX Frame Sync Select  */
-#define RFSR     0x0400                /* RX Frame Sync Required Select  */
-#define LRFS     0x1000                /* Low RX Frame Sync Select  */
-#define LARFS    0x2000                /* Late RX Frame Sync Select  */
-#define RCKFE    0x4000                /* RX Clock Falling Edge Select  */
-
-/* SPORTx_RCR2 Masks */
-#define SLEN       0x001F      /*RX Word Length  */
-#define RXSE        0x0100     /*RX Secondary Enable */
-#define RSFSE       0x0200     /*RX Stereo Frame Sync Enable */
-#define RRFST       0x0400     /*Right-First Data Order  */
-
-/*SPORTx_STAT Masks */
-#define RXNE           0x0001  /*RX FIFO Not Empty Status */
-#define RUVF           0x0002  /*RX Underflow Status */
-#define ROVF           0x0004  /*RX Overflow Status */
-#define TXF            0x0008  /*TX FIFO Full Status */
-#define TUVF           0x0010  /*TX Underflow Status */
-#define TOVF           0x0020  /*TX Overflow Status */
-#define TXHRE          0x0040  /*TX Hold Register Empty */
-
-/*SPORTx_MCMC1 Masks */
-#define SP_WSIZE               0x0000F000      /*Multichannel Window Size Field */
-#define SP_WOFF                0x000003FF      /*Multichannel Window Offset Field */
-
-/*SPORTx_MCMC2 Masks */
-#define MCCRM          0x00000003      /*Multichannel Clock Recovery Mode */
-#define MCDTXPE                0x00000004      /*Multichannel DMA Transmit Packing */
-#define MCDRXPE                0x00000008      /*Multichannel DMA Receive Packing */
-#define MCMEN          0x00000010      /*Multichannel Frame Mode Enable */
-#define FSDR           0x00000080      /*Multichannel Frame Sync to Data Relationship */
-#define MFD            0x0000F000      /*Multichannel Frame Delay    */
-
 /*  *********  PARALLEL PERIPHERAL INTERFACE (PPI) MASKS ****************   */
 
 /*  PPI_CONTROL Masks         */
index a5847f5d67c7325ae6c53996f02f0b7d348267c3..af1bffa21dc14dccdefca16268da5c737024dfa8 100644 (file)
@@ -1628,6 +1628,9 @@ ENTRY(_sys_call_table)
        .long _sys_rt_tgsigqueueinfo
        .long _sys_perf_event_open
        .long _sys_recvmmsg             /* 370 */
+       .long _sys_fanotify_init
+       .long _sys_fanotify_mark
+       .long _sys_prlimit64
 
        .rept NR_syscalls-(.-_sys_call_table)/4
        .long _sys_ni_syscall
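These three table entries correspond to the __NR_fanotify_init (371), __NR_fanotify_mark (372) and __NR_prlimit64 (373) numbers added in the Blackfin unistd.h hunk earlier, with __NR_syscall bumped to 374 so the .rept padding still fills the table out to NR_syscalls slots. As an illustrative user-space sketch (the fallback syscall number is the one introduced here; the RLIMIT_NOFILE value and the struct rlimit64 layout are assumed from the generic ABI), the new prlimit64 call can be exercised by number even before the C library grows a wrapper:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_prlimit64
#define __NR_prlimit64 373		/* number added for Blackfin in this commit */
#endif

int main(void)
{
	/* assumed layout of struct rlimit64: two 64-bit fields, rlim_cur then rlim_max */
	unsigned long long lim[2];
	long ret = syscall(__NR_prlimit64, 0 /* current process */,
			   7 /* RLIMIT_NOFILE in the generic ABI (assumption) */,
			   (void *)0 /* no new limit */, lim /* read old limit */);

	if (ret == 0)
		printf("RLIMIT_NOFILE: cur=%llu max=%llu\n", lim[0], lim[1]);
	return ret == 0 ? 0 : 1;
}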
index e936804b7508758112b853bfca42f9992ebadb44..984221abb66d3d470483f0f14a7271ef944dce21 100644 (file)
@@ -18,7 +18,8 @@
 
 static __inline__ int atomic_add_return(int i, atomic_t *v)
 {
-       int ret,flags;
+       unsigned long flags;
+       int ret;
        local_irq_save(flags);
        ret = v->counter += i;
        local_irq_restore(flags);
@@ -30,7 +31,8 @@ static __inline__ int atomic_add_return(int i, atomic_t *v)
 
 static __inline__ int atomic_sub_return(int i, atomic_t *v)
 {
-       int ret,flags;
+       unsigned long flags;
+       int ret;
        local_irq_save(flags);
        ret = v->counter -= i;
        local_irq_restore(flags);
@@ -42,7 +44,8 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
 
 static __inline__ int atomic_inc_return(atomic_t *v)
 {
-       int ret,flags;
+       unsigned long flags;
+       int ret;
        local_irq_save(flags);
        v->counter++;
        ret = v->counter;
@@ -64,7 +67,8 @@ static __inline__ int atomic_inc_return(atomic_t *v)
 
 static __inline__ int atomic_dec_return(atomic_t *v)
 {
-       int ret,flags;
+       unsigned long flags;
+       int ret;
        local_irq_save(flags);
        --v->counter;
        ret = v->counter;
@@ -76,7 +80,8 @@ static __inline__ int atomic_dec_return(atomic_t *v)
 
 static __inline__ int atomic_dec_and_test(atomic_t *v)
 {
-       int ret,flags;
+       unsigned long flags;
+       int ret;
        local_irq_save(flags);
        --v->counter;
        ret = v->counter;
index d98d97685f068da986c8639bd29953d94a8ceb8d..16bf1560ff680c87fad71fa4f06af4a0b2622213 100644 (file)
@@ -3,6 +3,8 @@
 
 #include <linux/linkage.h>
 
+struct pt_regs;
+
 /*
  * switch_to(n) should switch tasks to task ptr, first checking that
  * ptr isn't the current task, in which case it does nothing.  This
@@ -155,6 +157,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
 
 #define arch_align_stack(x) (x)
 
-void die(char *str, struct pt_regs *fp, unsigned long err);
+extern void die(const char *str, struct pt_regs *fp, unsigned long err);
 
 #endif /* _H8300_SYSTEM_H */
index dc1ac0243b78d0532640c55517753cdbdeacc8f0..aaf5e5a48f93167c2513a00e56155b166ee631b8 100644 (file)
@@ -56,8 +56,8 @@ int kernel_execve(const char *filename,
                  const char *const envp[])
 {
        register long res __asm__("er0");
-       register char *const *_c __asm__("er3") = envp;
-       register char *const *_b __asm__("er2") = argv;
+       register const char *const *_c __asm__("er3") = envp;
+       register const char *const *_b __asm__("er2") = argv;
        register const char * _a __asm__("er1") = filename;
        __asm__ __volatile__ ("mov.l %1,er0\n\t"
                        "trapa  #0\n\t"
index 3c0b66bc669eca40ced007cccb5d2ce5d6695904..dfa05bd908b6b22139c388ebbe983721c39b6100 100644 (file)
@@ -96,7 +96,7 @@ static void dump(struct pt_regs *fp)
        printk("\n\n");
 }
 
-void die(char *str, struct pt_regs *fp, unsigned long err)
+void die(const char *str, struct pt_regs *fp, unsigned long err)
 {
        static int diecount;
 
index 2bef5261d96dce13c983758a68f0e280041bdaa5..1e8d71ad93ef6ecd57bbfcc918067e6998b443d7 100644 (file)
@@ -149,7 +149,7 @@ static  void receive_chars(struct tty_struct *tty)
                                                ch = ia64_ssc(0, 0, 0, 0,
                                                              SSC_GETCHAR);
                                        while (!ch);
-                                       handle_sysrq(ch, NULL);
+                                       handle_sysrq(ch);
                                }
 #endif
                                seen_esc = 0;
index a91b2713451da0e5290a3889e1bcebbb530c0e4b..ef332136f96dababcbba1e574d2143847f7b368c 100644 (file)
@@ -150,6 +150,8 @@ SECTIONS {
                _sdata = . ;
                DATA_DATA
                CACHELINE_ALIGNED_DATA(32)
+               PAGE_ALIGNED_DATA(PAGE_SIZE)
+               *(.data..shared_aligned)
                INIT_TASK_DATA(THREAD_SIZE)
                _edata = . ;
        } > DATA
index 4e34880bea03b889dcbcaea5865d576045bf1707..159acb02cfd4c16dcd1e91f9268b8d9dc6e74463 100644 (file)
@@ -25,7 +25,8 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
        unsigned long addr;
        void *ret;
 
-       printk("dma_alloc_coherent(%s,%zu,,%x)\n", dev_name(dev), size, gfp);
+       pr_debug("dma_alloc_coherent(%s,%zu,%x)\n",
+                dev ? dev_name(dev) : "?", size, gfp);
 
        if (0xbe000000 - pci_sram_allocated >= size) {
                size = (size + 255) & ~255;
index e3ea151c95975a9434995dd7e2da3f6033d6c912..b7212b619c52377ddeb471cf8e32f6080522cafa 100644 (file)
@@ -164,7 +164,7 @@ drivers-$(CONFIG_OPROFILE)  += arch/powerpc/oprofile/
 all: zImage
 
 # With make 3.82 we cannot mix normal and wildcard targets
-BOOT_TARGETS1 := zImage zImage.initrd uImaged
+BOOT_TARGETS1 := zImage zImage.initrd uImage
 BOOT_TARGETS2 := zImage% dtbImage% treeImage.% cuImage.% simpleImage.%
 
 PHONY += $(BOOT_TARGETS1) $(BOOT_TARGETS2)
index 5806ef0b860b5adcf8342e472c744f40cbf8821a..a30370396250ae2d524cac63e987bc5a80f7199b 100644 (file)
                        interrupts = <0x1e 4>;
                };
 
+               SATA0: sata@bffd1000 {
+                       compatible = "amcc,sata-460ex";
+                       reg = <4 0xbffd1000 0x800 4 0xbffd0800 0x400>;
+                       interrupt-parent = <&UIC3>;
+                       interrupts = <0x0 0x4       /* SATA */
+                                     0x5 0x4>;     /* AHBDMA */
+               };
+
                POB0: opb {
                        compatible = "ibm,opb-460ex", "ibm,opb";
                        #address-cells = <1>;
index a67aeed17d405fbc37e3a44a860c67a3a1dd9fcf..debc5ed96d6e087a2e241e47421f537a13bf1feb 100644 (file)
@@ -11,6 +11,7 @@
 #ifndef __ARCH_POWERPC_ASM_FSLDMA_H__
 #define __ARCH_POWERPC_ASM_FSLDMA_H__
 
+#include <linux/slab.h>
 #include <linux/dmaengine.h>
 
 /*
index 0e398cfee2c82826b6abad1eb8ece6330df6b0b4..acac35d5b382da579cd53061c089a88a7758bb2f 100644 (file)
@@ -433,7 +433,7 @@ typedef struct {
  * with.  However gcc is not clever enough to compute the
  * modulus (2^n-1) without a second multiply.
  */
-#define vsid_scrample(protovsid, size) \
+#define vsid_scramble(protovsid, size) \
        ((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))
 
 #else /* 1 */
index d8be016d2ede49e750ad68c478203afe7b45f9a7..ff0005eec7dd0d3d3c54946bcbf51cdfdf8bb569 100644 (file)
 #ifdef CONFIG_PPC64
 
 extern void ppc64_runlatch_on(void);
-extern void ppc64_runlatch_off(void);
+extern void __ppc64_runlatch_off(void);
+
+#define ppc64_runlatch_off()                                   \
+       do {                                                    \
+               if (cpu_has_feature(CPU_FTR_CTRL) &&            \
+                   test_thread_flag(TIF_RUNLATCH))             \
+                       __ppc64_runlatch_off();                 \
+       } while (0)
 
 extern unsigned long scom970_read(unsigned int address);
 extern void scom970_write(unsigned int address, unsigned long value);
index 24cd9281ec3726a55c95a7a9f8d076c76b97e2b8..8447d89fbe72639a6a0ce2db68694554bd7c61e8 100644 (file)
 /*
  * the semaphore definition
  */
-struct rw_semaphore {
-       /* XXX this should be able to be an atomic_t  -- paulus */
-       signed int              count;
-#define RWSEM_UNLOCKED_VALUE           0x00000000
-#define RWSEM_ACTIVE_BIAS              0x00000001
-#define RWSEM_ACTIVE_MASK              0x0000ffff
-#define RWSEM_WAITING_BIAS             (-0x00010000)
+#ifdef CONFIG_PPC64
+# define RWSEM_ACTIVE_MASK             0xffffffffL
+#else
+# define RWSEM_ACTIVE_MASK             0x0000ffffL
+#endif
+
+#define RWSEM_UNLOCKED_VALUE           0x00000000L
+#define RWSEM_ACTIVE_BIAS              0x00000001L
+#define RWSEM_WAITING_BIAS             (-RWSEM_ACTIVE_MASK-1)
 #define RWSEM_ACTIVE_READ_BIAS         RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS                (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+
+struct rw_semaphore {
+       long                    count;
        spinlock_t              wait_lock;
        struct list_head        wait_list;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -43,9 +48,13 @@ struct rw_semaphore {
 # define __RWSEM_DEP_MAP_INIT(lockname)
 #endif
 
-#define __RWSEM_INITIALIZER(name) \
-       { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
-         LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
+#define __RWSEM_INITIALIZER(name)                              \
+{                                                              \
+       RWSEM_UNLOCKED_VALUE,                                   \
+       __SPIN_LOCK_UNLOCKED((name).wait_lock),                 \
+       LIST_HEAD_INIT((name).wait_list)                        \
+       __RWSEM_DEP_MAP_INIT(name)                              \
+}
 
 #define DECLARE_RWSEM(name)            \
        struct rw_semaphore name = __RWSEM_INITIALIZER(name)
@@ -70,13 +79,13 @@ extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
  */
 static inline void __down_read(struct rw_semaphore *sem)
 {
-       if (unlikely(atomic_inc_return((atomic_t *)(&sem->count)) <= 0))
+       if (unlikely(atomic_long_inc_return((atomic_long_t *)&sem->count) <= 0))
                rwsem_down_read_failed(sem);
 }
 
 static inline int __down_read_trylock(struct rw_semaphore *sem)
 {
-       int tmp;
+       long tmp;
 
        while ((tmp = sem->count) >= 0) {
                if (tmp == cmpxchg(&sem->count, tmp,
@@ -92,10 +101,10 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
  */
 static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
-       int tmp;
+       long tmp;
 
-       tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
-                               (atomic_t *)(&sem->count));
+       tmp = atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS,
+                                    (atomic_long_t *)&sem->count);
        if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
                rwsem_down_write_failed(sem);
 }
@@ -107,7 +116,7 @@ static inline void __down_write(struct rw_semaphore *sem)
 
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
-       int tmp;
+       long tmp;
 
        tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
                      RWSEM_ACTIVE_WRITE_BIAS);
@@ -119,9 +128,9 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
  */
 static inline void __up_read(struct rw_semaphore *sem)
 {
-       int tmp;
+       long tmp;
 
-       tmp = atomic_dec_return((atomic_t *)(&sem->count));
+       tmp = atomic_long_dec_return((atomic_long_t *)&sem->count);
        if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
                rwsem_wake(sem);
 }
@@ -131,17 +140,17 @@ static inline void __up_read(struct rw_semaphore *sem)
  */
 static inline void __up_write(struct rw_semaphore *sem)
 {
-       if (unlikely(atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
-                             (atomic_t *)(&sem->count)) < 0))
+       if (unlikely(atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
+                                (atomic_long_t *)&sem->count) < 0))
                rwsem_wake(sem);
 }
 
 /*
  * implement atomic add functionality
  */
-static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
+static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
 {
-       atomic_add(delta, (atomic_t *)(&sem->count));
+       atomic_long_add(delta, (atomic_long_t *)&sem->count);
 }
 
 /*
@@ -149,9 +158,10 @@ static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
  */
 static inline void __downgrade_write(struct rw_semaphore *sem)
 {
-       int tmp;
+       long tmp;
 
-       tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
+       tmp = atomic_long_add_return(-RWSEM_WAITING_BIAS,
+                                    (atomic_long_t *)&sem->count);
        if (tmp < 0)
                rwsem_downgrade_wake(sem);
 }
@@ -159,14 +169,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 /*
  * implement exchange and add functionality
  */
-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
+static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
 {
-       return atomic_add_return(delta, (atomic_t *)(&sem->count));
+       return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
 }
 
 static inline int rwsem_is_locked(struct rw_semaphore *sem)
 {
-       return (sem->count != 0);
+       return sem->count != 0;
 }
 
 #endif /* __KERNEL__ */
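
For readers following the counter change above: the encoding keeps active lockers in the low half of the (now long) count and uses the waiting bias to push the value negative whenever anyone queues, which is what the wake checks test for. A small host-side sketch reusing the 64-bit constants from this hunk, assuming an LP64 machine (illustrative only, not kernel code):

    #include <stdio.h>

    #define RWSEM_ACTIVE_MASK       0xffffffffL
    #define RWSEM_UNLOCKED_VALUE    0x00000000L
    #define RWSEM_ACTIVE_BIAS       0x00000001L
    #define RWSEM_WAITING_BIAS      (-RWSEM_ACTIVE_MASK-1)
    #define RWSEM_ACTIVE_READ_BIAS  RWSEM_ACTIVE_BIAS

    int main(void)
    {
            long count = RWSEM_UNLOCKED_VALUE;

            count += RWSEM_ACTIVE_READ_BIAS;    /* a reader takes the lock */
            printf("one reader:      count=%ld active=%ld\n",
                   count, count & RWSEM_ACTIVE_MASK);

            count += RWSEM_WAITING_BIAS;        /* a writer queues behind it */
            printf("reader + waiter: count=%ld (negative => waiters)\n", count);

            count -= RWSEM_ACTIVE_READ_BIAS;    /* the reader drops the lock */
            printf("waiter only:     count=%ld active=%ld => wake the writer\n",
                   count, count & RWSEM_ACTIVE_MASK);
            return 0;
    }
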
index a5ee345b6a5cc27134bb42742641048aeba48eb6..3d212669a130a5621d1006694c75785f08c87d9d 100644 (file)
@@ -326,3 +326,6 @@ SYSCALL_SPU(perf_event_open)
 COMPAT_SYS_SPU(preadv)
 COMPAT_SYS_SPU(pwritev)
 COMPAT_SYS(rt_tgsigqueueinfo)
+SYSCALL(fanotify_init)
+COMPAT_SYS(fanotify_mark)
+SYSCALL_SPU(prlimit64)
index f0a10266e7f7b0245180661ae3dc0de2cd92390e..597e6f9d094a95dd51ff80873e8425d75ed94759 100644 (file)
 #define __NR_preadv            320
 #define __NR_pwritev           321
 #define __NR_rt_tgsigqueueinfo 322
+#define __NR_fanotify_init     323
+#define __NR_fanotify_mark     324
+#define __NR_prlimit64         325
 
 #ifdef __KERNEL__
 
-#define __NR_syscalls          323
+#define __NR_syscalls          326
 
 #define __NR__exit __NR_exit
 #define NR_syscalls    __NR_syscalls
index 65e2b4e10f97cfb0fa068571c8ec6ac41c59029f..1f9123f412ec3c5a7a817f58e6316298d6ce7e05 100644 (file)
@@ -1826,7 +1826,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
                .cpu_features           = CPU_FTRS_47X,
                .cpu_user_features      = COMMON_USER_BOOKE |
                        PPC_FEATURE_HAS_FPU,
-               .cpu_user_features      = COMMON_USER_BOOKE,
                .mmu_features           = MMU_FTR_TYPE_47x |
                        MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL,
                .icache_bsize           = 32,
index 417f7b05a9cebc6e10acfaf41eec88d17a2f7e89..4457382f8667a7e770f51c9cb82ee09f9531fb1b 100644 (file)
@@ -402,6 +402,18 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
         */
        hard_irq_disable();
 
+       /*
+        * Make a note of crashing cpu. Will be used in machine_kexec
+        * such that another IPI will not be sent.
+        */
+       crashing_cpu = smp_processor_id();
+       crash_save_cpu(regs, crashing_cpu);
+       crash_kexec_prepare_cpus(crashing_cpu);
+       cpu_set(crashing_cpu, cpus_in_crash);
+#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP)
+       crash_kexec_wait_realmode(crashing_cpu);
+#endif
+
        for_each_irq(i) {
                struct irq_desc *desc = irq_to_desc(i);
 
@@ -438,18 +450,8 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
        crash_shutdown_cpu = -1;
        __debugger_fault_handler = old_handler;
 
-       /*
-        * Make a note of crashing cpu. Will be used in machine_kexec
-        * such that another IPI will not be sent.
-        */
-       crashing_cpu = smp_processor_id();
-       crash_save_cpu(regs, crashing_cpu);
-       crash_kexec_prepare_cpus(crashing_cpu);
-       cpu_set(crashing_cpu, cpus_in_crash);
        crash_kexec_stop_spus();
-#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP)
-       crash_kexec_wait_realmode(crashing_cpu);
-#endif
+
        if (ppc_md.kexec_cpu_down)
                ppc_md.kexec_cpu_down(1, 0);
 }
index 5ab484ef06a720770f72d9883340a045a180f520..562305b40a8e7c669adc95cc3cfc5edaca87e5e5 100644 (file)
@@ -113,6 +113,10 @@ _ENTRY(_start);
        stw     r5, 0(r4)       /* Save abatron_pteptrs at a fixed location */
        stw     r6, 0(r5)
 
+       /* Clear the Machine Check Syndrome Register */
+       li      r0,0
+       mtspr   SPRN_MCSR,r0
+
        /* Let's move on */
        lis     r4,start_kernel@h
        ori     r4,r4,start_kernel@l
index 844a44b6447254889a10c35c2acf877981a4f16e..c571cd3c14532db29777c07977582e736be5a9aa 100644 (file)
@@ -572,15 +572,21 @@ __secondary_start:
        /* Set thread priority to MEDIUM */
        HMT_MEDIUM
 
-       /* Do early setup for that CPU (stab, slb, hash table pointer) */
-       bl      .early_setup_secondary
-
        /* Initialize the kernel stack.  Just a repeat for iSeries.      */
        LOAD_REG_ADDR(r3, current_set)
        sldi    r28,r24,3               /* get current_set[cpu#]         */
-       ldx     r1,r3,r28
-       addi    r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
-       std     r1,PACAKSAVE(r13)
+       ldx     r14,r3,r28
+       addi    r14,r14,THREAD_SIZE-STACK_FRAME_OVERHEAD
+       std     r14,PACAKSAVE(r13)
+
+       /* Do early setup for that CPU (stab, slb, hash table pointer) */
+       bl      .early_setup_secondary
+
+       /*
+        * setup the new stack pointer, but *don't* use this until
+        * translation is on.
+        */
+       mr      r1, r14
 
        /* Clear backchain so we get nice backtraces */
        li      r7,0
index 049dda60e4750296e7b890e752e6420d0d1fb769..39a2baa6ad58aec28a3c1d0782753569bbb51f6e 100644 (file)
@@ -94,9 +94,9 @@ void cpu_idle(void)
                HMT_medium();
                ppc64_runlatch_on();
                tick_nohz_restart_sched_tick();
+               preempt_enable_no_resched();
                if (cpu_should_die())
                        cpu_die();
-               preempt_enable_no_resched();
                schedule();
                preempt_disable();
        }
index d3ce67cf03be35855394905009d6a39729f7ac92..4a65386995d7fa697f86e32b77503558f1198961 100644 (file)
@@ -67,6 +67,7 @@
 #include <asm/machdep.h>
 #include <asm/udbg.h>
 #include <asm/dbell.h>
+#include <asm/smp.h>
 
 #ifdef CONFIG_PPC64
 #include <asm/paca.h>
@@ -446,22 +447,23 @@ struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;
 void exc_lvl_ctx_init(void)
 {
        struct thread_info *tp;
-       int i;
+       int i, hw_cpu;
 
        for_each_possible_cpu(i) {
-               memset((void *)critirq_ctx[i], 0, THREAD_SIZE);
-               tp = critirq_ctx[i];
+               hw_cpu = get_hard_smp_processor_id(i);
+               memset((void *)critirq_ctx[hw_cpu], 0, THREAD_SIZE);
+               tp = critirq_ctx[hw_cpu];
                tp->cpu = i;
                tp->preempt_count = 0;
 
 #ifdef CONFIG_BOOKE
-               memset((void *)dbgirq_ctx[i], 0, THREAD_SIZE);
-               tp = dbgirq_ctx[i];
+               memset((void *)dbgirq_ctx[hw_cpu], 0, THREAD_SIZE);
+               tp = dbgirq_ctx[hw_cpu];
                tp->cpu = i;
                tp->preempt_count = 0;
 
-               memset((void *)mcheckirq_ctx[i], 0, THREAD_SIZE);
-               tp = mcheckirq_ctx[i];
+               memset((void *)mcheckirq_ctx[hw_cpu], 0, THREAD_SIZE);
+               tp = mcheckirq_ctx[hw_cpu];
                tp->cpu = i;
                tp->preempt_count = HARDIRQ_OFFSET;
 #endif
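
As context for the hunk above: the exception-level stacks are now looked up by the id returned from get_hard_smp_processor_id() instead of the logical CPU number, presumably so whatever consumes these arrays by hardware id finds the matching stack. A toy sketch of that indexing with an invented, sparse hardware-id map (not the real mapping):

    #include <stdio.h>

    #define NR_CPUS 8

    static void *critirq_ctx[NR_CPUS];                /* indexed by hardware id */
    static const int hard_id_of[] = { 0, 2, 4, 6 };   /* made-up sparse hw ids */

    int main(void)
    {
            static char stacks[4][64];                /* stand-ins for THREAD_SIZE areas */

            for (int i = 0; i < 4; i++) {             /* i = logical cpu number */
                    int hw_cpu = hard_id_of[i];
                    critirq_ctx[hw_cpu] = stacks[i];
                    printf("logical cpu %d -> hw cpu %d -> stack %p\n",
                           i, hw_cpu, critirq_ctx[hw_cpu]);
            }
            return 0;
    }
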
index 6bbd7a604d243c9f8f9f906d683a9a20df2f9a43..a7a570dcdd571c8fb861562e549dbf094147fb32 100644 (file)
@@ -810,6 +810,9 @@ relocate_new_kernel:
        isync
        sync
 
+       mfspr   r3, SPRN_PIR /* current core we are running on */
+       mr      r4, r5 /* load physical address of chunk called */
+
        /* jump to the entry point, usually the setup routine */
        mtlr    r5
        blrl
index 6ddb795f83e8e9ccd62e713d997599baee77122a..e751506323b4509bf04abefb266a6435173a307f 100644 (file)
@@ -336,7 +336,7 @@ static void __devinit __of_scan_bus(struct device_node *node,
                if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
                    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
                        struct device_node *child = pci_device_to_OF_node(dev);
-                       if (dev)
+                       if (child)
                                of_scan_pci_bridge(child, dev);
                }
        }
index 91356ffda2ca3230e930245a0db1cc58cca323a0..b1c648a36b03cbc2d08104b30ccd21e14bf70644 100644 (file)
@@ -728,7 +728,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
                p->thread.regs = childregs;
                if (clone_flags & CLONE_SETTLS) {
 #ifdef CONFIG_PPC64
-                       if (!test_thread_flag(TIF_32BIT))
+                       if (!is_32bit_task())
                                childregs->gpr[13] = childregs->gpr[6];
                        else
 #endif
@@ -823,7 +823,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
        regs->nip = start;
        regs->msr = MSR_USER;
 #else
-       if (!test_thread_flag(TIF_32BIT)) {
+       if (!is_32bit_task()) {
                unsigned long entry, toc;
 
                /* start is a relocated pointer to the function descriptor for
@@ -995,7 +995,7 @@ int sys_clone(unsigned long clone_flags, unsigned long usp,
        if (usp == 0)
                usp = regs->gpr[1];     /* stack pointer for child */
 #ifdef CONFIG_PPC64
-       if (test_thread_flag(TIF_32BIT)) {
+       if (is_32bit_task()) {
                parent_tidp = TRUNC_PTR(parent_tidp);
                child_tidp = TRUNC_PTR(child_tidp);
        }
@@ -1199,19 +1199,17 @@ void ppc64_runlatch_on(void)
        }
 }
 
-void ppc64_runlatch_off(void)
+void __ppc64_runlatch_off(void)
 {
        unsigned long ctrl;
 
-       if (cpu_has_feature(CPU_FTR_CTRL) && test_thread_flag(TIF_RUNLATCH)) {
-               HMT_medium();
+       HMT_medium();
 
-               clear_thread_flag(TIF_RUNLATCH);
+       clear_thread_flag(TIF_RUNLATCH);
 
-               ctrl = mfspr(SPRN_CTRLF);
-               ctrl &= ~CTRL_RUNLATCH;
-               mtspr(SPRN_CTRLT, ctrl);
-       }
+       ctrl = mfspr(SPRN_CTRLF);
+       ctrl &= ~CTRL_RUNLATCH;
+       mtspr(SPRN_CTRLT, ctrl);
 }
 #endif
 
index a10ffc85ada77cfa802f371725a8d1b86bc7475b..93666f9cabf17fd6c0271332e9f4cb84ded8cd85 100644 (file)
@@ -258,17 +258,18 @@ static void __init irqstack_early_init(void)
 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
 static void __init exc_lvl_early_init(void)
 {
-       unsigned int i;
+       unsigned int i, hw_cpu;
 
        /* interrupt stacks must be in lowmem, we get that for free on ppc32
         * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */
        for_each_possible_cpu(i) {
-               critirq_ctx[i] = (struct thread_info *)
+               hw_cpu = get_hard_smp_processor_id(i);
+               critirq_ctx[hw_cpu] = (struct thread_info *)
                        __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 #ifdef CONFIG_BOOKE
-               dbgirq_ctx[i] = (struct thread_info *)
+               dbgirq_ctx[hw_cpu] = (struct thread_info *)
                        __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
-               mcheckirq_ctx[i] = (struct thread_info *)
+               mcheckirq_ctx[hw_cpu] = (struct thread_info *)
                        __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 #endif
        }
index 1bee4b68fa452ed4c3894785fdfc8ff80276a6e5..e72690ec9b87489af9e8d30a8e81ac84e03db229 100644 (file)
@@ -95,7 +95,7 @@ int ucache_bsize;
 
 #ifdef CONFIG_SMP
 
-static int smt_enabled_cmdline;
+static char *smt_enabled_cmdline;
 
 /* Look for ibm,smt-enabled OF option */
 static void check_smt_enabled(void)
@@ -103,37 +103,46 @@ static void check_smt_enabled(void)
        struct device_node *dn;
        const char *smt_option;
 
-       /* Allow the command line to overrule the OF option */
-       if (smt_enabled_cmdline)
-               return;
-
-       dn = of_find_node_by_path("/options");
-
-       if (dn) {
-               smt_option = of_get_property(dn, "ibm,smt-enabled", NULL);
+       /* Default to enabling all threads */
+       smt_enabled_at_boot = threads_per_core;
 
-                if (smt_option) {
-                       if (!strcmp(smt_option, "on"))
-                               smt_enabled_at_boot = 1;
-                       else if (!strcmp(smt_option, "off"))
-                               smt_enabled_at_boot = 0;
-                }
-        }
+       /* Allow the command line to overrule the OF option */
+       if (smt_enabled_cmdline) {
+               if (!strcmp(smt_enabled_cmdline, "on"))
+                       smt_enabled_at_boot = threads_per_core;
+               else if (!strcmp(smt_enabled_cmdline, "off"))
+                       smt_enabled_at_boot = 0;
+               else {
+                       long smt;
+                       int rc;
+
+                       rc = strict_strtol(smt_enabled_cmdline, 10, &smt);
+                       if (!rc)
+                               smt_enabled_at_boot =
+                                       min(threads_per_core, (int)smt);
+               }
+       } else {
+               dn = of_find_node_by_path("/options");
+               if (dn) {
+                       smt_option = of_get_property(dn, "ibm,smt-enabled",
+                                                    NULL);
+
+                       if (smt_option) {
+                               if (!strcmp(smt_option, "on"))
+                                       smt_enabled_at_boot = threads_per_core;
+                               else if (!strcmp(smt_option, "off"))
+                                       smt_enabled_at_boot = 0;
+                       }
+
+                       of_node_put(dn);
+               }
+       }
 }
 
 /* Look for smt-enabled= cmdline option */
 static int __init early_smt_enabled(char *p)
 {
-       smt_enabled_cmdline = 1;
-
-       if (!p)
-               return 0;
-
-       if (!strcmp(p, "on") || !strcmp(p, "1"))
-               smt_enabled_at_boot = 1;
-       else if (!strcmp(p, "off") || !strcmp(p, "0"))
-               smt_enabled_at_boot = 0;
-
+       smt_enabled_cmdline = p;
        return 0;
 }
 early_param("smt-enabled", early_smt_enabled);
@@ -380,8 +389,8 @@ void __init setup_system(void)
         */
        xmon_setup();
 
-       check_smt_enabled();
        smp_setup_cpu_maps();
+       check_smt_enabled();
 
 #ifdef CONFIG_SMP
        /* Release secondary cpus out of their spinloops at 0x60 now that
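
On the command-line handling reworked above: the raw smt-enabled= string is kept and parsed only once threads_per_core is known, accepting "on", "off", or a plain thread count. A rough userspace mock-up of that decision, where the threads_per_core value and the strtol-based parsing stand in for the kernel helpers:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int threads_per_core = 4;        /* assumed; known only after the cpu maps are set up */

    static int parse_smt_enabled(const char *arg)
    {
            if (!arg || !strcmp(arg, "on"))
                    return threads_per_core;        /* default: enable all threads */
            if (!strcmp(arg, "off"))
                    return 0;
            char *end;
            long smt = strtol(arg, &end, 10);       /* numeric thread count */
            if (end == arg || *end != '\0')
                    return threads_per_core;        /* unparsable: keep the default */
            return smt < threads_per_core ? (int)smt : threads_per_core;
    }

    int main(void)
    {
            const char *tests[] = { "on", "off", "2", "8", "bogus" };
            for (unsigned i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
                    printf("smt-enabled=%-5s -> %d threads per core\n",
                           tests[i], parse_smt_enabled(tests[i]));
            return 0;
    }
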
index a61b3ddd7bb3c0cd8fb3a3ec79d91005a7e4294d..0008bc58e826c53b29f9e689336388f16377eb77 100644 (file)
@@ -427,11 +427,11 @@ int __cpuinit __cpu_up(unsigned int cpu)
 #endif
 
        if (!cpu_callin_map[cpu]) {
-               printk("Processor %u is stuck.\n", cpu);
+               printk(KERN_ERR "Processor %u is stuck.\n", cpu);
                return -ENOENT;
        }
 
-       printk("Processor %u found.\n", cpu);
+       DBG("Processor %u found.\n", cpu);
 
        if (smp_ops->give_timebase)
                smp_ops->give_timebase();
index 20fd701a686adfeffc10e718c078f074eab92eaa..b1b6043a56c44f6b07537e8eb4a0aa720400e296 100644 (file)
@@ -616,3 +616,11 @@ asmlinkage long compat_sys_sync_file_range2(int fd, unsigned int flags,
 
        return sys_sync_file_range(fd, offset, nbytes, flags);
 }
+
+asmlinkage long compat_sys_fanotify_mark(int fanotify_fd, unsigned int flags,
+                                        unsigned mask_hi, unsigned mask_lo,
+                                        int dfd, const char __user *pathname)
+{
+       u64 mask = ((u64)mask_hi << 32) | mask_lo;
+       return sys_fanotify_mark(fanotify_fd, flags, mask, dfd, pathname);
+}
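
The compat wrapper above exists because a 32-bit caller passes the 64-bit event mask as two register-sized halves; the wrapper simply stitches them back together before calling the native syscall. A self-contained illustration of that reassembly (the mask bits below are arbitrary):

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t join_mask(uint32_t mask_hi, uint32_t mask_lo)
    {
            return ((uint64_t)mask_hi << 32) | mask_lo;   /* same expression as the wrapper */
    }

    int main(void)
    {
            uint32_t hi = 0x00000001, lo = 0x08000040;    /* hypothetical mask halves */
            printf("mask = %#llx\n", (unsigned long long)join_mask(hi, lo));
            return 0;
    }
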
index ce53dfa7130dfc74f551c2360746f206154cf925..8533b3b83f5d0e35b50fb39eefb663e6d81975b0 100644 (file)
@@ -577,20 +577,11 @@ void timer_interrupt(struct pt_regs * regs)
         * some CPUs will continuue to take decrementer exceptions */
        set_dec(DECREMENTER_MAX);
 
-#ifdef CONFIG_PPC32
+#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
        if (atomic_read(&ppc_n_lost_interrupts) != 0)
                do_IRQ(regs);
 #endif
 
-       now = get_tb_or_rtc();
-       if (now < decrementer->next_tb) {
-               /* not time for this event yet */
-               now = decrementer->next_tb - now;
-               if (now <= DECREMENTER_MAX)
-                       set_dec((int)now);
-               trace_timer_interrupt_exit(regs);
-               return;
-       }
        old_regs = set_irq_regs(regs);
        irq_enter();
 
@@ -606,8 +597,16 @@ void timer_interrupt(struct pt_regs * regs)
                get_lppaca()->int_dword.fields.decr_int = 0;
 #endif
 
-       if (evt->event_handler)
-               evt->event_handler(evt);
+       now = get_tb_or_rtc();
+       if (now >= decrementer->next_tb) {
+               decrementer->next_tb = ~(u64)0;
+               if (evt->event_handler)
+                       evt->event_handler(evt);
+       } else {
+               now = decrementer->next_tb - now;
+               if (now <= DECREMENTER_MAX)
+                       set_dec((int)now);
+       }
 
 #ifdef CONFIG_PPC_ISERIES
        if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
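
To follow the reordering above: the comparison against decrementer->next_tb now happens inside the interrupt accounting path, and the handler either fires the clock event or reprograms the decrementer with the remaining delta. A simplified model of that decision in plain C (the DECREMENTER_MAX value and the tick numbers are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    #define DECREMENTER_MAX 0x7fffffff

    static void service_decrementer(uint64_t now, uint64_t *next_tb)
    {
            if (now >= *next_tb) {
                    *next_tb = ~(uint64_t)0;   /* nothing scheduled until the handler re-arms */
                    printf("event due: run event_handler()\n");
            } else {
                    uint64_t delta = *next_tb - now;
                    if (delta <= DECREMENTER_MAX)
                            printf("not due: set_dec(%llu)\n", (unsigned long long)delta);
                    else
                            printf("not due: too far out, leave the maximum running\n");
            }
    }

    int main(void)
    {
            uint64_t next = 1000;
            service_decrementer(500, &next);    /* early: re-arm for the remaining 500 ticks */
            service_decrementer(1500, &next);   /* late: fire the event */
            return 0;
    }
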
index 00b9436f76525c501ecd8bcfab1abef5911a42d4..fa3469ddaef8d010bca8974765c9c656dfa06635 100644 (file)
@@ -1059,7 +1059,7 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
        if (!dma_window)
                return NULL;
 
-       tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
+       tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
        if (tbl == NULL)
                return NULL;
 
@@ -1072,6 +1072,7 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
        tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
        tbl->it_busno = 0;
        tbl->it_type = TCE_VB;
+       tbl->it_blocksize = 16;
 
        return iommu_init_table(tbl, -1);
 }
index 71f1415e2472399049a2273eded9c4fb8d173265..ace85fa74b2923a63a5924ef6432c84980b41406 100644 (file)
@@ -79,7 +79,9 @@
 #endif /* CONFIG_PPC_STD_MMU_64 */
 
 phys_addr_t memstart_addr = ~0;
+EXPORT_SYMBOL_GPL(memstart_addr);
 phys_addr_t kernstart_addr;
+EXPORT_SYMBOL_GPL(kernstart_addr);
 
 void free_initmem(void)
 {
index cfa768203d085e3c3fd7486758513883e40c7ae6..b9d9fed8f36e355083f32d22de3ba2274fa5fb71 100644 (file)
@@ -200,6 +200,7 @@ _GLOBAL(_tlbivax_bcast)
        rlwimi  r5,r4,0,16,31
        wrteei  0
        mtspr   SPRN_MMUCR,r5
+       isync
 /*     tlbivax 0,r3 - use .long to avoid binutils deps */
        .long 0x7c000624 | (r3 << 11)
        isync
index f9751c8905be5db684a25555975a60b3b574d881..83068322abd154004325093e1db9a7b24b9da8d7 100644 (file)
@@ -48,8 +48,10 @@ static int mpc837xmds_usb_cfg(void)
                return -1;
 
        np = of_find_node_by_name(NULL, "usb");
-       if (!np)
-               return -ENODEV;
+       if (!np) {
+               ret = -ENODEV;
+               goto out;
+       }
        phy_type = of_get_property(np, "phy_type", NULL);
        if (phy_type && !strcmp(phy_type, "ulpi")) {
                clrbits8(bcsr_regs + 12, BCSR12_USB_SER_PIN);
@@ -65,8 +67,9 @@ static int mpc837xmds_usb_cfg(void)
        }
 
        of_node_put(np);
+out:
        iounmap(bcsr_regs);
-       return 0;
+       return ret;
 }
 
 /* ************************************************************************
index da64be19d0997b75518ea1cdde2ecd6a16cbf884..aa34cac4eb5cc5db47db0e233c32ea9dcf0c09f6 100644 (file)
@@ -357,6 +357,7 @@ static void __init mpc85xx_mds_setup_arch(void)
 {
 #ifdef CONFIG_PCI
        struct pci_controller *hose;
+       struct device_node *np;
 #endif
        dma_addr_t max = 0xffffffff;
 
index e1467c937450791af1ca1355176e780e22b6b86b..34e00902ce86229bf7aafe28eb5e5c9ae4a632e0 100644 (file)
@@ -19,7 +19,7 @@
 
 #include <linux/pci.h>
 #include <linux/of_platform.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/mpic.h>
 #include <asm/swiotlb.h>
@@ -97,7 +97,7 @@ static void __init p1022_ds_setup_arch(void)
 #endif
 
 #ifdef CONFIG_SWIOTLB
-       if (lmb_end_of_DRAM() > max) {
+       if (memblock_end_of_DRAM() > max) {
                ppc_swiotlb_enable = 1;
                set_pci_dma_ops(&swiotlb_dma_ops);
                ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
index d1663db7810f3809fd6ea7658299d15d6425c347..81c9208025fa42833b5472da2ccd75845c8b3cc0 100644 (file)
@@ -106,8 +106,7 @@ config MMIO_NVRAM
 
 config MPIC_U3_HT_IRQS
        bool
-       depends on PPC_MAPLE
-       default y
+       default n
 
 config MPIC_BROKEN_REGREAD
        bool
index 58b13ce3847ec3c92453ef70d5ec4e653d6d886a..26a067122a54705ab07840269a6a9b4e9b9f41a5 100644 (file)
@@ -477,7 +477,7 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
 
        ioid = cell_iommu_get_ioid(np);
 
-       window = kmalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid);
+       window = kzalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid);
        BUG_ON(window == NULL);
 
        window->offset = offset;
index ce61cea0afb52cc77a4fa806da88cd7489518e88..d8b76335bd13d1d9f89dda4861bd928dccbf7703 100644 (file)
@@ -184,7 +184,7 @@ static void pci_dma_dev_setup_iseries(struct pci_dev *pdev)
 
        BUG_ON(lsn == NULL);
 
-       tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
+       tbl = kzalloc(sizeof(struct iommu_table), GFP_KERNEL);
 
        iommu_table_getparms_iSeries(pdn->busno, *lsn, 0, tbl);
 
index 39df6ab1735a6161ff4d2581368d5127f6a8297a..df423993f17503997d4ba6e808a0f7754078548e 100644 (file)
@@ -2873,12 +2873,11 @@ set_initial_features(void)
 
                /* Switch airport off */
                for_each_node_by_name(np, "radio") {
-                       if (np && np->parent == macio_chips[0].of_node) {
+                       if (np->parent == macio_chips[0].of_node) {
                                macio_chips[0].flags |= MACIO_FLAG_AIRPORT_ON;
                                core99_airport_enable(np, 0, 0);
                        }
                }
-               of_node_put(np);
        }
 
        /* On all machines that support sound PM, switch sound off */
index ab2027cdf8936ea47fdc6579ee6455b5af5330ae..3bc075c788ef3af25bcf06223098cffee2808942 100644 (file)
@@ -1155,13 +1155,11 @@ void __init pmac_pcibios_after_init(void)
                        pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, nd, 0, 0);
                }
        }
-       of_node_put(nd);
        for_each_node_by_name(nd, "ethernet") {
                if (nd->parent && of_device_is_compatible(nd, "gmac")
                    && of_device_is_compatible(nd->parent, "uni-north"))
                        pmac_call_feature(PMAC_FTR_GMAC_ENABLE, nd, 0, 0);
        }
-       of_node_put(nd);
 }
 
 void pmac_pci_fixup_cardbus(struct pci_dev* dev)
index 227c1c3d585e028874cc895afc3241afff0b3caf..72d8054fa739055ddc03521aaa6e0ff961ca3670 100644 (file)
@@ -129,20 +129,35 @@ struct device_node *dlpar_configure_connector(u32 drc_index)
        struct property *property;
        struct property *last_property = NULL;
        struct cc_workarea *ccwa;
+       char *data_buf;
        int cc_token;
-       int rc;
+       int rc = -1;
 
        cc_token = rtas_token("ibm,configure-connector");
        if (cc_token == RTAS_UNKNOWN_SERVICE)
                return NULL;
 
-       spin_lock(&rtas_data_buf_lock);
-       ccwa = (struct cc_workarea *)&rtas_data_buf[0];
+       data_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
+       if (!data_buf)
+               return NULL;
+
+       ccwa = (struct cc_workarea *)&data_buf[0];
        ccwa->drc_index = drc_index;
        ccwa->zero = 0;
 
-       rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL);
-       while (rc) {
+       do {
+               /* Since we release the rtas_data_buf lock between configure
+                * connector calls we want to re-populate the rtas_data_buffer
+                * with the contents of the previous call.
+                */
+               spin_lock(&rtas_data_buf_lock);
+
+               memcpy(rtas_data_buf, data_buf, RTAS_DATA_BUF_SIZE);
+               rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL);
+               memcpy(data_buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);
+
+               spin_unlock(&rtas_data_buf_lock);
+
                switch (rc) {
                case NEXT_SIBLING:
                        dn = dlpar_parse_cc_node(ccwa);
@@ -197,18 +212,19 @@ struct device_node *dlpar_configure_connector(u32 drc_index)
                               "returned from configure-connector\n", rc);
                        goto cc_error;
                }
+       } while (rc);
 
-               rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL);
+cc_error:
+       kfree(data_buf);
+
+       if (rc) {
+               if (first_dn)
+                       dlpar_free_cc_nodes(first_dn);
+
+               return NULL;
        }
 
-       spin_unlock(&rtas_data_buf_lock);
        return first_dn;
-
-cc_error:
-       if (first_dn)
-               dlpar_free_cc_nodes(first_dn);
-       spin_unlock(&rtas_data_buf_lock);
-       return NULL;
 }
 
 static struct device_node *derive_parent(const char *path)
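
The restructuring above keeps a private copy of the work area so rtas_data_buf_lock is only held across each individual configure-connector call, with the buffer contents shuttled in and out around it. A userspace sketch of that lock-scope pattern, where a pthread mutex and a fake firmware call stand in for the RTAS pieces:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define BUF_SIZE 4096

    static char shared_buf[BUF_SIZE];     /* stands in for rtas_data_buf */
    static pthread_mutex_t shared_lock = PTHREAD_MUTEX_INITIALIZER;

    /* pretend firmware call: consumes and updates the shared buffer,
     * returns non-zero while more work remains (entirely hypothetical) */
    static int fake_rtas_call(void)
    {
            static int remaining = 3;
            shared_buf[0]++;
            return remaining-- > 0;
    }

    int main(void)
    {
            char *private_buf = calloc(1, BUF_SIZE);
            int rc;

            if (!private_buf)
                    return 1;
            do {
                    pthread_mutex_lock(&shared_lock);
                    memcpy(shared_buf, private_buf, BUF_SIZE);   /* restore previous state */
                    rc = fake_rtas_call();
                    memcpy(private_buf, shared_buf, BUF_SIZE);   /* keep results for next pass */
                    pthread_mutex_unlock(&shared_lock);
                    printf("call returned %d, state byte now %d\n", rc, private_buf[0]);
            } while (rc);
            free(private_buf);
            return 0;
    }
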
index 395848e30c523b36f7b31bbd258610777e87b4dd..a77bcaed80af8fa1c037444bf0aac0b0f5f0e1ea 100644 (file)
@@ -403,7 +403,7 @@ static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
        pci->phb->dma_window_size = 0x8000000ul;
        pci->phb->dma_window_base_cur = 0x8000000ul;
 
-       tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
+       tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
                           pci->phb->node);
 
        iommu_table_setparms(pci->phb, dn, tbl);
@@ -448,7 +448,7 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
                 pdn->full_name, ppci->iommu_table);
 
        if (!ppci->iommu_table) {
-               tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
+               tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
                                   ppci->phb->node);
                iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window,
                        bus->number);
@@ -478,7 +478,7 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
                struct pci_controller *phb = PCI_DN(dn)->phb;
 
                pr_debug(" --> first child, no bridge. Allocating iommu table.\n");
-               tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
+               tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
                                   phb->node);
                iommu_table_setparms(phb, dn, tbl);
                PCI_DN(dn)->iommu_table = iommu_init_table(tbl, phb->node);
@@ -544,7 +544,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
 
        pci = PCI_DN(pdn);
        if (!pci->iommu_table) {
-               tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
+               tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
                                   pci->phb->node);
                iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window,
                        pci->phb->bus->number);
index 3b1bf61c45bebda01f084e9229a2c764adb48b2f..0317cce877c647c28e66525dffa24ff1987deedf 100644 (file)
@@ -182,10 +182,13 @@ static int smp_pSeries_cpu_bootable(unsigned int nr)
        /* Special case - we inhibit secondary thread startup
         * during boot if the user requests it.
         */
-       if (system_state < SYSTEM_RUNNING &&
-           cpu_has_feature(CPU_FTR_SMT) &&
-           !smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
-               return 0;
+       if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
+               if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
+                       return 0;
+               if (smt_enabled_at_boot
+                   && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
+                       return 0;
+       }
 
        return 1;
 }
index 5b22b07c8f67aa19da32832c6b553220445489ee..93834b0d8272231f5c9376784bc5e82bd98c9e38 100644 (file)
@@ -928,8 +928,10 @@ void xics_migrate_irqs_away(void)
                if (xics_status[0] != hw_cpu)
                        goto unlock;
 
-               printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n",
-                      virq, cpu);
+               /* This is expected during cpu offline. */
+               if (cpu_online(cpu))
+                       printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n",
+                              virq, cpu);
 
                /* Reset affinity to all cpus */
                cpumask_setall(irq_to_desc(virq)->affinity);
index 209384b6e0396e232be5c0a53ed0519d3b5e6a3b..4ae933225251e0ae7312ea55f4e00732fc899911 100644 (file)
@@ -399,6 +399,8 @@ DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1013E, quirk_fsl_pcie_header);
 DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1013, quirk_fsl_pcie_header);
 DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020E, quirk_fsl_pcie_header);
 DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020, quirk_fsl_pcie_header);
+DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1021E, quirk_fsl_pcie_header);
+DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1021, quirk_fsl_pcie_header);
 DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022E, quirk_fsl_pcie_header);
 DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022, quirk_fsl_pcie_header);
 DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2010E, quirk_fsl_pcie_header);
index 6425abe5b7dbfe809cd41d8a06634a56367d9d1d..3017532319c8d0df73ba36e3858c5368c17cccaf 100644 (file)
@@ -240,12 +240,13 @@ struct rio_priv {
 
 static void __iomem *rio_regs_win;
 
+#ifdef CONFIG_E500
 static int (*saved_mcheck_exception)(struct pt_regs *regs);
 
 static int fsl_rio_mcheck_exception(struct pt_regs *regs)
 {
        const struct exception_table_entry *entry = NULL;
-       unsigned long reason = (mfspr(SPRN_MCSR) & MCSR_MASK);
+       unsigned long reason = mfspr(SPRN_MCSR);
 
        if (reason & MCSR_BUS_RBERR) {
                reason = in_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR));
@@ -269,6 +270,7 @@ static int fsl_rio_mcheck_exception(struct pt_regs *regs)
        else
                return cur_cpu_spec->machine_check(regs);
 }
+#endif
 
 /**
  * fsl_rio_doorbell_send - Send a MPC85xx doorbell message
@@ -1517,8 +1519,10 @@ int fsl_rio_setup(struct platform_device *dev)
        fsl_rio_doorbell_init(port);
        fsl_rio_port_write_init(port);
 
+#ifdef CONFIG_E500
        saved_mcheck_exception = ppc_md.machine_check_exception;
        ppc_md.machine_check_exception = fsl_rio_mcheck_exception;
+#endif
        /* Ensure that RFXE is set */
        mtspr(SPRN_HID1, (mfspr(SPRN_HID1) | 0x20000));
 
index 3da8014931c9c7a99afcebf52814b50d03d02f35..90020de4dcf2c7570e371569997506e5642c8c5f 100644 (file)
@@ -640,6 +640,7 @@ unsigned int qe_get_num_of_snums(void)
                if ((num_of_snums < 28) || (num_of_snums > QE_NUM_OF_SNUM)) {
                        /* No QE ever has fewer than 28 SNUMs */
                        pr_err("QE: number of snum is invalid\n");
+                       of_node_put(qe);
                        return -EINVAL;
                }
        }
index 0554445200bfd1417a1fe6076b0d258b74e69a02..d17d04cfb2cd4095c77adb306fcb690161c09fbf 100644 (file)
@@ -2880,15 +2880,14 @@ static void xmon_init(int enable)
 }
 
 #ifdef CONFIG_MAGIC_SYSRQ
-static void sysrq_handle_xmon(int key, struct tty_struct *tty) 
+static void sysrq_handle_xmon(int key)
 {
        /* ensure xmon is enabled */
        xmon_init(1);
        debugger(get_irq_regs());
 }
 
-static struct sysrq_key_op sysrq_xmon_op = 
-{
+static struct sysrq_key_op sysrq_xmon_op = {
        .handler =      sysrq_handle_xmon,
        .help_msg =     "Xmon",
        .action_msg =   "Entering xmon",
index 670a1d1745d271e6e89a8246e6432c0c281b3d94..bb8343d157bc18b6117d02aa4ef164039b06f108 100644 (file)
@@ -97,6 +97,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 {
        pte_t pte = huge_ptep_get(ptep);
 
+       mm->context.flush_mm = 1;
        pmd_clear((pmd_t *) ptep);
        return pte;
 }
@@ -167,7 +168,8 @@ static inline void huge_ptep_invalidate(struct mm_struct *mm,
 ({                                                                     \
        pte_t __pte = huge_ptep_get(__ptep);                            \
        if (pte_write(__pte)) {                                         \
-               if (atomic_read(&(__mm)->mm_users) > 1 ||               \
+               (__mm)->context.flush_mm = 1;                           \
+               if (atomic_read(&(__mm)->context.attach_count) > 1 ||   \
                    (__mm) != current->active_mm)                       \
                        huge_ptep_invalidate(__mm, __addr, __ptep);     \
                set_huge_pte_at(__mm, __addr, __ptep,                   \
index 99e3409102b9d955960ad9c75e1dba563358b21a..78522cdefdd42334c7b4f72be1180a6a2ed2f327 100644 (file)
@@ -2,6 +2,8 @@
 #define __MMU_H
 
 typedef struct {
+       atomic_t attach_count;
+       unsigned int flush_mm;
        spinlock_t list_lock;
        struct list_head crst_list;
        struct list_head pgtable_list;
index 976e273988c2ab2147722d46e76372f00e7ece10..a6f0e7cc9cde2f341fba3fef347ea93240bb9372 100644 (file)
 
 #include <asm/pgalloc.h>
 #include <asm/uaccess.h>
+#include <asm/tlbflush.h>
 #include <asm-generic/mm_hooks.h>
 
 static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
 {
+       atomic_set(&mm->context.attach_count, 0);
+       mm->context.flush_mm = 0;
        mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
 #ifdef CONFIG_64BIT
        mm->context.asce_bits |= _ASCE_TYPE_REGION3;
@@ -76,6 +79,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 {
        cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
        update_mm(next, tsk);
+       atomic_dec(&prev->context.attach_count);
+       WARN_ON(atomic_read(&prev->context.attach_count) < 0);
+       atomic_inc(&next->context.attach_count);
+       /* Check for TLBs not flushed yet */
+       if (next->context.flush_mm)
+               __tlb_flush_mm(next);
 }
 
 #define enter_lazy_tlb(mm,tsk) do { } while (0)
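
The fields added above let s390 defer mm-wide TLB flushes: attach_count tracks how many CPUs currently run with an mm, and flush_mm records that a flush was skipped so switch_mm() here (and the conditional flush helper later in these hunks) can catch up. A toy model of that bookkeeping, not the real implementation:

    #include <stdio.h>

    struct mm_context {
            int attach_count;   /* CPUs currently attached to this mm */
            int flush_mm;       /* a TLB flush was deferred earlier */
    };

    static void switch_mm(struct mm_context *prev, struct mm_context *next)
    {
            prev->attach_count--;
            next->attach_count++;
            if (next->flush_mm) {            /* deferred flush catches up on attach */
                    printf("flushing TLB for incoming mm\n");
                    next->flush_mm = 0;
            }
    }

    static void flush_mm_cond(struct mm_context *mm)
    {
            if (mm->flush_mm) {              /* mirrors __tlb_flush_mm_cond */
                    printf("flushing TLB (pending flag set)\n");
                    mm->flush_mm = 0;
            }
    }

    int main(void)
    {
            struct mm_context a = { 1, 0 }, b = { 0, 1 };   /* b has a flush pending */
            switch_mm(&a, &b);    /* flush happens as b is attached */
            b.flush_mm = 1;       /* e.g. a pte was cleared without flushing */
            flush_mm_cond(&b);
            return 0;
    }
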
index 89a504c3f12ec13f89fdcb34ab2b3602ce0c75a0..3157441ee1da287345a1f1a6bfe79769939bd7d7 100644 (file)
@@ -880,7 +880,8 @@ static inline void ptep_invalidate(struct mm_struct *mm,
 #define ptep_get_and_clear(__mm, __address, __ptep)                    \
 ({                                                                     \
        pte_t __pte = *(__ptep);                                        \
-       if (atomic_read(&(__mm)->mm_users) > 1 ||                       \
+       (__mm)->context.flush_mm = 1;                                   \
+       if (atomic_read(&(__mm)->context.attach_count) > 1 ||           \
            (__mm) != current->active_mm)                               \
                ptep_invalidate(__mm, __address, __ptep);               \
        else                                                            \
@@ -923,7 +924,8 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 ({                                                                     \
        pte_t __pte = *(__ptep);                                        \
        if (pte_write(__pte)) {                                         \
-               if (atomic_read(&(__mm)->mm_users) > 1 ||               \
+               (__mm)->context.flush_mm = 1;                           \
+               if (atomic_read(&(__mm)->context.attach_count) > 1 ||   \
                    (__mm) != current->active_mm)                       \
                        ptep_invalidate(__mm, __addr, __ptep);          \
                set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte)); \
index 81150b0536890b7d664052a33f22d397bb05b852..fd1c00d08bf57ca1ead5095746ba082847f52752 100644 (file)
@@ -50,8 +50,7 @@ static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm,
        struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
 
        tlb->mm = mm;
-       tlb->fullmm = full_mm_flush || (num_online_cpus() == 1) ||
-               (atomic_read(&mm->mm_users) <= 1 && mm == current->active_mm);
+       tlb->fullmm = full_mm_flush;
        tlb->nr_ptes = 0;
        tlb->nr_pxds = TLB_NR_PTRS;
        if (tlb->fullmm)
index 304cffa623e158041e128c738999abbed1d3aa5b..29d5d6d4becc7e12a4a5a0b9801a0de554c44b68 100644 (file)
@@ -94,8 +94,12 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
 
 static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
 {
-       if (atomic_read(&mm->mm_users) <= 1 && mm == current->active_mm)
+       spin_lock(&mm->page_table_lock);
+       if (mm->context.flush_mm) {
                __tlb_flush_mm(mm);
+               mm->context.flush_mm = 0;
+       }
+       spin_unlock(&mm->page_table_lock);
 }
 
 /*
index 403fb430a896104b0c5f7982a9dd76ecf80d5e95..ff579b6bde066e35b306669039773733d61509ef 100644 (file)
@@ -42,8 +42,8 @@ long sys_clone(unsigned long newsp, unsigned long clone_flags,
               int __user *parent_tidptr, int __user *child_tidptr);
 long sys_vfork(void);
 void execve_tail(void);
-long sys_execve(const char __user *name, char __user * __user *argv,
-               char __user * __user *envp);
+long sys_execve(const char __user *name, const char __user *const __user *argv,
+               const char __user *const __user *envp);
 long sys_sigsuspend(int history0, int history1, old_sigset_t mask);
 long sys_sigaction(int sig, const struct old_sigaction __user *act,
                   struct old_sigaction __user *oact);
index 541053ed234ea1c0cd8a893de5bf16c9c30d9022..8127ebd59c4d81f4e8368e7728ad49bf3a173900 100644 (file)
@@ -583,6 +583,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
        sf->gprs[9] = (unsigned long) sf;
        cpu_lowcore->save_area[15] = (unsigned long) sf;
        __ctl_store(cpu_lowcore->cregs_save_area, 0, 15);
+       atomic_inc(&init_mm.context.attach_count);
        asm volatile(
                "       stam    0,15,0(%0)"
                : : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
@@ -659,6 +660,7 @@ void __cpu_die(unsigned int cpu)
        while (sigp_p(0, cpu, sigp_set_prefix) == sigp_busy)
                udelay(10);
        smp_free_lowcore(cpu);
+       atomic_dec(&init_mm.context.attach_count);
        pr_info("Processor %d stopped\n", cpu);
 }
 
index acc91c75bc94a7e4a3b8f87600cbe312c4bdcc90..30eb6d02ddb89d59bf11d57ecbf743823cc44087 100644 (file)
@@ -74,6 +74,8 @@ void __init paging_init(void)
        __ctl_load(S390_lowcore.kernel_asce, 13, 13);
        __raw_local_irq_ssm(ssm_mask);
 
+       atomic_set(&init_mm.context.attach_count, 1);
+
        sparse_memory_present_with_active_regions(MAX_NUMNODES);
        sparse_init();
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
index f0c74227c737e0a94efb33c95640375ad38c89bc..bdb2ff880bdd68f58c0b4357479f31bd32ff19d9 100644 (file)
 #define atomic64_set(v, i)     (((v)->counter) = i)
 
 extern void atomic_add(int, atomic_t *);
-extern void atomic64_add(int, atomic64_t *);
+extern void atomic64_add(long, atomic64_t *);
 extern void atomic_sub(int, atomic_t *);
-extern void atomic64_sub(int, atomic64_t *);
+extern void atomic64_sub(long, atomic64_t *);
 
 extern int atomic_add_ret(int, atomic_t *);
-extern long atomic64_add_ret(int, atomic64_t *);
+extern long atomic64_add_ret(long, atomic64_t *);
 extern int atomic_sub_ret(int, atomic_t *);
-extern long atomic64_sub_ret(int, atomic64_t *);
+extern long atomic64_sub_ret(long, atomic64_t *);
 
 #define atomic_dec_return(v) atomic_sub_ret(1, v)
 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
index fa1fdf67e350e07d950107289f25bdf3c41a73b3..db3af0d30fb10129124c594d1f1195bb59f2a6f2 100644 (file)
@@ -8,6 +8,9 @@
 #define BACKOFF_SETUP(reg)     \
        mov     1, reg
 
+#define BACKOFF_LABEL(spin_label, continue_label) \
+       spin_label
+
 #define BACKOFF_SPIN(reg, tmp, label)  \
        mov     reg, tmp; \
 88:    brnz,pt tmp, 88b; \
 #else
 
 #define BACKOFF_SETUP(reg)
-#define BACKOFF_SPIN(reg, tmp, label) \
-       ba,pt   %xcc, label; \
-        nop;
+
+#define BACKOFF_LABEL(spin_label, continue_label) \
+       continue_label
+
+#define BACKOFF_SPIN(reg, tmp, label)
 
 #endif
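
On the macro introduced above: BACKOFF_LABEL picks the branch target a failed cas should take, the backoff spin label when the backoff code is built, or the plain retry label when it compiles away. A tiny C stand-in showing the selection; CONFIG_SMP here is just an ordinary preprocessor symbol (build with -DCONFIG_SMP to flip it), and the condition in the real header is assumed to be the SMP build:

    #include <stdio.h>

    #ifdef CONFIG_SMP
    # define BACKOFF_LABEL(spin_label, continue_label) spin_label
    #else
    # define BACKOFF_LABEL(spin_label, continue_label) continue_label
    #endif

    int main(void)
    {
            /* in the assembler sources the arguments are branch targets like 2f and 1b */
            printf("failed cas branches to: %s\n",
                   BACKOFF_LABEL("2f (spin with backoff)", "1b (retry at once)"));
            return 0;
    }
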
 
index a5db0317b5fbfecc523e82b7a27172252cb1f4a2..3e0b2d62303df55133d1f9547e8c984ba5dbbeb2 100644 (file)
@@ -185,9 +185,8 @@ extern int prom_getunumber(int syndrome_code,
                           char *buf, int buflen);
 
 /* Retain physical memory to the caller across soft resets. */
-extern unsigned long prom_retain(const char *name,
-                                unsigned long pa_low, unsigned long pa_high,
-                                long size, long align);
+extern int prom_retain(const char *name, unsigned long size,
+                      unsigned long align, unsigned long *paddr);
 
 /* Load explicit I/D TLB entries into the calling processor. */
 extern long prom_itlb_load(unsigned long index,
@@ -287,26 +286,6 @@ extern void prom_sun4v_guest_soft_state(void);
 extern int prom_ihandle2path(int handle, char *buffer, int bufsize);
 
 /* Client interface level routines. */
-extern long p1275_cmd(const char *, long, ...);
-
-#if 0
-#define P1275_SIZE(x) ((((long)((x) / 32)) << 32) | (x))
-#else
-#define P1275_SIZE(x) x
-#endif
-
-/* We support at most 16 input and 1 output argument */
-#define P1275_ARG_NUMBER               0
-#define P1275_ARG_IN_STRING            1
-#define P1275_ARG_OUT_BUF              2
-#define P1275_ARG_OUT_32B              3
-#define P1275_ARG_IN_FUNCTION          4
-#define P1275_ARG_IN_BUF               5
-#define P1275_ARG_IN_64B               6
-
-#define P1275_IN(x) ((x) & 0xf)
-#define P1275_OUT(x) (((x) << 4) & 0xf0)
-#define P1275_INOUT(i,o) (P1275_IN(i)|P1275_OUT(o))
-#define P1275_ARG(n,x) ((x) << ((n)*3 + 8))
+extern void p1275_cmd_direct(unsigned long *);
 
 #endif /* !(__SPARC64_OPLIB_H) */
diff --git a/arch/sparc/include/asm/rwsem-const.h b/arch/sparc/include/asm/rwsem-const.h
deleted file mode 100644 (file)
index e4c61a1..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-/* rwsem-const.h: RW semaphore counter constants.  */
-#ifndef _SPARC64_RWSEM_CONST_H
-#define _SPARC64_RWSEM_CONST_H
-
-#define RWSEM_UNLOCKED_VALUE           0x00000000
-#define RWSEM_ACTIVE_BIAS              0x00000001
-#define RWSEM_ACTIVE_MASK              0x0000ffff
-#define RWSEM_WAITING_BIAS             (-0x00010000)
-#define RWSEM_ACTIVE_READ_BIAS         RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS                (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-#endif /* _SPARC64_RWSEM_CONST_H */
index 6e5621006f85114d3356090b1ce66681021375a9..a2b4302869bcfb8ffe1d54ee3282293a18aeae83 100644 (file)
 
 #include <linux/list.h>
 #include <linux/spinlock.h>
-#include <asm/rwsem-const.h>
 
 struct rwsem_waiter;
 
 struct rw_semaphore {
-       signed int count;
-       spinlock_t              wait_lock;
-       struct list_head        wait_list;
+       signed long                     count;
+#define RWSEM_UNLOCKED_VALUE           0x00000000L
+#define RWSEM_ACTIVE_BIAS              0x00000001L
+#define RWSEM_ACTIVE_MASK              0xffffffffL
+#define RWSEM_WAITING_BIAS             (-RWSEM_ACTIVE_MASK-1)
+#define RWSEM_ACTIVE_READ_BIAS         RWSEM_ACTIVE_BIAS
+#define RWSEM_ACTIVE_WRITE_BIAS                (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+       spinlock_t                      wait_lock;
+       struct list_head                wait_list;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-       struct lockdep_map      dep_map;
+       struct lockdep_map              dep_map;
 #endif
 };
 
@@ -41,6 +46,11 @@ struct rw_semaphore {
 #define DECLARE_RWSEM(name) \
        struct rw_semaphore name = __RWSEM_INITIALIZER(name)
 
+extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
+
 extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
                         struct lock_class_key *key);
 
@@ -51,27 +61,103 @@ do {                                                               \
        __init_rwsem((sem), #sem, &__key);                      \
 } while (0)
 
-extern void __down_read(struct rw_semaphore *sem);
-extern int __down_read_trylock(struct rw_semaphore *sem);
-extern void __down_write(struct rw_semaphore *sem);
-extern int __down_write_trylock(struct rw_semaphore *sem);
-extern void __up_read(struct rw_semaphore *sem);
-extern void __up_write(struct rw_semaphore *sem);
-extern void __downgrade_write(struct rw_semaphore *sem);
+/*
+ * lock for reading
+ */
+static inline void __down_read(struct rw_semaphore *sem)
+{
+       if (unlikely(atomic64_inc_return((atomic64_t *)(&sem->count)) <= 0L))
+               rwsem_down_read_failed(sem);
+}
+
+static inline int __down_read_trylock(struct rw_semaphore *sem)
+{
+       long tmp;
+
+       while ((tmp = sem->count) >= 0L) {
+               if (tmp == cmpxchg(&sem->count, tmp,
+                                  tmp + RWSEM_ACTIVE_READ_BIAS)) {
+                       return 1;
+               }
+       }
+       return 0;
+}
 
+/*
+ * lock for writing
+ */
 static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
-       __down_write(sem);
+       long tmp;
+
+       tmp = atomic64_add_return(RWSEM_ACTIVE_WRITE_BIAS,
+                                 (atomic64_t *)(&sem->count));
+       if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
+               rwsem_down_write_failed(sem);
 }
 
-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
+static inline void __down_write(struct rw_semaphore *sem)
 {
-       return atomic_add_return(delta, (atomic_t *)(&sem->count));
+       __down_write_nested(sem, 0);
+}
+
+static inline int __down_write_trylock(struct rw_semaphore *sem)
+{
+       long tmp;
+
+       tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
+                     RWSEM_ACTIVE_WRITE_BIAS);
+       return tmp == RWSEM_UNLOCKED_VALUE;
 }
 
-static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
+/*
+ * unlock after reading
+ */
+static inline void __up_read(struct rw_semaphore *sem)
+{
+       long tmp;
+
+       tmp = atomic64_dec_return((atomic64_t *)(&sem->count));
+       if (unlikely(tmp < -1L && (tmp & RWSEM_ACTIVE_MASK) == 0L))
+               rwsem_wake(sem);
+}
+
+/*
+ * unlock after writing
+ */
+static inline void __up_write(struct rw_semaphore *sem)
+{
+       if (unlikely(atomic64_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
+                                        (atomic64_t *)(&sem->count)) < 0L))
+               rwsem_wake(sem);
+}
+
+/*
+ * implement atomic add functionality
+ */
+static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
+{
+       atomic64_add(delta, (atomic64_t *)(&sem->count));
+}
+
+/*
+ * downgrade write lock to read lock
+ */
+static inline void __downgrade_write(struct rw_semaphore *sem)
+{
+       long tmp;
+
+       tmp = atomic64_add_return(-RWSEM_WAITING_BIAS, (atomic64_t *)(&sem->count));
+       if (tmp < 0L)
+               rwsem_downgrade_wake(sem);
+}
+
+/*
+ * implement exchange and add functionality
+ */
+static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
 {
-       atomic_add(delta, (atomic_t *)(&sem->count));
+       return atomic64_add_return(delta, (atomic64_t *)(&sem->count));
 }
 
 static inline int rwsem_is_locked(struct rw_semaphore *sem)
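The hunk above replaces sparc64's out-of-line rwsem assembly fast paths with atomic64-based inlines. The standalone C sketch below only models the 64-bit count encoding those inlines rely on; the bias constants are the conventional 64-bit rwsem values and are an assumption here, not quoted from this patch.

/* Illustration only: user-space model of the assumed 64-bit rwsem
 * count layout (readers add 1, a writer adds WAITING + ACTIVE bias). */
#include <stdio.h>

#define RWSEM_UNLOCKED_VALUE     0L
#define RWSEM_ACTIVE_BIAS        1L
#define RWSEM_ACTIVE_MASK        0xffffffffL
#define RWSEM_WAITING_BIAS       (-RWSEM_ACTIVE_MASK - 1)
#define RWSEM_ACTIVE_READ_BIAS   RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS  (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

int main(void)
{
	long count = RWSEM_UNLOCKED_VALUE;

	count += RWSEM_ACTIVE_READ_BIAS;	/* __down_read fast path */
	printf("one reader: count=%ld (> 0 means no writer waiting)\n", count);
	count -= RWSEM_ACTIVE_READ_BIAS;	/* __up_read */

	count += RWSEM_ACTIVE_WRITE_BIAS;	/* __down_write fast path */
	printf("uncontended write acquire: %d\n",
	       count == RWSEM_ACTIVE_WRITE_BIAS);
	return 0;
}

The inline fast paths in the hunk are exactly these additions done with atomic64 operations, falling back to rwsem_down_read_failed()/rwsem_down_write_failed() when the result shows contention.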
index d24cfe16afc1babc4feb78f0c697555754304346..e3b65d8cf41b715bb71b9339c9184e42feea01d0 100644 (file)
@@ -106,6 +106,7 @@ do {        __asm__ __volatile__("ba,pt     %%xcc, 1f\n\t" \
  */
 #define write_pic(__p)                                         \
        __asm__ __volatile__("ba,pt     %%xcc, 99f\n\t"         \
+                            " nop\n\t"                         \
                             ".align    64\n"                   \
                          "99:wr        %0, 0x0, %%pic\n\t"     \
                             "rd        %%pic, %%g0" : : "r" (__p))
index 485f547483847da14aebfaa0e327c6cbb75dce24..c158a95ec664f1301d78ceb433d99a1c94098dbc 100644 (file)
@@ -303,7 +303,7 @@ void arch_trigger_all_cpu_backtrace(void)
 
 #ifdef CONFIG_MAGIC_SYSRQ
 
-static void sysrq_handle_globreg(int key, struct tty_struct *tty)
+static void sysrq_handle_globreg(int key)
 {
        arch_trigger_all_cpu_backtrace();
 }
index c4b5e03af11557efa6ae42aa11f9ce0d74dfa42a..846d1c4374ea9c0c24a64bc4088185388274afd3 100644 (file)
@@ -15,7 +15,7 @@ lib-$(CONFIG_SPARC32) += divdi3.o udivdi3.o
 lib-$(CONFIG_SPARC32) += copy_user.o locks.o
 lib-y                 += atomic_$(BITS).o
 lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o
-lib-y                 += rwsem_$(BITS).o
+lib-$(CONFIG_SPARC32) += rwsem_32.o
 lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o
 
 lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o
index 0268210ca1683f7bd0bb018953d93a22e207c701..59186e0fcf398feb5880b37fa28624230e0c756b 100644 (file)
@@ -21,7 +21,7 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
        add     %g1, %o0, %g7
        cas     [%o1], %g1, %g7
        cmp     %g1, %g7
-       bne,pn  %icc, 2f
+       bne,pn  %icc, BACKOFF_LABEL(2f, 1b)
         nop
        retl
         nop
@@ -36,7 +36,7 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
        sub     %g1, %o0, %g7
        cas     [%o1], %g1, %g7
        cmp     %g1, %g7
-       bne,pn  %icc, 2f
+       bne,pn  %icc, BACKOFF_LABEL(2f, 1b)
         nop
        retl
         nop
@@ -51,11 +51,10 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
        add     %g1, %o0, %g7
        cas     [%o1], %g1, %g7
        cmp     %g1, %g7
-       bne,pn  %icc, 2f
-        add    %g7, %o0, %g7
-       sra     %g7, 0, %o0
+       bne,pn  %icc, BACKOFF_LABEL(2f, 1b)
+        add    %g1, %o0, %g1
        retl
-        nop
+        sra    %g1, 0, %o0
 2:     BACKOFF_SPIN(%o2, %o3, 1b)
        .size   atomic_add_ret, .-atomic_add_ret
 
@@ -67,11 +66,10 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
        sub     %g1, %o0, %g7
        cas     [%o1], %g1, %g7
        cmp     %g1, %g7
-       bne,pn  %icc, 2f
-        sub    %g7, %o0, %g7
-       sra     %g7, 0, %o0
+       bne,pn  %icc, BACKOFF_LABEL(2f, 1b)
+        sub    %g1, %o0, %g1
        retl
-        nop
+        sra    %g1, 0, %o0
 2:     BACKOFF_SPIN(%o2, %o3, 1b)
        .size   atomic_sub_ret, .-atomic_sub_ret
 
@@ -83,7 +81,7 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
        add     %g1, %o0, %g7
        casx    [%o1], %g1, %g7
        cmp     %g1, %g7
-       bne,pn  %xcc, 2f
+       bne,pn  %xcc, BACKOFF_LABEL(2f, 1b)
         nop
        retl
         nop
@@ -98,7 +96,7 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
        sub     %g1, %o0, %g7
        casx    [%o1], %g1, %g7
        cmp     %g1, %g7
-       bne,pn  %xcc, 2f
+       bne,pn  %xcc, BACKOFF_LABEL(2f, 1b)
         nop
        retl
         nop
@@ -113,11 +111,10 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
        add     %g1, %o0, %g7
        casx    [%o1], %g1, %g7
        cmp     %g1, %g7
-       bne,pn  %xcc, 2f
-        add    %g7, %o0, %g7
-       mov     %g7, %o0
-       retl
+       bne,pn  %xcc, BACKOFF_LABEL(2f, 1b)
         nop
+       retl
+        add    %g1, %o0, %o0
 2:     BACKOFF_SPIN(%o2, %o3, 1b)
        .size   atomic64_add_ret, .-atomic64_add_ret
 
@@ -129,10 +126,9 @@ atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
        sub     %g1, %o0, %g7
        casx    [%o1], %g1, %g7
        cmp     %g1, %g7
-       bne,pn  %xcc, 2f
-        sub    %g7, %o0, %g7
-       mov     %g7, %o0
-       retl
+       bne,pn  %xcc, BACKOFF_LABEL(2f, 1b)
         nop
+       retl
+        sub    %g1, %o0, %o0
 2:     BACKOFF_SPIN(%o2, %o3, 1b)
        .size   atomic64_sub_ret, .-atomic64_sub_ret
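The BACKOFF_LABEL() conversions above let a single failed-cas branch serve both SMP and UP builds. A plausible definition of such a macro is sketched below; this is an assumption about asm/backoff.h, which is not part of this diff.

/* Hedged sketch: on SMP the failed cas branches to the backoff spin
 * label, on UP the backoff path would be dead code so the branch
 * simply retries the cas loop. */
#ifdef CONFIG_SMP
#define BACKOFF_LABEL(spin_label, continue_label)	spin_label
#else
#define BACKOFF_LABEL(spin_label, continue_label)	continue_label
#endif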
index 2b7228cb8c2209332000d429257f16f4dbcf730f..3dc61d5537c08a6ad56f37b90df340a527298bb7 100644 (file)
@@ -22,7 +22,7 @@ test_and_set_bit:     /* %o0=nr, %o1=addr */
        or      %g7, %o2, %g1
        casx    [%o1], %g7, %g1
        cmp     %g7, %g1
-       bne,pn  %xcc, 2f
+       bne,pn  %xcc, BACKOFF_LABEL(2f, 1b)
         and    %g7, %o2, %g2
        clr     %o0
        movrne  %g2, 1, %o0
@@ -45,7 +45,7 @@ test_and_clear_bit:   /* %o0=nr, %o1=addr */
        andn    %g7, %o2, %g1
        casx    [%o1], %g7, %g1
        cmp     %g7, %g1
-       bne,pn  %xcc, 2f
+       bne,pn  %xcc, BACKOFF_LABEL(2f, 1b)
         and    %g7, %o2, %g2
        clr     %o0
        movrne  %g2, 1, %o0
@@ -68,7 +68,7 @@ test_and_change_bit:  /* %o0=nr, %o1=addr */
        xor     %g7, %o2, %g1
        casx    [%o1], %g7, %g1
        cmp     %g7, %g1
-       bne,pn  %xcc, 2f
+       bne,pn  %xcc, BACKOFF_LABEL(2f, 1b)
         and    %g7, %o2, %g2
        clr     %o0
        movrne  %g2, 1, %o0
@@ -91,7 +91,7 @@ set_bit:              /* %o0=nr, %o1=addr */
        or      %g7, %o2, %g1
        casx    [%o1], %g7, %g1
        cmp     %g7, %g1
-       bne,pn  %xcc, 2f
+       bne,pn  %xcc, BACKOFF_LABEL(2f, 1b)
         nop
        retl
         nop
@@ -112,7 +112,7 @@ clear_bit:          /* %o0=nr, %o1=addr */
        andn    %g7, %o2, %g1
        casx    [%o1], %g7, %g1
        cmp     %g7, %g1
-       bne,pn  %xcc, 2f
+       bne,pn  %xcc, BACKOFF_LABEL(2f, 1b)
         nop
        retl
         nop
@@ -133,7 +133,7 @@ change_bit:         /* %o0=nr, %o1=addr */
        xor     %g7, %o2, %g1
        casx    [%o1], %g7, %g1
        cmp     %g7, %g1
-       bne,pn  %xcc, 2f
+       bne,pn  %xcc, BACKOFF_LABEL(2f, 1b)
         nop
        retl
         nop
diff --git a/arch/sparc/lib/rwsem_64.S b/arch/sparc/lib/rwsem_64.S
deleted file mode 100644 (file)
index 91a7d29..0000000
+++ /dev/null
@@ -1,163 +0,0 @@
-/* rwsem.S: RW semaphore assembler.
- *
- * Written by David S. Miller (davem@redhat.com), 2001.
- * Derived from asm-i386/rwsem.h
- */
-
-#include <asm/rwsem-const.h>
-
-       .section        .sched.text, "ax"
-
-       .globl          __down_read
-__down_read:
-1:     lduw            [%o0], %g1
-       add             %g1, 1, %g7
-       cas             [%o0], %g1, %g7
-       cmp             %g1, %g7
-       bne,pn          %icc, 1b
-        add            %g7, 1, %g7
-       cmp             %g7, 0
-       bl,pn           %icc, 3f
-        nop
-2:
-       retl
-        nop
-3:
-       save            %sp, -192, %sp
-       call            rwsem_down_read_failed
-        mov            %i0, %o0
-       ret
-        restore
-       .size           __down_read, .-__down_read
-
-       .globl          __down_read_trylock
-__down_read_trylock:
-1:     lduw            [%o0], %g1
-       add             %g1, 1, %g7
-       cmp             %g7, 0
-       bl,pn           %icc, 2f
-        mov            0, %o1
-       cas             [%o0], %g1, %g7
-       cmp             %g1, %g7
-       bne,pn          %icc, 1b
-        mov            1, %o1
-2:     retl
-        mov            %o1, %o0
-       .size           __down_read_trylock, .-__down_read_trylock
-
-       .globl          __down_write
-__down_write:
-       sethi           %hi(RWSEM_ACTIVE_WRITE_BIAS), %g1
-       or              %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
-1:
-       lduw            [%o0], %g3
-       add             %g3, %g1, %g7
-       cas             [%o0], %g3, %g7
-       cmp             %g3, %g7
-       bne,pn          %icc, 1b
-        cmp            %g7, 0
-       bne,pn          %icc, 3f
-        nop
-2:     retl
-        nop
-3:
-       save            %sp, -192, %sp
-       call            rwsem_down_write_failed
-        mov            %i0, %o0
-       ret
-        restore
-       .size           __down_write, .-__down_write
-
-       .globl          __down_write_trylock
-__down_write_trylock:
-       sethi           %hi(RWSEM_ACTIVE_WRITE_BIAS), %g1
-       or              %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
-1:
-       lduw            [%o0], %g3
-       cmp             %g3, 0
-       bne,pn          %icc, 2f
-        mov            0, %o1
-       add             %g3, %g1, %g7
-       cas             [%o0], %g3, %g7
-       cmp             %g3, %g7
-       bne,pn          %icc, 1b
-        mov            1, %o1
-2:     retl
-        mov            %o1, %o0
-       .size           __down_write_trylock, .-__down_write_trylock
-
-       .globl          __up_read
-__up_read:
-1:
-       lduw            [%o0], %g1
-       sub             %g1, 1, %g7
-       cas             [%o0], %g1, %g7
-       cmp             %g1, %g7
-       bne,pn          %icc, 1b
-        cmp            %g7, 0
-       bl,pn           %icc, 3f
-        nop
-2:     retl
-        nop
-3:     sethi           %hi(RWSEM_ACTIVE_MASK), %g1
-       sub             %g7, 1, %g7
-       or              %g1, %lo(RWSEM_ACTIVE_MASK), %g1
-       andcc           %g7, %g1, %g0
-       bne,pn          %icc, 2b
-        nop
-       save            %sp, -192, %sp
-       call            rwsem_wake
-        mov            %i0, %o0
-       ret
-        restore
-       .size           __up_read, .-__up_read
-
-       .globl          __up_write
-__up_write:
-       sethi           %hi(RWSEM_ACTIVE_WRITE_BIAS), %g1
-       or              %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
-1:
-       lduw            [%o0], %g3
-       sub             %g3, %g1, %g7
-       cas             [%o0], %g3, %g7
-       cmp             %g3, %g7
-       bne,pn          %icc, 1b
-        sub            %g7, %g1, %g7
-       cmp             %g7, 0
-       bl,pn           %icc, 3f
-        nop
-2:
-       retl
-        nop
-3:
-       save            %sp, -192, %sp
-       call            rwsem_wake
-        mov            %i0, %o0
-       ret
-        restore
-       .size           __up_write, .-__up_write
-
-       .globl          __downgrade_write
-__downgrade_write:
-       sethi           %hi(RWSEM_WAITING_BIAS), %g1
-       or              %g1, %lo(RWSEM_WAITING_BIAS), %g1
-1:
-       lduw            [%o0], %g3
-       sub             %g3, %g1, %g7
-       cas             [%o0], %g3, %g7
-       cmp             %g3, %g7
-       bne,pn          %icc, 1b
-        sub            %g7, %g1, %g7
-       cmp             %g7, 0
-       bl,pn           %icc, 3f
-        nop
-2:
-       retl
-        nop
-3:
-       save            %sp, -192, %sp
-       call            rwsem_downgrade_wake
-        mov            %i0, %o0
-       ret
-        restore
-       .size           __downgrade_write, .-__downgrade_write
index 5f27ad779c0c578097218e44b005eaed770acf7d..9c86b4b7d4290b75a5967c28790c59b3ee24b5c9 100644 (file)
@@ -9,18 +9,18 @@
 #include <asm/thread_info.h>
 
        .text
-       .globl  prom_cif_interface
-prom_cif_interface:
-       sethi   %hi(p1275buf), %o0
-       or      %o0, %lo(p1275buf), %o0
-       ldx     [%o0 + 0x010], %o1      ! prom_cif_stack
-       save    %o1, -192, %sp
-       ldx     [%i0 + 0x008], %l2      ! prom_cif_handler
+       .globl  prom_cif_direct
+prom_cif_direct:
+       sethi   %hi(p1275buf), %o1
+       or      %o1, %lo(p1275buf), %o1
+       ldx     [%o1 + 0x0010], %o2     ! prom_cif_stack
+       save    %o2, -192, %sp
+       ldx     [%i1 + 0x0008], %l2     ! prom_cif_handler
        mov     %g4, %l0
        mov     %g5, %l1
        mov     %g6, %l3
        call    %l2
-        add    %i0, 0x018, %o0         ! prom_args
+        mov    %i0, %o0                ! prom_args
        mov     %l0, %g4
        mov     %l1, %g5
        mov     %l3, %g6
index f55d58a8a1567a653f8517598edcfe7f777b4257..10322dc2f557a22d5befcc17dd4a27d8b6604f89 100644 (file)
@@ -21,14 +21,22 @@ extern int prom_stdin, prom_stdout;
 inline int
 prom_nbgetchar(void)
 {
+       unsigned long args[7];
        char inc;
 
-       if (p1275_cmd("read", P1275_ARG(1,P1275_ARG_OUT_BUF)|
-                             P1275_INOUT(3,1),
-                             prom_stdin, &inc, P1275_SIZE(1)) == 1)
+       args[0] = (unsigned long) "read";
+       args[1] = 3;
+       args[2] = 1;
+       args[3] = (unsigned int) prom_stdin;
+       args[4] = (unsigned long) &inc;
+       args[5] = 1;
+       args[6] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
+
+       if (args[6] == 1)
                return inc;
-       else
-               return -1;
+       return -1;
 }
 
 /* Non blocking put character to console device, returns -1 if
@@ -37,12 +45,22 @@ prom_nbgetchar(void)
 inline int
 prom_nbputchar(char c)
 {
+       unsigned long args[7];
        char outc;
        
        outc = c;
-       if (p1275_cmd("write", P1275_ARG(1,P1275_ARG_IN_BUF)|
-                              P1275_INOUT(3,1),
-                              prom_stdout, &outc, P1275_SIZE(1)) == 1)
+
+       args[0] = (unsigned long) "write";
+       args[1] = 3;
+       args[2] = 1;
+       args[3] = (unsigned int) prom_stdout;
+       args[4] = (unsigned long) &outc;
+       args[5] = 1;
+       args[6] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
+
+       if (args[6] == 1)
                return 0;
        else
                return -1;
@@ -67,7 +85,15 @@ prom_putchar(char c)
 void
 prom_puts(const char *s, int len)
 {
-       p1275_cmd("write", P1275_ARG(1,P1275_ARG_IN_BUF)|
-                          P1275_INOUT(3,1),
-                          prom_stdout, s, P1275_SIZE(len));
+       unsigned long args[7];
+
+       args[0] = (unsigned long) "write";
+       args[1] = 3;
+       args[2] = 1;
+       args[3] = (unsigned int) prom_stdout;
+       args[4] = (unsigned long) s;
+       args[5] = len;
+       args[6] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
 }
index 9dbd803e46e1f4c460300c6f5f2485f3d6c54e7d..a017119e7ef17c40ee8fa8328252f921c59a6ca8 100644 (file)
 int
 prom_devopen(const char *dstr)
 {
-       return p1275_cmd ("open", P1275_ARG(0,P1275_ARG_IN_STRING)|
-                                 P1275_INOUT(1,1),
-                                 dstr);
+       unsigned long args[5];
+
+       args[0] = (unsigned long) "open";
+       args[1] = 1;
+       args[2] = 1;
+       args[3] = (unsigned long) dstr;
+       args[4] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
+
+       return (int) args[4];
 }
 
 /* Close the device described by device handle 'dhandle'. */
 int
 prom_devclose(int dhandle)
 {
-       p1275_cmd ("close", P1275_INOUT(1,0), dhandle);
+       unsigned long args[4];
+
+       args[0] = (unsigned long) "close";
+       args[1] = 1;
+       args[2] = 0;
+       args[3] = (unsigned int) dhandle;
+
+       p1275_cmd_direct(args);
+
        return 0;
 }
 
@@ -37,5 +53,15 @@ prom_devclose(int dhandle)
 void
 prom_seek(int dhandle, unsigned int seekhi, unsigned int seeklo)
 {
-       p1275_cmd ("seek", P1275_INOUT(3,1), dhandle, seekhi, seeklo);
+       unsigned long args[7];
+
+       args[0] = (unsigned long) "seek";
+       args[1] = 3;
+       args[2] = 1;
+       args[3] = (unsigned int) dhandle;
+       args[4] = seekhi;
+       args[5] = seeklo;
+       args[6] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
 }
index 39fc6af21b7c55ddc0e752c80c745ed6c018820a..6cb1581d6aef507fca9c036708cebe24e5ab053c 100644 (file)
 
 int prom_service_exists(const char *service_name)
 {
-       int err = p1275_cmd("test", P1275_ARG(0, P1275_ARG_IN_STRING) |
-                           P1275_INOUT(1, 1), service_name);
+       unsigned long args[5];
 
-       if (err)
+       args[0] = (unsigned long) "test";
+       args[1] = 1;
+       args[2] = 1;
+       args[3] = (unsigned long) service_name;
+       args[4] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
+
+       if (args[4])
                return 0;
        return 1;
 }
@@ -31,30 +38,47 @@ int prom_service_exists(const char *service_name)
 void prom_sun4v_guest_soft_state(void)
 {
        const char *svc = "SUNW,soft-state-supported";
+       unsigned long args[3];
 
        if (!prom_service_exists(svc))
                return;
-       p1275_cmd(svc, P1275_INOUT(0, 0));
+       args[0] = (unsigned long) svc;
+       args[1] = 0;
+       args[2] = 0;
+       p1275_cmd_direct(args);
 }
 
 /* Reset and reboot the machine with the command 'bcommand'. */
 void prom_reboot(const char *bcommand)
 {
+       unsigned long args[4];
+
 #ifdef CONFIG_SUN_LDOMS
        if (ldom_domaining_enabled)
                ldom_reboot(bcommand);
 #endif
-       p1275_cmd("boot", P1275_ARG(0, P1275_ARG_IN_STRING) |
-                 P1275_INOUT(1, 0), bcommand);
+       args[0] = (unsigned long) "boot";
+       args[1] = 1;
+       args[2] = 0;
+       args[3] = (unsigned long) bcommand;
+
+       p1275_cmd_direct(args);
 }
 
 /* Forth evaluate the expression contained in 'fstring'. */
 void prom_feval(const char *fstring)
 {
+       unsigned long args[5];
+
        if (!fstring || fstring[0] == 0)
                return;
-       p1275_cmd("interpret", P1275_ARG(0, P1275_ARG_IN_STRING) |
-                 P1275_INOUT(1, 1), fstring);
+       args[0] = (unsigned long) "interpret";
+       args[1] = 1;
+       args[2] = 1;
+       args[3] = (unsigned long) fstring;
+       args[4] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
 }
 EXPORT_SYMBOL(prom_feval);
 
@@ -68,6 +92,7 @@ extern void smp_release(void);
  */
 void prom_cmdline(void)
 {
+       unsigned long args[3];
        unsigned long flags;
 
        local_irq_save(flags);
@@ -76,7 +101,11 @@ void prom_cmdline(void)
        smp_capture();
 #endif
 
-       p1275_cmd("enter", P1275_INOUT(0, 0));
+       args[0] = (unsigned long) "enter";
+       args[1] = 0;
+       args[2] = 0;
+
+       p1275_cmd_direct(args);
 
 #ifdef CONFIG_SMP
        smp_release();
@@ -90,22 +119,32 @@ void prom_cmdline(void)
  */
 void notrace prom_halt(void)
 {
+       unsigned long args[3];
+
 #ifdef CONFIG_SUN_LDOMS
        if (ldom_domaining_enabled)
                ldom_power_off();
 #endif
 again:
-       p1275_cmd("exit", P1275_INOUT(0, 0));
+       args[0] = (unsigned long) "exit";
+       args[1] = 0;
+       args[2] = 0;
+       p1275_cmd_direct(args);
        goto again; /* PROM is out to get me -DaveM */
 }
 
 void prom_halt_power_off(void)
 {
+       unsigned long args[3];
+
 #ifdef CONFIG_SUN_LDOMS
        if (ldom_domaining_enabled)
                ldom_power_off();
 #endif
-       p1275_cmd("SUNW,power-off", P1275_INOUT(0, 0));
+       args[0] = (unsigned long) "SUNW,power-off";
+       args[1] = 0;
+       args[2] = 0;
+       p1275_cmd_direct(args);
 
        /* if nothing else helps, we just halt */
        prom_halt();
@@ -114,10 +153,15 @@ void prom_halt_power_off(void)
 /* Set prom sync handler to call function 'funcp'. */
 void prom_setcallback(callback_func_t funcp)
 {
+       unsigned long args[5];
        if (!funcp)
                return;
-       p1275_cmd("set-callback", P1275_ARG(0, P1275_ARG_IN_FUNCTION) |
-                 P1275_INOUT(1, 1), funcp);
+       args[0] = (unsigned long) "set-callback";
+       args[1] = 1;
+       args[2] = 1;
+       args[3] = (unsigned long) funcp;
+       args[4] = (unsigned long) -1;
+       p1275_cmd_direct(args);
 }
 
 /* Get the idprom and stuff it into buffer 'idbuf'.  Returns the
@@ -173,57 +217,61 @@ static int prom_get_memory_ihandle(void)
 }
 
 /* Load explicit I/D TLB entries. */
+static long tlb_load(const char *type, unsigned long index,
+                    unsigned long tte_data, unsigned long vaddr)
+{
+       unsigned long args[9];
+
+       args[0] = (unsigned long) prom_callmethod_name;
+       args[1] = 5;
+       args[2] = 1;
+       args[3] = (unsigned long) type;
+       args[4] = (unsigned int) prom_get_mmu_ihandle();
+       args[5] = vaddr;
+       args[6] = tte_data;
+       args[7] = index;
+       args[8] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
+
+       return (long) args[8];
+}
+
 long prom_itlb_load(unsigned long index,
                    unsigned long tte_data,
                    unsigned long vaddr)
 {
-       return p1275_cmd(prom_callmethod_name,
-                        (P1275_ARG(0, P1275_ARG_IN_STRING) |
-                         P1275_ARG(2, P1275_ARG_IN_64B) |
-                         P1275_ARG(3, P1275_ARG_IN_64B) |
-                         P1275_INOUT(5, 1)),
-                        "SUNW,itlb-load",
-                        prom_get_mmu_ihandle(),
-                        /* And then our actual args are pushed backwards. */
-                        vaddr,
-                        tte_data,
-                        index);
+       return tlb_load("SUNW,itlb-load", index, tte_data, vaddr);
 }
 
 long prom_dtlb_load(unsigned long index,
                    unsigned long tte_data,
                    unsigned long vaddr)
 {
-       return p1275_cmd(prom_callmethod_name,
-                        (P1275_ARG(0, P1275_ARG_IN_STRING) |
-                         P1275_ARG(2, P1275_ARG_IN_64B) |
-                         P1275_ARG(3, P1275_ARG_IN_64B) |
-                         P1275_INOUT(5, 1)),
-                        "SUNW,dtlb-load",
-                        prom_get_mmu_ihandle(),
-                        /* And then our actual args are pushed backwards. */
-                        vaddr,
-                        tte_data,
-                        index);
+       return tlb_load("SUNW,dtlb-load", index, tte_data, vaddr);
 }
 
 int prom_map(int mode, unsigned long size,
             unsigned long vaddr, unsigned long paddr)
 {
-       int ret = p1275_cmd(prom_callmethod_name,
-                           (P1275_ARG(0, P1275_ARG_IN_STRING) |
-                            P1275_ARG(3, P1275_ARG_IN_64B) |
-                            P1275_ARG(4, P1275_ARG_IN_64B) |
-                            P1275_ARG(6, P1275_ARG_IN_64B) |
-                            P1275_INOUT(7, 1)),
-                           prom_map_name,
-                           prom_get_mmu_ihandle(),
-                           mode,
-                           size,
-                           vaddr,
-                           0,
-                           paddr);
-
+       unsigned long args[11];
+       int ret;
+
+       args[0] = (unsigned long) prom_callmethod_name;
+       args[1] = 7;
+       args[2] = 1;
+       args[3] = (unsigned long) prom_map_name;
+       args[4] = (unsigned int) prom_get_mmu_ihandle();
+       args[5] = (unsigned int) mode;
+       args[6] = size;
+       args[7] = vaddr;
+       args[8] = 0;
+       args[9] = paddr;
+       args[10] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
+
+       ret = (int) args[10];
        if (ret == 0)
                ret = -1;
        return ret;
@@ -231,40 +279,51 @@ int prom_map(int mode, unsigned long size,
 
 void prom_unmap(unsigned long size, unsigned long vaddr)
 {
-       p1275_cmd(prom_callmethod_name,
-                 (P1275_ARG(0, P1275_ARG_IN_STRING) |
-                  P1275_ARG(2, P1275_ARG_IN_64B) |
-                  P1275_ARG(3, P1275_ARG_IN_64B) |
-                  P1275_INOUT(4, 0)),
-                 prom_unmap_name,
-                 prom_get_mmu_ihandle(),
-                 size,
-                 vaddr);
+       unsigned long args[7];
+
+       args[0] = (unsigned long) prom_callmethod_name;
+       args[1] = 4;
+       args[2] = 0;
+       args[3] = (unsigned long) prom_unmap_name;
+       args[4] = (unsigned int) prom_get_mmu_ihandle();
+       args[5] = size;
+       args[6] = vaddr;
+
+       p1275_cmd_direct(args);
 }
 
 /* Set aside physical memory which is not touched or modified
  * across soft resets.
  */
-unsigned long prom_retain(const char *name,
-                         unsigned long pa_low, unsigned long pa_high,
-                         long size, long align)
+int prom_retain(const char *name, unsigned long size,
+               unsigned long align, unsigned long *paddr)
 {
-       /* XXX I don't think we return multiple values correctly.
-        * XXX OBP supposedly returns pa_low/pa_high here, how does
-        * XXX it work?
+       unsigned long args[11];
+
+       args[0] = (unsigned long) prom_callmethod_name;
+       args[1] = 5;
+       args[2] = 3;
+       args[3] = (unsigned long) "SUNW,retain";
+       args[4] = (unsigned int) prom_get_memory_ihandle();
+       args[5] = align;
+       args[6] = size;
+       args[7] = (unsigned long) name;
+       args[8] = (unsigned long) -1;
+       args[9] = (unsigned long) -1;
+       args[10] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
+
+       if (args[8])
+               return (int) args[8];
+
+       /* Next we get "phys_high" and then "phys_low".  On 64-bit the
+        * phys_high cell is a don't-care since the phys_low cell
+        * carries the full value.
         */
+       *paddr = args[10];
 
-       /* If align is zero, the pa_low/pa_high args are passed,
-        * else they are not.
-        */
-       if (align == 0)
-               return p1275_cmd("SUNW,retain",
-                                (P1275_ARG(0, P1275_ARG_IN_BUF) | P1275_INOUT(5, 2)),
-                                name, pa_low, pa_high, size, align);
-       else
-               return p1275_cmd("SUNW,retain",
-                                (P1275_ARG(0, P1275_ARG_IN_BUF) | P1275_INOUT(3, 2)),
-                                name, size, align);
+       return 0;
 }
 
 /* Get "Unumber" string for the SIMM at the given
@@ -277,62 +336,129 @@ int prom_getunumber(int syndrome_code,
                    unsigned long phys_addr,
                    char *buf, int buflen)
 {
-       return p1275_cmd(prom_callmethod_name,
-                        (P1275_ARG(0, P1275_ARG_IN_STRING)     |
-                         P1275_ARG(3, P1275_ARG_OUT_BUF)       |
-                         P1275_ARG(6, P1275_ARG_IN_64B)        |
-                         P1275_INOUT(8, 2)),
-                        "SUNW,get-unumber", prom_get_memory_ihandle(),
-                        buflen, buf, P1275_SIZE(buflen),
-                        0, phys_addr, syndrome_code);
+       unsigned long args[12];
+
+       args[0] = (unsigned long) prom_callmethod_name;
+       args[1] = 7;
+       args[2] = 2;
+       args[3] = (unsigned long) "SUNW,get-unumber";
+       args[4] = (unsigned int) prom_get_memory_ihandle();
+       args[5] = buflen;
+       args[6] = (unsigned long) buf;
+       args[7] = 0;
+       args[8] = phys_addr;
+       args[9] = (unsigned int) syndrome_code;
+       args[10] = (unsigned long) -1;
+       args[11] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
+
+       return (int) args[10];
 }
 
 /* Power management extensions. */
 void prom_sleepself(void)
 {
-       p1275_cmd("SUNW,sleep-self", P1275_INOUT(0, 0));
+       unsigned long args[3];
+
+       args[0] = (unsigned long) "SUNW,sleep-self";
+       args[1] = 0;
+       args[2] = 0;
+       p1275_cmd_direct(args);
 }
 
 int prom_sleepsystem(void)
 {
-       return p1275_cmd("SUNW,sleep-system", P1275_INOUT(0, 1));
+       unsigned long args[4];
+
+       args[0] = (unsigned long) "SUNW,sleep-system";
+       args[1] = 0;
+       args[2] = 1;
+       args[3] = (unsigned long) -1;
+       p1275_cmd_direct(args);
+
+       return (int) args[3];
 }
 
 int prom_wakeupsystem(void)
 {
-       return p1275_cmd("SUNW,wakeup-system", P1275_INOUT(0, 1));
+       unsigned long args[4];
+
+       args[0] = (unsigned long) "SUNW,wakeup-system";
+       args[1] = 0;
+       args[2] = 1;
+       args[3] = (unsigned long) -1;
+       p1275_cmd_direct(args);
+
+       return (int) args[3];
 }
 
 #ifdef CONFIG_SMP
 void prom_startcpu(int cpunode, unsigned long pc, unsigned long arg)
 {
-       p1275_cmd("SUNW,start-cpu", P1275_INOUT(3, 0), cpunode, pc, arg);
+       unsigned long args[6];
+
+       args[0] = (unsigned long) "SUNW,start-cpu";
+       args[1] = 3;
+       args[2] = 0;
+       args[3] = (unsigned int) cpunode;
+       args[4] = pc;
+       args[5] = arg;
+       p1275_cmd_direct(args);
 }
 
 void prom_startcpu_cpuid(int cpuid, unsigned long pc, unsigned long arg)
 {
-       p1275_cmd("SUNW,start-cpu-by-cpuid", P1275_INOUT(3, 0),
-                 cpuid, pc, arg);
+       unsigned long args[6];
+
+       args[0] = (unsigned long) "SUNW,start-cpu-by-cpuid";
+       args[1] = 3;
+       args[2] = 0;
+       args[3] = (unsigned int) cpuid;
+       args[4] = pc;
+       args[5] = arg;
+       p1275_cmd_direct(args);
 }
 
 void prom_stopcpu_cpuid(int cpuid)
 {
-       p1275_cmd("SUNW,stop-cpu-by-cpuid", P1275_INOUT(1, 0),
-                 cpuid);
+       unsigned long args[4];
+
+       args[0] = (unsigned long) "SUNW,stop-cpu-by-cpuid";
+       args[1] = 1;
+       args[2] = 0;
+       args[3] = (unsigned int) cpuid;
+       p1275_cmd_direct(args);
 }
 
 void prom_stopself(void)
 {
-       p1275_cmd("SUNW,stop-self", P1275_INOUT(0, 0));
+       unsigned long args[3];
+
+       args[0] = (unsigned long) "SUNW,stop-self";
+       args[1] = 0;
+       args[2] = 0;
+       p1275_cmd_direct(args);
 }
 
 void prom_idleself(void)
 {
-       p1275_cmd("SUNW,idle-self", P1275_INOUT(0, 0));
+       unsigned long args[3];
+
+       args[0] = (unsigned long) "SUNW,idle-self";
+       args[1] = 0;
+       args[2] = 0;
+       p1275_cmd_direct(args);
 }
 
 void prom_resumecpu(int cpunode)
 {
-       p1275_cmd("SUNW,resume-cpu", P1275_INOUT(1, 0), cpunode);
+       unsigned long args[4];
+
+       args[0] = (unsigned long) "SUNW,resume-cpu";
+       args[1] = 1;
+       args[2] = 0;
+       args[3] = (unsigned int) cpunode;
+       p1275_cmd_direct(args);
 }
 #endif
index 2d8b70d397f154318612858c1bf7811160ee7f49..fa6e4e219b9ce436db25fcf3aa168d4b9f559ae2 100644 (file)
@@ -22,13 +22,11 @@ struct {
        long prom_callback;                     /* 0x00 */
        void (*prom_cif_handler)(long *);       /* 0x08 */
        unsigned long prom_cif_stack;           /* 0x10 */
-       unsigned long prom_args [23];           /* 0x18 */
-       char prom_buffer [3000];
 } p1275buf;
 
 extern void prom_world(int);
 
-extern void prom_cif_interface(void);
+extern void prom_cif_direct(unsigned long *args);
 extern void prom_cif_callback(void);
 
 /*
@@ -36,114 +34,20 @@ extern void prom_cif_callback(void);
  */
 DEFINE_RAW_SPINLOCK(prom_entry_lock);
 
-long p1275_cmd(const char *service, long fmt, ...)
+void p1275_cmd_direct(unsigned long *args)
 {
-       char *p, *q;
        unsigned long flags;
-       int nargs, nrets, i;
-       va_list list;
-       long attrs, x;
-       
-       p = p1275buf.prom_buffer;
 
        raw_local_save_flags(flags);
        raw_local_irq_restore(PIL_NMI);
        raw_spin_lock(&prom_entry_lock);
 
-       p1275buf.prom_args[0] = (unsigned long)p;               /* service */
-       strcpy (p, service);
-       p = (char *)(((long)(strchr (p, 0) + 8)) & ~7);
-       p1275buf.prom_args[1] = nargs = (fmt & 0x0f);           /* nargs */
-       p1275buf.prom_args[2] = nrets = ((fmt & 0xf0) >> 4);    /* nrets */
-       attrs = fmt >> 8;
-       va_start(list, fmt);
-       for (i = 0; i < nargs; i++, attrs >>= 3) {
-               switch (attrs & 0x7) {
-               case P1275_ARG_NUMBER:
-                       p1275buf.prom_args[i + 3] =
-                                               (unsigned)va_arg(list, long);
-                       break;
-               case P1275_ARG_IN_64B:
-                       p1275buf.prom_args[i + 3] =
-                               va_arg(list, unsigned long);
-                       break;
-               case P1275_ARG_IN_STRING:
-                       strcpy (p, va_arg(list, char *));
-                       p1275buf.prom_args[i + 3] = (unsigned long)p;
-                       p = (char *)(((long)(strchr (p, 0) + 8)) & ~7);
-                       break;
-               case P1275_ARG_OUT_BUF:
-                       (void) va_arg(list, char *);
-                       p1275buf.prom_args[i + 3] = (unsigned long)p;
-                       x = va_arg(list, long);
-                       i++; attrs >>= 3;
-                       p = (char *)(((long)(p + (int)x + 7)) & ~7);
-                       p1275buf.prom_args[i + 3] = x;
-                       break;
-               case P1275_ARG_IN_BUF:
-                       q = va_arg(list, char *);
-                       p1275buf.prom_args[i + 3] = (unsigned long)p;
-                       x = va_arg(list, long);
-                       i++; attrs >>= 3;
-                       memcpy (p, q, (int)x);
-                       p = (char *)(((long)(p + (int)x + 7)) & ~7);
-                       p1275buf.prom_args[i + 3] = x;
-                       break;
-               case P1275_ARG_OUT_32B:
-                       (void) va_arg(list, char *);
-                       p1275buf.prom_args[i + 3] = (unsigned long)p;
-                       p += 32;
-                       break;
-               case P1275_ARG_IN_FUNCTION:
-                       p1275buf.prom_args[i + 3] =
-                                       (unsigned long)prom_cif_callback;
-                       p1275buf.prom_callback = va_arg(list, long);
-                       break;
-               }
-       }
-       va_end(list);
-
        prom_world(1);
-       prom_cif_interface();
+       prom_cif_direct(args);
        prom_world(0);
 
-       attrs = fmt >> 8;
-       va_start(list, fmt);
-       for (i = 0; i < nargs; i++, attrs >>= 3) {
-               switch (attrs & 0x7) {
-               case P1275_ARG_NUMBER:
-                       (void) va_arg(list, long);
-                       break;
-               case P1275_ARG_IN_STRING:
-                       (void) va_arg(list, char *);
-                       break;
-               case P1275_ARG_IN_FUNCTION:
-                       (void) va_arg(list, long);
-                       break;
-               case P1275_ARG_IN_BUF:
-                       (void) va_arg(list, char *);
-                       (void) va_arg(list, long);
-                       i++; attrs >>= 3;
-                       break;
-               case P1275_ARG_OUT_BUF:
-                       p = va_arg(list, char *);
-                       x = va_arg(list, long);
-                       memcpy (p, (char *)(p1275buf.prom_args[i + 3]), (int)x);
-                       i++; attrs >>= 3;
-                       break;
-               case P1275_ARG_OUT_32B:
-                       p = va_arg(list, char *);
-                       memcpy (p, (char *)(p1275buf.prom_args[i + 3]), 32);
-                       break;
-               }
-       }
-       va_end(list);
-       x = p1275buf.prom_args [nargs + 3];
-
        raw_spin_unlock(&prom_entry_lock);
        raw_local_irq_restore(flags);
-
-       return x;
 }
 
 void prom_cif_init(void *cif_handler, void *cif_stack)
index 3c0d2dd9f6939faf1daf53f61141c785001429b4..9d3f9137a43ae1307ea698d81e13896af7504453 100644 (file)
 #include <asm/oplib.h>
 #include <asm/ldc.h>
 
+static int prom_node_to_node(const char *type, int node)
+{
+       unsigned long args[5];
+
+       args[0] = (unsigned long) type;
+       args[1] = 1;
+       args[2] = 1;
+       args[3] = (unsigned int) node;
+       args[4] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
+
+       return (int) args[4];
+}
+
 /* Return the child of node 'node' or zero if this node has no
  * direct descendant.
  */
 inline int __prom_getchild(int node)
 {
-       return p1275_cmd ("child", P1275_INOUT(1, 1), node);
+       return prom_node_to_node("child", node);
 }
 
 inline int prom_getchild(int node)
 {
        int cnode;
 
-       if(node == -1) return 0;
+       if (node == -1)
+               return 0;
        cnode = __prom_getchild(node);
-       if(cnode == -1) return 0;
-       return (int)cnode;
+       if (cnode == -1)
+               return 0;
+       return cnode;
 }
 EXPORT_SYMBOL(prom_getchild);
 
@@ -39,10 +56,12 @@ inline int prom_getparent(int node)
 {
        int cnode;
 
-       if(node == -1) return 0;
-       cnode = p1275_cmd ("parent", P1275_INOUT(1, 1), node);
-       if(cnode == -1) return 0;
-       return (int)cnode;
+       if (node == -1)
+               return 0;
+       cnode = prom_node_to_node("parent", node);
+       if (cnode == -1)
+               return 0;
+       return cnode;
 }
 
 /* Return the next sibling of node 'node' or zero if no more siblings
@@ -50,7 +69,7 @@ inline int prom_getparent(int node)
  */
 inline int __prom_getsibling(int node)
 {
-       return p1275_cmd(prom_peer_name, P1275_INOUT(1, 1), node);
+       return prom_node_to_node(prom_peer_name, node);
 }
 
 inline int prom_getsibling(int node)
@@ -72,11 +91,21 @@ EXPORT_SYMBOL(prom_getsibling);
  */
 inline int prom_getproplen(int node, const char *prop)
 {
-       if((!node) || (!prop)) return -1;
-       return p1275_cmd ("getproplen", 
-                         P1275_ARG(1,P1275_ARG_IN_STRING)|
-                         P1275_INOUT(2, 1), 
-                         node, prop);
+       unsigned long args[6];
+
+       if (!node || !prop)
+               return -1;
+
+       args[0] = (unsigned long) "getproplen";
+       args[1] = 2;
+       args[2] = 1;
+       args[3] = (unsigned int) node;
+       args[4] = (unsigned long) prop;
+       args[5] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
+
+       return (int) args[5];
 }
 EXPORT_SYMBOL(prom_getproplen);
 
@@ -87,19 +116,25 @@ EXPORT_SYMBOL(prom_getproplen);
 inline int prom_getproperty(int node, const char *prop,
                            char *buffer, int bufsize)
 {
+       unsigned long args[8];
        int plen;
 
        plen = prom_getproplen(node, prop);
-       if ((plen > bufsize) || (plen == 0) || (plen == -1)) {
+       if ((plen > bufsize) || (plen == 0) || (plen == -1))
                return -1;
-       } else {
-               /* Ok, things seem all right. */
-               return p1275_cmd(prom_getprop_name, 
-                                P1275_ARG(1,P1275_ARG_IN_STRING)|
-                                P1275_ARG(2,P1275_ARG_OUT_BUF)|
-                                P1275_INOUT(4, 1), 
-                                node, prop, buffer, P1275_SIZE(plen));
-       }
+
+       args[0] = (unsigned long) prom_getprop_name;
+       args[1] = 4;
+       args[2] = 1;
+       args[3] = (unsigned int) node;
+       args[4] = (unsigned long) prop;
+       args[5] = (unsigned long) buffer;
+       args[6] = bufsize;
+       args[7] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
+
+       return (int) args[7];
 }
 EXPORT_SYMBOL(prom_getproperty);
 
@@ -110,7 +145,7 @@ inline int prom_getint(int node, const char *prop)
 {
        int intprop;
 
-       if(prom_getproperty(node, prop, (char *) &intprop, sizeof(int)) != -1)
+       if (prom_getproperty(node, prop, (char *) &intprop, sizeof(int)) != -1)
                return intprop;
 
        return -1;
@@ -126,7 +161,8 @@ int prom_getintdefault(int node, const char *property, int deflt)
        int retval;
 
        retval = prom_getint(node, property);
-       if(retval == -1) return deflt;
+       if (retval == -1)
+               return deflt;
 
        return retval;
 }
@@ -138,7 +174,8 @@ int prom_getbool(int node, const char *prop)
        int retval;
 
        retval = prom_getproplen(node, prop);
-       if(retval == -1) return 0;
+       if (retval == -1)
+               return 0;
        return 1;
 }
 EXPORT_SYMBOL(prom_getbool);
@@ -152,7 +189,8 @@ void prom_getstring(int node, const char *prop, char *user_buf, int ubuf_size)
        int len;
 
        len = prom_getproperty(node, prop, user_buf, ubuf_size);
-       if(len != -1) return;
+       if (len != -1)
+               return;
        user_buf[0] = 0;
 }
 EXPORT_SYMBOL(prom_getstring);
@@ -164,7 +202,8 @@ int prom_nodematch(int node, const char *name)
 {
        char namebuf[128];
        prom_getproperty(node, "name", namebuf, sizeof(namebuf));
-       if(strcmp(namebuf, name) == 0) return 1;
+       if (strcmp(namebuf, name) == 0)
+               return 1;
        return 0;
 }
 
@@ -190,16 +229,29 @@ int prom_searchsiblings(int node_start, const char *nodename)
 }
 EXPORT_SYMBOL(prom_searchsiblings);
 
+static const char *prom_nextprop_name = "nextprop";
+
 /* Return the first property type for node 'node'.
  * buffer should be at least 32B in length
  */
 inline char *prom_firstprop(int node, char *buffer)
 {
+       unsigned long args[7];
+
        *buffer = 0;
-       if(node == -1) return buffer;
-       p1275_cmd ("nextprop", P1275_ARG(2,P1275_ARG_OUT_32B)|
-                              P1275_INOUT(3, 0), 
-                              node, (char *) 0x0, buffer);
+       if (node == -1)
+               return buffer;
+
+       args[0] = (unsigned long) prom_nextprop_name;
+       args[1] = 3;
+       args[2] = 1;
+       args[3] = (unsigned int) node;
+       args[4] = 0;
+       args[5] = (unsigned long) buffer;
+       args[6] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
+
        return buffer;
 }
 EXPORT_SYMBOL(prom_firstprop);
@@ -210,9 +262,10 @@ EXPORT_SYMBOL(prom_firstprop);
  */
 inline char *prom_nextprop(int node, const char *oprop, char *buffer)
 {
+       unsigned long args[7];
        char buf[32];
 
-       if(node == -1) {
+       if (node == -1) {
                *buffer = 0;
                return buffer;
        }
@@ -220,10 +273,17 @@ inline char *prom_nextprop(int node, const char *oprop, char *buffer)
                strcpy (buf, oprop);
                oprop = buf;
        }
-       p1275_cmd ("nextprop", P1275_ARG(1,P1275_ARG_IN_STRING)|
-                                   P1275_ARG(2,P1275_ARG_OUT_32B)|
-                                   P1275_INOUT(3, 0), 
-                                   node, oprop, buffer); 
+
+       args[0] = (unsigned long) prom_nextprop_name;
+       args[1] = 3;
+       args[2] = 1;
+       args[3] = (unsigned int) node;
+       args[4] = (unsigned long) oprop;
+       args[5] = (unsigned long) buffer;
+       args[6] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
+
        return buffer;
 }
 EXPORT_SYMBOL(prom_nextprop);
@@ -231,12 +291,19 @@ EXPORT_SYMBOL(prom_nextprop);
 int
 prom_finddevice(const char *name)
 {
+       unsigned long args[5];
+
        if (!name)
                return 0;
-       return p1275_cmd(prom_finddev_name,
-                        P1275_ARG(0,P1275_ARG_IN_STRING)|
-                        P1275_INOUT(1, 1), 
-                        name);
+       args[0] = (unsigned long) "finddevice";
+       args[1] = 1;
+       args[2] = 1;
+       args[3] = (unsigned long) name;
+       args[4] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
+
+       return (int) args[4];
 }
 EXPORT_SYMBOL(prom_finddevice);
 
@@ -247,7 +314,7 @@ int prom_node_has_property(int node, const char *prop)
        *buf = 0;
        do {
                prom_nextprop(node, buf, buf);
-               if(!strcmp(buf, prop))
+               if (!strcmp(buf, prop))
                        return 1;
        } while (*buf);
        return 0;
@@ -260,6 +327,8 @@ EXPORT_SYMBOL(prom_node_has_property);
 int
 prom_setprop(int node, const char *pname, char *value, int size)
 {
+       unsigned long args[8];
+
        if (size == 0)
                return 0;
        if ((pname == 0) || (value == 0))
@@ -271,19 +340,37 @@ prom_setprop(int node, const char *pname, char *value, int size)
                return 0;
        }
 #endif
-       return p1275_cmd ("setprop", P1275_ARG(1,P1275_ARG_IN_STRING)|
-                                         P1275_ARG(2,P1275_ARG_IN_BUF)|
-                                         P1275_INOUT(4, 1), 
-                                         node, pname, value, P1275_SIZE(size));
+       args[0] = (unsigned long) "setprop";
+       args[1] = 4;
+       args[2] = 1;
+       args[3] = (unsigned int) node;
+       args[4] = (unsigned long) pname;
+       args[5] = (unsigned long) value;
+       args[6] = size;
+       args[7] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
+
+       return (int) args[7];
 }
 EXPORT_SYMBOL(prom_setprop);
 
 inline int prom_inst2pkg(int inst)
 {
+       unsigned long args[5];
        int node;
        
-       node = p1275_cmd ("instance-to-package", P1275_INOUT(1, 1), inst);
-       if (node == -1) return 0;
+       args[0] = (unsigned long) "instance-to-package";
+       args[1] = 1;
+       args[2] = 1;
+       args[3] = (unsigned int) inst;
+       args[4] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
+
+       node = (int) args[4];
+       if (node == -1)
+               return 0;
        return node;
 }
 
@@ -296,17 +383,28 @@ prom_pathtoinode(const char *path)
        int node, inst;
 
        inst = prom_devopen (path);
-       if (inst == 0) return 0;
-       node = prom_inst2pkg (inst);
-       prom_devclose (inst);
-       if (node == -1) return 0;
+       if (inst == 0)
+               return 0;
+       node = prom_inst2pkg(inst);
+       prom_devclose(inst);
+       if (node == -1)
+               return 0;
        return node;
 }
 
 int prom_ihandle2path(int handle, char *buffer, int bufsize)
 {
-       return p1275_cmd("instance-to-path",
-                        P1275_ARG(1,P1275_ARG_OUT_BUF)|
-                        P1275_INOUT(3, 1),
-                        handle, buffer, P1275_SIZE(bufsize));
+       unsigned long args[7];
+
+       args[0] = (unsigned long) "instance-to-path";
+       args[1] = 3;
+       args[2] = 1;
+       args[3] = (unsigned int) handle;
+       args[4] = (unsigned long) buffer;
+       args[5] = bufsize;
+       args[6] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
+
+       return (int) args[6];
 }
index de317d0c329486b57efb3568f7c4514393ac5677..ebc680717e59f69394793eeea1cd6e5e9ae50034 100644 (file)
@@ -690,7 +690,7 @@ static void with_console(struct mc_request *req, void (*proc)(void *),
 static void sysrq_proc(void *arg)
 {
        char *op = arg;
-       handle_sysrq(*op, NULL);
+       handle_sysrq(*op);
 }
 
 void mconsole_sysrq(struct mc_request *req)
index f35eb45d6576258e7dba242934d76376380b531c..c4191b3b7056c6ad16563375f529d1480668a26e 100644 (file)
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
-void *
+void __iomem *
 iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
 
 void
-iounmap_atomic(void *kvaddr, enum km_type type);
+iounmap_atomic(void __iomem *kvaddr, enum km_type type);
 
 int
 iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
index 404a880ea325510ab2f659e65c02b54314948cfb..d395540ff8948f112a8de16774f2b17bfb1dd695 100644 (file)
@@ -27,6 +27,9 @@ extern struct pci_bus *pci_scan_bus_on_node(int busno, struct pci_ops *ops,
                                            int node);
 extern struct pci_bus *pci_scan_bus_with_sysdata(int busno);
 
+#ifdef CONFIG_PCI
+
+#ifdef CONFIG_PCI_DOMAINS
 static inline int pci_domain_nr(struct pci_bus *bus)
 {
        struct pci_sysdata *sd = bus->sysdata;
@@ -37,13 +40,12 @@ static inline int pci_proc_domain(struct pci_bus *bus)
 {
        return pci_domain_nr(bus);
 }
-
+#endif
 
 /* Can be used to override the logic in pci_scan_bus for skipping
    already-configured bus numbers - to be used for buggy BIOSes
    or architectures with incomplete PCI setup by the loader */
 
-#ifdef CONFIG_PCI
 extern unsigned int pcibios_assign_all_busses(void);
 extern int pci_legacy_init(void);
 # ifdef CONFIG_ACPI
index c0427295e8f58956e32f833c78c9ad75676778d2..1ca132fc0d039cbc8c3b7f605fa4dbbd91db7291 100644 (file)
@@ -59,5 +59,7 @@ extern void check_tsc_sync_source(int cpu);
 extern void check_tsc_sync_target(void);
 
 extern int notsc_setup(char *);
+extern void save_sched_clock_state(void);
+extern void restore_sched_clock_state(void);
 
 #endif /* _ASM_X86_TSC_H */
index 224392d8fe8c095390439a10ea45af2e21bc0930..5e975298fa819ff3ca648e647f531700edb1f408 100644 (file)
@@ -530,7 +530,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
                err = -ENOMEM;
                goto out;
        }
-       if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
+       if (!zalloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
                kfree(b);
                err = -ENOMEM;
                goto out;
@@ -543,7 +543,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 #ifndef CONFIG_SMP
        cpumask_setall(b->cpus);
 #else
-       cpumask_copy(b->cpus, c->llc_shared_map);
+       cpumask_set_cpu(cpu, b->cpus);
 #endif
 
        per_cpu(threshold_banks, cpu)[bank] = b;
index c2a8b26d4feacf4ac6b6b022c0a9c590fbbcef85..d9368eeda3090eb9f53482704e000e600b6237e3 100644 (file)
@@ -202,10 +202,11 @@ static int therm_throt_process(bool new_event, int event, int level)
 
 #ifdef CONFIG_SYSFS
 /* Add/Remove thermal_throttle interface for CPU device: */
-static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev)
+static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev,
+                               unsigned int cpu)
 {
        int err;
-       struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+       struct cpuinfo_x86 *c = &cpu_data(cpu);
 
        err = sysfs_create_group(&sys_dev->kobj, &thermal_attr_group);
        if (err)
@@ -251,7 +252,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                mutex_lock(&therm_cpu_lock);
-               err = thermal_throttle_add_dev(sys_dev);
+               err = thermal_throttle_add_dev(sys_dev, cpu);
                mutex_unlock(&therm_cpu_lock);
                WARN_ON(err);
                break;
@@ -287,7 +288,7 @@ static __init int thermal_throttle_init_device(void)
 #endif
        /* connect live CPUs to sysfs */
        for_each_online_cpu(cpu) {
-               err = thermal_throttle_add_dev(get_cpu_sysdev(cpu));
+               err = thermal_throttle_add_dev(get_cpu_sysdev(cpu), cpu);
                WARN_ON(err);
        }
 #ifdef CONFIG_HOTPLUG_CPU
index f2da20fda02ddf6fcd449a88ba399fe4ed44af2a..3efdf2870a3572263add749326aa5925df7c461f 100644 (file)
@@ -1154,7 +1154,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
                /*
                 * event overflow
                 */
-               handled         = 1;
+               handled++;
                data.period     = event->hw.last_period;
 
                if (!x86_perf_event_set_period(event))
@@ -1200,12 +1200,20 @@ void perf_events_lapic_init(void)
        apic_write(APIC_LVTPC, APIC_DM_NMI);
 }
 
+struct pmu_nmi_state {
+       unsigned int    marked;
+       int             handled;
+};
+
+static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);
+
 static int __kprobes
 perf_event_nmi_handler(struct notifier_block *self,
                         unsigned long cmd, void *__args)
 {
        struct die_args *args = __args;
-       struct pt_regs *regs;
+       unsigned int this_nmi;
+       int handled;
 
        if (!atomic_read(&active_events))
                return NOTIFY_DONE;
@@ -1214,22 +1222,47 @@ perf_event_nmi_handler(struct notifier_block *self,
        case DIE_NMI:
        case DIE_NMI_IPI:
                break;
-
+       case DIE_NMIUNKNOWN:
+               this_nmi = percpu_read(irq_stat.__nmi_count);
+               if (this_nmi != __get_cpu_var(pmu_nmi).marked)
+                       /* let the kernel handle the unknown nmi */
+                       return NOTIFY_DONE;
+               /*
+                * This is a PMU back-to-back NMI: two events trigger
+                * 'simultaneously', raising two back-to-back NMIs.  If
+                * the first NMI handles both, the second is empty and
+                * would daze the CPU.  Drop it here to avoid a
+                * false-positive 'unknown nmi' message.
+                */
+               return NOTIFY_STOP;
        default:
                return NOTIFY_DONE;
        }
 
-       regs = args->regs;
-
        apic_write(APIC_LVTPC, APIC_DM_NMI);
-       /*
-        * Can't rely on the handled return value to say it was our NMI, two
-        * events could trigger 'simultaneously' raising two back-to-back NMIs.
-        *
-        * If the first NMI handles both, the latter will be empty and daze
-        * the CPU.
-        */
-       x86_pmu.handle_irq(regs);
+
+       handled = x86_pmu.handle_irq(args->regs);
+       if (!handled)
+               return NOTIFY_DONE;
+
+       this_nmi = percpu_read(irq_stat.__nmi_count);
+       if ((handled > 1) ||
+               /* the next nmi could be a back-to-back nmi */
+           ((__get_cpu_var(pmu_nmi).marked == this_nmi) &&
+            (__get_cpu_var(pmu_nmi).handled > 1))) {
+               /*
+                * We could see three back-to-back NMIs in a row: the
+                * first handles more than one counter, the second
+                * handles exactly one counter, and the third handles
+                * none.
+                *
+                * This is the second NMI, because the previous one
+                * handled more than one counter.  Mark the next (third)
+                * NMI so it can be dropped if it turns out unhandled.
+                */
+               __get_cpu_var(pmu_nmi).marked   = this_nmi + 1;
+               __get_cpu_var(pmu_nmi).handled  = handled;
+       }
 
        return NOTIFY_STOP;
 }
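The new DIE_NMIUNKNOWN handling depends on the PMU handler reporting how many counters it serviced (see the intel/p4 changes below) together with a per-cpu "marked" NMI sequence number. The sketch below models only that bookkeeping, with simplified types and outside any real notifier or per-cpu plumbing.

/* Minimal sketch of the marked/handled logic, not the kernel code. */
struct pmu_nmi_state {
	unsigned int	marked;		/* nmi_count value expected to be swallowed */
	int		handled;	/* counters serviced by the previous NMI */
};

static int pmu_nmi_filter(struct pmu_nmi_state *s, unsigned int this_nmi,
			  int handled)
{
	if (!handled)
		return 0;		/* not ours: let others see the NMI */

	/* If this NMI handled several counters, or the previous one did
	 * and predicted this sequence number, the next NMI may be the
	 * empty back-to-back one; mark it so it can be swallowed when it
	 * arrives as DIE_NMIUNKNOWN. */
	if (handled > 1 || (s->marked == this_nmi && s->handled > 1)) {
		s->marked = this_nmi + 1;
		s->handled = handled;
	}
	return 1;			/* consumed: stop the notifier chain */
}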
index d8d86d01400866c6320001fb715301276747cd52..ee05c90012d269e66de9ffb6a5952d1434317c1e 100644 (file)
@@ -712,7 +712,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
        struct perf_sample_data data;
        struct cpu_hw_events *cpuc;
        int bit, loops;
-       u64 ack, status;
+       u64 status;
+       int handled = 0;
 
        perf_sample_data_init(&data, 0);
 
@@ -728,6 +729,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 
        loops = 0;
 again:
+       intel_pmu_ack_status(status);
        if (++loops > 100) {
                WARN_ONCE(1, "perfevents: irq loop stuck!\n");
                perf_event_print_debug();
@@ -736,19 +738,22 @@ again:
        }
 
        inc_irq_stat(apic_perf_irqs);
-       ack = status;
 
        intel_pmu_lbr_read();
 
        /*
         * PEBS overflow sets bit 62 in the global status register
         */
-       if (__test_and_clear_bit(62, (unsigned long *)&status))
+       if (__test_and_clear_bit(62, (unsigned long *)&status)) {
+               handled++;
                x86_pmu.drain_pebs(regs);
+       }
 
        for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
                struct perf_event *event = cpuc->events[bit];
 
+               handled++;
+
                if (!test_bit(bit, cpuc->active_mask))
                        continue;
 
@@ -761,8 +766,6 @@ again:
                        x86_pmu_stop(event);
        }
 
-       intel_pmu_ack_status(ack);
-
        /*
         * Repeat if there is more work to be done:
         */
@@ -772,7 +775,7 @@ again:
 
 done:
        intel_pmu_enable_all(0);
-       return 1;
+       return handled;
 }
 
 static struct event_constraint *
index febb12cea795268f224a9d39937e3f27bb86bf67..b560db3305be16ff954fc416137d17b17189b5e7 100644 (file)
@@ -497,6 +497,8 @@ static int p4_hw_config(struct perf_event *event)
                event->hw.config |= event->attr.config &
                        (p4_config_pack_escr(P4_ESCR_MASK_HT) |
                         p4_config_pack_cccr(P4_CCCR_MASK_HT | P4_CCCR_RESERVED));
+
+               event->hw.config &= ~P4_CCCR_FORCE_OVF;
        }
 
        rc = x86_setup_perfctr(event);
@@ -690,7 +692,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
                inc_irq_stat(apic_perf_irqs);
        }
 
-       return handled > 0;
+       return handled;
 }
 
 /*
index a874495b3673baeb27467d144995d885f2f94ebc..e2a5952573905b2eeac3d18060be54532dbfdfde 100644 (file)
@@ -45,8 +45,7 @@ void __init setup_trampoline_page_table(void)
        /* Copy kernel address range */
        clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY,
                        swapper_pg_dir + KERNEL_PGD_BOUNDARY,
-                       min_t(unsigned long, KERNEL_PGD_PTRS,
-                             KERNEL_PGD_BOUNDARY));
+                       KERNEL_PGD_PTRS);
 
        /* Initialize low mappings */
        clone_pgd_range(trampoline_pg_dir,
index ce8e50239332470ba329dd3045692fee42f69a16..d632934cb6386947352650f262745eb3c93c68ce 100644 (file)
@@ -626,6 +626,44 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
        local_irq_restore(flags);
 }
 
+static unsigned long long cyc2ns_suspend;
+
+void save_sched_clock_state(void)
+{
+       if (!sched_clock_stable)
+               return;
+
+       cyc2ns_suspend = sched_clock();
+}
+
+/*
+ * Even on processors with an invariant TSC, the TSC gets reset in some of
+ * the ACPI system sleep states. On some systems the BIOS also seems to
+ * reinit the TSC to an arbitrary value (still sync'd across cpus) during
+ * resume from such sleep states. To cope with this, recompute the
+ * cyc2ns_offset for each cpu so that sched_clock() continues from the
+ * point where it was left off during suspend.
+ */
+void restore_sched_clock_state(void)
+{
+       unsigned long long offset;
+       unsigned long flags;
+       int cpu;
+
+       if (!sched_clock_stable)
+               return;
+
+       local_irq_save(flags);
+
+       get_cpu_var(cyc2ns_offset) = 0;
+       offset = cyc2ns_suspend - sched_clock();
+
+       for_each_possible_cpu(cpu)
+               per_cpu(cyc2ns_offset, cpu) = offset;
+
+       local_irq_restore(flags);
+}
+
 #ifdef CONFIG_CPU_FREQ
 
 /* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
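As a standalone toy model of why the offset computed above restores continuity (scaled_ns stands in for the kernel's cycles-to-nanoseconds conversion; all names are illustrative):

    #include <stdint.h>

    static uint64_t cyc2ns_offset;

    /* sched_clock() modelled as "scaled TSC plus a per-cpu offset". */
    static uint64_t sched_clock_model(uint64_t scaled_ns)
    {
            return scaled_ns + cyc2ns_offset;
    }

    static void restore_model(uint64_t saved_at_suspend, uint64_t scaled_ns_now)
    {
            cyc2ns_offset = 0;                 /* as in the hunk above */
            cyc2ns_offset = saved_at_suspend - sched_clock_model(scaled_ns_now);
            /* From here on sched_clock_model() resumes at saved_at_suspend and
             * keeps counting, even if the TSC itself was reset by firmware. */
    }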
index 84e236ce76ba9a8afd624cfc4c506ebaa654b926..72fc70cf6184c756b1157f272b0d5e2b7bcc0609 100644 (file)
@@ -74,7 +74,7 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 /*
  * Map 'pfn' using fixed map 'type' and protections 'prot'
  */
-void *
+void __iomem *
 iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 {
        /*
@@ -86,12 +86,12 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
        if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
                prot = PAGE_KERNEL_UC_MINUS;
 
-       return kmap_atomic_prot_pfn(pfn, type, prot);
+       return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, type, prot);
 }
 EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
 
 void
-iounmap_atomic(void *kvaddr, enum km_type type)
+iounmap_atomic(void __iomem *kvaddr, enum km_type type)
 {
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
index f6b48f6c595176a59c8e2fe5fa145bc11acca118..cfe4faabb0f6792aebfb8330dd55f02406b148dc 100644 (file)
@@ -568,8 +568,13 @@ static int __init init_sysfs(void)
        int error;
 
        error = sysdev_class_register(&oprofile_sysclass);
-       if (!error)
-               error = sysdev_register(&device_oprofile);
+       if (error)
+               return error;
+
+       error = sysdev_register(&device_oprofile);
+       if (error)
+               sysdev_class_unregister(&oprofile_sysclass);
+
        return error;
 }
 
@@ -580,8 +585,10 @@ static void exit_sysfs(void)
 }
 
 #else
-#define init_sysfs() do { } while (0)
-#define exit_sysfs() do { } while (0)
+
+static inline int  init_sysfs(void) { return 0; }
+static inline void exit_sysfs(void) { }
+
 #endif /* CONFIG_PM */
 
 static int __init p4_init(char **cpu_type)
@@ -695,6 +702,8 @@ int __init op_nmi_init(struct oprofile_operations *ops)
        char *cpu_type = NULL;
        int ret = 0;
 
+       using_nmi = 0;
+
        if (!cpu_has_apic)
                return -ENODEV;
 
@@ -774,7 +783,10 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 
        mux_init(ops);
 
-       init_sysfs();
+       ret = init_sysfs();
+       if (ret)
+               return ret;
+
        using_nmi = 1;
        printk(KERN_INFO "oprofile: using NMI interrupt.\n");
        return 0;
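The error handling above follows a register-then-rollback shape; a standalone sketch with placeholder helpers (not oprofile's real API):

    static int  register_class(void)    { return 0; }   /* placeholders */
    static int  register_device(void)   { return -1; }
    static void unregister_class(void)  { }

    static int init_sysfs_sketch(void)
    {
            int error = register_class();
            if (error)
                    return error;

            error = register_device();
            if (error)
                    unregister_class();   /* undo step one on failure */

            return error;
    }

op_nmi_init() then propagates the error instead of silently continuing, which is why using_nmi is now cleared up front and only set once init_sysfs() has succeeded.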
index e7e8c5f549563a6b65a4b139d8202ee878c80c74..87bb35e34ef175d0a8b3beedd5c4d76010c9169d 100644 (file)
@@ -113,6 +113,7 @@ static void __save_processor_state(struct saved_context *ctxt)
 void save_processor_state(void)
 {
        __save_processor_state(&saved_context);
+       save_sched_clock_state();
 }
 #ifdef CONFIG_X86_32
 EXPORT_SYMBOL(save_processor_state);
@@ -229,6 +230,7 @@ static void __restore_processor_state(struct saved_context *ctxt)
 void restore_processor_state(void)
 {
        __restore_processor_state(&saved_context);
+       restore_sched_clock_state();
 }
 #ifdef CONFIG_X86_32
 EXPORT_SYMBOL(restore_processor_state);
index 554c002a1e1afcd13a71c90a25eff86057e56832..0f456386cce5dc3a0c08ed45bfaf0a268be25819 100644 (file)
@@ -72,13 +72,17 @@ void __init xen_unplug_emulated_devices(void)
 {
        int r;
 
+       /* user explicitly requested no unplug */
+       if (xen_emul_unplug & XEN_UNPLUG_NEVER)
+               return;
        /* check the version of the xen platform PCI device */
        r = check_platform_magic();
        /* If the version matches enable the Xen platform PCI driver.
-        * Also enable the Xen platform PCI driver if the version is really old
-        * and the user told us to ignore it. */
+        * Also enable the Xen platform PCI driver if the host does
+        * not support the unplug protocol (XEN_PLATFORM_ERR_MAGIC)
+        * but the user told us that unplugging is unnecessary. */
        if (r && !(r == XEN_PLATFORM_ERR_MAGIC &&
-                       (xen_emul_unplug & XEN_UNPLUG_IGNORE)))
+                       (xen_emul_unplug & XEN_UNPLUG_UNNECESSARY)))
                return;
        /* Set the default value of xen_emul_unplug depending on whether or
         * not the Xen PV frontends and the Xen platform PCI driver have
@@ -99,7 +103,7 @@ void __init xen_unplug_emulated_devices(void)
                }
        }
        /* Now unplug the emulated devices */
-       if (!(xen_emul_unplug & XEN_UNPLUG_IGNORE))
+       if (!(xen_emul_unplug & XEN_UNPLUG_UNNECESSARY))
                outw(xen_emul_unplug, XEN_IOPORT_UNPLUG);
        xen_platform_pci_unplug = xen_emul_unplug;
 }
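The early return above is driven by the xen_emul_unplug= kernel parameter; the keywords handled by the parser in the next hunk are given on the boot command line, for example (illustrative values):

    xen_emul_unplug=never           never unplug, keep all emulated devices
    xen_emul_unplug=unnecessary     host lacks the unplug protocol, but
                                    unplugging is known to be unneeded
    xen_emul_unplug=nics            unplug only the emulated network cards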
@@ -125,8 +129,10 @@ static int __init parse_xen_emul_unplug(char *arg)
                        xen_emul_unplug |= XEN_UNPLUG_AUX_IDE_DISKS;
                else if (!strncmp(p, "nics", l))
                        xen_emul_unplug |= XEN_UNPLUG_ALL_NICS;
-               else if (!strncmp(p, "ignore", l))
-                       xen_emul_unplug |= XEN_UNPLUG_IGNORE;
+               else if (!strncmp(p, "unnecessary", l))
+                       xen_emul_unplug |= XEN_UNPLUG_UNNECESSARY;
+               else if (!strncmp(p, "never", l))
+                       xen_emul_unplug |= XEN_UNPLUG_NEVER;
                else
                        printk(KERN_WARNING "unrecognised option '%s' "
                                 "in parameter 'xen_emul_unplug'\n", p);
index 1cd497d7a15aa2cdbd96be5139ca64be8135e528..e573077f1672e06bf159c9a242564c5436fc0a75 100644 (file)
@@ -101,13 +101,13 @@ config CRYPTO_MANAGER2
        select CRYPTO_BLKCIPHER2
        select CRYPTO_PCOMP2
 
-config CRYPTO_MANAGER_TESTS
-       bool "Run algolithms' self-tests"
+config CRYPTO_MANAGER_DISABLE_TESTS
+       bool "Disable run-time self tests"
        default y
        depends on CRYPTO_MANAGER2
        help
-         Run cryptomanager's tests for the new crypto algorithms being
-         registered.
+         Disable run-time self tests that normally take place at
+         algorithm registration.
 
 config CRYPTO_GF128MUL
        tristate "GF(2^128) multiplication functions (EXPERIMENTAL)"
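For illustration, the two states of the renamed symbol in a kernel .config:

    CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
            run-time self-tests are skipped at algorithm registration

    # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
            self-tests run when each algorithm is registered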
index b8c59b889c6ea579e7a6bacd7241bbbc86cbe44c..f669822a7a443c81a04c8fb61d8d382a64546542 100644 (file)
@@ -47,8 +47,11 @@ static int hash_walk_next(struct crypto_hash_walk *walk)
        walk->data = crypto_kmap(walk->pg, 0);
        walk->data += offset;
 
-       if (offset & alignmask)
-               nbytes = alignmask + 1 - (offset & alignmask);
+       if (offset & alignmask) {
+               unsigned int unaligned = alignmask + 1 - (offset & alignmask);
+               if (nbytes > unaligned)
+                       nbytes = unaligned;
+       }
 
        walk->entrylen -= nbytes;
        return nbytes;
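A small standalone program working through the cap added above with illustrative numbers:

    #include <stdio.h>

    int main(void)
    {
            unsigned int alignmask = 63, offset = 60, nbytes = 2;   /* example */
            unsigned int unaligned = alignmask + 1 - (offset & alignmask);  /* 4 */

            if (offset & alignmask) {
                    if (nbytes > unaligned)   /* new code: cap, never grow */
                            nbytes = unaligned;
            }
            /* The old code set nbytes = unaligned unconditionally, reporting 4
             * bytes consumed when only 2 remained in this walk entry. */
            printf("nbytes = %u\n", nbytes);   /* prints 2 */
            return 0;
    }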
index 40bd391f34d90fc0b0619809beb0f09759b307f5..791d194958fa18ac0fb8006e5d565f789c1e4f92 100644 (file)
@@ -206,13 +206,16 @@ err:
        return NOTIFY_OK;
 }
 
-#ifdef CONFIG_CRYPTO_MANAGER_TESTS
 static int cryptomgr_test(void *data)
 {
        struct crypto_test_param *param = data;
        u32 type = param->type;
        int err = 0;
 
+#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
+       goto skiptest;
+#endif
+
        if (type & CRYPTO_ALG_TESTED)
                goto skiptest;
 
@@ -267,7 +270,6 @@ err_put_module:
 err:
        return NOTIFY_OK;
 }
-#endif /* CONFIG_CRYPTO_MANAGER_TESTS */
 
 static int cryptomgr_notify(struct notifier_block *this, unsigned long msg,
                            void *data)
@@ -275,10 +277,8 @@ static int cryptomgr_notify(struct notifier_block *this, unsigned long msg,
        switch (msg) {
        case CRYPTO_MSG_ALG_REQUEST:
                return cryptomgr_schedule_probe(data);
-#ifdef CONFIG_CRYPTO_MANAGER_TESTS
        case CRYPTO_MSG_ALG_REGISTER:
                return cryptomgr_schedule_test(data);
-#endif
        }
 
        return NOTIFY_DONE;
index abd980c729ebce5a57e833a0937bf233dc8937cc..fa8c8f78c8d4c1838d121e6131b05162858b2802 100644 (file)
@@ -23,7 +23,7 @@
 
 #include "internal.h"
 
-#ifndef CONFIG_CRYPTO_MANAGER_TESTS
+#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
 
 /* a perfect nop */
 int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
@@ -2542,6 +2542,6 @@ non_fips_alg:
        return -EINVAL;
 }
 
-#endif /* CONFIG_CRYPTO_MANAGER_TESTS */
+#endif /* CONFIG_CRYPTO_MANAGER_DISABLE_TESTS */
 
 EXPORT_SYMBOL_GPL(alg_test);
index 1f67057af2a571991481bb761d157320b9d3f329..3ba8d1f44a73ed3b234e810f5316f6531e4d6718 100644 (file)
@@ -33,7 +33,6 @@
 #include <linux/pm_runtime.h>
 #include <linux/pci.h>
 #include <linux/pci-acpi.h>
-#include <linux/pci-aspm.h>
 #include <linux/acpi.h>
 #include <linux/slab.h>
 #include <acpi/acpi_bus.h>
@@ -226,22 +225,31 @@ static acpi_status acpi_pci_run_osc(acpi_handle handle,
        return status;
 }
 
-static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root, u32 flags)
+static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root,
+                                       u32 support,
+                                       u32 *control)
 {
        acpi_status status;
-       u32 support_set, result, capbuf[3];
+       u32 result, capbuf[3];
+
+       support &= OSC_PCI_SUPPORT_MASKS;
+       support |= root->osc_support_set;
 
-       /* do _OSC query for all possible controls */
-       support_set = root->osc_support_set | (flags & OSC_PCI_SUPPORT_MASKS);
        capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
-       capbuf[OSC_SUPPORT_TYPE] = support_set;
-       capbuf[OSC_CONTROL_TYPE] = OSC_PCI_CONTROL_MASKS;
+       capbuf[OSC_SUPPORT_TYPE] = support;
+       if (control) {
+               *control &= OSC_PCI_CONTROL_MASKS;
+               capbuf[OSC_CONTROL_TYPE] = *control | root->osc_control_set;
+       } else {
+               /* Run _OSC query for all possible controls. */
+               capbuf[OSC_CONTROL_TYPE] = OSC_PCI_CONTROL_MASKS;
+       }
 
        status = acpi_pci_run_osc(root->device->handle, capbuf, &result);
        if (ACPI_SUCCESS(status)) {
-               root->osc_support_set = support_set;
-               root->osc_control_qry = result;
-               root->osc_queried = 1;
+               root->osc_support_set = support;
+               if (control)
+                       *control = result;
        }
        return status;
 }
@@ -255,7 +263,7 @@ static acpi_status acpi_pci_osc_support(struct acpi_pci_root *root, u32 flags)
        if (ACPI_FAILURE(status))
                return status;
        mutex_lock(&osc_lock);
-       status = acpi_pci_query_osc(root, flags);
+       status = acpi_pci_query_osc(root, flags, NULL);
        mutex_unlock(&osc_lock);
        return status;
 }
@@ -365,55 +373,70 @@ out:
 EXPORT_SYMBOL_GPL(acpi_get_pci_dev);
 
 /**
- * acpi_pci_osc_control_set - commit requested control to Firmware
- * @handle: acpi_handle for the target ACPI object
- * @flags: driver's requested control bits
+ * acpi_pci_osc_control_set - Request control of PCI root _OSC features.
+ * @handle: ACPI handle of a PCI root bridge (or PCIe Root Complex).
+ * @mask: Mask of _OSC bits to request control of, place to store control mask.
+ * @req: Mask of _OSC bits whose control is essential to the caller.
+ *
+ * Run _OSC query for @mask and if that is successful, compare the returned
+ * mask of control bits with @req.  If all of the @req bits are set in the
+ * returned mask, run _OSC request for it.
  *
- * Attempt to take control from Firmware on requested control bits.
+ * The variable at the @mask address may be modified regardless of whether or
+ * not the function returns success.  On success it will contain the mask of
+ * _OSC bits the BIOS has granted control of, but its contents are meaningless
+ * on failure.
  **/
-acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 flags)
+acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req)
 {
+       struct acpi_pci_root *root;
        acpi_status status;
-       u32 control_req, result, capbuf[3];
+       u32 ctrl, capbuf[3];
        acpi_handle tmp;
-       struct acpi_pci_root *root;
 
-       status = acpi_get_handle(handle, "_OSC", &tmp);
-       if (ACPI_FAILURE(status))
-               return status;
+       if (!mask)
+               return AE_BAD_PARAMETER;
 
-       control_req = (flags & OSC_PCI_CONTROL_MASKS);
-       if (!control_req)
+       ctrl = *mask & OSC_PCI_CONTROL_MASKS;
+       if ((ctrl & req) != req)
                return AE_TYPE;
 
        root = acpi_pci_find_root(handle);
        if (!root)
                return AE_NOT_EXIST;
 
+       status = acpi_get_handle(handle, "_OSC", &tmp);
+       if (ACPI_FAILURE(status))
+               return status;
+
        mutex_lock(&osc_lock);
+
+       *mask = ctrl | root->osc_control_set;
        /* No need to evaluate _OSC if the control was already granted. */
-       if ((root->osc_control_set & control_req) == control_req)
+       if ((root->osc_control_set & ctrl) == ctrl)
                goto out;
 
-       /* Need to query controls first before requesting them */
-       if (!root->osc_queried) {
-               status = acpi_pci_query_osc(root, root->osc_support_set);
+       /* Need to check the available control bits before requesting them. */
+       while (*mask) {
+               status = acpi_pci_query_osc(root, root->osc_support_set, mask);
                if (ACPI_FAILURE(status))
                        goto out;
+               if (ctrl == *mask)
+                       break;
+               ctrl = *mask;
        }
-       if ((root->osc_control_qry & control_req) != control_req) {
-               printk(KERN_DEBUG
-                      "Firmware did not grant requested _OSC control\n");
+
+       if ((ctrl & req) != req) {
                status = AE_SUPPORT;
                goto out;
        }
 
        capbuf[OSC_QUERY_TYPE] = 0;
        capbuf[OSC_SUPPORT_TYPE] = root->osc_support_set;
-       capbuf[OSC_CONTROL_TYPE] = root->osc_control_set | control_req;
-       status = acpi_pci_run_osc(handle, capbuf, &result);
+       capbuf[OSC_CONTROL_TYPE] = ctrl;
+       status = acpi_pci_run_osc(handle, capbuf, mask);
        if (ACPI_SUCCESS(status))
-               root->osc_control_set = result;
+               root->osc_control_set = *mask;
 out:
        mutex_unlock(&osc_lock);
        return status;
@@ -544,14 +567,6 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
        if (flags != base_flags)
                acpi_pci_osc_support(root, flags);
 
-       status = acpi_pci_osc_control_set(root->device->handle,
-                                         OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
-
-       if (ACPI_FAILURE(status)) {
-               printk(KERN_INFO "Unable to assume PCIe control: Disabling ASPM\n");
-               pcie_no_aspm();
-       }
-
        pci_acpi_add_bus_pm_notifier(device, root->bus);
        if (device->wakeup.flags.run_wake)
                device_set_run_wake(root->bus->bridge, true);
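A hedged, kernel-context fragment showing how a caller might use the reworked interface; 'handle' is the bridge's ACPI handle, and the OSC_* control bits named here are assumptions taken from elsewhere in the tree, chosen only as an example:

    u32 flags = OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | OSC_PCI_EXPRESS_PME_CONTROL;
    acpi_status status;

    /* Native hotplug control is required; PME control is requested but optional. */
    status = acpi_pci_osc_control_set(handle, &flags,
                                      OSC_PCI_EXPRESS_NATIVE_HP_CONTROL);
    if (ACPI_SUCCESS(status)) {
            /* 'flags' now holds every control bit firmware actually granted. */
    }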
index 65e3e2708371962a3878061a8b71666230e80c2c..11ec911016c6ab3e81cefe7e202c4cc4c95762f0 100644 (file)
@@ -828,6 +828,7 @@ config PATA_SAMSUNG_CF
 config PATA_WINBOND_VLB
        tristate "Winbond W83759A VLB PATA support (Experimental)"
        depends on ISA && EXPERIMENTAL
+       select PATA_LEGACY
        help
          Support for the Winbond W83759A controller on Vesa Local Bus
          systems.
index 158eaa961b1e6f0d247f78691b8f4b309a9d6622..d5df04a395ca6e9eb057cfb7496b3101ffee69b7 100644 (file)
@@ -89,7 +89,6 @@ obj-$(CONFIG_PATA_QDI)                += pata_qdi.o
 obj-$(CONFIG_PATA_RB532)       += pata_rb532_cf.o
 obj-$(CONFIG_PATA_RZ1000)      += pata_rz1000.o
 obj-$(CONFIG_PATA_SAMSUNG_CF)  += pata_samsung_cf.o
-obj-$(CONFIG_PATA_WINBOND_VLB) += pata_winbond.o
 
 obj-$(CONFIG_PATA_PXA)         += pata_pxa.o
 
index fe75d8befc3a27aaa5d6c8048cbaa498416c7322..ff1c945fba98e39de55b42ce46a5e9af26b34c45 100644 (file)
@@ -60,6 +60,7 @@ enum board_ids {
        board_ahci,
        board_ahci_ign_iferr,
        board_ahci_nosntf,
+       board_ahci_yes_fbs,
 
        /* board IDs for specific chipsets in alphabetical order */
        board_ahci_mcp65,
@@ -132,6 +133,14 @@ static const struct ata_port_info ahci_port_info[] = {
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &ahci_ops,
        },
+       [board_ahci_yes_fbs] =
+       {
+               AHCI_HFLAGS     (AHCI_HFLAG_YES_FBS),
+               .flags          = AHCI_FLAG_COMMON,
+               .pio_mask       = ATA_PIO4,
+               .udma_mask      = ATA_UDMA6,
+               .port_ops       = &ahci_ops,
+       },
        /* by chipsets */
        [board_ahci_mcp65] =
        {
@@ -244,6 +253,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */
        { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
        { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
+       { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
+       { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */
+       { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */
 
        /* JMicron 360/1/3/5/6, match class to avoid IDE function */
        { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -362,6 +374,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        /* Marvell */
        { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv },        /* 6145 */
        { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv },        /* 6121 */
+       { PCI_DEVICE(0x1b4b, 0x9123),
+         .driver_data = board_ahci_yes_fbs },                  /* 88se9128 */
 
        /* Promise */
        { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci },   /* PDC42819 */
index 7113c5724471c7df3a37bae273bef3c73fb45d1b..474427b6f99f47a8dd997042cc9f2d271a743228 100644 (file)
@@ -209,6 +209,7 @@ enum {
                                                        link offline */
        AHCI_HFLAG_NO_SNTF              = (1 << 12), /* no sntf */
        AHCI_HFLAG_NO_FPDMA_AA          = (1 << 13), /* no FPDMA AA */
+       AHCI_HFLAG_YES_FBS              = (1 << 14), /* force FBS cap on */
 
        /* ap->flags bits */
 
index 3971bc0a4838235ea26f4bff0b97ec7dd3335b69..d712675d0a9657d3cc7742b74220aaf05d0fa537 100644 (file)
@@ -302,6 +302,10 @@ static const struct pci_device_id piix_pci_tbl[] = {
        { 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
        /* SATA Controller IDE (CPT) */
        { 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+       /* SATA Controller IDE (PBG) */
+       { 0x8086, 0x1d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+       /* SATA Controller IDE (PBG) */
+       { 0x8086, 0x1d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
        { }     /* terminate list */
 };
 
index 81e772a94d59857187fda27dbfe7702469fb590f..68dc6785472f86932f769029ae6046a0dffeec47 100644 (file)
@@ -430,6 +430,12 @@ void ahci_save_initial_config(struct device *dev,
                cap &= ~HOST_CAP_SNTF;
        }
 
+       if (!(cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_YES_FBS)) {
+               dev_printk(KERN_INFO, dev,
+                          "controller can do FBS, turning on CAP_FBS\n");
+               cap |= HOST_CAP_FBS;
+       }
+
        if (force_port_map && port_map != force_port_map) {
                dev_printk(KERN_INFO, dev, "forcing port_map 0x%x -> 0x%x\n",
                           port_map, force_port_map);
@@ -1320,7 +1326,7 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
        /* issue the first D2H Register FIS */
        msecs = 0;
        now = jiffies;
-       if (time_after(now, deadline))
+       if (time_after(deadline, now))
                msecs = jiffies_to_msecs(deadline - now);
 
        tf.ctl |= ATA_SRST;
@@ -2036,9 +2042,15 @@ static int ahci_port_start(struct ata_port *ap)
                u32 cmd = readl(port_mmio + PORT_CMD);
                if (cmd & PORT_CMD_FBSCP)
                        pp->fbs_supported = true;
-               else
+               else if (hpriv->flags & AHCI_HFLAG_YES_FBS) {
+                       dev_printk(KERN_INFO, dev,
+                                  "port %d can do FBS, forcing FBSCP\n",
+                                  ap->port_no);
+                       pp->fbs_supported = true;
+               } else
                        dev_printk(KERN_WARNING, dev,
-                                  "The port is not capable of FBS\n");
+                                  "port %d is not capable of FBS\n",
+                                  ap->port_no);
        }
 
        if (pp->fbs_supported) {
index 7ef7c4f216fa58567e161c82118155411d070074..932eaee5024527f4e617064726e39db808c17e87 100644 (file)
@@ -5111,15 +5111,18 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
        qc->flags |= ATA_QCFLAG_ACTIVE;
        ap->qc_active |= 1 << qc->tag;
 
-       /* We guarantee to LLDs that they will have at least one
+       /*
+        * We guarantee to LLDs that they will have at least one
         * non-zero sg if the command is a data command.
         */
-       BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
+       if (WARN_ON_ONCE(ata_is_data(prot) &&
+                        (!qc->sg || !qc->n_elem || !qc->nbytes)))
+               goto sys_err;
 
        if (ata_is_dma(prot) || (ata_is_pio(prot) &&
                                 (ap->flags & ATA_FLAG_PIO_DMA)))
                if (ata_sg_setup(qc))
-                       goto sg_err;
+                       goto sys_err;
 
        /* if device is sleeping, schedule reset and abort the link */
        if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
@@ -5136,7 +5139,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
                goto err;
        return;
 
-sg_err:
+sys_err:
        qc->err_mask |= AC_ERR_SYSTEM;
 err:
        ata_qc_complete(qc);
@@ -5415,6 +5418,7 @@ static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
  */
 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
 {
+       unsigned int ehi_flags = ATA_EHI_QUIET;
        int rc;
 
        /*
@@ -5423,7 +5427,18 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
         */
        ata_lpm_enable(host);
 
-       rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
+       /*
+        * On some hardware, the device fails to respond after being
+        * spun down for suspend.  As the device won't be used before
+        * being resumed, we don't need to touch it.  Ask EH to skip
+        * the usual stuff and proceed directly to suspend.
+        *
+        * http://thread.gmane.org/gmane.linux.ide/46764
+        */
+       if (mesg.event == PM_EVENT_SUSPEND)
+               ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;
+
+       rc = ata_host_request_pm(host, mesg, 0, ehi_flags, 1);
        if (rc == 0)
                host->dev->power.power_state = mesg;
        return rc;
index c9ae299b83428d039c13cce845e7acc497bcff13..e48302eae55fcd2ad619564c9cd8237c722efb29 100644 (file)
@@ -3235,6 +3235,10 @@ static int ata_eh_skip_recovery(struct ata_link *link)
        if (link->flags & ATA_LFLAG_DISABLED)
                return 1;
 
+       /* skip if explicitly requested */
+       if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
+               return 1;
+
        /* thaw frozen port and recover failed devices */
        if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
                return 0;
index 674c1436491f5e5e6df3a2334ee7f995775f7634..e30c537cce32f99ab7fcb07f7f124cd920c4cd4b 100644 (file)
@@ -418,6 +418,7 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
                if (ioaddr->ctl_addr)
                        iowrite8(tf->ctl, ioaddr->ctl_addr);
                ap->last_ctl = tf->ctl;
+               ata_wait_idle(ap);
        }
 
        if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
@@ -453,6 +454,8 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
                iowrite8(tf->device, ioaddr->device_addr);
                VPRINTK("device 0x%X\n", tf->device);
        }
+
+       ata_wait_idle(ap);
 }
 EXPORT_SYMBOL_GPL(ata_sff_tf_load);
 
@@ -1042,7 +1045,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
 int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
                     u8 status, int in_wq)
 {
-       struct ata_eh_info *ehi = &ap->link.eh_info;
+       struct ata_link *link = qc->dev->link;
+       struct ata_eh_info *ehi = &link->eh_info;
        unsigned long flags = 0;
        int poll_next;
 
@@ -1298,8 +1302,14 @@ fsm_start:
 }
 EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
 
-void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay)
+void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
 {
+       struct ata_port *ap = link->ap;
+
+       WARN_ON((ap->sff_pio_task_link != NULL) &&
+               (ap->sff_pio_task_link != link));
+       ap->sff_pio_task_link = link;
+
        /* may fail if ata_sff_flush_pio_task() in progress */
        queue_delayed_work(ata_sff_wq, &ap->sff_pio_task,
                           msecs_to_jiffies(delay));
@@ -1321,14 +1331,18 @@ static void ata_sff_pio_task(struct work_struct *work)
 {
        struct ata_port *ap =
                container_of(work, struct ata_port, sff_pio_task.work);
+       struct ata_link *link = ap->sff_pio_task_link;
        struct ata_queued_cmd *qc;
        u8 status;
        int poll_next;
 
+       BUG_ON(ap->sff_pio_task_link == NULL); 
        /* qc can be NULL if timeout occurred */
-       qc = ata_qc_from_tag(ap, ap->link.active_tag);
-       if (!qc)
+       qc = ata_qc_from_tag(ap, link->active_tag);
+       if (!qc) {
+               ap->sff_pio_task_link = NULL;
                return;
+       }
 
 fsm_start:
        WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);
@@ -1345,11 +1359,16 @@ fsm_start:
                msleep(2);
                status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
                if (status & ATA_BUSY) {
-                       ata_sff_queue_pio_task(ap, ATA_SHORT_PAUSE);
+                       ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
                        return;
                }
        }
 
+       /*
+        * hsm_move() may trigger another command to be processed.
+        * clean the link beforehand.
+        */
+       ap->sff_pio_task_link = NULL;
        /* move the HSM */
        poll_next = ata_sff_hsm_move(ap, qc, status, 1);
 
@@ -1376,6 +1395,7 @@ fsm_start:
 unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
 {
        struct ata_port *ap = qc->ap;
+       struct ata_link *link = qc->dev->link;
 
        /* Use polling pio if the LLD doesn't handle
         * interrupt driven pio and atapi CDB interrupt.
@@ -1396,7 +1416,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
                ap->hsm_task_state = HSM_ST_LAST;
 
                if (qc->tf.flags & ATA_TFLAG_POLLING)
-                       ata_sff_queue_pio_task(ap, 0);
+                       ata_sff_queue_pio_task(link, 0);
 
                break;
 
@@ -1409,7 +1429,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
                if (qc->tf.flags & ATA_TFLAG_WRITE) {
                        /* PIO data out protocol */
                        ap->hsm_task_state = HSM_ST_FIRST;
-                       ata_sff_queue_pio_task(ap, 0);
+                       ata_sff_queue_pio_task(link, 0);
 
                        /* always send first data block using the
                         * ata_sff_pio_task() codepath.
@@ -1419,7 +1439,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
                        ap->hsm_task_state = HSM_ST;
 
                        if (qc->tf.flags & ATA_TFLAG_POLLING)
-                               ata_sff_queue_pio_task(ap, 0);
+                               ata_sff_queue_pio_task(link, 0);
 
                        /* if polling, ata_sff_pio_task() handles the
                         * rest.  otherwise, interrupt handler takes
@@ -1441,7 +1461,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
                /* send cdb by polling if no cdb interrupt */
                if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
                    (qc->tf.flags & ATA_TFLAG_POLLING))
-                       ata_sff_queue_pio_task(ap, 0);
+                       ata_sff_queue_pio_task(link, 0);
                break;
 
        default:
@@ -2734,10 +2754,7 @@ EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
 unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
 {
        struct ata_port *ap = qc->ap;
-
-       /* see ata_dma_blacklisted() */
-       BUG_ON((ap->flags & ATA_FLAG_PIO_POLLING) &&
-              qc->tf.protocol == ATAPI_PROT_DMA);
+       struct ata_link *link = qc->dev->link;
 
        /* defer PIO handling to sff_qc_issue */
        if (!ata_is_dma(qc->tf.protocol))
@@ -2766,7 +2783,7 @@ unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
 
                /* send cdb by polling if no cdb interrupt */
                if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
-                       ata_sff_queue_pio_task(ap, 0);
+                       ata_sff_queue_pio_task(link, 0);
                break;
 
        default:
index ba43f0f8c880a21aa1b8b1d3b9ae3d8c3487d415..2215632e4b317684a818b0ee5cbb7037fe064079 100644 (file)
@@ -74,7 +74,8 @@ static int artop6260_pre_reset(struct ata_link *link, unsigned long deadline)
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 
        /* Odd numbered device ids are the units with enable bits (the -R cards) */
-       if (pdev->device % 1 && !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
+       if ((pdev->device & 1) &&
+           !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
                return -ENOENT;
 
        return ata_sff_prereset(link, deadline);
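Why the old test could never fire: the remainder of any integer divided by 1 is 0, so the enable-bit check for the -R cards was always skipped; testing the low bit is what actually distinguishes the odd-numbered device IDs. A tiny standalone illustration (the device ID is hypothetical):

    #include <stdio.h>

    int main(void)
    {
            unsigned int device = 0x0009;   /* hypothetical odd device id */

            printf("%u %u\n", device % 1, device & 1);   /* prints "0 1" */
            return 0;
    }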
index 9f5da1c7454be3e06f37586a77dc1377e7cd4a9d..905ff76d3cbbc0d18e828cd45922b53fd935f275 100644 (file)
@@ -121,14 +121,8 @@ static void cmd64x_set_timing(struct ata_port *ap, struct ata_device *adev, u8 m
 
                if (pair) {
                        struct ata_timing tp;
-
                        ata_timing_compute(pair, pair->pio_mode, &tp, T, 0);
                        ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
-                       if (pair->dma_mode) {
-                               ata_timing_compute(pair, pair->dma_mode,
-                                               &tp, T, 0);
-                               ata_timing_merge(&tp, &t, &t, ATA_TIMING_SETUP);
-                       }
                }
        }
 
index 9df1ff7e1eaac84751766ec04c48b8f40da963c7..eaf194138f219f187e819fd56f4cce86b0cc90f7 100644 (file)
@@ -44,6 +44,9 @@
  *  Specific support is included for the ht6560a/ht6560b/opti82c611a/
  *  opti82c465mv/promise 20230c/20630/qdi65x0/winbond83759A
  *
+ *  Support for the Winbond 83759A when operating in advanced mode.
+ *  Multichip mode is not currently supported.
+ *
  *  Use the autospeed and pio_mask options with:
  *     Appian ADI/2 aka CLPD7220 or AIC25VL01.
  *  Use the jumpers, autospeed and set pio_mask to the mode on the jumpers with
@@ -135,12 +138,18 @@ static int ht6560b;               /* HT 6560A on primary 1, second 2, both 3 */
 static int opti82c611a;                /* Opti82c611A on primary 1, sec 2, both 3 */
 static int opti82c46x;         /* Opti 82c465MV present(pri/sec autodetect) */
 static int qdi;                        /* Set to probe QDI controllers */
-static int winbond;            /* Set to probe Winbond controllers,
-                                       give I/O port if non standard */
 static int autospeed;          /* Chip present which snoops speed changes */
 static int pio_mask = ATA_PIO4;        /* PIO range for autospeed devices */
 static int iordy_mask = 0xFFFFFFFF;    /* Use iordy if available */
 
+#ifdef PATA_WINBOND_VLB_MODULE
+static int winbond = 1;                /* Set to probe Winbond controllers,
+                                       give I/O port if non standard */
+#else
+static int winbond;            /* Set to probe Winbond controllers,
+                                       give I/O port if non standard */
+#endif
+
 /**
  *     legacy_probe_add        -       Add interface to probe list
  *     @port: Controller port
@@ -1297,6 +1306,7 @@ MODULE_AUTHOR("Alan Cox");
 MODULE_DESCRIPTION("low-level driver for legacy ATA");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
+MODULE_ALIAS("pata_winbond");
 
 module_param(probe_all, int, 0);
 module_param(autospeed, int, 0);
@@ -1305,6 +1315,7 @@ module_param(ht6560b, int, 0);
 module_param(opti82c611a, int, 0);
 module_param(opti82c46x, int, 0);
 module_param(qdi, int, 0);
+module_param(winbond, int, 0);
 module_param(pio_mask, int, 0);
 module_param(iordy_mask, int, 0);
 
index 5e659885de162e7ef3cf38e3e1f8a67098108712..ac8d7d97e4085d4122b33fbd58bde6951f3ef2e7 100644 (file)
@@ -417,6 +417,8 @@ static void via_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
                        tf->lbam,
                        tf->lbah);
        }
+
+       ata_wait_idle(ap);
 }
 
 static int via_port_start(struct ata_port *ap)
diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
deleted file mode 100644 (file)
index 6d8619b..0000000
+++ /dev/null
@@ -1,282 +0,0 @@
-/*
- *    pata_winbond.c - Winbond VLB ATA controllers
- *     (C) 2006 Red Hat
- *
- *    Support for the Winbond 83759A when operating in advanced mode.
- *    Multichip mode is not currently supported.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/blkdev.h>
-#include <linux/delay.h>
-#include <scsi/scsi_host.h>
-#include <linux/libata.h>
-#include <linux/platform_device.h>
-
-#define DRV_NAME "pata_winbond"
-#define DRV_VERSION "0.0.3"
-
-#define NR_HOST 4      /* Two winbond controllers, two channels each */
-
-struct winbond_data {
-       unsigned long config;
-       struct platform_device *platform_dev;
-};
-
-static struct ata_host *winbond_host[NR_HOST];
-static struct winbond_data winbond_data[NR_HOST];
-static int nr_winbond_host;
-
-#ifdef MODULE
-static int probe_winbond = 1;
-#else
-static int probe_winbond;
-#endif
-
-static DEFINE_SPINLOCK(winbond_lock);
-
-static void winbond_writecfg(unsigned long port, u8 reg, u8 val)
-{
-       unsigned long flags;
-       spin_lock_irqsave(&winbond_lock, flags);
-       outb(reg, port + 0x01);
-       outb(val, port + 0x02);
-       spin_unlock_irqrestore(&winbond_lock, flags);
-}
-
-static u8 winbond_readcfg(unsigned long port, u8 reg)
-{
-       u8 val;
-
-       unsigned long flags;
-       spin_lock_irqsave(&winbond_lock, flags);
-       outb(reg, port + 0x01);
-       val = inb(port + 0x02);
-       spin_unlock_irqrestore(&winbond_lock, flags);
-
-       return val;
-}
-
-static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
-{
-       struct ata_timing t;
-       struct winbond_data *winbond = ap->host->private_data;
-       int active, recovery;
-       u8 reg;
-       int timing = 0x88 + (ap->port_no * 4) + (adev->devno * 2);
-
-       reg = winbond_readcfg(winbond->config, 0x81);
-
-       /* Get the timing data in cycles */
-       if (reg & 0x40)         /* Fast VLB bus, assume 50MHz */
-               ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
-       else
-               ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
-
-       active = (clamp_val(t.active, 3, 17) - 1) & 0x0F;
-       recovery = (clamp_val(t.recover, 1, 15) + 1) & 0x0F;
-       timing = (active << 4) | recovery;
-       winbond_writecfg(winbond->config, timing, reg);
-
-       /* Load the setup timing */
-
-       reg = 0x35;
-       if (adev->class != ATA_DEV_ATA)
-               reg |= 0x08;    /* FIFO off */
-       if (!ata_pio_need_iordy(adev))
-               reg |= 0x02;    /* IORDY off */
-       reg |= (clamp_val(t.setup, 0, 3) << 6);
-       winbond_writecfg(winbond->config, timing + 1, reg);
-}
-
-
-static unsigned int winbond_data_xfer(struct ata_device *dev,
-                       unsigned char *buf, unsigned int buflen, int rw)
-{
-       struct ata_port *ap = dev->link->ap;
-       int slop = buflen & 3;
-
-       if (ata_id_has_dword_io(dev->id)) {
-               if (rw == READ)
-                       ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
-               else
-                       iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
-
-               if (unlikely(slop)) {
-                       __le32 pad;
-                       if (rw == READ) {
-                               pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
-                               memcpy(buf + buflen - slop, &pad, slop);
-                       } else {
-                               memcpy(&pad, buf + buflen - slop, slop);
-                               iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
-                       }
-                       buflen += 4 - slop;
-               }
-       } else
-               buflen = ata_sff_data_xfer(dev, buf, buflen, rw);
-
-       return buflen;
-}
-
-static struct scsi_host_template winbond_sht = {
-       ATA_PIO_SHT(DRV_NAME),
-};
-
-static struct ata_port_operations winbond_port_ops = {
-       .inherits       = &ata_sff_port_ops,
-       .sff_data_xfer  = winbond_data_xfer,
-       .cable_detect   = ata_cable_40wire,
-       .set_piomode    = winbond_set_piomode,
-};
-
-/**
- *     winbond_init_one                -       attach a winbond interface
- *     @type: Type to display
- *     @io: I/O port start
- *     @irq: interrupt line
- *     @fast: True if on a > 33Mhz VLB
- *
- *     Register a VLB bus IDE interface. Such interfaces are PIO and we
- *     assume do not support IRQ sharing.
- */
-
-static __init int winbond_init_one(unsigned long port)
-{
-       struct platform_device *pdev;
-       u8 reg;
-       int i, rc;
-
-       reg = winbond_readcfg(port, 0x81);
-       reg |= 0x80;    /* jumpered mode off */
-       winbond_writecfg(port, 0x81, reg);
-       reg = winbond_readcfg(port, 0x83);
-       reg |= 0xF0;    /* local control */
-       winbond_writecfg(port, 0x83, reg);
-       reg = winbond_readcfg(port, 0x85);
-       reg |= 0xF0;    /* programmable timing */
-       winbond_writecfg(port, 0x85, reg);
-
-       reg = winbond_readcfg(port, 0x81);
-
-       if (!(reg & 0x03))              /* Disabled */
-               return -ENODEV;
-
-       for (i = 0; i < 2 ; i ++) {
-               unsigned long cmd_port = 0x1F0 - (0x80 * i);
-               unsigned long ctl_port = cmd_port + 0x206;
-               struct ata_host *host;
-               struct ata_port *ap;
-               void __iomem *cmd_addr, *ctl_addr;
-
-               if (!(reg & (1 << i)))
-                       continue;
-
-               pdev = platform_device_register_simple(DRV_NAME, nr_winbond_host, NULL, 0);
-               if (IS_ERR(pdev))
-                       return PTR_ERR(pdev);
-
-               rc = -ENOMEM;
-               host = ata_host_alloc(&pdev->dev, 1);
-               if (!host)
-                       goto err_unregister;
-               ap = host->ports[0];
-
-               rc = -ENOMEM;
-               cmd_addr = devm_ioport_map(&pdev->dev, cmd_port, 8);
-               ctl_addr = devm_ioport_map(&pdev->dev, ctl_port, 1);
-               if (!cmd_addr || !ctl_addr)
-                       goto err_unregister;
-
-               ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", cmd_port, ctl_port);
-
-               ap->ops = &winbond_port_ops;
-               ap->pio_mask = ATA_PIO4;
-               ap->flags |= ATA_FLAG_SLAVE_POSS;
-               ap->ioaddr.cmd_addr = cmd_addr;
-               ap->ioaddr.altstatus_addr = ctl_addr;
-               ap->ioaddr.ctl_addr = ctl_addr;
-               ata_sff_std_ports(&ap->ioaddr);
-
-               /* hook in a private data structure per channel */
-               host->private_data = &winbond_data[nr_winbond_host];
-               winbond_data[nr_winbond_host].config = port;
-               winbond_data[nr_winbond_host].platform_dev = pdev;
-
-               /* activate */
-               rc = ata_host_activate(host, 14 + i, ata_sff_interrupt, 0,
-                                      &winbond_sht);
-               if (rc)
-                       goto err_unregister;
-
-               winbond_host[nr_winbond_host++] = dev_get_drvdata(&pdev->dev);
-       }
-
-       return 0;
-
- err_unregister:
-       platform_device_unregister(pdev);
-       return rc;
-}
-
-/**
- *     winbond_init            -       attach winbond interfaces
- *
- *     Attach winbond IDE interfaces by scanning the ports it may occupy.
- */
-
-static __init int winbond_init(void)
-{
-       static const unsigned long config[2] = { 0x130, 0x1B0 };
-
-       int ct = 0;
-       int i;
-
-       if (probe_winbond == 0)
-               return -ENODEV;
-
-       /*
-        *      Check both base addresses
-        */
-
-       for (i = 0; i < 2; i++) {
-               if (probe_winbond & (1<<i)) {
-                       int ret = 0;
-                       unsigned long port = config[i];
-
-                       if (request_region(port, 2, "pata_winbond")) {
-                               ret = winbond_init_one(port);
-                               if (ret <= 0)
-                                       release_region(port, 2);
-                               else ct+= ret;
-                       }
-               }
-       }
-       if (ct != 0)
-               return 0;
-       return -ENODEV;
-}
-
-static __exit void winbond_exit(void)
-{
-       int i;
-
-       for (i = 0; i < nr_winbond_host; i++) {
-               ata_host_detach(winbond_host[i]);
-               release_region(winbond_data[i].config, 2);
-               platform_device_unregister(winbond_data[i].platform_dev);
-       }
-}
-
-MODULE_AUTHOR("Alan Cox");
-MODULE_DESCRIPTION("low-level driver for Winbond VL ATA");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
-
-module_init(winbond_init);
-module_exit(winbond_exit);
-
-module_param(probe_winbond, int, 0);
-
index 2673a3d1480654ceec39f2ab144b15cde72ba72e..6cf57c5c2b5f3d50d966e63bf1e1ad0d32dea13d 100644 (file)
@@ -1459,7 +1459,7 @@ static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag)
 {
        struct scatterlist *sg = qc->sg;
        struct ata_port *ap = qc->ap;
-       u32 dma_chan;
+       int dma_chan;
        struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
        struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
        int err;
index 9463c71dd38ece66e4ad777208381573754e3f64..a9fd9709c2627c7514fe74a8ec9ebc26f09b44f9 100644 (file)
@@ -1898,19 +1898,25 @@ static void mv_bmdma_start(struct ata_queued_cmd *qc)
  *     LOCKING:
  *     Inherited from caller.
  */
-static void mv_bmdma_stop(struct ata_queued_cmd *qc)
+static void mv_bmdma_stop_ap(struct ata_port *ap)
 {
-       struct ata_port *ap = qc->ap;
        void __iomem *port_mmio = mv_ap_base(ap);
        u32 cmd;
 
        /* clear start/stop bit */
        cmd = readl(port_mmio + BMDMA_CMD);
-       cmd &= ~ATA_DMA_START;
-       writelfl(cmd, port_mmio + BMDMA_CMD);
+       if (cmd & ATA_DMA_START) {
+               cmd &= ~ATA_DMA_START;
+               writelfl(cmd, port_mmio + BMDMA_CMD);
+
+               /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
+               ata_sff_dma_pause(ap);
+       }
+}
 
-       /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
-       ata_sff_dma_pause(ap);
+static void mv_bmdma_stop(struct ata_queued_cmd *qc)
+{
+       mv_bmdma_stop_ap(qc->ap);
 }
 
 /**
@@ -1934,8 +1940,21 @@ static u8 mv_bmdma_status(struct ata_port *ap)
        reg = readl(port_mmio + BMDMA_STATUS);
        if (reg & ATA_DMA_ACTIVE)
                status = ATA_DMA_ACTIVE;
-       else
+       else if (reg & ATA_DMA_ERR)
                status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
+       else {
+               /*
+                * Just because DMA_ACTIVE is 0 (DMA completed),
+                * this does _not_ mean the device is "done".
+                * So we should not yet be signalling ATA_DMA_INTR
+                * in some cases.  Eg. DSM/TRIM, and perhaps others.
+                */
+               mv_bmdma_stop_ap(ap);
+               if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
+                       status = 0;
+               else
+                       status = ATA_DMA_INTR;
+       }
        return status;
 }
 
@@ -1995,6 +2014,9 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 
        switch (tf->protocol) {
        case ATA_PROT_DMA:
+               if (tf->command == ATA_CMD_DSM)
+                       return;
+               /* fall-thru */
        case ATA_PROT_NCQ:
                break;  /* continue below */
        case ATA_PROT_PIO:
@@ -2094,6 +2116,8 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
        if ((tf->protocol != ATA_PROT_DMA) &&
            (tf->protocol != ATA_PROT_NCQ))
                return;
+       if (tf->command == ATA_CMD_DSM)
+               return;  /* use bmdma for this */
 
        /* Fill in Gen IIE command request block */
        if (!(tf->flags & ATA_TFLAG_WRITE))
@@ -2260,7 +2284,7 @@ static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
        }
 
        if (qc->tf.flags & ATA_TFLAG_POLLING)
-               ata_sff_queue_pio_task(ap, 0);
+               ata_sff_queue_pio_task(link, 0);
        return 0;
 }
 
@@ -2289,6 +2313,12 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
 
        switch (qc->tf.protocol) {
        case ATA_PROT_DMA:
+               if (qc->tf.command == ATA_CMD_DSM) {
+                       if (!ap->ops->bmdma_setup)  /* no bmdma on GEN_I */
+                               return AC_ERR_OTHER;
+                       break;  /* use bmdma for this */
+               }
+               /* fall thru */
        case ATA_PROT_NCQ:
                mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
                pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
index c8a44f5e0584bec3d72e4bbada17ebb80efdd20b..40af43ebd92d355e5b96092d0f4b4ac0a3fd05ea 100644 (file)
@@ -568,7 +568,7 @@ static int _request_firmware(const struct firmware **firmware_p,
 out:
        if (retval) {
                release_firmware(firmware);
-               firmware_p = NULL;
+               *firmware_p = NULL;
        }
 
        return retval;
index ac1b682edecb362831eb32e7fd09faaff69da428..ab735a605cf3f23e0f516b21ae07f0d6c5e7e914 100644 (file)
@@ -834,7 +834,7 @@ static int blkfront_probe(struct xenbus_device *dev,
                char *type;
                int len;
                /* no unplug has been done: do not hook devices != xen vbds */
-               if (xen_platform_pci_unplug & XEN_UNPLUG_IGNORE) {
+               if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY) {
                        int major;
 
                        if (!VDEV_IS_EXTENDED(vdevice))
index 710af89b176d0b8a7fcbed56dbb3a56b762e27f8..eab58db5f91cd9cfd6ffa5302761780ac8b3f373 100644 (file)
@@ -12,6 +12,7 @@
 #include <asm/smp.h>
 #include "agp.h"
 #include "intel-agp.h"
+#include <linux/intel-gtt.h>
 
 #include "intel-gtt.c"
 
@@ -815,11 +816,19 @@ static const struct intel_driver_description {
            "HD Graphics", NULL, &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
            "HD Graphics", NULL, &intel_i965_driver },
-       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG,
+       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
            "Sandybridge", NULL, &intel_gen6_driver },
-       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG,
+       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
            "Sandybridge", NULL, &intel_gen6_driver },
-       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_D0_IG,
+       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
+           "Sandybridge", NULL, &intel_gen6_driver },
+       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
+           "Sandybridge", NULL, &intel_gen6_driver },
+       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
+           "Sandybridge", NULL, &intel_gen6_driver },
+       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
+           "Sandybridge", NULL, &intel_gen6_driver },
+       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
            "Sandybridge", NULL, &intel_gen6_driver },
        { 0, 0, NULL, NULL, NULL }
 };
@@ -1044,6 +1053,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
        ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
        ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB),
        ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB),
+       ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB),
        { }
 };
 
index 08d47532e60590f169f74995007e745623c4cd66..ee189c74d345ea98062ddbb914c87cb206691c59 100644 (file)
@@ -1,6 +1,8 @@
 /*
  * Common Intel AGPGART and GTT definitions.
  */
+#ifndef _INTEL_AGP_H
+#define _INTEL_AGP_H
 
 /* Intel registers */
 #define INTEL_APSIZE   0xb4
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB         0x0062
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB    0x006a
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG          0x0046
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB  0x0100
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG  0x0102
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB  0x0104
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG  0x0106
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_D0_IG  0x0126
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB             0x0100  /* Desktop */
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG         0x0102
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG         0x0112
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG    0x0122
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB           0x0104  /* Mobile */
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG       0x0106
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG       0x0116
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG  0x0126
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB           0x0108  /* Server */
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG           0x010A
 
 /* cover 915 and 945 variants */
 #define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
 
 #define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \
-               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
+               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB || \
+               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB)
 
 #define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \
                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
                IS_SNB)
+
+#endif
index d22ffb811bf2cea4001a97d485327d668ff9d85a..75e0a3497888d295d25dd414bda8a2ce69eee724 100644 (file)
@@ -49,6 +49,26 @@ static struct gatt_mask intel_i810_masks[] =
         .type = INTEL_AGP_CACHED_MEMORY}
 };
 
+#define INTEL_AGP_UNCACHED_MEMORY              0
+#define INTEL_AGP_CACHED_MEMORY_LLC            1
+#define INTEL_AGP_CACHED_MEMORY_LLC_GFDT       2
+#define INTEL_AGP_CACHED_MEMORY_LLC_MLC        3
+#define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT   4
+
+static struct gatt_mask intel_gen6_masks[] =
+{
+       {.mask = I810_PTE_VALID | GEN6_PTE_UNCACHED,
+        .type = INTEL_AGP_UNCACHED_MEMORY },
+       {.mask = I810_PTE_VALID | GEN6_PTE_LLC,
+         .type = INTEL_AGP_CACHED_MEMORY_LLC },
+       {.mask = I810_PTE_VALID | GEN6_PTE_LLC | GEN6_PTE_GFDT,
+         .type = INTEL_AGP_CACHED_MEMORY_LLC_GFDT },
+       {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC,
+         .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC },
+       {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC | GEN6_PTE_GFDT,
+         .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT },
+};
+
 static struct _intel_private {
        struct pci_dev *pcidev; /* device one */
        u8 __iomem *registers;
@@ -178,13 +198,6 @@ static void intel_agp_insert_sg_entries(struct agp_memory *mem,
                                        off_t pg_start, int mask_type)
 {
        int i, j;
-       u32 cache_bits = 0;
-
-       if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
-           agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
-       {
-               cache_bits = GEN6_PTE_LLC_MLC;
-       }
 
        for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
                writel(agp_bridge->driver->mask_memory(agp_bridge,
@@ -317,6 +330,23 @@ static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
                return 0;
 }
 
+static int intel_gen6_type_to_mask_type(struct agp_bridge_data *bridge,
+                                       int type)
+{
+       unsigned int type_mask = type & ~AGP_USER_CACHED_MEMORY_GFDT;
+       unsigned int gfdt = type & AGP_USER_CACHED_MEMORY_GFDT;
+
+       if (type_mask == AGP_USER_UNCACHED_MEMORY)
+               return INTEL_AGP_UNCACHED_MEMORY;
+       else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC)
+               return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT :
+                             INTEL_AGP_CACHED_MEMORY_LLC_MLC;
+       else /* set 'normal'/'cached' to LLC by default */
+               return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_GFDT :
+                             INTEL_AGP_CACHED_MEMORY_LLC;
+}
+
+
 static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
                                int type)
 {
@@ -588,8 +618,7 @@ static void intel_i830_init_gtt_entries(void)
                        gtt_entries = 0;
                        break;
                }
-       } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
-                  agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
+       } else if (IS_SNB) {
                /*
                 * SandyBridge has new memory control reg at 0x50.w
                 */
@@ -1068,11 +1097,11 @@ static void intel_i9xx_setup_flush(void)
                intel_i915_setup_chipset_flush();
        }
 
-       if (intel_private.ifp_resource.start) {
+       if (intel_private.ifp_resource.start)
                intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
-               if (!intel_private.i9xx_flush_page)
-                       dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing");
-       }
+       if (!intel_private.i9xx_flush_page)
+               dev_err(&intel_private.pcidev->dev,
+                       "can't ioremap flush page - no chipset flushing\n");
 }
 
 static int intel_i9xx_configure(void)
@@ -1163,7 +1192,7 @@ static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
 
        mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
 
-       if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
+       if (!IS_SNB && mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
            mask_type != INTEL_AGP_CACHED_MEMORY)
                goto out_err;
 
@@ -1333,8 +1362,8 @@ static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
 static unsigned long intel_gen6_mask_memory(struct agp_bridge_data *bridge,
                                            dma_addr_t addr, int type)
 {
-       /* Shift high bits down */
-       addr |= (addr >> 28) & 0xff;
+       /* gen6 has bit11-4 for physical addr bit39-32 */
+       addr |= (addr >> 28) & 0xff0;
 
        /* Type checking must be done elsewhere */
        return addr | bridge->driver->masks[type].mask;
@@ -1359,6 +1388,7 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
                break;
        case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
        case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
+       case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB:
                *gtt_offset = MB(2);
 
                pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
@@ -1563,7 +1593,7 @@ static const struct agp_bridge_driver intel_gen6_driver = {
        .fetch_size             = intel_i9xx_fetch_size,
        .cleanup                = intel_i915_cleanup,
        .mask_memory            = intel_gen6_mask_memory,
-       .masks                  = intel_i810_masks,
+       .masks                  = intel_gen6_masks,
        .agp_enable             = intel_i810_agp_enable,
        .cache_flush            = global_cache_flush,
        .create_gatt_table      = intel_i965_create_gatt_table,
@@ -1576,7 +1606,7 @@ static const struct agp_bridge_driver intel_gen6_driver = {
        .agp_alloc_pages        = agp_generic_alloc_pages,
        .agp_destroy_page       = agp_generic_destroy_page,
        .agp_destroy_pages      = agp_generic_destroy_pages,
-       .agp_type_to_mask_type  = intel_i830_type_to_mask_type,
+       .agp_type_to_mask_type  = intel_gen6_type_to_mask_type,
        .chipset_flush          = intel_i915_chipset_flush,
 #ifdef USE_PCI_DMA_API
        .agp_map_page           = intel_agp_map_page,
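
The "(addr >> 28) & 0xff0" step in intel_gen6_mask_memory() above folds bits 39:32 of a 40-bit physical address into PTE bits 11:4, while bits 31:12 stay in place. A minimal standalone sketch of that packing; pte_flags is a hypothetical placeholder for the valid/cache bits:

/*
 * Standalone illustration of the gen6 GTT PTE address packing shown
 * above.  Not driver code; names are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t gen6_pack_pte(uint64_t phys, uint32_t pte_flags)
{
	uint32_t pte = (uint32_t)phys;			/* bits 31:12, page aligned */

	pte |= (uint32_t)((phys >> 28) & 0xff0);	/* bits 39:32 -> PTE bits 11:4 */
	return pte | pte_flags;
}

int main(void)
{
	uint64_t phys = 0x1234567000ULL;	/* bits 39:32 = 0x12 */

	/* prints 0x34567121: 0x34567000 | (0x12 << 4) | valid bit */
	printf("PTE = 0x%08x\n", gen6_pack_pte(phys, 0x1));
	return 0;
}
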
index e0249722d25f1c8b9dd05fa6705187afc8e23256..f953c96efc86499165f6006d1c6b97ee1e430884 100644 (file)
@@ -159,7 +159,7 @@ static void hangcheck_fire(unsigned long data)
                if (hangcheck_dump_tasks) {
                        printk(KERN_CRIT "Hangcheck: Task state:\n");
 #ifdef CONFIG_MAGIC_SYSRQ
-                       handle_sysrq('t', NULL);
+                       handle_sysrq('t');
 #endif  /* CONFIG_MAGIC_SYSRQ */
                }
                if (hangcheck_reboot) {
index fa27d1676ee5e0405cc5497bec6c703ade6e10b2..3afd62e856ebfea311eba94c496ac6628f586458 100644 (file)
@@ -651,7 +651,7 @@ int hvc_poll(struct hvc_struct *hp)
                                        if (sysrq_pressed)
                                                continue;
                                } else if (sysrq_pressed) {
-                                       handle_sysrq(buf[i], tty);
+                                       handle_sysrq(buf[i]);
                                        sysrq_pressed = 0;
                                        continue;
                                }
index 1f4b6de65a2dcac06a565aa7777359e1e873b444..a2bc885ce60a8e2bab93a815ab6ffc7d86640220 100644 (file)
@@ -403,7 +403,7 @@ static void hvsi_insert_chars(struct hvsi_struct *hp, const char *buf, int len)
                        hp->sysrq = 1;
                        continue;
                } else if (hp->sysrq) {
-                       handle_sysrq(c, hp->tty);
+                       handle_sysrq(c);
                        hp->sysrq = 0;
                        continue;
                }
index 1acdb25095112ef538072e8d153a889476d1359f..a3f5e381e74647268517a22c6f65d7c641277f3d 100644 (file)
@@ -387,7 +387,7 @@ static int n2rng_init_control(struct n2rng *np)
 
 static int n2rng_data_read(struct hwrng *rng, u32 *data)
 {
-       struct n2rng *np = rng->priv;
+       struct n2rng *np = (struct n2rng *) rng->priv;
        unsigned long ra = __pa(&np->test_data);
        int len;
 
index 07f3ea38b5828eda0a16ceed8b23fdbdee891d79..d4b71e8d0d23a13648be2e55f60f606f55b99edd 100644 (file)
@@ -1650,7 +1650,7 @@ ip2_close( PTTY tty, struct file *pFile )
        /* disable DSS reporting */
        i2QueueCommands(PTYPE_INLINE, pCh, 100, 4,
                                CMD_DCD_NREP, CMD_CTS_NREP, CMD_DSR_NREP, CMD_RI_NREP);
-       if ( !tty || (tty->termios->c_cflag & HUPCL) ) {
+       if (tty->termios->c_cflag & HUPCL) {
                i2QueueCommands(PTYPE_INLINE, pCh, 100, 2, CMD_RTSDN, CMD_DTRDN);
                pCh->dataSetOut &= ~(I2_DTR | I2_RTS);
                i2QueueCommands( PTYPE_INLINE, pCh, 100, 1, CMD_PAUSE(25));
@@ -2930,6 +2930,8 @@ ip2_ipl_ioctl (struct file *pFile, UINT cmd, ULONG arg )
                                if ( pCh )
                                {
                                        rc = copy_to_user(argp, pCh, sizeof(i2ChanStr));
+                                       if (rc)
+                                               rc = -EFAULT;
                                } else {
                                        rc = -ENODEV;
                                }
index 79c3bc69165afb919f8084cf274c4d2310def8c2..7c79d243acc9b3fac68d6b4062fc9a43bb93eb65 100644 (file)
@@ -1244,6 +1244,7 @@ static int set_config(struct tty_struct *tty, struct r_port *info,
                }
                info->flags = ((info->flags & ~ROCKET_USR_MASK) | (new_serial.flags & ROCKET_USR_MASK));
                configure_r_port(tty, info, NULL);
+               mutex_unlock(&info->port.mutex);
                return 0;
        }
 
index fef80cfcab5c8caf231b4435d423095dfb90815e..e63b830c86cc0778cb45827dc46a7e207488b586 100644 (file)
@@ -691,8 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
        if (info->port.count == 1) {
                /* 1st open on this device, init hardware */
                retval = startup(info);
-               if (retval < 0)
+               if (retval < 0) {
+                       mutex_unlock(&info->port.mutex);
                        goto cleanup;
+               }
        }
        mutex_unlock(&info->port.mutex);
        retval = block_til_ready(tty, filp, info);
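
Both tty fixes above (rocketport set_config() and synclink_gt open()) restore the rule that every return path releases the mutex it took, including the error paths. A generic sketch of that discipline, with hypothetical names:

#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(cfg_mutex);

static int apply_config(int value)
{
	int ret = 0;

	mutex_lock(&cfg_mutex);

	if (value < 0) {
		ret = -EINVAL;
		goto out;		/* error path unlocks too */
	}

	/* ... apply the new configuration under the lock ... */

out:
	mutex_unlock(&cfg_mutex);
	return ret;
}
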
index 878ac0c2cc6864a84c7df230c5c12efee1944607..ef31bb81e8438a83462a48d4fa6ee0bf5cff7faf 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/interrupt.h>
 #include <linux/mm.h>
 #include <linux/fs.h>
-#include <linux/tty.h>
 #include <linux/mount.h>
 #include <linux/kdev_t.h>
 #include <linux/major.h>
@@ -76,7 +75,7 @@ static int __init sysrq_always_enabled_setup(char *str)
 __setup("sysrq_always_enabled", sysrq_always_enabled_setup);
 
 
-static void sysrq_handle_loglevel(int key, struct tty_struct *tty)
+static void sysrq_handle_loglevel(int key)
 {
        int i;
 
@@ -93,7 +92,7 @@ static struct sysrq_key_op sysrq_loglevel_op = {
 };
 
 #ifdef CONFIG_VT
-static void sysrq_handle_SAK(int key, struct tty_struct *tty)
+static void sysrq_handle_SAK(int key)
 {
        struct work_struct *SAK_work = &vc_cons[fg_console].SAK_work;
        schedule_work(SAK_work);
@@ -109,7 +108,7 @@ static struct sysrq_key_op sysrq_SAK_op = {
 #endif
 
 #ifdef CONFIG_VT
-static void sysrq_handle_unraw(int key, struct tty_struct *tty)
+static void sysrq_handle_unraw(int key)
 {
        struct kbd_struct *kbd = &kbd_table[fg_console];
 
@@ -126,7 +125,7 @@ static struct sysrq_key_op sysrq_unraw_op = {
 #define sysrq_unraw_op (*(struct sysrq_key_op *)NULL)
 #endif /* CONFIG_VT */
 
-static void sysrq_handle_crash(int key, struct tty_struct *tty)
+static void sysrq_handle_crash(int key)
 {
        char *killer = NULL;
 
@@ -141,7 +140,7 @@ static struct sysrq_key_op sysrq_crash_op = {
        .enable_mask    = SYSRQ_ENABLE_DUMP,
 };
 
-static void sysrq_handle_reboot(int key, struct tty_struct *tty)
+static void sysrq_handle_reboot(int key)
 {
        lockdep_off();
        local_irq_enable();
@@ -154,7 +153,7 @@ static struct sysrq_key_op sysrq_reboot_op = {
        .enable_mask    = SYSRQ_ENABLE_BOOT,
 };
 
-static void sysrq_handle_sync(int key, struct tty_struct *tty)
+static void sysrq_handle_sync(int key)
 {
        emergency_sync();
 }
@@ -165,7 +164,7 @@ static struct sysrq_key_op sysrq_sync_op = {
        .enable_mask    = SYSRQ_ENABLE_SYNC,
 };
 
-static void sysrq_handle_show_timers(int key, struct tty_struct *tty)
+static void sysrq_handle_show_timers(int key)
 {
        sysrq_timer_list_show();
 }
@@ -176,7 +175,7 @@ static struct sysrq_key_op sysrq_show_timers_op = {
        .action_msg     = "Show clockevent devices & pending hrtimers (no others)",
 };
 
-static void sysrq_handle_mountro(int key, struct tty_struct *tty)
+static void sysrq_handle_mountro(int key)
 {
        emergency_remount();
 }
@@ -188,7 +187,7 @@ static struct sysrq_key_op sysrq_mountro_op = {
 };
 
 #ifdef CONFIG_LOCKDEP
-static void sysrq_handle_showlocks(int key, struct tty_struct *tty)
+static void sysrq_handle_showlocks(int key)
 {
        debug_show_all_locks();
 }
@@ -226,7 +225,7 @@ static void sysrq_showregs_othercpus(struct work_struct *dummy)
 
 static DECLARE_WORK(sysrq_showallcpus, sysrq_showregs_othercpus);
 
-static void sysrq_handle_showallcpus(int key, struct tty_struct *tty)
+static void sysrq_handle_showallcpus(int key)
 {
        /*
         * Fall back to the workqueue based printing if the
@@ -252,7 +251,7 @@ static struct sysrq_key_op sysrq_showallcpus_op = {
 };
 #endif
 
-static void sysrq_handle_showregs(int key, struct tty_struct *tty)
+static void sysrq_handle_showregs(int key)
 {
        struct pt_regs *regs = get_irq_regs();
        if (regs)
@@ -266,7 +265,7 @@ static struct sysrq_key_op sysrq_showregs_op = {
        .enable_mask    = SYSRQ_ENABLE_DUMP,
 };
 
-static void sysrq_handle_showstate(int key, struct tty_struct *tty)
+static void sysrq_handle_showstate(int key)
 {
        show_state();
 }
@@ -277,7 +276,7 @@ static struct sysrq_key_op sysrq_showstate_op = {
        .enable_mask    = SYSRQ_ENABLE_DUMP,
 };
 
-static void sysrq_handle_showstate_blocked(int key, struct tty_struct *tty)
+static void sysrq_handle_showstate_blocked(int key)
 {
        show_state_filter(TASK_UNINTERRUPTIBLE);
 }
@@ -291,7 +290,7 @@ static struct sysrq_key_op sysrq_showstate_blocked_op = {
 #ifdef CONFIG_TRACING
 #include <linux/ftrace.h>
 
-static void sysrq_ftrace_dump(int key, struct tty_struct *tty)
+static void sysrq_ftrace_dump(int key)
 {
        ftrace_dump(DUMP_ALL);
 }
@@ -305,7 +304,7 @@ static struct sysrq_key_op sysrq_ftrace_dump_op = {
 #define sysrq_ftrace_dump_op (*(struct sysrq_key_op *)NULL)
 #endif
 
-static void sysrq_handle_showmem(int key, struct tty_struct *tty)
+static void sysrq_handle_showmem(int key)
 {
        show_mem();
 }
@@ -330,7 +329,7 @@ static void send_sig_all(int sig)
        }
 }
 
-static void sysrq_handle_term(int key, struct tty_struct *tty)
+static void sysrq_handle_term(int key)
 {
        send_sig_all(SIGTERM);
        console_loglevel = 8;
@@ -349,7 +348,7 @@ static void moom_callback(struct work_struct *ignored)
 
 static DECLARE_WORK(moom_work, moom_callback);
 
-static void sysrq_handle_moom(int key, struct tty_struct *tty)
+static void sysrq_handle_moom(int key)
 {
        schedule_work(&moom_work);
 }
@@ -361,7 +360,7 @@ static struct sysrq_key_op sysrq_moom_op = {
 };
 
 #ifdef CONFIG_BLOCK
-static void sysrq_handle_thaw(int key, struct tty_struct *tty)
+static void sysrq_handle_thaw(int key)
 {
        emergency_thaw_all();
 }
@@ -373,7 +372,7 @@ static struct sysrq_key_op sysrq_thaw_op = {
 };
 #endif
 
-static void sysrq_handle_kill(int key, struct tty_struct *tty)
+static void sysrq_handle_kill(int key)
 {
        send_sig_all(SIGKILL);
        console_loglevel = 8;
@@ -385,7 +384,7 @@ static struct sysrq_key_op sysrq_kill_op = {
        .enable_mask    = SYSRQ_ENABLE_SIGNAL,
 };
 
-static void sysrq_handle_unrt(int key, struct tty_struct *tty)
+static void sysrq_handle_unrt(int key)
 {
        normalize_rt_tasks();
 }
@@ -493,7 +492,7 @@ static void __sysrq_put_key_op(int key, struct sysrq_key_op *op_p)
                 sysrq_key_table[i] = op_p;
 }
 
-void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
+void __handle_sysrq(int key, bool check_mask)
 {
        struct sysrq_key_op *op_p;
        int orig_log_level;
@@ -520,7 +519,7 @@ void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
                if (!check_mask || sysrq_on_mask(op_p->enable_mask)) {
                        printk("%s\n", op_p->action_msg);
                        console_loglevel = orig_log_level;
-                       op_p->handler(key, tty);
+                       op_p->handler(key);
                } else {
                        printk("This sysrq operation is disabled.\n");
                }
@@ -545,10 +544,10 @@ void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
        spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
 }
 
-void handle_sysrq(int key, struct tty_struct *tty)
+void handle_sysrq(int key)
 {
        if (sysrq_on())
-               __handle_sysrq(key, tty, 1);
+               __handle_sysrq(key, true);
 }
 EXPORT_SYMBOL(handle_sysrq);
 
@@ -597,7 +596,7 @@ static bool sysrq_filter(struct input_handle *handle, unsigned int type,
 
        default:
                if (sysrq_down && value && value != 2)
-                       __handle_sysrq(sysrq_xlate[code], NULL, 1);
+                       __handle_sysrq(sysrq_xlate[code], true);
                break;
        }
 
@@ -765,7 +764,7 @@ static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
 
                if (get_user(c, buf))
                        return -EFAULT;
-               __handle_sysrq(c, NULL, 0);
+               __handle_sysrq(c, false);
        }
 
        return count;
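
With the tty argument dropped, a sysrq handler now receives only the key that triggered it. A minimal sketch of a module registering its own key op against the new single-argument signature; the key choice and messages are arbitrary examples, not part of this patch:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sysrq.h>

static void sysrq_handle_example(int key)
{
	pr_info("example sysrq handler fired for key %d\n", key);
}

static struct sysrq_key_op sysrq_example_op = {
	.handler	= sysrq_handle_example,
	.help_msg	= "example(Y)",
	.action_msg	= "Example sysrq action",
	.enable_mask	= SYSRQ_ENABLE_DUMP,
};

static int __init sysrq_example_init(void)
{
	return register_sysrq_key('y', &sysrq_example_op);
}

static void __exit sysrq_example_exit(void)
{
	unregister_sysrq_key('y', &sysrq_example_op);
}

module_init(sysrq_example_init);
module_exit(sysrq_example_exit);
MODULE_LICENSE("GPL");
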
index 949067a0bd4743151515382b07ccf7aecad11316..613c852ee0feced279c50f6850e9e30c7c870111 100644 (file)
@@ -355,7 +355,7 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line)
                if (*stp == '\0')
                        stp = NULL;
 
-               if (tty_line >= 0 && tty_line <= p->num && p->ops &&
+               if (tty_line >= 0 && tty_line < p->num && p->ops &&
                    p->ops->poll_init && !p->ops->poll_init(p, tty_line, stp)) {
                        res = tty_driver_kref_get(p);
                        *line = tty_line;
index 50590c7f2c01ac4c2a71595e94fbebe930081079..281aada7b4a110a2d9f47d5e3e780596a36c298e 100644 (file)
@@ -906,22 +906,16 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
                         * bottom of buffer
                         */
                        old_origin += (old_rows - new_rows) * old_row_size;
-                       end = vc->vc_scr_end;
                } else {
                        /*
                         * Cursor is in no man's land, copy 1/2 screenful
                         * from the top and bottom of cursor position
                         */
                        old_origin += (vc->vc_y - new_rows/2) * old_row_size;
-                       end = old_origin + (old_row_size * new_rows);
                }
-       } else
-               /*
-                * Cursor near the top, copy contents from the top of buffer
-                */
-               end = (old_rows > new_rows) ? old_origin +
-                       (old_row_size * new_rows) :
-                       vc->vc_scr_end;
+       }
+
+       end = old_origin + old_row_size * min(old_rows, new_rows);
 
        update_attr(vc);
 
@@ -3075,8 +3069,7 @@ static int bind_con_driver(const struct consw *csw, int first, int last,
 
                old_was_color = vc->vc_can_do_color;
                vc->vc_sw->con_deinit(vc);
-               if (!vc->vc_origin)
-                       vc->vc_origin = (unsigned long)vc->vc_screenbuf;
+               vc->vc_origin = (unsigned long)vc->vc_screenbuf;
                visual_init(vc, i, 0);
                set_origin(vc);
                update_attr(vc);
index 2bbeaaea46e9b7765ce983374fb125b7dd5422c9..38df8c19e74cc56903d5985cdbee7d52df3dc0d9 100644 (file)
@@ -533,11 +533,14 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
        case KIOCSOUND:
                if (!perm)
                        goto eperm;
-               /* FIXME: This is an old broken API but we need to keep it
-                  supported and somehow separate the historic advertised
-                  tick rate from any real one */
+               /*
+                * The use of PIT_TICK_RATE is historic, it used to be
+                * the platform-dependent CLOCK_TICK_RATE between 2.6.12
+                * and 2.6.36, which was a minor but unfortunate ABI
+                * change.
+                */
                if (arg)
-                       arg = CLOCK_TICK_RATE / arg;
+                       arg = PIT_TICK_RATE / arg;
                kd_mksound(arg, 0);
                break;
 
@@ -553,11 +556,8 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                 */
                ticks = HZ * ((arg >> 16) & 0xffff) / 1000;
                count = ticks ? (arg & 0xffff) : 0;
-               /* FIXME: This is an old broken API but we need to keep it
-                  supported and somehow separate the historic advertised
-                  tick rate from any real one */
                if (count)
-                       count = CLOCK_TICK_RATE / count;
+                       count = PIT_TICK_RATE / count;
                kd_mksound(count, ticks);
                break;
        }
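
KIOCSOUND's argument is a divisor of the ~1.193 MHz i8254 clock, which the code above converts back to a frequency with PIT_TICK_RATE / arg (and 0 stops the tone). An illustrative userspace sketch of that convention; the device path and frequency are arbitrary examples:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kd.h>

int main(void)
{
	const unsigned int pit_hz = 1193182;	/* PIT_TICK_RATE */
	unsigned int freq = 440;		/* A4, for example */
	int fd = open("/dev/console", O_WRONLY);

	if (fd < 0) {
		perror("open /dev/console");
		return 1;
	}
	ioctl(fd, KIOCSOUND, pit_hz / freq);	/* start the tone */
	sleep(1);
	ioctl(fd, KIOCSOUND, 0);		/* 0 turns the tone off */
	close(fd);
	return 0;
}
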
index 670239ab7511aea7f902449c33e675efe3202a0a..e7d5d6b5dcf69683d5ac7c59d6608643c5ae4e53 100644 (file)
@@ -2071,16 +2071,6 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
                amd64_handle_ce(mci, info);
        else if (ecc_type == 1)
                amd64_handle_ue(mci, info);
-
-       /*
-        * If main error is CE then overflow must be CE.  If main error is UE
-        * then overflow is unknown.  We'll call the overflow a CE - if
-        * panic_on_ue is set then we're already panic'ed and won't arrive
-        * here. Else, then apparently someone doesn't think that UE's are
-        * catastrophic.
-        */
-       if (info->nbsh & K8_NBSH_OVERFLOW)
-               edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR " Error Overflow");
 }
 
 void amd64_decode_bus_error(int node_id, struct err_regs *regs)
index bae9351e9473872214d6bd9870776a2c48680909..9014df6f605d4882bba31656cd09a91187afb169 100644 (file)
@@ -365,11 +365,10 @@ static int amd_decode_mce(struct notifier_block *nb, unsigned long val,
 
        pr_emerg("MC%d_STATUS: ", m->bank);
 
-       pr_cont("%sorrected error, report: %s, MiscV: %svalid, "
+       pr_cont("%sorrected error, other errors lost: %s, "
                 "CPU context corrupt: %s",
                 ((m->status & MCI_STATUS_UC) ? "Unc"  : "C"),
-                ((m->status & MCI_STATUS_EN) ? "yes"  : "no"),
-                ((m->status & MCI_STATUS_MISCV) ? ""  : "in"),
+                ((m->status & MCI_STATUS_OVER) ? "yes"  : "no"),
                 ((m->status & MCI_STATUS_PCC) ? "yes" : "no"));
 
        /* do the two bits[14:13] together */
@@ -426,11 +425,15 @@ static struct notifier_block amd_mce_dec_nb = {
 static int __init mce_amd_init(void)
 {
        /*
-        * We can decode MCEs for Opteron and later CPUs:
+        * We can decode MCEs for K8, F10h and F11h CPUs:
         */
-       if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
-           (boot_cpu_data.x86 >= 0xf))
-               atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb);
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+               return 0;
+
+       if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
+               return 0;
+
+       atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb);
 
        return 0;
 }
index ca7ca56661e018c5ecfd9d4c56321f4dea901f93..b42a0bde8494bf0dec8cc2e20d34dfc3e7c046b5 100644 (file)
@@ -81,6 +81,10 @@ static int close_transaction(struct fw_transaction *transaction,
        spin_lock_irqsave(&card->lock, flags);
        list_for_each_entry(t, &card->transaction_list, link) {
                if (t == transaction) {
+                       if (!del_timer(&t->split_timeout_timer)) {
+                               spin_unlock_irqrestore(&card->lock, flags);
+                               goto timed_out;
+                       }
                        list_del_init(&t->link);
                        card->tlabel_mask &= ~(1ULL << t->tlabel);
                        break;
@@ -89,11 +93,11 @@ static int close_transaction(struct fw_transaction *transaction,
        spin_unlock_irqrestore(&card->lock, flags);
 
        if (&t->link != &card->transaction_list) {
-               del_timer_sync(&t->split_timeout_timer);
                t->callback(card, rcode, NULL, 0, t->callback_data);
                return 0;
        }
 
+ timed_out:
        return -ENOENT;
 }
 
@@ -921,6 +925,10 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
        spin_lock_irqsave(&card->lock, flags);
        list_for_each_entry(t, &card->transaction_list, link) {
                if (t->node_id == source && t->tlabel == tlabel) {
+                       if (!del_timer(&t->split_timeout_timer)) {
+                               spin_unlock_irqrestore(&card->lock, flags);
+                               goto timed_out;
+                       }
                        list_del_init(&t->link);
                        card->tlabel_mask &= ~(1ULL << t->tlabel);
                        break;
@@ -929,6 +937,7 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
        spin_unlock_irqrestore(&card->lock, flags);
 
        if (&t->link == &card->transaction_list) {
+ timed_out:
                fw_notify("Unsolicited response (source %x, tlabel %x)\n",
                          source, tlabel);
                return;
@@ -963,8 +972,6 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
                break;
        }
 
-       del_timer_sync(&t->split_timeout_timer);
-
        /*
         * The response handler may be executed while the request handler
         * is still pending.  Cancel the request handler.
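
The del_timer() calls added above resolve the completion-versus-split-timeout race: only the path that deactivates the still-pending timer owns the transaction and may complete it; if del_timer() returns 0, the timeout handler got there first. A generic sketch of that ownership rule, with a hypothetical request structure:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/timer.h>

struct my_req {
	struct list_head link;
	struct timer_list timeout;
	void (*callback)(struct my_req *req, int status);
};

static DEFINE_SPINLOCK(req_lock);

static int complete_req(struct my_req *req, int status)
{
	unsigned long flags;

	spin_lock_irqsave(&req_lock, flags);
	if (!del_timer(&req->timeout)) {
		/* timer no longer pending: the timeout path owns req */
		spin_unlock_irqrestore(&req_lock, flags);
		return -ENOENT;
	}
	list_del_init(&req->link);
	spin_unlock_irqrestore(&req_lock, flags);

	req->callback(req, status);
	return 0;
}
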
index da17d409a244b0af97249341855dfe301b506ab3..33f8421c71cc05001c57bbf275b61679fded72eb 100644 (file)
@@ -579,7 +579,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
                if (!peer) {
                        fw_notify("No peer for ARP packet from %016llx\n",
                                  (unsigned long long)peer_guid);
-                       goto failed_proto;
+                       goto no_peer;
                }
 
                /*
@@ -656,7 +656,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
 
        return 0;
 
- failed_proto:
+ no_peer:
        net->stats.rx_errors++;
        net->stats.rx_dropped++;
 
@@ -664,7 +664,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
        if (netif_queue_stopped(net))
                netif_wake_queue(net);
 
-       return 0;
+       return -ENOENT;
 }
 
 static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
@@ -701,7 +701,7 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
                        fw_error("out of memory\n");
                        net->stats.rx_dropped++;
 
-                       return -1;
+                       return -ENOMEM;
                }
                skb_reserve(skb, (net->hard_header_len + 15) & ~15);
                memcpy(skb_put(skb, len), buf, len);
@@ -726,8 +726,10 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
        spin_lock_irqsave(&dev->lock, flags);
 
        peer = fwnet_peer_find_by_node_id(dev, source_node_id, generation);
-       if (!peer)
-               goto bad_proto;
+       if (!peer) {
+               retval = -ENOENT;
+               goto fail;
+       }
 
        pd = fwnet_pd_find(peer, datagram_label);
        if (pd == NULL) {
@@ -741,7 +743,7 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
                                  dg_size, buf, fg_off, len);
                if (pd == NULL) {
                        retval = -ENOMEM;
-                       goto bad_proto;
+                       goto fail;
                }
                peer->pdg_size++;
        } else {
@@ -755,9 +757,9 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
                        pd = fwnet_pd_new(net, peer, datagram_label,
                                          dg_size, buf, fg_off, len);
                        if (pd == NULL) {
-                               retval = -ENOMEM;
                                peer->pdg_size--;
-                               goto bad_proto;
+                               retval = -ENOMEM;
+                               goto fail;
                        }
                } else {
                        if (!fwnet_pd_update(peer, pd, buf, fg_off, len)) {
@@ -768,7 +770,8 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
                                 */
                                fwnet_pd_delete(pd);
                                peer->pdg_size--;
-                               goto bad_proto;
+                               retval = -ENOMEM;
+                               goto fail;
                        }
                }
        } /* new datagram or add to existing one */
@@ -794,14 +797,13 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
        spin_unlock_irqrestore(&dev->lock, flags);
 
        return 0;
-
- bad_proto:
+ fail:
        spin_unlock_irqrestore(&dev->lock, flags);
 
        if (netif_queue_stopped(net))
                netif_wake_queue(net);
 
-       return 0;
+       return retval;
 }
 
 static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
index 7f03540cabe8b3932021545a6db05aebb3f729b1..be29b0bb247101442a63fa623467e98ad7b77524 100644 (file)
@@ -694,7 +694,15 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
        log_ar_at_event('R', p.speed, p.header, evt);
 
        /*
-        * The OHCI bus reset handler synthesizes a phy packet with
+        * Several controllers, notably from NEC and VIA, forget to
+        * write ack_complete status at PHY packet reception.
+        */
+       if (evt == OHCI1394_evt_no_status &&
+           (p.header[0] & 0xff) == (OHCI1394_phy_tcode << 4))
+               p.ack = ACK_COMPLETE;
+
+       /*
+        * The OHCI bus reset handler synthesizes a PHY packet with
         * the new generation number when a bus reset happens (see
         * section 8.4.2.3).  This helps us determine when a request
         * was received and make sure we send the response in the same
index 9f76171717e50302dc4e895a4988cb15f7e7023f..bfae4b309791103e48b2bb704300d1cd84da9750 100644 (file)
@@ -450,7 +450,7 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
 
        if (&orb->link != &lu->orb_list) {
                orb->callback(orb, &status);
-               kref_put(&orb->kref, free_orb);
+               kref_put(&orb->kref, free_orb); /* orb callback reference */
        } else {
                fw_error("status write for unknown orb\n");
        }
@@ -472,20 +472,28 @@ static void complete_transaction(struct fw_card *card, int rcode,
         * So this callback only sets the rcode if it hasn't already
         * been set and only does the cleanup if the transaction
         * failed and we didn't already get a status write.
+        *
+        * Here we treat RCODE_CANCELLED like RCODE_COMPLETE because some
+        * OXUF936QSE firmwares occasionally respond after Split_Timeout and
+        * complete the ORB just fine.  Note, we also get RCODE_CANCELLED
+        * from sbp2_cancel_orbs() if fw_cancel_transaction() == 0.
         */
        spin_lock_irqsave(&card->lock, flags);
 
        if (orb->rcode == -1)
                orb->rcode = rcode;
-       if (orb->rcode != RCODE_COMPLETE) {
+
+       if (orb->rcode != RCODE_COMPLETE && orb->rcode != RCODE_CANCELLED) {
                list_del(&orb->link);
                spin_unlock_irqrestore(&card->lock, flags);
+
                orb->callback(orb, NULL);
+               kref_put(&orb->kref, free_orb); /* orb callback reference */
        } else {
                spin_unlock_irqrestore(&card->lock, flags);
        }
 
-       kref_put(&orb->kref, free_orb);
+       kref_put(&orb->kref, free_orb); /* transaction callback reference */
 }
 
 static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
@@ -501,9 +509,8 @@ static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
        list_add_tail(&orb->link, &lu->orb_list);
        spin_unlock_irqrestore(&device->card->lock, flags);
 
-       /* Take a ref for the orb list and for the transaction callback. */
-       kref_get(&orb->kref);
-       kref_get(&orb->kref);
+       kref_get(&orb->kref); /* transaction callback reference */
+       kref_get(&orb->kref); /* orb callback reference */
 
        fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST,
                        node_id, generation, device->max_speed, offset,
@@ -525,11 +532,11 @@ static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
 
        list_for_each_entry_safe(orb, next, &list, link) {
                retval = 0;
-               if (fw_cancel_transaction(device->card, &orb->t) == 0)
-                       continue;
+               fw_cancel_transaction(device->card, &orb->t);
 
                orb->rcode = RCODE_CANCELLED;
                orb->callback(orb, NULL);
+               kref_put(&orb->kref, free_orb); /* orb callback reference */
        }
 
        return retval;
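
The comments added above name which conceptual reference each kref_get()/kref_put() belongs to, so every get is visibly paired with exactly one put. A minimal sketch of that discipline on a hypothetical object (the allocation path is assumed to kref_init() and hold its own reference):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct my_orb {
	struct kref kref;
	/* ... request payload ... */
};

static void free_my_orb(struct kref *kref)
{
	kfree(container_of(kref, struct my_orb, kref));
}

static void my_orb_submit(struct my_orb *orb)
{
	kref_get(&orb->kref);	/* transaction callback reference */
	kref_get(&orb->kref);	/* status-write callback reference */
	/* ... hand the ORB to the hardware ... */
}

static void my_orb_transaction_done(struct my_orb *orb)
{
	kref_put(&orb->kref, free_my_orb);	/* transaction callback reference */
}

static void my_orb_status_write(struct my_orb *orb)
{
	kref_put(&orb->kref, free_my_orb);	/* status-write callback reference */
}
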
index b42f42ca70c3c9454bb00ff7cf2e8505f74e482d..823559ab0e243610ca8e5ff3b5983aca3d53b7bb 100644 (file)
@@ -459,17 +459,33 @@ static int sx150x_init_io(struct sx150x_chip *chip, u8 base, u16 cfg)
        return err;
 }
 
-static int sx150x_init_hw(struct sx150x_chip *chip,
-                       struct sx150x_platform_data *pdata)
+static int sx150x_reset(struct sx150x_chip *chip)
 {
-       int err = 0;
+       int err;
 
-       err = i2c_smbus_write_word_data(chip->client,
+       err = i2c_smbus_write_byte_data(chip->client,
                                        chip->dev_cfg->reg_reset,
-                                       0x3412);
+                                       0x12);
        if (err < 0)
                return err;
 
+       err = i2c_smbus_write_byte_data(chip->client,
+                                       chip->dev_cfg->reg_reset,
+                                       0x34);
+       return err;
+}
+
+static int sx150x_init_hw(struct sx150x_chip *chip,
+                       struct sx150x_platform_data *pdata)
+{
+       int err = 0;
+
+       if (pdata->reset_during_probe) {
+               err = sx150x_reset(chip);
+               if (err < 0)
+                       return err;
+       }
+
        err = sx150x_i2c_write(chip->client,
                        chip->dev_cfg->reg_misc,
                        0x01);
index 7e31d4348340b10c9c4b39b0612e119335dc444c..d2ab01e90a96315fee72015f4a76f194211d4a81 100644 (file)
@@ -34,6 +34,9 @@
 #include "drm_crtc_helper.h"
 #include "drm_fb_helper.h"
 
+static bool drm_kms_helper_poll = true;
+module_param_named(poll, drm_kms_helper_poll, bool, 0600);
+
 static void drm_mode_validate_flag(struct drm_connector *connector,
                                   int flags)
 {
@@ -99,8 +102,10 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
                        connector->status = connector_status_disconnected;
                if (connector->funcs->force)
                        connector->funcs->force(connector);
-       } else
+       } else {
                connector->status = connector->funcs->detect(connector);
+               drm_helper_hpd_irq_event(dev);
+       }
 
        if (connector->status == connector_status_disconnected) {
                DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
@@ -110,11 +115,10 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
        }
 
        count = (*connector_funcs->get_modes)(connector);
-       if (!count) {
+       if (count == 0 && connector->status == connector_status_connected)
                count = drm_add_modes_noedid(connector, 1024, 768);
-               if (!count)
-                       return 0;
-       }
+       if (count == 0)
+               goto prune;
 
        drm_mode_connector_list_update(connector);
 
@@ -840,6 +844,9 @@ static void output_poll_execute(struct work_struct *work)
        enum drm_connector_status old_status, status;
        bool repoll = false, changed = false;
 
+       if (!drm_kms_helper_poll)
+               return;
+
        mutex_lock(&dev->mode_config.mutex);
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 
@@ -890,6 +897,9 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
        bool poll = false;
        struct drm_connector *connector;
 
+       if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
+               return;
+
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                if (connector->polled)
                        poll = true;
@@ -919,8 +929,10 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
 {
        if (!dev->mode_config.poll_enabled)
                return;
+
        /* kill timer and schedule immediate execution, this doesn't block */
        cancel_delayed_work(&dev->mode_config.output_poll_work);
-       queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
+       if (drm_kms_helper_poll)
+               queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
 }
 EXPORT_SYMBOL(drm_helper_hpd_irq_event);
index 90288ec7c28420133d0deea13e3ba2f1f98d413c..84da748555bc824379d1b49733db1444f33fc8d5 100644 (file)
@@ -55,6 +55,9 @@
 static int drm_version(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
 
+#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
+       [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0}
+
 /** Ioctl table */
 static struct drm_ioctl_desc drm_ioctls[] = {
        DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
@@ -421,6 +424,7 @@ long drm_ioctl(struct file *filp,
        int retcode = -EINVAL;
        char stack_kdata[128];
        char *kdata = NULL;
+       unsigned int usize, asize;
 
        dev = file_priv->minor->dev;
        atomic_inc(&dev->ioctl_count);
@@ -436,11 +440,18 @@ long drm_ioctl(struct file *filp,
            ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
                goto err_i1;
        if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
-           (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
+           (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
+               u32 drv_size;
                ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
+               drv_size = _IOC_SIZE(ioctl->cmd_drv);
+               usize = asize = _IOC_SIZE(cmd);
+               if (drv_size > asize)
+                       asize = drv_size;
+       }
        else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
                ioctl = &drm_ioctls[nr];
                cmd = ioctl->cmd;
+               usize = asize = _IOC_SIZE(cmd);
        } else
                goto err_i1;
 
@@ -460,10 +471,10 @@ long drm_ioctl(struct file *filp,
                retcode = -EACCES;
        } else {
                if (cmd & (IOC_IN | IOC_OUT)) {
-                       if (_IOC_SIZE(cmd) <= sizeof(stack_kdata)) {
+                       if (asize <= sizeof(stack_kdata)) {
                                kdata = stack_kdata;
                        } else {
-                               kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
+                               kdata = kmalloc(asize, GFP_KERNEL);
                                if (!kdata) {
                                        retcode = -ENOMEM;
                                        goto err_i1;
@@ -473,11 +484,13 @@ long drm_ioctl(struct file *filp,
 
                if (cmd & IOC_IN) {
                        if (copy_from_user(kdata, (void __user *)arg,
-                                          _IOC_SIZE(cmd)) != 0) {
+                                          usize) != 0) {
                                retcode = -EFAULT;
                                goto err_i1;
                        }
-               }
+               } else
+                       memset(kdata, 0, usize);
+
                if (ioctl->flags & DRM_UNLOCKED)
                        retcode = func(dev, kdata, file_priv);
                else {
@@ -488,7 +501,7 @@ long drm_ioctl(struct file *filp,
 
                if (cmd & IOC_OUT) {
                        if (copy_to_user((void __user *)arg, kdata,
-                                        _IOC_SIZE(cmd)) != 0)
+                                        usize) != 0)
                                retcode = -EFAULT;
                }
        }
index de82e201d6826d8ac0e422b22a2d0a93f8312d82..6a5e403f9aa160b54caad1bca22ba93dd27415e6 100644 (file)
@@ -94,10 +94,11 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_fb_helper_conn
        int i;
        enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
        struct drm_fb_helper_cmdline_mode *cmdline_mode;
-       struct drm_connector *connector = fb_helper_conn->connector;
+       struct drm_connector *connector;
 
        if (!fb_helper_conn)
                return false;
+       connector = fb_helper_conn->connector;
 
        cmdline_mode = &fb_helper_conn->cmdline_mode;
        if (!mode_option)
@@ -369,7 +370,7 @@ static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
 }
 static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn);
 
-static void drm_fb_helper_sysrq(int dummy1, struct tty_struct *dummy3)
+static void drm_fb_helper_sysrq(int dummy1)
 {
        schedule_work(&drm_fb_helper_restore_work);
 }
index 3a652a65546f16781542b7613865a8c1e692b60c..b744dad5c237f5371d571d13dc35d46a2fc95e2c 100644 (file)
@@ -41,6 +41,7 @@
 
 /* from BKL pushdown: note that nothing else serializes idr_find() */
 DEFINE_MUTEX(drm_global_mutex);
+EXPORT_SYMBOL(drm_global_mutex);
 
 static int drm_open_helper(struct inode *inode, struct file *filp,
                           struct drm_device * dev);
index e2f70a516c34412112078131203309c3656622d7..9bf93bc9a32c27798791684ab73512a046e92a9c 100644 (file)
@@ -92,7 +92,9 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
                }
 
                /* Contention */
+               mutex_unlock(&drm_global_mutex);
                schedule();
+               mutex_lock(&drm_global_mutex);
                if (signal_pending(current)) {
                        ret = -EINTR;
                        break;
index da99edc50888f2168c510054305e1c847e1fbefb..a6bfc302ed909ecf136888953b329d5acbdf65a3 100644 (file)
@@ -285,21 +285,21 @@ void drm_mm_put_block(struct drm_mm_node *cur)
 
 EXPORT_SYMBOL(drm_mm_put_block);
 
-static int check_free_mm_node(struct drm_mm_node *entry, unsigned long size,
-                             unsigned alignment)
+static int check_free_hole(unsigned long start, unsigned long end,
+                          unsigned long size, unsigned alignment)
 {
        unsigned wasted = 0;
 
-       if (entry->size < size)
+       if (end - start < size)
                return 0;
 
        if (alignment) {
-               register unsigned tmp = entry->start % alignment;
+               unsigned tmp = start % alignment;
                if (tmp)
                        wasted = alignment - tmp;
        }
 
-       if (entry->size >= size + wasted) {
+       if (end >= start + size + wasted) {
                return 1;
        }
 
@@ -320,7 +320,8 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
        best_size = ~0UL;
 
        list_for_each_entry(entry, &mm->free_stack, free_stack) {
-               if (!check_free_mm_node(entry, size, alignment))
+               if (!check_free_hole(entry->start, entry->start + entry->size,
+                                    size, alignment))
                        continue;
 
                if (!best_match)
@@ -353,10 +354,12 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
        best_size = ~0UL;
 
        list_for_each_entry(entry, &mm->free_stack, free_stack) {
-               if (entry->start > end || (entry->start+entry->size) < start)
-                       continue;
+               unsigned long adj_start = entry->start < start ?
+                       start : entry->start;
+               unsigned long adj_end = entry->start + entry->size > end ?
+                       end : entry->start + entry->size;
 
-               if (!check_free_mm_node(entry, size, alignment))
+               if (!check_free_hole(adj_start, adj_end, size, alignment))
                        continue;
 
                if (!best_match)
@@ -449,7 +452,8 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
        node->free_stack.prev = prev_free;
        node->free_stack.next = next_free;
 
-       if (check_free_mm_node(node, mm->scan_size, mm->scan_alignment)) {
+       if (check_free_hole(node->start, node->start + node->size,
+                           mm->scan_size, mm->scan_alignment)) {
                mm->scan_hit_start = node->start;
                mm->scan_hit_size = node->size;
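
check_free_hole() above reduces the fit test to plain interval arithmetic: skip ahead to the next aligned offset, then see whether size bytes still fit before end. A standalone restatement of that test with two worked cases:

#include <stdio.h>

static int check_free_hole(unsigned long start, unsigned long end,
			   unsigned long size, unsigned alignment)
{
	unsigned wasted = 0;

	if (end - start < size)
		return 0;

	if (alignment) {
		unsigned tmp = start % alignment;
		if (tmp)
			wasted = alignment - tmp;	/* bytes lost to alignment */
	}

	return end >= start + size + wasted;
}

int main(void)
{
	/* 0x3000-byte hole at 0x1800: a 0x2000 allocation aligned to
	 * 0x1000 needs 0x800 of padding first, and still fits */
	printf("%d\n", check_free_hole(0x1800, 0x4800, 0x2000, 0x1000));	/* 1 */
	/* the same request in a 0x2400-byte hole no longer fits */
	printf("%d\n", check_free_hole(0x1800, 0x3c00, 0x2000, 0x1000));	/* 0 */
	return 0;
}
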
 
index f1f473ea97d3bc3c7af129fe7e364c33e6a02de8..949326d2a8e5b6b3609d002daa75795c7cff6566 100644 (file)
@@ -251,7 +251,10 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
                drm_mode->htotal = drm_mode->hdisplay + CVT_RB_H_BLANK;
                /* Fill in HSync values */
                drm_mode->hsync_end = drm_mode->hdisplay + CVT_RB_H_BLANK / 2;
-               drm_mode->hsync_start = drm_mode->hsync_end = CVT_RB_H_SYNC;
+               drm_mode->hsync_start = drm_mode->hsync_end - CVT_RB_H_SYNC;
+               /* Fill in VSync values */
+               drm_mode->vsync_start = drm_mode->vdisplay + CVT_RB_VFPORCH;
+               drm_mode->vsync_end = drm_mode->vsync_start + vsync;
        }
        /* 15/13. Find pixel clock frequency (kHz for xf86) */
        drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod;
index 3778360eceea0cc44d2f2213090d5e1ed2e670f4..fda67468e603b6169393b92bc4922afef8b4d8ce 100644 (file)
@@ -138,7 +138,7 @@ static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                                break;
                }
 
-               if (!agpmem)
+               if (&agpmem->head == &dev->agp->memory)
                        goto vm_fault_error;
 
                /*
index 0e6c131313d95b48e62a6dab63730049c7332355..61b4caf220fa83bd15815ea0f82b627f2d773727 100644 (file)
@@ -1255,21 +1255,21 @@ long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 }
 
 struct drm_ioctl_desc i810_ioctls[] = {
-       DRM_IOCTL_DEF(DRM_I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I810_FLUSH, i810_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I810_GETAGE, i810_getage, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I810_GETBUF, i810_getbuf, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I810_SWAP, i810_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I810_COPY, i810_copybuf, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I810_DOCOPY, i810_docopy, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I810_OV0INFO, i810_ov0_info, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I810_FSTATUS, i810_fstatus, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I810_OV0FLIP, i810_ov0_flip, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I810_RSTATUS, i810_rstatus, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I810_FLUSH, i810_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I810_GETAGE, i810_getage, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I810_GETBUF, i810_getbuf, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I810_SWAP, i810_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I810_COPY, i810_copybuf, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I810_DOCOPY, i810_docopy, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I810_OV0INFO, i810_ov0_info, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I810_FSTATUS, i810_fstatus, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I810_OV0FLIP, i810_ov0_flip, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I810_RSTATUS, i810_rstatus, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
 };
 
 int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
index 5168862c92271e5d7fddfb48d3470f4f70113d92..671aa18415ac52d17164e79b4c2a9f287b02da0d 100644 (file)
@@ -1524,20 +1524,20 @@ long i830_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 }
 
 struct drm_ioctl_desc i830_ioctls[] = {
-       DRM_IOCTL_DEF(DRM_I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I830_VERTEX, i830_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I830_CLEAR, i830_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I830_FLUSH, i830_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I830_GETAGE, i830_getage, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I830_GETBUF, i830_getbuf, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I830_SWAP, i830_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I830_COPY, i830_copybuf, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I830_DOCOPY, i830_docopy, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I830_FLIP, i830_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I830_GETPARAM, i830_getparam, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I830_SETPARAM, i830_setparam, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I830_VERTEX, i830_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I830_CLEAR, i830_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I830_FLUSH, i830_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I830_GETAGE, i830_getage, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I830_GETBUF, i830_getbuf, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I830_SWAP, i830_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I830_COPY, i830_copybuf, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I830_DOCOPY, i830_docopy, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I830_FLIP, i830_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I830_GETPARAM, i830_getparam, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I830_SETPARAM, i830_setparam, DRM_AUTH|DRM_UNLOCKED),
 };
 
 int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
index 92d5605a34d11a6fcecbbae4149220c44ac86740..5e43d70767896e80400c96c628f2960270e1f2c4 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/slab.h>
 #include "drmP.h"
 #include "drm.h"
+#include "intel_drv.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
 
@@ -121,6 +122,54 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
        return 0;
 }
 
+static int i915_gem_pageflip_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       unsigned long flags;
+       struct intel_crtc *crtc;
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+               const char *pipe = crtc->pipe ? "B" : "A";
+               const char *plane = crtc->plane ? "B" : "A";
+               struct intel_unpin_work *work;
+
+               spin_lock_irqsave(&dev->event_lock, flags);
+               work = crtc->unpin_work;
+               if (work == NULL) {
+                       seq_printf(m, "No flip due on pipe %s (plane %s)\n",
+                                  pipe, plane);
+               } else {
+                       if (!work->pending) {
+                               seq_printf(m, "Flip queued on pipe %s (plane %s)\n",
+                                          pipe, plane);
+                       } else {
+                               seq_printf(m, "Flip pending (waiting for vsync) on pipe %s (plane %s)\n",
+                                          pipe, plane);
+                       }
+                       if (work->enable_stall_check)
+                               seq_printf(m, "Stall check enabled, ");
+                       else
+                               seq_printf(m, "Stall check waiting for page flip ioctl, ");
+                       seq_printf(m, "%d prepares\n", work->pending);
+
+                       if (work->old_fb_obj) {
+                               struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj);
+                               if(obj_priv)
+                                       seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
+                       }
+                       if (work->pending_flip_obj) {
+                               struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj);
+                               if(obj_priv)
+                                       seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
+                       }
+               }
+               spin_unlock_irqrestore(&dev->event_lock, flags);
+       }
+
+       return 0;
+}
+
 static int i915_gem_request_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -777,6 +826,7 @@ static struct drm_info_list i915_debugfs_list[] = {
        {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
        {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
        {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
+       {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
        {"i915_gem_request", i915_gem_request_info, 0},
        {"i915_gem_seqno", i915_gem_seqno_info, 0},
        {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
index 44af317731b67071c3fa9ec13280975ae7f7d639..9d67b485303005771a090ea7a3c1e1c6f8b74e9e 100644 (file)
@@ -620,8 +620,10 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
                ret = copy_from_user(cliprects, batch->cliprects,
                                     batch->num_cliprects *
                                     sizeof(struct drm_clip_rect));
-               if (ret != 0)
+               if (ret != 0) {
+                       ret = -EFAULT;
                        goto fail_free;
+               }
        }
 
        mutex_lock(&dev->struct_mutex);
@@ -662,8 +664,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
                return -ENOMEM;
 
        ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
-       if (ret != 0)
+       if (ret != 0) {
+               ret = -EFAULT;
                goto fail_batch_free;
+       }
 
        if (cmdbuf->num_cliprects) {
                cliprects = kcalloc(cmdbuf->num_cliprects,
@@ -676,8 +680,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
                ret = copy_from_user(cliprects, cmdbuf->cliprects,
                                     cmdbuf->num_cliprects *
                                     sizeof(struct drm_clip_rect));
-               if (ret != 0)
+               if (ret != 0) {
+                       ret = -EFAULT;
                        goto fail_clip_free;
+               }
        }
 
        mutex_lock(&dev->struct_mutex);
@@ -885,7 +891,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
        int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
        u32 temp_lo, temp_hi = 0;
        u64 mchbar_addr;
-       int ret = 0;
+       int ret;
 
        if (IS_I965G(dev))
                pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
@@ -895,22 +901,23 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
        /* If ACPI doesn't have it, assume we need to allocate it ourselves */
 #ifdef CONFIG_PNP
        if (mchbar_addr &&
-           pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
-               ret = 0;
-               goto out;
-       }
+           pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
+               return 0;
 #endif
 
        /* Get some space for it */
-       ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res,
+       dev_priv->mch_res.name = "i915 MCHBAR";
+       dev_priv->mch_res.flags = IORESOURCE_MEM;
+       ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
+                                    &dev_priv->mch_res,
                                     MCHBAR_SIZE, MCHBAR_SIZE,
                                     PCIBIOS_MIN_MEM,
-                                    0,   pcibios_align_resource,
+                                    0, pcibios_align_resource,
                                     dev_priv->bridge_dev);
        if (ret) {
                DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
                dev_priv->mch_res.start = 0;
-               goto out;
+               return ret;
        }
 
        if (IS_I965G(dev))
@@ -919,8 +926,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
 
        pci_write_config_dword(dev_priv->bridge_dev, reg,
                               lower_32_bits(dev_priv->mch_res.start));
-out:
-       return ret;
+       return 0;
 }
 
 /* Setup MCHBAR if possible, return true if we should disable it again */
@@ -2082,6 +2088,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                goto free_priv;
        }
 
+       /* overlay on gen2 is broken and can't address above 1G */
+       if (IS_GEN2(dev))
+               dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
+
        dev_priv->regs = ioremap(base, size);
        if (!dev_priv->regs) {
                DRM_ERROR("failed to map registers\n");
@@ -2367,46 +2377,46 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
 }
 
 struct drm_ioctl_desc i915_ioctls[] = {
-       DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP,  i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
-       DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE,  i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
-       DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH ),
-       DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_ALLOC, i915_mem_alloc, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_FREE, i915_mem_free, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 };
 
 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
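
The table above switches from DRM_IOCTL_DEF to DRM_IOCTL_DEF_DRV, which pastes the driver prefix back on inside the macro so each ioctl is named only once at the call site. A minimal sketch of that token-pasting idea, with invented EXAMPLE_* ioctl numbers and a DEF_DRV stand-in rather than the real DRM macro:

#include <stdio.h>

#define EXAMPLE_FOO  0x10   /* stand-in ioctl numbers */
#define EXAMPLE_BAR  0x11

struct ioctl_desc {
        int cmd;
        const char *name;
};

/* DEF_DRV(FOO) expands to { EXAMPLE_FOO, "EXAMPLE_FOO" } */
#define DEF_DRV(ioctl)  { EXAMPLE_##ioctl, "EXAMPLE_" #ioctl }

static const struct ioctl_desc ioctls[] = {
        DEF_DRV(FOO),
        DEF_DRV(BAR),
};

int main(void)
{
        for (unsigned i = 0; i < sizeof(ioctls) / sizeof(ioctls[0]); i++)
                printf("%s = 0x%02x\n", ioctls[i].name, ioctls[i].cmd);
        return 0;
}
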
index 00befce8fbb7f503b5ed5311c8acac81f1c00fcd..216deb579785eb93e27e2ba57a0556471a13daf2 100644
@@ -61,91 +61,86 @@ extern int intel_agp_enabled;
        .driver_data = (unsigned long) info }
 
 static const struct intel_device_info intel_i830_info = {
-       .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+       .gen = 2, .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_845g_info = {
-       .is_i8xx = 1,
+       .gen = 2, .is_i8xx = 1,
 };
 
 static const struct intel_device_info intel_i85x_info = {
-       .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
+       .gen = 2, .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
        .cursor_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_i865g_info = {
-       .is_i8xx = 1,
+       .gen = 2, .is_i8xx = 1,
 };
 
 static const struct intel_device_info intel_i915g_info = {
-       .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
+       .gen = 3, .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
 };
 static const struct intel_device_info intel_i915gm_info = {
-       .is_i9xx = 1,  .is_mobile = 1,
+       .gen = 3, .is_i9xx = 1,  .is_mobile = 1,
        .cursor_needs_physical = 1,
 };
 static const struct intel_device_info intel_i945g_info = {
-       .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
+       .gen = 3, .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
 };
 static const struct intel_device_info intel_i945gm_info = {
-       .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
+       .gen = 3, .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
        .has_hotplug = 1, .cursor_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_i965g_info = {
-       .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
+       .gen = 4, .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1,
+       .has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_i965gm_info = {
-       .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1,
-       .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1,
-       .has_hotplug = 1,
+       .gen = 4, .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1,
+       .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_g33_info = {
-       .is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1,
-       .has_hotplug = 1,
+       .gen = 3, .is_g33 = 1, .is_i9xx = 1,
+       .need_gfx_hws = 1, .has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_g45_info = {
-       .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
-       .has_pipe_cxsr = 1,
-       .has_hotplug = 1,
+       .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+       .has_pipe_cxsr = 1, .has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_gm45_info = {
-       .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1,
+       .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1,
        .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
-       .has_pipe_cxsr = 1,
-       .has_hotplug = 1,
+       .has_pipe_cxsr = 1, .has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_pineview_info = {
-       .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
-       .need_gfx_hws = 1,
-       .has_hotplug = 1,
+       .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
+       .need_gfx_hws = 1, .has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_ironlake_d_info = {
-       .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
-       .has_pipe_cxsr = 1,
-       .has_hotplug = 1,
+       .gen = 5, .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1,
+       .need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_ironlake_m_info = {
-       .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
-       .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
-       .has_hotplug = 1,
+       .gen = 5, .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
+       .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_sandybridge_d_info = {
-       .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
-       .has_hotplug = 1, .is_gen6 = 1,
+       .gen = 6, .is_i965g = 1, .is_i9xx = 1,
+       .need_gfx_hws = 1, .has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_sandybridge_m_info = {
-       .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, .need_gfx_hws = 1,
-       .has_hotplug = 1, .is_gen6 = 1,
+       .gen = 6, .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1,
+       .need_gfx_hws = 1, .has_hotplug = 1,
 };
 
 static const struct pci_device_id pciidlist[] = {              /* aka */
@@ -180,8 +175,12 @@ static const struct pci_device_id pciidlist[] = {          /* aka */
        INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
        INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
        INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
+       INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
+       INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
        INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
+       INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
        INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
+       INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
        {0, 0, 0}
 };
 
index 047cd7ce7e1b31ec981f9069fdfbfb412e9dd8ea..af4a263cf25782a6906f80a1d0783841000ac9b1 100644
@@ -191,6 +191,7 @@ struct drm_i915_display_funcs {
 };
 
 struct intel_device_info {
+       u8 gen;
        u8 is_mobile : 1;
        u8 is_i8xx : 1;
        u8 is_i85x : 1;
@@ -206,7 +207,6 @@ struct intel_device_info {
        u8 is_broadwater : 1;
        u8 is_crestline : 1;
        u8 is_ironlake : 1;
-       u8 is_gen6 : 1;
        u8 has_fbc : 1;
        u8 has_rc6 : 1;
        u8 has_pipe_cxsr : 1;
@@ -1162,7 +1162,6 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
 #define IS_845G(dev)           ((dev)->pci_device == 0x2562)
 #define IS_I85X(dev)           (INTEL_INFO(dev)->is_i85x)
 #define IS_I865G(dev)          ((dev)->pci_device == 0x2572)
-#define IS_GEN2(dev)           (INTEL_INFO(dev)->is_i8xx)
 #define IS_I915G(dev)          (INTEL_INFO(dev)->is_i915g)
 #define IS_I915GM(dev)         ((dev)->pci_device == 0x2592)
 #define IS_I945G(dev)          ((dev)->pci_device == 0x2772)
@@ -1181,27 +1180,13 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
 #define IS_IRONLAKE_M(dev)     ((dev)->pci_device == 0x0046)
 #define IS_IRONLAKE(dev)       (INTEL_INFO(dev)->is_ironlake)
 #define IS_I9XX(dev)           (INTEL_INFO(dev)->is_i9xx)
-#define IS_GEN6(dev)           (INTEL_INFO(dev)->is_gen6)
 #define IS_MOBILE(dev)         (INTEL_INFO(dev)->is_mobile)
 
-#define IS_GEN3(dev)   (IS_I915G(dev) ||                       \
-                        IS_I915GM(dev) ||                      \
-                        IS_I945G(dev) ||                       \
-                        IS_I945GM(dev) ||                      \
-                        IS_G33(dev) || \
-                        IS_PINEVIEW(dev))
-#define IS_GEN4(dev)   ((dev)->pci_device == 0x2972 ||         \
-                        (dev)->pci_device == 0x2982 ||         \
-                        (dev)->pci_device == 0x2992 ||         \
-                        (dev)->pci_device == 0x29A2 ||         \
-                        (dev)->pci_device == 0x2A02 ||         \
-                        (dev)->pci_device == 0x2A12 ||         \
-                        (dev)->pci_device == 0x2E02 ||         \
-                        (dev)->pci_device == 0x2E12 ||         \
-                        (dev)->pci_device == 0x2E22 ||         \
-                        (dev)->pci_device == 0x2E32 ||         \
-                        (dev)->pci_device == 0x2A42 ||         \
-                        (dev)->pci_device == 0x2E42)
+#define IS_GEN2(dev)   (INTEL_INFO(dev)->gen == 2)
+#define IS_GEN3(dev)   (INTEL_INFO(dev)->gen == 3)
+#define IS_GEN4(dev)   (INTEL_INFO(dev)->gen == 4)
+#define IS_GEN5(dev)   (INTEL_INFO(dev)->gen == 5)
+#define IS_GEN6(dev)   (INTEL_INFO(dev)->gen == 6)
 
 #define HAS_BSD(dev)            (IS_IRONLAKE(dev) || IS_G4X(dev))
 #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
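
With the new 'gen' field in intel_device_info, a whole chip family can be tested with one comparison instead of the long PCI-ID lists deleted above. A self-contained sketch of the same table-plus-macro pattern (struct names, example IDs and the lookup helper are invented for illustration):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct device_info {
        uint8_t gen;            /* hardware generation */
        uint8_t has_hotplug:1;  /* example capability bit */
};

struct pci_id_info {
        uint16_t device_id;
        const struct device_info *info;
};

static const struct device_info gen3_info = { .gen = 3, .has_hotplug = 1 };
static const struct device_info gen4_info = { .gen = 4, .has_hotplug = 1 };

static const struct pci_id_info id_table[] = {
        { 0x2772, &gen3_info },   /* example IDs only */
        { 0x2972, &gen4_info },
};

/* One comparison per check instead of a dozen PCI-ID tests. */
#define IS_GEN3(info)  ((info)->gen == 3)
#define IS_GEN4(info)  ((info)->gen == 4)

static const struct device_info *lookup(uint16_t device_id)
{
        for (size_t i = 0; i < sizeof(id_table) / sizeof(id_table[0]); i++)
                if (id_table[i].device_id == device_id)
                        return id_table[i].info;
        return NULL;
}

int main(void)
{
        const struct device_info *info = lookup(0x2972);

        if (info)
                printf("gen %d, IS_GEN4=%d\n", info->gen, IS_GEN4(info));
        return 0;
}
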
index df5a7135c2614c8de55e309c5dfad82efcf618ac..16fca1d1799a4211474a91e7fc52b605eceafbfc 100644
@@ -34,6 +34,7 @@
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/pci.h>
+#include <linux/intel-gtt.h>
 
 static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
 static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
@@ -135,12 +136,15 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
                return -ENOMEM;
 
        ret = drm_gem_handle_create(file_priv, obj, &handle);
-       drm_gem_object_unreference_unlocked(obj);
-       if (ret)
+       if (ret) {
+               drm_gem_object_unreference_unlocked(obj);
                return ret;
+       }
 
-       args->handle = handle;
+       /* Sink the floating reference from kref_init(handlecount) */
+       drm_gem_object_handle_unreference_unlocked(obj);
 
+       args->handle = handle;
        return 0;
 }
 
@@ -3585,6 +3589,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                if (ret != 0) {
                        DRM_ERROR("copy %d cliprects failed: %d\n",
                                  args->num_cliprects, ret);
+                       ret = -EFAULT;
                        goto pre_mutex_err;
                }
        }
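
Both cliprect-copy hunks in this patch now force ret to -EFAULT: copy_from_user() reports how many bytes it failed to copy rather than an errno, so its raw return value must not be handed back to userspace. A userspace sketch of the translation, using a stand-in copy routine:

#include <stdio.h>
#include <string.h>

#define EFAULT 14

/* Stand-in: pretend 'missing' bytes at the end could not be copied. */
static unsigned long fake_copy_from_user(void *dst, const void *src,
                                         unsigned long n, unsigned long missing)
{
        memcpy(dst, src, n - missing);
        return missing;          /* bytes left uncopied */
}

static int fetch_cliprects(void *dst, const void *src, unsigned long n)
{
        unsigned long uncopied = fake_copy_from_user(dst, src, n, n ? 1 : 0);

        if (uncopied != 0)
                return -EFAULT;  /* translate "bytes left" into an errno */
        return 0;
}

int main(void)
{
        char src[8] = "abcdefg", dst[8];

        printf("ret = %d\n", fetch_cliprects(dst, src, sizeof(src)));
        return 0;
}
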
index 16861b800feeb46577f35e616c3d891b6dff9f4e..59457e83b011aa3bbac119faf6836a66df7affa5 100644
@@ -887,6 +887,49 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
        queue_work(dev_priv->wq, &dev_priv->error_work);
 }
 
+static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct drm_i915_gem_object *obj_priv;
+       struct intel_unpin_work *work;
+       unsigned long flags;
+       bool stall_detected;
+
+       /* Ignore early vblank irqs */
+       if (intel_crtc == NULL)
+               return;
+
+       spin_lock_irqsave(&dev->event_lock, flags);
+       work = intel_crtc->unpin_work;
+
+       if (work == NULL || work->pending || !work->enable_stall_check) {
+               /* Either the pending flip IRQ arrived, or we're too early. Don't check */
+               spin_unlock_irqrestore(&dev->event_lock, flags);
+               return;
+       }
+
+       /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
+       obj_priv = to_intel_bo(work->pending_flip_obj);
+       if(IS_I965G(dev)) {
+               int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
+               stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset;
+       } else {
+               int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR;
+               stall_detected = I915_READ(dspaddr) == (obj_priv->gtt_offset +
+                                                       crtc->y * crtc->fb->pitch +
+                                                       crtc->x * crtc->fb->bits_per_pixel/8);
+       }
+
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+
+       if (stall_detected) {
+               DRM_DEBUG_DRIVER("Pageflip stall detected\n");
+               intel_prepare_page_flip(dev, intel_crtc->plane);
+       }
+}
+
 irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 {
        struct drm_device *dev = (struct drm_device *) arg;
@@ -1004,15 +1047,19 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
                if (pipea_stats & vblank_status) {
                        vblank++;
                        drm_handle_vblank(dev, 0);
-                       if (!dev_priv->flip_pending_is_done)
+                       if (!dev_priv->flip_pending_is_done) {
+                               i915_pageflip_stall_check(dev, 0);
                                intel_finish_page_flip(dev, 0);
+                       }
                }
 
                if (pipeb_stats & vblank_status) {
                        vblank++;
                        drm_handle_vblank(dev, 1);
-                       if (!dev_priv->flip_pending_is_done)
+                       if (!dev_priv->flip_pending_is_done) {
+                               i915_pageflip_stall_check(dev, 1);
                                intel_finish_page_flip(dev, 1);
+                       }
                }
 
                if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
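
i915_pageflip_stall_check() above recovers from a missed flip-completion interrupt: if a flip is still marked pending at vblank time but the scanout register already points at the new buffer, the flip is finished manually. A stripped-down sketch of that comparison (locking and the pre-965 address arithmetic are omitted; names and the register model are invented):

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

/* Simulated display-surface register and pending-flip bookkeeping. */
static uint32_t dspsurf_reg;            /* what the hardware scans out */

struct pending_flip {
        bool     armed;                 /* flip queued, completion not seen */
        uint32_t new_surface;           /* address the flip points at */
};

/* On each vblank with a flip still outstanding: if the scanout base
 * already equals the new buffer, the flip happened but its completion
 * interrupt was missed, so the driver finishes it from here.
 */
static bool flip_stalled(const struct pending_flip *flip)
{
        if (!flip->armed)
                return false;
        return dspsurf_reg == flip->new_surface;
}

int main(void)
{
        struct pending_flip flip = { .armed = true, .new_surface = 0x100000 };

        dspsurf_reg = 0x100000;          /* hardware already switched */
        printf("stalled = %d\n", flip_stalled(&flip));
        return 0;
}
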
index 67e3ec1a6af9dc482870e842b3739a86581176ae..d094e91292234507c82ea57cb57947a019318aa6 100644
 
 #define MI_MODE                0x0209c
 # define VS_TIMER_DISPATCH                             (1 << 6)
+# define MI_FLUSH_ENABLE                               (1 << 11)
 
 #define SCPD0          0x0209c /* 915+ only */
 #define IER            0x020a0
index 23157e1de3bec24339f21ed95fac72bd341ab0fb..40cc5da264a9bdf5520909ba39ca2cb21c1ca5ca 100644
@@ -990,9 +990,25 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipestat_reg = (pipe == 0 ? PIPEASTAT : PIPEBSTAT);
 
+       /* Clear existing vblank status. Note this will clear any other
+        * sticky status fields as well.
+        *
+        * This races with i915_driver_irq_handler() with the result
+        * that either function could miss a vblank event.  Here it is not
+        * fatal, as we will either wait upon the next vblank interrupt or
+        * timeout.  Generally speaking intel_wait_for_vblank() is only
+        * called during modeset at which time the GPU should be idle and
+        * should *not* be performing page flips and thus not waiting on
+        * vblanks...
+        * Currently, the result of us stealing a vblank from the irq
+        * handler is that a single frame will be skipped during swapbuffers.
+        */
+       I915_WRITE(pipestat_reg,
+                  I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
+
        /* Wait for vblank interrupt bit to set */
        if (wait_for((I915_READ(pipestat_reg) &
-                     PIPE_VBLANK_INTERRUPT_STATUS) == 0,
+                     PIPE_VBLANK_INTERRUPT_STATUS),
                     50, 0))
                DRM_DEBUG_KMS("vblank wait timed out\n");
 }
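
intel_wait_for_vblank() now clears the sticky vblank status bit before polling, so it waits for a fresh event instead of returning on a stale one. A simulated sketch of the clear-then-poll pattern with a bounded loop (the register model is invented):

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

#define VBLANK_STATUS  (1u << 1)        /* stand-in for the sticky bit */

static uint32_t pipestat;               /* simulated status register */

/* Simulate hardware: the event bit latches on some later poll. */
static uint32_t read_pipestat(int poll_count)
{
        if (poll_count >= 3)
                pipestat |= VBLANK_STATUS;
        return pipestat;
}

/* Clear the sticky bit first, then wait (bounded) for a *new* event. */
static bool wait_for_vblank(int max_polls)
{
        pipestat &= ~VBLANK_STATUS;      /* "write to clear" stand-in */

        for (int i = 0; i < max_polls; i++)
                if (read_pipestat(i) & VBLANK_STATUS)
                        return true;
        return false;                    /* timed out */
}

int main(void)
{
        printf("vblank seen = %d\n", wait_for_vblank(50));
        return 0;
}
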
@@ -1486,7 +1502,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                        dspcntr &= ~DISPPLANE_TILED;
        }
 
-       if (IS_IRONLAKE(dev))
+       if (HAS_PCH_SPLIT(dev))
                /* must disable */
                dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
 
@@ -1495,20 +1511,19 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
        Start = obj_priv->gtt_offset;
        Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
 
-       DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
+       DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
+                     Start, Offset, x, y, fb->pitch);
        I915_WRITE(dspstride, fb->pitch);
        if (IS_I965G(dev)) {
-               I915_WRITE(dspbase, Offset);
-               I915_READ(dspbase);
                I915_WRITE(dspsurf, Start);
-               I915_READ(dspsurf);
                I915_WRITE(dsptileoff, (y << 16) | x);
+               I915_WRITE(dspbase, Offset);
        } else {
                I915_WRITE(dspbase, Start + Offset);
-               I915_READ(dspbase);
        }
+       POSTING_READ(dspbase);
 
-       if ((IS_I965G(dev) || plane == 0))
+       if (IS_I965G(dev) || plane == 0)
                intel_update_fbc(crtc, &crtc->mode);
 
        intel_wait_for_vblank(dev, intel_crtc->pipe);
@@ -1522,7 +1537,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
                    struct drm_framebuffer *old_fb)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_framebuffer *intel_fb;
@@ -1530,13 +1544,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
        struct drm_gem_object *obj;
        int pipe = intel_crtc->pipe;
        int plane = intel_crtc->plane;
-       unsigned long Start, Offset;
-       int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR);
-       int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF);
-       int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE;
-       int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF);
-       int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
-       u32 dspcntr;
        int ret;
 
        /* no fb bound */
@@ -1572,71 +1579,18 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
                return ret;
        }
 
-       dspcntr = I915_READ(dspcntr_reg);
-       /* Mask out pixel format bits in case we change it */
-       dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
-       switch (crtc->fb->bits_per_pixel) {
-       case 8:
-               dspcntr |= DISPPLANE_8BPP;
-               break;
-       case 16:
-               if (crtc->fb->depth == 15)
-                       dspcntr |= DISPPLANE_15_16BPP;
-               else
-                       dspcntr |= DISPPLANE_16BPP;
-               break;
-       case 24:
-       case 32:
-               if (crtc->fb->depth == 30)
-                       dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
-               else
-                       dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
-               break;
-       default:
-               DRM_ERROR("Unknown color depth\n");
+       ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y);
+       if (ret) {
                i915_gem_object_unpin(obj);
                mutex_unlock(&dev->struct_mutex);
-               return -EINVAL;
-       }
-       if (IS_I965G(dev)) {
-               if (obj_priv->tiling_mode != I915_TILING_NONE)
-                       dspcntr |= DISPPLANE_TILED;
-               else
-                       dspcntr &= ~DISPPLANE_TILED;
-       }
-
-       if (HAS_PCH_SPLIT(dev))
-               /* must disable */
-               dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
-
-       I915_WRITE(dspcntr_reg, dspcntr);
-
-       Start = obj_priv->gtt_offset;
-       Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
-
-       DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
-                     Start, Offset, x, y, crtc->fb->pitch);
-       I915_WRITE(dspstride, crtc->fb->pitch);
-       if (IS_I965G(dev)) {
-               I915_WRITE(dspsurf, Start);
-               I915_WRITE(dsptileoff, (y << 16) | x);
-               I915_WRITE(dspbase, Offset);
-       } else {
-               I915_WRITE(dspbase, Start + Offset);
+               return ret;
        }
-       POSTING_READ(dspbase);
-
-       if ((IS_I965G(dev) || plane == 0))
-               intel_update_fbc(crtc, &crtc->mode);
-
-       intel_wait_for_vblank(dev, pipe);
 
        if (old_fb) {
                intel_fb = to_intel_framebuffer(old_fb);
                obj_priv = to_intel_bo(intel_fb->obj);
                i915_gem_object_unpin(intel_fb->obj);
        }
-       intel_increase_pllclock(crtc, true);
 
        mutex_unlock(&dev->struct_mutex);
 
@@ -1911,9 +1865,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
        int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
        int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
        int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
-       int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1;
-       int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ;
-       int pf_win_pos = (pipe == 0) ? PFA_WIN_POS : PFB_WIN_POS;
        int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
        int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
        int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
@@ -1982,15 +1933,19 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
                }
 
                /* Enable panel fitting for LVDS */
-               if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
-                   || HAS_eDP || intel_pch_has_edp(crtc)) {
-                       if (dev_priv->pch_pf_size) {
-                               temp = I915_READ(pf_ctl_reg);
-                               I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3);
-                               I915_WRITE(pf_win_pos, dev_priv->pch_pf_pos);
-                               I915_WRITE(pf_win_size, dev_priv->pch_pf_size);
-                       } else
-                               I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
+               if (dev_priv->pch_pf_size &&
+                   (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
+                   || HAS_eDP || intel_pch_has_edp(crtc))) {
+                       /* Force use of hard-coded filter coefficients
+                        * as some pre-programmed values are broken,
+                        * e.g. x201.
+                        */
+                       I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1,
+                                  PF_ENABLE | PF_FILTER_MED_3x3);
+                       I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS,
+                                  dev_priv->pch_pf_pos);
+                       I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ,
+                                  dev_priv->pch_pf_size);
                }
 
                /* Enable CPU pipe */
@@ -2115,7 +2070,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
                        I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
                        I915_READ(transconf_reg);
 
-                       if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 10, 0))
+                       if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 100, 1))
                                DRM_ERROR("failed to enable transcoder\n");
                }
 
@@ -2155,14 +2110,8 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
                udelay(100);
 
                /* Disable PF */
-               temp = I915_READ(pf_ctl_reg);
-               if ((temp & PF_ENABLE) != 0) {
-                       I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
-                       I915_READ(pf_ctl_reg);
-               }
-               I915_WRITE(pf_win_size, 0);
-               POSTING_READ(pf_win_size);
-
+               I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0);
+               I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0);
 
                /* disable CPU FDI tx and PCH FDI rx */
                temp = I915_READ(fdi_tx_reg);
@@ -2421,6 +2370,9 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
        int pipe = intel_crtc->pipe;
        bool enabled;
 
+       if (intel_crtc->dpms_mode == mode)
+               return;
+
        intel_crtc->dpms_mode = mode;
        intel_crtc->cursor_on = mode == DRM_MODE_DPMS_ON;
 
@@ -3554,10 +3506,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
        u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf;
        bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
        bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
-       bool is_edp = false;
+       struct intel_encoder *has_edp_encoder = NULL;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct drm_encoder *encoder;
-       struct intel_encoder *intel_encoder = NULL;
        const intel_limit_t *limit;
        int ret;
        struct fdi_m_n m_n = {0};
@@ -3578,12 +3529,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
        drm_vblank_pre_modeset(dev, pipe);
 
        list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+               struct intel_encoder *intel_encoder;
 
-               if (!encoder || encoder->crtc != crtc)
+               if (encoder->crtc != crtc)
                        continue;
 
                intel_encoder = enc_to_intel_encoder(encoder);
-
                switch (intel_encoder->type) {
                case INTEL_OUTPUT_LVDS:
                        is_lvds = true;
@@ -3607,7 +3558,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                        is_dp = true;
                        break;
                case INTEL_OUTPUT_EDP:
-                       is_edp = true;
+                       has_edp_encoder = intel_encoder;
                        break;
                }
 
@@ -3685,10 +3636,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                int lane = 0, link_bw, bpp;
                /* eDP doesn't require FDI link, so just set DP M/N
                   according to current link config */
-               if (is_edp) {
+               if (has_edp_encoder) {
                        target_clock = mode->clock;
-                       intel_edp_link_config(intel_encoder,
-                                       &lane, &link_bw);
+                       intel_edp_link_config(has_edp_encoder,
+                                             &lane, &link_bw);
                } else {
                        /* DP over FDI requires target mode clock
                           instead of link clock */
@@ -3709,7 +3660,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                                temp |= PIPE_8BPC;
                        else
                                temp |= PIPE_6BPC;
-               } else if (is_edp || (is_dp && intel_pch_has_edp(crtc))) {
+               } else if (has_edp_encoder || (is_dp && intel_pch_has_edp(crtc))) {
                        switch (dev_priv->edp_bpp/3) {
                        case 8:
                                temp |= PIPE_8BPC;
@@ -3782,7 +3733,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 
                udelay(200);
 
-               if (is_edp) {
+               if (has_edp_encoder) {
                        if (dev_priv->lvds_use_ssc) {
                                temp |= DREF_SSC1_ENABLE;
                                I915_WRITE(PCH_DREF_CONTROL, temp);
@@ -3931,7 +3882,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                dpll_reg = pch_dpll_reg;
        }
 
-       if (!is_edp) {
+       if (!has_edp_encoder) {
                I915_WRITE(fp_reg, fp);
                I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
                I915_READ(dpll_reg);
@@ -4026,7 +3977,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                }
        }
 
-       if (!is_edp) {
+       if (!has_edp_encoder) {
                I915_WRITE(fp_reg, fp);
                I915_WRITE(dpll_reg, dpll);
                I915_READ(dpll_reg);
@@ -4105,7 +4056,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                I915_WRITE(link_m1_reg, m_n.link_m);
                I915_WRITE(link_n1_reg, m_n.link_n);
 
-               if (is_edp) {
+               if (has_edp_encoder) {
                        ironlake_set_pll_edp(crtc, adjusted_mode->clock);
                } else {
                        /* enable FDI RX PLL too */
@@ -4911,15 +4862,6 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
        kfree(intel_crtc);
 }
 
-struct intel_unpin_work {
-       struct work_struct work;
-       struct drm_device *dev;
-       struct drm_gem_object *old_fb_obj;
-       struct drm_gem_object *pending_flip_obj;
-       struct drm_pending_vblank_event *event;
-       int pending;
-};
-
 static void intel_unpin_work_fn(struct work_struct *__work)
 {
        struct intel_unpin_work *work =
@@ -5007,7 +4949,8 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
 
        spin_lock_irqsave(&dev->event_lock, flags);
        if (intel_crtc->unpin_work) {
-               intel_crtc->unpin_work->pending = 1;
+               if ((++intel_crtc->unpin_work->pending) > 1)
+                       DRM_ERROR("Prepared flip multiple times\n");
        } else {
                DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
        }
@@ -5026,9 +4969,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_unpin_work *work;
        unsigned long flags, offset;
-       int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
-       int ret, pipesrc;
-       u32 flip_mask;
+       int pipe = intel_crtc->pipe;
+       u32 pf, pipesrc;
+       int ret;
 
        work = kzalloc(sizeof *work, GFP_KERNEL);
        if (work == NULL)
@@ -5077,42 +5020,73 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        atomic_inc(&obj_priv->pending_flip);
        work->pending_flip_obj = obj;
 
-       if (intel_crtc->plane)
-               flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
-       else
-               flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-
        if (IS_GEN3(dev) || IS_GEN2(dev)) {
+               u32 flip_mask;
+
+               if (intel_crtc->plane)
+                       flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+               else
+                       flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+
                BEGIN_LP_RING(2);
                OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
                OUT_RING(0);
                ADVANCE_LP_RING();
        }
 
+       work->enable_stall_check = true;
+
        /* Offset into the new buffer for cases of shared fbs between CRTCs */
-       offset = obj_priv->gtt_offset;
-       offset += (crtc->y * fb->pitch) + (crtc->x * (fb->bits_per_pixel) / 8);
+       offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
 
        BEGIN_LP_RING(4);
-       if (IS_I965G(dev)) {
+       switch(INTEL_INFO(dev)->gen) {
+       case 2:
                OUT_RING(MI_DISPLAY_FLIP |
                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
                OUT_RING(fb->pitch);
-               OUT_RING(offset | obj_priv->tiling_mode);
-               pipesrc = I915_READ(pipesrc_reg); 
-               OUT_RING(pipesrc & 0x0fff0fff);
-       } else if (IS_GEN3(dev)) {
+               OUT_RING(obj_priv->gtt_offset + offset);
+               OUT_RING(MI_NOOP);
+               break;
+
+       case 3:
                OUT_RING(MI_DISPLAY_FLIP_I915 |
                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
                OUT_RING(fb->pitch);
-               OUT_RING(offset);
+               OUT_RING(obj_priv->gtt_offset + offset);
                OUT_RING(MI_NOOP);
-       } else {
+               break;
+
+       case 4:
+       case 5:
+               /* i965+ uses the linear or tiled offsets from the
+                * Display Registers (which do not change across a page-flip)
+                * so we need only reprogram the base address.
+                */
                OUT_RING(MI_DISPLAY_FLIP |
                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
                OUT_RING(fb->pitch);
-               OUT_RING(offset);
-               OUT_RING(MI_NOOP);
+               OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
+
+               /* XXX Enabling the panel-fitter across page-flip is so far
+                * untested on non-native modes, so ignore it for now.
+                * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
+                */
+               pf = 0;
+               pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
+               OUT_RING(pf | pipesrc);
+               break;
+
+       case 6:
+               OUT_RING(MI_DISPLAY_FLIP |
+                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+               OUT_RING(fb->pitch | obj_priv->tiling_mode);
+               OUT_RING(obj_priv->gtt_offset);
+
+               pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
+               pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
+               OUT_RING(pf | pipesrc);
+               break;
        }
        ADVANCE_LP_RING();
 
@@ -5193,7 +5167,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
        dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
 
        intel_crtc->cursor_addr = 0;
-       intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
+       intel_crtc->dpms_mode = -1;
        drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
 
        intel_crtc->busy = false;
index 9caccd03dccb6841fea4be4c5fa43f5e8822ee33..51d142939a26e9abe76fdfe8a90aa94ff0f3b612 100644
@@ -239,7 +239,6 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
        uint32_t ch_data = ch_ctl + 4;
        int i;
        int recv_bytes;
-       uint32_t ctl;
        uint32_t status;
        uint32_t aux_clock_divider;
        int try, precharge;
@@ -263,41 +262,43 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
        else
                precharge = 5;
 
+       if (I915_READ(ch_ctl) & DP_AUX_CH_CTL_SEND_BUSY) {
+               DRM_ERROR("dp_aux_ch not started status 0x%08x\n",
+                         I915_READ(ch_ctl));
+               return -EBUSY;
+       }
+
        /* Must try at least 3 times according to DP spec */
        for (try = 0; try < 5; try++) {
                /* Load the send data into the aux channel data registers */
-               for (i = 0; i < send_bytes; i += 4) {
-                       uint32_t    d = pack_aux(send + i, send_bytes - i);
-       
-                       I915_WRITE(ch_data + i, d);
-               }
-       
-               ctl = (DP_AUX_CH_CTL_SEND_BUSY |
-                      DP_AUX_CH_CTL_TIME_OUT_400us |
-                      (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
-                      (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
-                      (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
-                      DP_AUX_CH_CTL_DONE |
-                      DP_AUX_CH_CTL_TIME_OUT_ERROR |
-                      DP_AUX_CH_CTL_RECEIVE_ERROR);
+               for (i = 0; i < send_bytes; i += 4)
+                       I915_WRITE(ch_data + i,
+                                  pack_aux(send + i, send_bytes - i));
        
                /* Send the command and wait for it to complete */
-               I915_WRITE(ch_ctl, ctl);
-               (void) I915_READ(ch_ctl);
+               I915_WRITE(ch_ctl,
+                          DP_AUX_CH_CTL_SEND_BUSY |
+                          DP_AUX_CH_CTL_TIME_OUT_400us |
+                          (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+                          (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+                          (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
+                          DP_AUX_CH_CTL_DONE |
+                          DP_AUX_CH_CTL_TIME_OUT_ERROR |
+                          DP_AUX_CH_CTL_RECEIVE_ERROR);
                for (;;) {
-                       udelay(100);
                        status = I915_READ(ch_ctl);
                        if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
                                break;
+                       udelay(100);
                }
        
                /* Clear done status and any errors */
-               I915_WRITE(ch_ctl, (status |
-                               DP_AUX_CH_CTL_DONE |
-                               DP_AUX_CH_CTL_TIME_OUT_ERROR |
-                               DP_AUX_CH_CTL_RECEIVE_ERROR));
-               (void) I915_READ(ch_ctl);
-               if ((status & DP_AUX_CH_CTL_TIME_OUT_ERROR) == 0)
+               I915_WRITE(ch_ctl,
+                          status |
+                          DP_AUX_CH_CTL_DONE |
+                          DP_AUX_CH_CTL_TIME_OUT_ERROR |
+                          DP_AUX_CH_CTL_RECEIVE_ERROR);
+               if (status & DP_AUX_CH_CTL_DONE)
                        break;
        }
 
@@ -324,15 +325,12 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
        /* Unload any bytes sent back from the other side */
        recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
                      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
-
        if (recv_bytes > recv_size)
                recv_bytes = recv_size;
        
-       for (i = 0; i < recv_bytes; i += 4) {
-               uint32_t    d = I915_READ(ch_data + i);
-
-               unpack_aux(d, recv + i, recv_bytes - i);
-       }
+       for (i = 0; i < recv_bytes; i += 4)
+               unpack_aux(I915_READ(ch_data + i),
+                          recv + i, recv_bytes - i);
 
        return recv_bytes;
 }
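
The reworked AUX loop above refuses to start while the channel is busy, retries a bounded number of times, and treats only an explicit DONE as success. A toy model of that control flow (register bits and the stub "hardware" are invented; the real code also polls SEND_BUSY and packs/unpacks the data registers):

#include <stdio.h>
#include <stdint.h>

#define CH_SEND_BUSY   (1u << 31)
#define CH_DONE        (1u << 30)
#define CH_RCV_ERROR   (1u << 25)

static uint32_t ch_ctl;                 /* simulated AUX control register */
static int transfers_started;

/* Simulate hardware: the first attempt fails, the second completes. */
static void start_transfer(void)
{
        transfers_started++;
        ch_ctl = (transfers_started < 2) ? CH_RCV_ERROR : CH_DONE;
}

static int do_aux_transfer(void)
{
        /* A transfer should never already be in flight when we get here. */
        if (ch_ctl & CH_SEND_BUSY)
                return -1;

        /* DP requires at least 3 attempts; try a few more for robustness. */
        for (int try = 0; try < 5; try++) {
                start_transfer();
                /* (polling for !SEND_BUSY omitted: the stub completes at once) */

                uint32_t status = ch_ctl;
                ch_ctl = 0;              /* clear done/error status */

                if (status & CH_DONE)    /* succeed only on an explicit DONE */
                        return try + 1;
        }
        return -1;
}

int main(void)
{
        printf("attempts used = %d\n", do_aux_transfer());
        return 0;
}
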
index 0e92aa07b38252a4ac3457e75dc2b5113d8d9ded..ad312ca6b3e570125732168b3c2f670467264beb 100644
@@ -176,6 +176,16 @@ struct intel_crtc {
 #define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc)
 #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
 
+struct intel_unpin_work {
+       struct work_struct work;
+       struct drm_device *dev;
+       struct drm_gem_object *old_fb_obj;
+       struct drm_gem_object *pending_flip_obj;
+       struct drm_pending_vblank_event *event;
+       int pending;
+       bool enable_stall_check;
+};
+
 struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
                                     const char *name);
 void intel_i2c_destroy(struct i2c_adapter *adapter);
index 4f00390d7c616b1c87ffe145082e1913a04935d0..1d306a458be6463c8e1f6636d69ebf515d845889 100644
@@ -25,6 +25,8 @@
  *
  * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
  */
+
+#include <linux/seq_file.h>
 #include "drmP.h"
 #include "drm.h"
 #include "i915_drm.h"
index 51e9c9e718c4c16453659ef873f7d30b88f18f9e..cb3508f78bc350735e16962f8283e598c53deca6 100644
@@ -220,9 +220,13 @@ static int init_render_ring(struct drm_device *dev,
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret = init_ring_common(dev, ring);
+       int mode;
+
        if (IS_I9XX(dev) && !IS_GEN3(dev)) {
-               I915_WRITE(MI_MODE,
-                               (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH);
+               mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
+               if (IS_GEN6(dev))
+                       mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
+               I915_WRITE(MI_MODE, mode);
        }
        return ret;
 }
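
The MI_MODE write above uses the "bit | bit << 16" idiom, which suggests a masked register: the high half of the written word selects which low-half bits actually change. A small software model of that convention (an illustration of the idiom, not hardware documentation):

#include <stdio.h>
#include <stdint.h>

/* Masked register model: the high 16 bits select which of the low 16
 * bits the write touches; unselected bits keep their old value.
 */
static uint16_t mi_mode;                 /* low 16 bits of the register */

static void masked_write(uint32_t val)
{
        uint16_t mask = val >> 16;       /* which bits to update */
        uint16_t bits = val & 0xffff;    /* their new values */

        mi_mode = (mi_mode & ~mask) | (bits & mask);
}

#define VS_TIMER_DISPATCH  (1 << 6)
#define MI_FLUSH_ENABLE    (1 << 11)

int main(void)
{
        /* "<<16 | bit" sets the bit; "<<16" alone would clear it. */
        masked_write(VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH);
        masked_write(MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE);
        printf("MI_MODE = 0x%04x\n", (unsigned)mi_mode);
        return 0;
}
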
index 093e914e8a41a3d58b645dcf8475cb24312b3e1e..e3b7a7ee39cb97b390048c1a5ce9187b94b6e51d 100644
@@ -1061,8 +1061,9 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
                if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode))
                        return false;
 
-               if (!intel_sdvo_set_input_timings_for_mode(intel_sdvo, mode, adjusted_mode))
-                       return false;
+               (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
+                                                            mode,
+                                                            adjusted_mode);
        } else if (intel_sdvo->is_lvds) {
                drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode, 0);
 
@@ -1070,8 +1071,9 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
                                                            intel_sdvo->sdvo_lvds_fixed_mode))
                        return false;
 
-               if (!intel_sdvo_set_input_timings_for_mode(intel_sdvo, mode, adjusted_mode))
-                       return false;
+               (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
+                                                            mode,
+                                                            adjusted_mode);
        }
 
        /* Make the CRTC code factor in the SDVO pixel multiplier.  The
@@ -1108,10 +1110,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
        in_out.in0 = intel_sdvo->attached_output;
        in_out.in1 = 0;
 
-       if (!intel_sdvo_set_value(intel_sdvo,
-                                 SDVO_CMD_SET_IN_OUT_MAP,
-                                 &in_out, sizeof(in_out)))
-               return;
+       intel_sdvo_set_value(intel_sdvo,
+                            SDVO_CMD_SET_IN_OUT_MAP,
+                            &in_out, sizeof(in_out));
 
        if (intel_sdvo->is_hdmi) {
                if (!intel_sdvo_set_avi_infoframe(intel_sdvo, mode))
@@ -1122,11 +1123,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
 
        /* We have tried to get input timing in mode_fixup, and filled into
           adjusted_mode */
-       if (intel_sdvo->is_tv || intel_sdvo->is_lvds) {
-               intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
+       intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
+       if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
                input_dtd.part2.sdvo_flags = intel_sdvo->sdvo_flags;
-       } else
-               intel_sdvo_get_dtd_from_mode(&input_dtd, mode);
 
        /* If it's a TV, we already set the output timing in mode_fixup.
         * Otherwise, the output timing is equal to the input timing.
@@ -1137,8 +1136,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
                                                  intel_sdvo->attached_output))
                        return;
 
-               if (!intel_sdvo_set_output_timing(intel_sdvo, &input_dtd))
-                       return;
+               (void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd);
        }
 
        /* Set the input timing to the screen. Assume always input 0. */
@@ -1165,8 +1163,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
                intel_sdvo_set_input_timing(encoder, &input_dtd);
        }
 #else
-       if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
-               return;
+       (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd);
 #endif
 
        sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode);
@@ -1932,6 +1929,41 @@ static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
        .destroy = intel_sdvo_enc_destroy,
 };
 
+static void
+intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo)
+{
+       uint16_t mask = 0;
+       unsigned int num_bits;
+
+       /* Make a mask of outputs less than or equal to our own priority in the
+        * list.
+        */
+       switch (sdvo->controlled_output) {
+       case SDVO_OUTPUT_LVDS1:
+               mask |= SDVO_OUTPUT_LVDS1;
+       case SDVO_OUTPUT_LVDS0:
+               mask |= SDVO_OUTPUT_LVDS0;
+       case SDVO_OUTPUT_TMDS1:
+               mask |= SDVO_OUTPUT_TMDS1;
+       case SDVO_OUTPUT_TMDS0:
+               mask |= SDVO_OUTPUT_TMDS0;
+       case SDVO_OUTPUT_RGB1:
+               mask |= SDVO_OUTPUT_RGB1;
+       case SDVO_OUTPUT_RGB0:
+               mask |= SDVO_OUTPUT_RGB0;
+               break;
+       }
+
+       /* Count bits to find what number we are in the priority list. */
+       mask &= sdvo->caps.output_flags;
+       num_bits = hweight16(mask);
+       /* If more than 3 outputs, default to DDC bus 3 for now. */
+       if (num_bits > 3)
+               num_bits = 3;
+
+       /* Corresponds to SDVO_CONTROL_BUS_DDCx */
+       sdvo->ddc_bus = 1 << num_bits;
+}
 
 /**
  * Choose the appropriate DDC bus for control bus switch command for this
@@ -1951,7 +1983,10 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
        else
                mapping = &(dev_priv->sdvo_mappings[1]);
 
-       sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
+       if (mapping->initialized)
+               sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
+       else
+               intel_sdvo_guess_ddc_bus(sdvo);
 }
 
 static bool
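
intel_sdvo_guess_ddc_bus() above builds a mask of outputs at or below the current one by falling through a switch, intersects it with the outputs the device reports, and uses the bit count as the position in the priority list. The same technique in a standalone sketch (output bits and the priority order are invented; __builtin_popcount stands in for hweight16()):

#include <stdio.h>
#include <stdint.h>

/* Output types in priority order, one bit each (values are made up). */
#define OUT_TMDS0  (1 << 0)
#define OUT_TMDS1  (1 << 1)
#define OUT_RGB0   (1 << 2)
#define OUT_RGB1   (1 << 3)

/* Accumulate every output with priority <= ours by falling through the
 * switch, AND with the outputs this device actually has, and count the
 * surviving bits to find our position in the priority list.
 */
static unsigned guess_ddc_bus(uint16_t controlled, uint16_t present)
{
        uint16_t mask = 0;

        switch (controlled) {
        case OUT_RGB1:
                mask |= OUT_RGB1;   /* fall through */
        case OUT_RGB0:
                mask |= OUT_RGB0;   /* fall through */
        case OUT_TMDS1:
                mask |= OUT_TMDS1;  /* fall through */
        case OUT_TMDS0:
                mask |= OUT_TMDS0;
                break;
        }

        unsigned pos = __builtin_popcount(mask & present);
        if (pos > 3)                /* cap at bus 3, as the patch does */
                pos = 3;
        return 1u << pos;           /* SDVO_CONTROL_BUS_DDCx-style encoding */
}

int main(void)
{
        /* Device exposes TMDS0 and RGB0, we drive RGB0: two bits survive
         * the mask, so the guess is bus 1 << 2. */
        printf("ddc bus = %u\n", guess_ddc_bus(OUT_RGB0, OUT_TMDS0 | OUT_RGB0));
        return 0;
}
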
index d2029efee982c3baff92538556ddae0b7a44ea9b..c671f60ce80bac917a61c1c60cdb02e692afcb85 100644
@@ -1231,7 +1231,6 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
        struct drm_encoder *encoder = &intel_tv->base.enc;
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
        unsigned long irqflags;
        u32 tv_ctl, save_tv_ctl;
        u32 tv_dac, save_tv_dac;
@@ -1268,11 +1267,15 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
                   DAC_C_0_7_V);
        I915_WRITE(TV_CTL, tv_ctl);
        I915_WRITE(TV_DAC, tv_dac);
-       intel_wait_for_vblank(dev, intel_crtc->pipe);
+       POSTING_READ(TV_DAC);
+       msleep(20);
+
        tv_dac = I915_READ(TV_DAC);
        I915_WRITE(TV_DAC, save_tv_dac);
        I915_WRITE(TV_CTL, save_tv_ctl);
-       intel_wait_for_vblank(dev, intel_crtc->pipe);
+       POSTING_READ(TV_CTL);
+       msleep(20);
+
        /*
         *  A B C
         *  0 1 1 Composite
index fff82045c427eec5372c6ca3747f64bd16dcabd0..9ce2827f8c00d53a8b7a793f785024c846084d84 100644
@@ -1085,19 +1085,19 @@ file_priv)
 }
 
 struct drm_ioctl_desc mga_ioctls[] = {
-       DRM_IOCTL_DEF(DRM_MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_MGA_FLUSH, mga_dma_flush, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_MGA_RESET, mga_dma_reset, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_MGA_SWAP, mga_dma_swap, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_MGA_CLEAR, mga_dma_clear, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_MGA_VERTEX, mga_dma_vertex, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_MGA_INDICES, mga_dma_indices, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_MGA_ILOAD, mga_dma_iload, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_MGA_BLIT, mga_dma_blit, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_MGA_GETPARAM, mga_getparam, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_MGA_SET_FENCE, mga_set_fence, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(MGA_FLUSH, mga_dma_flush, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(MGA_RESET, mga_dma_reset, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(MGA_SWAP, mga_dma_swap, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(MGA_CLEAR, mga_dma_clear, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(MGA_VERTEX, mga_dma_vertex, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(MGA_INDICES, mga_dma_indices, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(MGA_ILOAD, mga_dma_iload, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(MGA_BLIT, mga_dma_blit, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(MGA_GETPARAM, mga_getparam, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(MGA_SET_FENCE, mga_set_fence, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 };
 
 int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls);
index 0b69a9628c95dee9585584672a1ec9f820aa600e..974b0f8ae0483cc462b1be9a81795413e7a7a0c1 100644
@@ -2166,7 +2166,7 @@ peek_fb(struct drm_device *dev, struct io_mapping *fb,
        uint32_t val = 0;
 
        if (off < pci_resource_len(dev->pdev, 1)) {
-               uint32_t __iomem *p =
+               uint8_t __iomem *p =
                        io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0);
 
                val = ioread32(p + (off & ~PAGE_MASK));
@@ -2182,7 +2182,7 @@ poke_fb(struct drm_device *dev, struct io_mapping *fb,
        uint32_t off, uint32_t val)
 {
        if (off < pci_resource_len(dev->pdev, 1)) {
-               uint32_t __iomem *p =
+               uint8_t __iomem *p =
                        io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0);
 
                iowrite32(val, p + (off & ~PAGE_MASK));
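
The peek_fb()/poke_fb() fix above matters because the offset being added is in bytes: offsetting a uint32_t pointer scales by four and lands on the wrong address, while a byte pointer does not. A short demonstration of the difference:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t buf[8] = { 0 };
        uint32_t off = 4;                        /* byte offset into the page */

        uint8_t  *byte_base = (uint8_t *)buf;
        uint32_t *word_base = buf;

        /* byte pointer: advances by off bytes (4) */
        printf("byte pointer lands %td bytes in\n",
               (uint8_t *)(byte_base + off) - (uint8_t *)buf);
        /* word pointer: advances by off * sizeof(uint32_t) bytes (16) */
        printf("word pointer lands %td bytes in\n",
               (uint8_t *)(word_base + off) - (uint8_t *)buf);
        return 0;
}
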
@@ -3869,27 +3869,10 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entr
        }
 #ifdef __powerpc__
        /* Powerbook specific quirks */
-       if ((dev->pci_device & 0xffff) == 0x0179 ||
-           (dev->pci_device & 0xffff) == 0x0189 ||
-           (dev->pci_device & 0xffff) == 0x0329) {
-               if (script == LVDS_RESET) {
-                       nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
-
-               } else if (script == LVDS_PANEL_ON) {
-                       bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
-                                 bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
-                                 | (1 << 31));
-                       bios_wr32(bios, NV_PCRTC_GPIO_EXT,
-                                 bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1);
-
-               } else if (script == LVDS_PANEL_OFF) {
-                       bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
-                                 bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
-                                 & ~(1 << 31));
-                       bios_wr32(bios, NV_PCRTC_GPIO_EXT,
-                                 bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3);
-               }
-       }
+       if (script == LVDS_RESET &&
+           (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 ||
+            dev->pci_device == 0x0329))
+               nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
 #endif
 
        return 0;
@@ -4381,11 +4364,8 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
         *
         * For the moment, a quirk will do :)
         */
-       if ((dev->pdev->device == 0x01d7) &&
-           (dev->pdev->subsystem_vendor == 0x1028) &&
-           (dev->pdev->subsystem_device == 0x01c2)) {
+       if (nv_match_device(dev, 0x01d7, 0x1028, 0x01c2))
                bios->fp.duallink_transition_clk = 80000;
-       }
 
        /* set dual_link flag for EDID case */
        if (pxclk && (chip_version < 0x25 || chip_version > 0x28))
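
nv_match_device() is not defined in this excerpt, but the open-coded test it replaces shows what it must compare: the PCI device ID and the subsystem vendor/device IDs. A hypothetical sketch of such a helper applied to the quirk above:

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

/* Stand-in for the IDs a helper like nv_match_device() needs. */
struct pci_ids {
        uint16_t device;
        uint16_t subsystem_vendor;
        uint16_t subsystem_device;
};

static bool match_device(const struct pci_ids *ids, uint16_t device,
                         uint16_t sub_vendor, uint16_t sub_device)
{
        return ids->device == device &&
               ids->subsystem_vendor == sub_vendor &&
               ids->subsystem_device == sub_device;
}

int main(void)
{
        struct pci_ids quirk_ids = { 0x01d7, 0x1028, 0x01c2 };

        /* The dual-link transition-clock quirk from the hunk above. */
        if (match_device(&quirk_ids, 0x01d7, 0x1028, 0x01c2))
                printf("apply quirk: duallink_transition_clk = 80000\n");
        return 0;
}
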
@@ -4587,7 +4567,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
                        return 1;
                }
 
-               NV_TRACE(dev, "0x%04X: parsing output script 0\n", script);
+               NV_DEBUG_KMS(dev, "0x%04X: parsing output script 0\n", script);
                nouveau_bios_run_init_table(dev, script, dcbent);
        } else
        if (pxclk == -1) {
@@ -4597,7 +4577,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
                        return 1;
                }
 
-               NV_TRACE(dev, "0x%04X: parsing output script 1\n", script);
+               NV_DEBUG_KMS(dev, "0x%04X: parsing output script 1\n", script);
                nouveau_bios_run_init_table(dev, script, dcbent);
        } else
        if (pxclk == -2) {
@@ -4610,7 +4590,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
                        return 1;
                }
 
-               NV_TRACE(dev, "0x%04X: parsing output script 2\n", script);
+               NV_DEBUG_KMS(dev, "0x%04X: parsing output script 2\n", script);
                nouveau_bios_run_init_table(dev, script, dcbent);
        } else
        if (pxclk > 0) {
@@ -4622,7 +4602,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
                        return 1;
                }
 
-               NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script);
+               NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 0\n", script);
                nouveau_bios_run_init_table(dev, script, dcbent);
        } else
        if (pxclk < 0) {
@@ -4634,7 +4614,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
                        return 1;
                }
 
-               NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script);
+               NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 1\n", script);
                nouveau_bios_run_init_table(dev, script, dcbent);
        }
 
@@ -5357,19 +5337,17 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
        }
 
        tmdstableptr = ROM16(bios->data[bitentry->offset]);
-
-       if (tmdstableptr == 0x0) {
+       if (!tmdstableptr) {
                NV_ERROR(dev, "Pointer to TMDS table invalid\n");
                return -EINVAL;
        }
 
+       NV_INFO(dev, "TMDS table version %d.%d\n",
+               bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf);
+
        /* nv50+ has v2.0, but we don't parse it atm */
-       if (bios->data[tmdstableptr] != 0x11) {
-               NV_WARN(dev,
-                       "TMDS table revision %d.%d not currently supported\n",
-                       bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf);
+       if (bios->data[tmdstableptr] != 0x11)
                return -ENOSYS;
-       }
 
        /*
         * These two scripts are odd: they don't seem to get run even when
@@ -5809,6 +5787,20 @@ parse_dcb_gpio_table(struct nvbios *bios)
                        gpio->line = tvdac_gpio[1] >> 4;
                        gpio->invert = tvdac_gpio[0] & 2;
                }
+       } else {
+               /*
+                * No systematic way to store GPIO info on pre-v2.2
+                * DCBs, so try to match the PCI device IDs.
+                */
+
+               /* Apple iMac G4 NV18 */
+               if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
+                       struct dcb_gpio_entry *gpio = new_gpio_entry(bios);
+
+                       gpio->tag = DCB_GPIO_TVDAC0;
+                       gpio->line = 4;
+               }
+
        }
 
        if (!gpio_table_ptr)
@@ -5884,9 +5876,7 @@ apply_dcb_connector_quirks(struct nvbios *bios, int idx)
        struct drm_device *dev = bios->dev;
 
        /* Gigabyte NX85T */
-       if ((dev->pdev->device == 0x0421) &&
-           (dev->pdev->subsystem_vendor == 0x1458) &&
-           (dev->pdev->subsystem_device == 0x344c)) {
+       if (nv_match_device(dev, 0x0421, 0x1458, 0x344c)) {
                if (cte->type == DCB_CONNECTOR_HDMI_1)
                        cte->type = DCB_CONNECTOR_DVI_I;
        }
@@ -6139,7 +6129,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
                        entry->tmdsconf.slave_addr = (conf & 0x00000070) >> 4;
 
                break;
-       case 0xe:
+       case OUTPUT_EOL:
                /* weird g80 mobile type that "nv" treats as a terminator */
                dcb->entries--;
                return false;
@@ -6176,22 +6166,14 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
                entry->type = OUTPUT_TV;
                break;
        case 2:
-       case 3:
-               entry->type = OUTPUT_LVDS;
-               break;
        case 4:
-               switch ((conn & 0x000000f0) >> 4) {
-               case 0:
-                       entry->type = OUTPUT_TMDS;
-                       break;
-               case 1:
+               if (conn & 0x10)
                        entry->type = OUTPUT_LVDS;
-                       break;
-               default:
-                       NV_ERROR(dev, "Unknown DCB subtype 4/%d\n",
-                                (conn & 0x000000f0) >> 4);
-                       return false;
-               }
+               else
+                       entry->type = OUTPUT_TMDS;
+               break;
+       case 3:
+               entry->type = OUTPUT_LVDS;
                break;
        default:
                NV_ERROR(dev, "Unknown DCB type %d\n", conn & 0x0000000f);
@@ -6307,9 +6289,7 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
         * nasty problems until this is sorted (assuming it's not a
         * VBIOS bug).
         */
-       if ((dev->pdev->device == 0x040d) &&
-           (dev->pdev->subsystem_vendor == 0x1028) &&
-           (dev->pdev->subsystem_device == 0x019b)) {
+       if (nv_match_device(dev, 0x040d, 0x1028, 0x019b)) {
                if (*conn == 0x02026312 && *conf == 0x00000020)
                        return false;
        }
index fd14dfd3d780f6b45a280d1cf4c4aed6eb41ed20..c1de2f3fcb0ea7e78ec193d88c74683d5090eaba 100644 (file)
@@ -95,6 +95,7 @@ enum dcb_type {
        OUTPUT_TMDS = 2,
        OUTPUT_LVDS = 3,
        OUTPUT_DP = 6,
+       OUTPUT_EOL = 14, /* DCB 4.0+, appears to be end-of-list */
        OUTPUT_ANY = -1
 };
 
index 84f85183d041f38d9d848137abf24b236a32626f..f6f44779d82fb801ac738cc9e5f83e4179fd6de3 100644 (file)
 #include <linux/log2.h>
 #include <linux/slab.h>
 
+int
+nouveau_bo_sync_gpu(struct nouveau_bo *nvbo, struct nouveau_channel *chan)
+{
+       struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;
+       int ret;
+
+       if (!prev_fence || nouveau_fence_channel(prev_fence) == chan)
+               return 0;
+
+       spin_lock(&nvbo->bo.lock);
+       ret = ttm_bo_wait(&nvbo->bo, false, false, false);
+       spin_unlock(&nvbo->bo.lock);
+       return ret;
+}
+
 static void
 nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 {
index 90fdcda332be360ab07cb862c9a6ad533724a55c..0480f064f2c14fd8c4bf41672fa039f9f2bc9029 100644 (file)
@@ -426,18 +426,18 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
  ***********************************/
 
 struct drm_ioctl_desc nouveau_ioctls[] = {
-       DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
 };
 
 int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
index b1b22baf14284a6d2105a320f7602f3e81b00529..a1473fff06ac2d61bd3f629dcc9527be5975f165 100644 (file)
@@ -104,7 +104,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
        int i;
 
        for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-               struct nouveau_i2c_chan *i2c;
+               struct nouveau_i2c_chan *i2c = NULL;
                struct nouveau_encoder *nv_encoder;
                struct drm_mode_object *obj;
                int id;
@@ -117,7 +117,9 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
                if (!obj)
                        continue;
                nv_encoder = nouveau_encoder(obj_to_encoder(obj));
-               i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
+
+               if (nv_encoder->dcb->i2c_index < 0xf)
+                       i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
 
                if (i2c && nouveau_probe_i2c_addr(i2c, 0x50)) {
                        *pnv_encoder = nv_encoder;
index e424bf74d706307c237c7c3976498dafea0fd29c..b1be617373b63891dc05b09f2dc75807cd8b8472 100644 (file)
@@ -1165,6 +1165,7 @@ extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index);
 extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val);
 extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index);
 extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val);
+extern int nouveau_bo_sync_gpu(struct nouveau_bo *, struct nouveau_channel *);
 
 /* nouveau_fence.c */
 struct nouveau_fence;
@@ -1388,6 +1389,15 @@ nv_two_reg_pll(struct drm_device *dev)
        return false;
 }
 
+static inline bool
+nv_match_device(struct drm_device *dev, unsigned device,
+               unsigned sub_vendor, unsigned sub_device)
+{
+       return dev->pdev->device == device &&
+               dev->pdev->subsystem_vendor == sub_vendor &&
+               dev->pdev->subsystem_device == sub_device;
+}
+
 #define NV_SW                                                        0x0000506e
 #define NV_SW_DMA_SEMAPHORE                                          0x00000060
 #define NV_SW_SEMAPHORE_OFFSET                                       0x00000064
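
Several quirk sites in this patch collapse the open-coded three-field PCI ID comparison into the nv_match_device() inline added above. A standalone sketch of the same check, using a stand-in struct instead of struct pci_dev (struct and function names here are hypothetical):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the PCI identity fields the helper compares. */
struct fake_pdev {
    unsigned device;
    unsigned subsystem_vendor;
    unsigned subsystem_device;
};

static bool match_device(const struct fake_pdev *pdev, unsigned device,
                         unsigned sub_vendor, unsigned sub_device)
{
    return pdev->device == device &&
           pdev->subsystem_vendor == sub_vendor &&
           pdev->subsystem_device == sub_device;
}

int main(void)
{
    /* The Gigabyte NX85T IDs from apply_dcb_connector_quirks(). */
    struct fake_pdev pdev = { 0x0421, 0x1458, 0x344c };

    if (match_device(&pdev, 0x0421, 0x1458, 0x344c))
        printf("quirk applies\n");
    return 0;
}
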
index 6b208ffafa8de1df0bdd5dd6ff6aede30f929341..87ac21ec23d290db82e90c6fe6257e4cffdb5955 100644 (file)
@@ -64,16 +64,17 @@ nouveau_fence_update(struct nouveau_channel *chan)
        struct nouveau_fence *fence;
        uint32_t sequence;
 
+       spin_lock(&chan->fence.lock);
+
        if (USE_REFCNT)
                sequence = nvchan_rd32(chan, 0x48);
        else
                sequence = atomic_read(&chan->fence.last_sequence_irq);
 
        if (chan->fence.sequence_ack == sequence)
-               return;
+               goto out;
        chan->fence.sequence_ack = sequence;
 
-       spin_lock(&chan->fence.lock);
        list_for_each_safe(entry, tmp, &chan->fence.pending) {
                fence = list_entry(entry, struct nouveau_fence, entry);
 
@@ -85,6 +86,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
                if (sequence == chan->fence.sequence_ack)
                        break;
        }
+out:
        spin_unlock(&chan->fence.lock);
 }
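
The nouveau_fence_update() hunk takes the fence spinlock before reading the sequence and turns the early return into a goto to the unlock path, so the sequence_ack comparison and the pending-list walk run under one critical section. A userspace sketch of that take-the-lock-early / goto-out shape, with a pthread mutex in place of the spinlock (names hypothetical):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned sequence_ack;
static unsigned hw_sequence = 5;   /* stand-in for the register read */

static void fence_update(void)
{
    unsigned sequence;

    pthread_mutex_lock(&lock);

    /* Read and compare under the same lock that protects the list walk. */
    sequence = hw_sequence;
    if (sequence_ack == sequence)
        goto out;
    sequence_ack = sequence;

    /* ... walk the pending list here, still under the lock ... */
    printf("acked up to %u\n", sequence_ack);
out:
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    fence_update();
    fence_update();   /* second call takes the early-out path */
    return 0;
}
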
 
index 0f417ac1b696b769a0982f762bfa696a50637de1..ead7b8fc53fcbcd473dbdc7a97d893a3e2e9c454 100644 (file)
@@ -245,7 +245,7 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
                list_del(&nvbo->entry);
                nvbo->reserved_by = NULL;
                ttm_bo_unreserve(&nvbo->bo);
-               drm_gem_object_unreference(nvbo->gem);
+               drm_gem_object_unreference_unlocked(nvbo->gem);
        }
 }
 
@@ -300,7 +300,7 @@ retry:
                        validate_fini(op, NULL);
                        if (ret == -EAGAIN)
                                ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
-                       drm_gem_object_unreference(gem);
+                       drm_gem_object_unreference_unlocked(gem);
                        if (ret) {
                                NV_ERROR(dev, "fail reserve\n");
                                return ret;
@@ -337,7 +337,9 @@ retry:
                                return -EINVAL;
                        }
 
+                       mutex_unlock(&drm_global_mutex);
                        ret = ttm_bo_wait_cpu(&nvbo->bo, false);
+                       mutex_lock(&drm_global_mutex);
                        if (ret) {
                                NV_ERROR(dev, "fail wait_cpu\n");
                                return ret;
@@ -361,16 +363,11 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 
        list_for_each_entry(nvbo, list, entry) {
                struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
-               struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;
 
-               if (prev_fence && nouveau_fence_channel(prev_fence) != chan) {
-                       spin_lock(&nvbo->bo.lock);
-                       ret = ttm_bo_wait(&nvbo->bo, false, false, false);
-                       spin_unlock(&nvbo->bo.lock);
-                       if (unlikely(ret)) {
-                               NV_ERROR(dev, "fail wait other chan\n");
-                               return ret;
-                       }
+               ret = nouveau_bo_sync_gpu(nvbo, chan);
+               if (unlikely(ret)) {
+                       NV_ERROR(dev, "fail pre-validate sync\n");
+                       return ret;
                }
 
                ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
@@ -381,7 +378,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
                        return ret;
                }
 
-               nvbo->channel = chan;
+               nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
                ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
                                      false, false, false);
                nvbo->channel = NULL;
@@ -390,6 +387,12 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
                        return ret;
                }
 
+               ret = nouveau_bo_sync_gpu(nvbo, chan);
+               if (unlikely(ret)) {
+                       NV_ERROR(dev, "fail post-validate sync\n");
+                       return ret;
+               }
+
                if (nvbo->bo.offset == b->presumed.offset &&
                    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
                      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
@@ -613,7 +616,20 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
                return PTR_ERR(bo);
        }
 
-       mutex_lock(&dev->struct_mutex);
+       /* Mark push buffers as being used on PFIFO.  The validation code
+        * will then make sure that if a pushbuf bo moves, the move happens
+        * on the kernel channel, which in turn forces a sync before we
+        * try to submit the push buffer.
+        */
+       for (i = 0; i < req->nr_push; i++) {
+               if (push[i].bo_index >= req->nr_buffers) {
+                       NV_ERROR(dev, "push %d buffer not in list\n", i);
+                       ret = -EINVAL;
+                       goto out;
+               }
+
+               bo[push[i].bo_index].read_domains |= (1 << 31);
+       }
 
        /* Validate buffer list */
        ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
@@ -647,7 +663,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
                                      push[i].length);
                }
        } else
-       if (dev_priv->card_type >= NV_20) {
+       if (dev_priv->chipset >= 0x25) {
                ret = RING_SPACE(chan, req->nr_push * 2);
                if (ret) {
                        NV_ERROR(dev, "cal_space: %d\n", ret);
@@ -713,7 +729,6 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 out:
        validate_fini(&op, fence);
        nouveau_fence_unref((void**)&fence);
-       mutex_unlock(&dev->struct_mutex);
        kfree(bo);
        kfree(push);
 
@@ -722,7 +737,7 @@ out_next:
                req->suffix0 = 0x00000000;
                req->suffix1 = 0x00000000;
        } else
-       if (dev_priv->card_type >= NV_20) {
+       if (dev_priv->chipset >= 0x25) {
                req->suffix0 = 0x00020000;
                req->suffix1 = 0x00000000;
        } else {
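
The pushbuf ioctl now tags each push buffer by setting bit 31 of its read_domains entry, and validate_list() checks that bit to decide whether the buffer may be bound to the submitting channel. A small sketch of the tag-and-test pattern on a plain array (structure and names hypothetical):

#include <stdint.h>
#include <stdio.h>

#define PUSHBUF_TAG (1u << 31)   /* high bit of read_domains, as in the patch */

struct bo_entry {
    uint32_t read_domains;
};

int main(void)
{
    struct bo_entry bo[3] = { { 0x1 }, { 0x2 }, { 0x1 } };
    int push_index = 1;

    /* Mark the buffer referenced by the push list. */
    bo[push_index].read_domains |= PUSHBUF_TAG;

    /* Later, during validation, tagged buffers get special handling
     * (in the patch: nvbo->channel is forced to NULL for them). */
    for (int i = 0; i < 3; i++)
        printf("bo %d: %s\n", i,
               (bo[i].read_domains & PUSHBUF_TAG) ? "tagged" : "normal");
    return 0;
}
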
index 0bd407ca3d429117ac3cdf0e42f1ccf5b5e853d7..84614858728ba0f7ab959b54989cf9a512d89d6a 100644 (file)
@@ -163,7 +163,7 @@ nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index)
        if (entry->chan)
                return -EEXIST;
 
-       if (dev_priv->card_type == NV_C0 && entry->read >= NV50_I2C_PORTS) {
+       if (dev_priv->card_type >= NV_50 && entry->read >= NV50_I2C_PORTS) {
                NV_ERROR(dev, "unknown i2c port %d\n", entry->read);
                return -EINVAL;
        }
index 491767fe4fcfd3b705c3bdb82b4ebeded1bfe9bb..6b9187d7f67de4383502973ef5cdfc9259ec6f1a 100644 (file)
@@ -214,6 +214,7 @@ int
 nouveau_sgdma_init(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct pci_dev *pdev = dev->pdev;
        struct nouveau_gpuobj *gpuobj = NULL;
        uint32_t aper_size, obj_size;
        int i, ret;
@@ -239,10 +240,19 @@ nouveau_sgdma_init(struct drm_device *dev)
 
        dev_priv->gart_info.sg_dummy_page =
                alloc_page(GFP_KERNEL|__GFP_DMA32);
+       if (!dev_priv->gart_info.sg_dummy_page) {
+               nouveau_gpuobj_del(dev, &gpuobj);
+               return -ENOMEM;
+       }
+
        set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags);
        dev_priv->gart_info.sg_dummy_bus =
-               pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
+               pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0,
                             PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+       if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) {
+               nouveau_gpuobj_del(dev, &gpuobj);
+               return -EFAULT;
+       }
 
        if (dev_priv->card_type < NV_50) {
                /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
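
The sgdma hunk adds explicit failure handling after alloc_page() and pci_map_page(), releasing the gpuobj that was created earlier in the function. A generic userspace sketch of that allocate-then-map-with-unwind shape, using malloc and goto-based cleanup (all names hypothetical):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for a mapping step that can fail. */
static int map_page(void *page)
{
    return page ? 0 : -1;
}

static int sgdma_init_sketch(void)
{
    void *gpuobj = malloc(64);        /* earlier allocation to unwind */
    void *dummy_page;

    if (!gpuobj)
        return -1;

    dummy_page = malloc(4096);
    if (!dummy_page)
        goto err_free_gpuobj;         /* mirrors nouveau_gpuobj_del() */

    if (map_page(dummy_page) < 0)
        goto err_free_page;

    printf("init ok\n");
    free(dummy_page);
    free(gpuobj);
    return 0;

err_free_page:
    free(dummy_page);
err_free_gpuobj:
    free(gpuobj);
    return -1;
}

int main(void)
{
    return sgdma_init_sketch() ? 1 : 0;
}
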
index a5dcf7685800c4effb3d1c4938736b4c48c36eb0..0d3206a7046cb008814237250d5fa9fe2502ac2c 100644 (file)
@@ -444,6 +444,7 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
        struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
        struct dcb_entry *dcbe = nv_encoder->dcb;
        int head = nouveau_crtc(encoder->crtc)->index;
+       struct drm_encoder *slave_encoder;
 
        if (dcbe->type == OUTPUT_TMDS)
                run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock);
@@ -462,9 +463,10 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
                NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
 
        /* Init external transmitters */
-       if (get_tmds_slave(encoder))
-               get_slave_funcs(get_tmds_slave(encoder))->mode_set(
-                       encoder, &nv_encoder->mode, &nv_encoder->mode);
+       slave_encoder = get_tmds_slave(encoder);
+       if (slave_encoder)
+               get_slave_funcs(slave_encoder)->mode_set(
+                       slave_encoder, &nv_encoder->mode, &nv_encoder->mode);
 
        helper->dpms(encoder, DRM_MODE_DPMS_ON);
 
@@ -473,6 +475,27 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
                nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
 }
 
+static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
+{
+#ifdef __powerpc__
+       struct drm_device *dev = encoder->dev;
+
+       /* BIOS scripts usually take care of the backlight; thanks,
+        * Apple, for your consistency.
+        */
+       if (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 ||
+           dev->pci_device == 0x0329) {
+               if (mode == DRM_MODE_DPMS_ON) {
+                       nv_mask(dev, NV_PBUS_DEBUG_DUALHEAD_CTL, 0, 1 << 31);
+                       nv_mask(dev, NV_PCRTC_GPIO_EXT, 3, 1);
+               } else {
+                       nv_mask(dev, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 0);
+                       nv_mask(dev, NV_PCRTC_GPIO_EXT, 3, 0);
+               }
+       }
+#endif
+}
+
 static inline bool is_powersaving_dpms(int mode)
 {
        return (mode != DRM_MODE_DPMS_ON);
@@ -520,6 +543,7 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
                                         LVDS_PANEL_OFF, 0);
        }
 
+       nv04_dfp_update_backlight(encoder, mode);
        nv04_dfp_update_fp_control(encoder, mode);
 
        if (mode == DRM_MODE_DPMS_ON)
@@ -543,6 +567,7 @@ static void nv04_tmds_dpms(struct drm_encoder *encoder, int mode)
        NV_INFO(dev, "Setting dpms mode %d on tmds encoder (output %d)\n",
                     mode, nv_encoder->dcb->index);
 
+       nv04_dfp_update_backlight(encoder, mode);
        nv04_dfp_update_fp_control(encoder, mode);
 }
 
index 44fefb0c7083caf5c4e77011675927dc3d60aec6..13cdc05b7c2d0f4e7785cbe428ce4ab86205bd67 100644 (file)
@@ -121,10 +121,14 @@ static bool
 get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask)
 {
        /* Zotac FX5200 */
-       if (dev->pdev->device == 0x0322 &&
-           dev->pdev->subsystem_vendor == 0x19da &&
-           (dev->pdev->subsystem_device == 0x1035 ||
-            dev->pdev->subsystem_device == 0x2035)) {
+       if (nv_match_device(dev, 0x0322, 0x19da, 0x1035) ||
+           nv_match_device(dev, 0x0322, 0x19da, 0x2035)) {
+               *pin_mask = 0xc;
+               return false;
+       }
+
+       /* MSI nForce2 IGP */
+       if (nv_match_device(dev, 0x01f0, 0x1462, 0x5710)) {
                *pin_mask = 0xc;
                return false;
        }
index 37c7b48ab24af8a37c3e8ac29656b989ec5935f5..91ef93cf1f352f89553b7dab4ab9fb4f219326ad 100644 (file)
@@ -139,6 +139,8 @@ nv50_instmem_init(struct drm_device *dev)
        chan->file_priv = (struct drm_file *)-2;
        dev_priv->fifos[0] = dev_priv->fifos[127] = chan;
 
+       INIT_LIST_HEAD(&chan->ramht_refs);
+
        /* Channel's PRAMIN object + heap */
        ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0,
                                                        NULL, &chan->ramin);
@@ -278,7 +280,7 @@ nv50_instmem_init(struct drm_device *dev)
        /*XXX: incorrect, but needed to make hash func "work" */
        dev_priv->ramht_offset = 0x10000;
        dev_priv->ramht_bits   = 9;
-       dev_priv->ramht_size   = (1 << dev_priv->ramht_bits);
+       dev_priv->ramht_size   = (1 << dev_priv->ramht_bits) * 8;
        return 0;
 }
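
The ramht_size fix multiplies the bucket count by 8: each RAMHT slot holds two 32-bit words (handle and context), so the byte size of the table is (1 << ramht_bits) * 8 rather than just the bucket count. A one-line check of the arithmetic:

#include <stdio.h>

int main(void)
{
    unsigned ramht_bits = 9;
    unsigned entries = 1u << ramht_bits;      /* 512 hash buckets */
    unsigned bytes = entries * 8;             /* 8 bytes per entry */

    printf("%u entries -> %u bytes (%#x)\n", entries, bytes, bytes);
    return 0;
}
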
 
index 3ab3cdc42173300490c7e667eced2f39b0d36690..6b451f864783e5c1ed3610c13c86adb5588a5783 100644 (file)
@@ -142,14 +142,16 @@ int
 nvc0_instmem_suspend(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
+       u32 *buf;
        int i;
 
        dev_priv->susres.ramin_copy = vmalloc(65536);
        if (!dev_priv->susres.ramin_copy)
                return -ENOMEM;
+       buf = dev_priv->susres.ramin_copy;
 
-       for (i = 0x700000; i < 0x710000; i += 4)
-               dev_priv->susres.ramin_copy[i/4] = nv_rd32(dev, i);
+       for (i = 0; i < 65536; i += 4)
+               buf[i/4] = nv_rd32(dev, NV04_PRAMIN + i);
        return 0;
 }
 
@@ -157,14 +159,15 @@ void
 nvc0_instmem_resume(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
+       u32 *buf = dev_priv->susres.ramin_copy;
        u64 chan;
        int i;
 
        chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
        nv_wr32(dev, 0x001700, chan >> 16);
 
-       for (i = 0x700000; i < 0x710000; i += 4)
-               nv_wr32(dev, i, dev_priv->susres.ramin_copy[i/4]);
+       for (i = 0; i < 65536; i += 4)
+               nv_wr32(dev, NV04_PRAMIN + i, buf[i/4]);
        vfree(dev_priv->susres.ramin_copy);
        dev_priv->susres.ramin_copy = NULL;
 
@@ -221,7 +224,7 @@ nvc0_instmem_init(struct drm_device *dev)
        /*XXX: incorrect, but needed to make hash func "work" */
        dev_priv->ramht_offset = 0x10000;
        dev_priv->ramht_bits   = 9;
-       dev_priv->ramht_size   = (1 << dev_priv->ramht_bits);
+       dev_priv->ramht_size   = (1 << dev_priv->ramht_bits) * 8;
        return 0;
 }
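
Before this fix the suspend/resume loops indexed the 64 KiB vmalloc copy with i/4 while i started at 0x700000, running far past the end of the buffer. The corrected loops index the copy from 0 and add the PRAMIN base only on the register side. A sketch of the fixed indexing against an ordinary array (the 0x700000 base is taken from the old loop bounds; the accessor name is hypothetical):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PRAMIN_BASE 0x700000u   /* matches the old hard-coded loop bounds */
#define PRAMIN_SIZE 65536u

/* Stand-in for the MMIO read accessor. */
static uint32_t fake_rd32(uint32_t reg) { return reg; }

int main(void)
{
    uint32_t *buf = malloc(PRAMIN_SIZE);
    if (!buf)
        return 1;

    /* Index the copy from 0; apply the PRAMIN base only to the register. */
    for (uint32_t i = 0; i < PRAMIN_SIZE; i += 4)
        buf[i / 4] = fake_rd32(PRAMIN_BASE + i);

    printf("last word copied from %#x\n", PRAMIN_BASE + PRAMIN_SIZE - 4);
    free(buf);
    return 0;
}
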
 
index 077af1f2f9b4faee1f3e840f8e4a52813cb05108..a9e33ce65918c98bed3c039dbc65fa8e02bac15e 100644 (file)
@@ -1639,30 +1639,29 @@ void r128_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
                        r128_do_cleanup_pageflip(dev);
        }
 }
-
 void r128_driver_lastclose(struct drm_device *dev)
 {
        r128_do_cleanup_cce(dev);
 }
 
 struct drm_ioctl_desc r128_ioctls[] = {
-       DRM_IOCTL_DEF(DRM_R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_R128_CCE_IDLE, r128_cce_idle, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_R128_RESET, r128_engine_reset, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_R128_FULLSCREEN, r128_fullscreen, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_R128_SWAP, r128_cce_swap, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_R128_FLIP, r128_cce_flip, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_R128_CLEAR, r128_cce_clear, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_R128_VERTEX, r128_cce_vertex, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_R128_INDICES, r128_cce_indices, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_R128_BLIT, r128_cce_blit, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_R128_DEPTH, r128_cce_depth, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_R128_STIPPLE, r128_cce_stipple, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_R128_GETPARAM, r128_getparam, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(R128_CCE_IDLE, r128_cce_idle, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(R128_RESET, r128_engine_reset, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(R128_FULLSCREEN, r128_fullscreen, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(R128_SWAP, r128_cce_swap, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(R128_FLIP, r128_cce_flip, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(R128_CLEAR, r128_cce_clear, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(R128_VERTEX, r128_cce_vertex, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(R128_INDICES, r128_cce_indices, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(R128_BLIT, r128_cce_blit, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(R128_DEPTH, r128_cce_depth, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(R128_STIPPLE, r128_cce_stipple, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(R128_GETPARAM, r128_getparam, DRM_AUTH),
 };
 
 int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls);
index 12ad512bd3d33ad316655400212626585126c6de..464a81a1990f6f274d46bd0535283bb315680305 100644 (file)
@@ -332,6 +332,11 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
        args.usV_SyncWidth =
                cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start);
 
+       args.ucOverscanRight = radeon_crtc->h_border;
+       args.ucOverscanLeft = radeon_crtc->h_border;
+       args.ucOverscanBottom = radeon_crtc->v_border;
+       args.ucOverscanTop = radeon_crtc->v_border;
+
        if (mode->flags & DRM_MODE_FLAG_NVSYNC)
                misc |= ATOM_VSYNC_POLARITY;
        if (mode->flags & DRM_MODE_FLAG_NHSYNC)
@@ -471,6 +476,8 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
        struct radeon_encoder *radeon_encoder = NULL;
        u32 adjusted_clock = mode->clock;
        int encoder_mode = 0;
+       u32 dp_clock = mode->clock;
+       int bpc = 8;
 
        /* reset the pll flags */
        pll->flags = 0;
@@ -513,6 +520,17 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                if (encoder->crtc == crtc) {
                        radeon_encoder = to_radeon_encoder(encoder);
                        encoder_mode = atombios_get_encoder_mode(encoder);
+                       if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) {
+                               struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+                               if (connector) {
+                                       struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+                                       struct radeon_connector_atom_dig *dig_connector =
+                                               radeon_connector->con_priv;
+
+                                       dp_clock = dig_connector->dp_clock;
+                               }
+                       }
+
                        if (ASIC_IS_AVIVO(rdev)) {
                                /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
                                if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
@@ -521,6 +539,20 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                                        pll->algo = PLL_ALGO_LEGACY;
                                        pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
                                }
+                               /* There is some evidence (often anecdotal) that RV515 LVDS
+                                * (on some boards at least) prefers the legacy algo.  I'm not
+                                * sure whether this should be handled generically or on a
+                                * case-by-case quirk basis.  Both algos should work fine in the
+                                * majority of cases.
+                                */
+                               if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) &&
+                                   (rdev->family == CHIP_RV515)) {
+                                       /* allow the user to override just in case */
+                                       if (radeon_new_pll == 1)
+                                               pll->algo = PLL_ALGO_NEW;
+                                       else
+                                               pll->algo = PLL_ALGO_LEGACY;
+                               }
                        } else {
                                if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
                                        pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
@@ -555,6 +587,14 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                                args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
                                args.v1.ucTransmitterID = radeon_encoder->encoder_id;
                                args.v1.ucEncodeMode = encoder_mode;
+                               if (encoder_mode == ATOM_ENCODER_MODE_DP) {
+                                       /* may want to enable SS on DP eventually */
+                                       /* args.v1.ucConfig |=
+                                          ADJUST_DISPLAY_CONFIG_SS_ENABLE;*/
+                               } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) {
+                                       args.v1.ucConfig |=
+                                               ADJUST_DISPLAY_CONFIG_SS_ENABLE;
+                               }
 
                                atom_execute_table(rdev->mode_info.atom_context,
                                                   index, (uint32_t *)&args);
@@ -568,10 +608,20 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                                if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
                                        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 
-                                       if (encoder_mode == ATOM_ENCODER_MODE_DP)
+                                       if (encoder_mode == ATOM_ENCODER_MODE_DP) {
+                                               /* may want to enable SS on DP/eDP eventually */
+                                               /*args.v3.sInput.ucDispPllConfig |=
+                                                 DISPPLL_CONFIG_SS_ENABLE;*/
                                                args.v3.sInput.ucDispPllConfig |=
                                                        DISPPLL_CONFIG_COHERENT_MODE;
-                                       else {
+                                               /* 16200 or 27000 */
+                                               args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
+                                       } else {
+                                               if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
+                                                       /* deep color support */
+                                                       args.v3.sInput.usPixelClock =
+                                                               cpu_to_le16((mode->clock * bpc / 8) / 10);
+                                               }
                                                if (dig->coherent_mode)
                                                        args.v3.sInput.ucDispPllConfig |=
                                                                DISPPLL_CONFIG_COHERENT_MODE;
@@ -580,13 +630,19 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                                                                DISPPLL_CONFIG_DUAL_LINK;
                                        }
                                } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
-                                       /* may want to enable SS on DP/eDP eventually */
-                                       /*args.v3.sInput.ucDispPllConfig |=
-                                               DISPPLL_CONFIG_SS_ENABLE;*/
-                                       if (encoder_mode == ATOM_ENCODER_MODE_DP)
+                                       if (encoder_mode == ATOM_ENCODER_MODE_DP) {
+                                               /* may want to enable SS on DP/eDP eventually */
+                                               /*args.v3.sInput.ucDispPllConfig |=
+                                                 DISPPLL_CONFIG_SS_ENABLE;*/
                                                args.v3.sInput.ucDispPllConfig |=
                                                        DISPPLL_CONFIG_COHERENT_MODE;
-                                       else {
+                                               /* 16200 or 27000 */
+                                               args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
+                                       } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) {
+                                               /* want to enable SS on LVDS eventually */
+                                               /*args.v3.sInput.ucDispPllConfig |=
+                                                 DISPPLL_CONFIG_SS_ENABLE;*/
+                                       } else {
                                                if (mode->clock > 165000)
                                                        args.v3.sInput.ucDispPllConfig |=
                                                                DISPPLL_CONFIG_DUAL_LINK;
@@ -1019,11 +1075,11 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
 
        if (rdev->family >= CHIP_RV770) {
                if (radeon_crtc->crtc_id) {
-                       WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0);
-                       WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0);
+                       WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
+                       WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
                } else {
-                       WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0);
-                       WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0);
+                       WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
+                       WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
                }
        }
        WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
@@ -1160,8 +1216,18 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
        struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct radeon_device *rdev = dev->dev_private;
+       struct drm_encoder *encoder;
+       bool is_tvcv = false;
 
-       /* TODO color tiling */
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               /* find tv std */
+               if (encoder->crtc == crtc) {
+                       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+                       if (radeon_encoder->active_device &
+                           (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+                               is_tvcv = true;
+               }
+       }
 
        atombios_disable_ss(crtc);
        /* always set DCPLL */
@@ -1170,9 +1236,14 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
        atombios_crtc_set_pll(crtc, adjusted_mode);
        atombios_enable_ss(crtc);
 
-       if (ASIC_IS_AVIVO(rdev))
+       if (ASIC_IS_DCE4(rdev))
                atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
-       else {
+       else if (ASIC_IS_AVIVO(rdev)) {
+               if (is_tvcv)
+                       atombios_crtc_set_timing(crtc, adjusted_mode);
+               else
+                       atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
+       } else {
                atombios_crtc_set_timing(crtc, adjusted_mode);
                if (radeon_crtc->crtc_id == 0)
                        atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
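
The avivo_crtc_set_base() hunk stops forcing the *_SURFACE_ADDRESS_HIGH registers to 0 and writes upper_32_bits(fb_location) instead, so framebuffer addresses above 4 GiB program a correct high word. A minimal sketch of splitting a 64-bit address into the two register values (the helpers are re-implemented here for illustration):

#include <stdint.h>
#include <stdio.h>

/* Same effect as the kernel's upper_32_bits()/lower_32_bits() helpers. */
static uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }
static uint32_t lower_32(uint64_t v) { return (uint32_t)v; }

int main(void)
{
    uint64_t fb_location = 0x100200000ULL;  /* example VRAM address > 4 GiB */

    printf("HIGH reg = %#x, LOW reg = %#x\n",
           upper_32(fb_location), lower_32(fb_location));
    return 0;
}
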
index 36e0d4b545e60b674380746496525fed8b88ad39..4e7778d44b8d36fa4491c97de47aa5f4d400919f 100644 (file)
@@ -610,7 +610,7 @@ void dp_link_train(struct drm_encoder *encoder,
                enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
        else
                enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
-       if (dig_connector->linkb)
+       if (dig->linkb)
                enc_id |= ATOM_DP_CONFIG_LINK_B;
        else
                enc_id |= ATOM_DP_CONFIG_LINK_A;
index 957d5067ad9cc1495984f909d724fbf64c8d8856..b8b7f010b25f8df49e20329932c1735482c03ecf 100644 (file)
@@ -675,6 +675,43 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)
        return 0;
 }
 
+static int evergreen_cp_start(struct radeon_device *rdev)
+{
+       int r;
+       uint32_t cp_me;
+
+       r = radeon_ring_lock(rdev, 7);
+       if (r) {
+               DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+               return r;
+       }
+       radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
+       radeon_ring_write(rdev, 0x1);
+       radeon_ring_write(rdev, 0x0);
+       radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
+       radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+       radeon_ring_write(rdev, 0);
+       radeon_ring_write(rdev, 0);
+       radeon_ring_unlock_commit(rdev);
+
+       cp_me = 0xff;
+       WREG32(CP_ME_CNTL, cp_me);
+
+       r = radeon_ring_lock(rdev, 4);
+       if (r) {
+               DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+               return r;
+       }
+       /* init some VGT regs */
+       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+       radeon_ring_write(rdev, (VGT_VERTEX_REUSE_BLOCK_CNTL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+       radeon_ring_write(rdev, 0xe);
+       radeon_ring_write(rdev, 0x10);
+       radeon_ring_unlock_commit(rdev);
+
+       return 0;
+}
+
 int evergreen_cp_resume(struct radeon_device *rdev)
 {
        u32 tmp;
@@ -719,7 +756,7 @@ int evergreen_cp_resume(struct radeon_device *rdev)
        rdev->cp.rptr = RREG32(CP_RB_RPTR);
        rdev->cp.wptr = RREG32(CP_RB_WPTR);
 
-       r600_cp_start(rdev);
+       evergreen_cp_start(rdev);
        rdev->cp.ready = true;
        r = radeon_ring_test(rdev);
        if (r) {
@@ -2054,11 +2091,6 @@ int evergreen_resume(struct radeon_device *rdev)
         */
        /* post card */
        atom_asic_init(rdev->mode_info.atom_context);
-       /* Initialize clocks */
-       r = radeon_clocks_init(rdev);
-       if (r) {
-               return r;
-       }
 
        r = evergreen_startup(rdev);
        if (r) {
@@ -2164,9 +2196,6 @@ int evergreen_init(struct radeon_device *rdev)
        radeon_surface_init(rdev);
        /* Initialize clocks */
        radeon_get_clock_info(rdev->ddev);
-       r = radeon_clocks_init(rdev);
-       if (r)
-               return r;
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
        if (r)
@@ -2236,7 +2265,6 @@ void evergreen_fini(struct radeon_device *rdev)
        evergreen_pcie_gart_fini(rdev);
        radeon_gem_fini(rdev);
        radeon_fence_driver_fini(rdev);
-       radeon_clocks_fini(rdev);
        radeon_agp_fini(rdev);
        radeon_bo_fini(rdev);
        radeon_atombios_fini(rdev);
index d0ebae9dde25ba400778b210dfc2e6a86179d682..afc18d87fdca7409e4c7462fe3a1b03eeaa6d3ca 100644 (file)
@@ -2119,10 +2119,7 @@ int r600_cp_start(struct radeon_device *rdev)
        }
        radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
        radeon_ring_write(rdev, 0x1);
-       if (rdev->family >= CHIP_CEDAR) {
-               radeon_ring_write(rdev, 0x0);
-               radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
-       } else if (rdev->family >= CHIP_RV770) {
+       if (rdev->family >= CHIP_RV770) {
                radeon_ring_write(rdev, 0x0);
                radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
        } else {
@@ -2489,11 +2486,6 @@ int r600_resume(struct radeon_device *rdev)
         */
        /* post card */
        atom_asic_init(rdev->mode_info.atom_context);
-       /* Initialize clocks */
-       r = radeon_clocks_init(rdev);
-       if (r) {
-               return r;
-       }
 
        r = r600_startup(rdev);
        if (r) {
@@ -2586,9 +2578,6 @@ int r600_init(struct radeon_device *rdev)
        radeon_surface_init(rdev);
        /* Initialize clocks */
        radeon_get_clock_info(rdev->ddev);
-       r = radeon_clocks_init(rdev);
-       if (r)
-               return r;
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
        if (r)
@@ -2663,7 +2652,6 @@ void r600_fini(struct radeon_device *rdev)
        radeon_agp_fini(rdev);
        radeon_gem_fini(rdev);
        radeon_fence_driver_fini(rdev);
-       radeon_clocks_fini(rdev);
        radeon_bo_fini(rdev);
        radeon_atombios_fini(rdev);
        kfree(rdev->bios);
@@ -3541,7 +3529,7 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
         * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
         */
        if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) {
-               void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
+               void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
                u32 tmp;
 
                WREG32(HDP_DEBUG1, 0);
index 3dfcfa3ca4250bf85e005dc5e6fd05f4c24fbea1..a168d644bf9e96724b5e717f2a8777bb8354f5e5 100644 (file)
@@ -1013,6 +1013,11 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
 int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp);
 
+/* VRAM scratch page for HDP bug */
+struct r700_vram_scratch {
+       struct radeon_bo                *robj;
+       volatile uint32_t               *ptr;
+};
 
 /*
  * Core structure, functions and helpers.
@@ -1079,6 +1084,7 @@ struct radeon_device {
        const struct firmware *pfp_fw;  /* r6/700 PFP firmware */
        const struct firmware *rlc_fw;  /* r6/700 RLC firmware */
        struct r600_blit r600_blit;
+       struct r700_vram_scratch vram_scratch;
        int msi_enabled; /* msi enabled */
        struct r600_ih ih; /* r6/700 interrupt ring */
        struct workqueue_struct *wq;
@@ -1333,8 +1339,6 @@ extern bool radeon_card_posted(struct radeon_device *rdev);
 extern void radeon_update_bandwidth_info(struct radeon_device *rdev);
 extern void radeon_update_display_priority(struct radeon_device *rdev);
 extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
-extern int radeon_clocks_init(struct radeon_device *rdev);
-extern void radeon_clocks_fini(struct radeon_device *rdev);
 extern void radeon_scratch_init(struct radeon_device *rdev);
 extern void radeon_surface_init(struct radeon_device *rdev);
 extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
index f40dfb77f9b12cfa75c90c83ec608714f15ae423..bd2f33e5c91a695c2110af95397a6fa33cad0c10 100644 (file)
@@ -156,7 +156,13 @@ int radeon_agp_init(struct radeon_device *rdev)
        }
 
        mode.mode = info.mode;
-       agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode;
+       /* Chips with the AGP-to-PCIE bridge don't have the AGP_STATUS register.
+        * Just use whatever mode the host sets up.
+        */
+       if (rdev->family <= CHIP_RV350)
+               agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode;
+       else
+               agp_status = mode.mode;
        is_v3 = !!(agp_status & RADEON_AGPv3_MODE);
 
        if (is_v3) {
index 646f96f97c77d8538919325be4bde664a7f3df8b..25e1dd1977917ad4151cdaf599add440ef14d625 100644 (file)
@@ -733,6 +733,7 @@ static struct radeon_asic evergreen_asic = {
        .set_engine_clock = &radeon_atom_set_engine_clock,
        .get_memory_clock = &radeon_atom_get_memory_clock,
        .set_memory_clock = &radeon_atom_set_memory_clock,
+       .get_pcie_lanes = NULL,
        .set_pcie_lanes = NULL,
        .set_clock_gating = NULL,
        .set_surface_reg = r600_set_surface_reg,
@@ -857,21 +858,3 @@ int radeon_asic_init(struct radeon_device *rdev)
        return 0;
 }
 
-/*
- * Wrapper around modesetting bits. Move to radeon_clocks.c?
- */
-int radeon_clocks_init(struct radeon_device *rdev)
-{
-       int r;
-
-       r = radeon_static_clocks_init(rdev->ddev);
-       if (r) {
-               return r;
-       }
-       DRM_INFO("Clocks initialized !\n");
-       return 0;
-}
-
-void radeon_clocks_fini(struct radeon_device *rdev)
-{
-}
index 6d30868744eeed8e1890a13532dd77a36161fa4b..ebae14c4b768b4413990e84c1055782a72590009 100644 (file)
 
 /* from radeon_encoder.c */
 extern uint32_t
-radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device,
-                     uint8_t dac);
+radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
+                       uint8_t dac);
 extern void radeon_link_encoder_connector(struct drm_device *dev);
 extern void
-radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id,
+radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum,
                        uint32_t supported_device);
 
 /* from radeon_connector.c */
@@ -46,14 +46,14 @@ radeon_add_atom_connector(struct drm_device *dev,
                          uint32_t supported_device,
                          int connector_type,
                          struct radeon_i2c_bus_rec *i2c_bus,
-                         bool linkb, uint32_t igp_lane_info,
+                         uint32_t igp_lane_info,
                          uint16_t connector_object_id,
                          struct radeon_hpd *hpd,
                          struct radeon_router *router);
 
 /* from radeon_legacy_encoder.c */
 extern void
-radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id,
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
                          uint32_t supported_device);
 
 union atom_supported_devices {
@@ -85,6 +85,19 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
                for (i = 0; i < num_indices; i++) {
                        gpio = &i2c_info->asGPIO_Info[i];
 
+                       /* some evergreen boards have bad data for this entry */
+                       if (ASIC_IS_DCE4(rdev)) {
+                               if ((i == 7) &&
+                                   (gpio->usClkMaskRegisterIndex == 0x1936) &&
+                                   (gpio->sucI2cId.ucAccess == 0)) {
+                                       gpio->sucI2cId.ucAccess = 0x97;
+                                       gpio->ucDataMaskShift = 8;
+                                       gpio->ucDataEnShift = 8;
+                                       gpio->ucDataY_Shift = 8;
+                                       gpio->ucDataA_Shift = 8;
+                               }
+                       }
+
                        if (gpio->sucI2cId.ucAccess == id) {
                                i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
                                i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
@@ -147,6 +160,20 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
                for (i = 0; i < num_indices; i++) {
                        gpio = &i2c_info->asGPIO_Info[i];
                        i2c.valid = false;
+
+                       /* some evergreen boards have bad data for this entry */
+                       if (ASIC_IS_DCE4(rdev)) {
+                               if ((i == 7) &&
+                                   (gpio->usClkMaskRegisterIndex == 0x1936) &&
+                                   (gpio->sucI2cId.ucAccess == 0)) {
+                                       gpio->sucI2cId.ucAccess = 0x97;
+                                       gpio->ucDataMaskShift = 8;
+                                       gpio->ucDataEnShift = 8;
+                                       gpio->ucDataY_Shift = 8;
+                                       gpio->ucDataA_Shift = 8;
+                               }
+                       }
+
                        i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
                        i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
                        i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
@@ -226,6 +253,8 @@ static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device
        struct radeon_hpd hpd;
        u32 reg;
 
+       memset(&hpd, 0, sizeof(struct radeon_hpd));
+
        if (ASIC_IS_DCE4(rdev))
                reg = EVERGREEN_DC_GPIO_HPD_A;
        else
@@ -477,7 +506,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
        int i, j, k, path_size, device_support;
        int connector_type;
        u16 igp_lane_info, conn_id, connector_object_id;
-       bool linkb;
        struct radeon_i2c_bus_rec ddc_bus;
        struct radeon_router router;
        struct radeon_gpio_rec gpio;
@@ -510,7 +538,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
                addr += path_size;
                path = (ATOM_DISPLAY_OBJECT_PATH *) addr;
                path_size += le16_to_cpu(path->usSize);
-               linkb = false;
+
                if (device_support & le16_to_cpu(path->usDeviceTag)) {
                        uint8_t con_obj_id, con_obj_num, con_obj_type;
 
@@ -601,13 +629,10 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
                                     OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
 
                                if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) {
-                                       if (grph_obj_num == 2)
-                                               linkb = true;
-                                       else
-                                               linkb = false;
+                                       u16 encoder_obj = le16_to_cpu(path->usGraphicObjIds[j]);
 
                                        radeon_add_atom_encoder(dev,
-                                                               grph_obj_id,
+                                                               encoder_obj,
                                                                le16_to_cpu
                                                                (path->
                                                                 usDeviceTag));
@@ -744,7 +769,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
                                                  le16_to_cpu(path->
                                                              usDeviceTag),
                                                  connector_type, &ddc_bus,
-                                                 linkb, igp_lane_info,
+                                                 igp_lane_info,
                                                  connector_object_id,
                                                  &hpd,
                                                  &router);
@@ -933,13 +958,13 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 
                if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom)
                        radeon_add_atom_encoder(dev,
-                                               radeon_get_encoder_id(dev,
+                                               radeon_get_encoder_enum(dev,
                                                                      (1 << i),
                                                                      dac),
                                                (1 << i));
                else
                        radeon_add_legacy_encoder(dev,
-                                                 radeon_get_encoder_id(dev,
+                                                 radeon_get_encoder_enum(dev,
                                                                        (1 << i),
                                                                        dac),
                                                  (1 << i));
@@ -996,7 +1021,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
                                                  bios_connectors[i].
                                                  connector_type,
                                                  &bios_connectors[i].ddc_bus,
-                                                 false, 0,
+                                                 0,
                                                  connector_object_id,
                                                  &bios_connectors[i].hpd,
                                                  &router);
@@ -1183,7 +1208,7 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev)
                                return true;
                        break;
                case 2:
-                       if (igp_info->info_2.ucMemoryType & 0x0f)
+                       if (igp_info->info_2.ulBootUpSidePortClock)
                                return true;
                        break;
                default:
@@ -1305,6 +1330,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
        union lvds_info *lvds_info;
        uint8_t frev, crev;
        struct radeon_encoder_atom_dig *lvds = NULL;
+       int encoder_enum = (encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
 
        if (atom_parse_data_header(mode_info->atom_context, index, NULL,
                                   &frev, &crev, &data_offset)) {
@@ -1368,6 +1394,12 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
                }
 
                encoder->native_mode = lvds->native_mode;
+
+               if (encoder_enum == 2)
+                       lvds->linkb = true;
+               else
+                       lvds->linkb = false;
+
        }
        return lvds;
 }
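The hunks above stop deriving dual-link ("link B") operation from the graphics object number while walking the object table; the LVDS info path now decodes it from the encoder's own object enum instead (encoder_enum & ENUM_ID_MASK, with enum id 2 meaning the second link). A minimal standalone sketch of that decode, assuming the usual ATOM ObjectID packing of ENUM_ID_MASK = 0x0700 and ENUM_ID_SHIFT = 0x08 (values assumed here, not quoted from this diff):

    #include <stdint.h>
    #include <stdio.h>

    #define ENUM_ID_MASK  0x0700    /* assumed ObjectID.h-style values */
    #define ENUM_ID_SHIFT 0x08

    /* Nonzero when the encoder object enum names the second (B) link. */
    static int enum_is_linkb(uint16_t encoder_enum)
    {
            return ((encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT) == 2;
    }

    int main(void)
    {
            uint16_t example = 0x0221;      /* hypothetical object enum with enum id 2 */
            printf("linkb = %d\n", enum_is_linkb(example));
            return 0;
    }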
index 14448a740ba632cf5f2fcbaf2d2d5804f3446b93..5249af8931e60549e01362102f9c8ca941ba0d24 100644 (file)
@@ -327,6 +327,14 @@ void radeon_get_clock_info(struct drm_device *dev)
        mpll->max_feedback_div = 0xff;
        mpll->best_vco = 0;
 
+       if (!rdev->clock.default_sclk)
+               rdev->clock.default_sclk = radeon_get_engine_clock(rdev);
+       if ((!rdev->clock.default_mclk) && rdev->asic->get_memory_clock)
+               rdev->clock.default_mclk = radeon_get_memory_clock(rdev);
+
+       rdev->pm.current_sclk = rdev->clock.default_sclk;
+       rdev->pm.current_mclk = rdev->clock.default_mclk;
+
 }
 
 /* 10 khz */
@@ -897,53 +905,3 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
        }
 }
 
-static void radeon_apply_clock_quirks(struct radeon_device *rdev)
-{
-       uint32_t tmp;
-
-       /* XXX make sure engine is idle */
-
-       if (rdev->family < CHIP_RS600) {
-               tmp = RREG32_PLL(RADEON_SCLK_CNTL);
-               if (ASIC_IS_R300(rdev) || ASIC_IS_RV100(rdev))
-                       tmp |= RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_VIP;
-               if ((rdev->family == CHIP_RV250)
-                   || (rdev->family == CHIP_RV280))
-                       tmp |=
-                           RADEON_SCLK_FORCE_DISP1 | RADEON_SCLK_FORCE_DISP2;
-               if ((rdev->family == CHIP_RV350)
-                   || (rdev->family == CHIP_RV380))
-                       tmp |= R300_SCLK_FORCE_VAP;
-               if (rdev->family == CHIP_R420)
-                       tmp |= R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX;
-               WREG32_PLL(RADEON_SCLK_CNTL, tmp);
-       } else if (rdev->family < CHIP_R600) {
-               tmp = RREG32_PLL(AVIVO_CP_DYN_CNTL);
-               tmp |= AVIVO_CP_FORCEON;
-               WREG32_PLL(AVIVO_CP_DYN_CNTL, tmp);
-
-               tmp = RREG32_PLL(AVIVO_E2_DYN_CNTL);
-               tmp |= AVIVO_E2_FORCEON;
-               WREG32_PLL(AVIVO_E2_DYN_CNTL, tmp);
-
-               tmp = RREG32_PLL(AVIVO_IDCT_DYN_CNTL);
-               tmp |= AVIVO_IDCT_FORCEON;
-               WREG32_PLL(AVIVO_IDCT_DYN_CNTL, tmp);
-       }
-}
-
-int radeon_static_clocks_init(struct drm_device *dev)
-{
-       struct radeon_device *rdev = dev->dev_private;
-
-       /* XXX make sure engine is idle */
-
-       if (radeon_dynclks != -1) {
-               if (radeon_dynclks) {
-                       if (rdev->asic->set_clock_gating)
-                               radeon_set_clock_gating(rdev, 1);
-               }
-       }
-       radeon_apply_clock_quirks(rdev);
-       return 0;
-}
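In this file, radeon_get_clock_info() now backfills default_sclk/default_mclk from the currently programmed engine and memory clocks whenever the BIOS tables left them at zero, and seeds pm.current_sclk/pm.current_mclk from those defaults; the old radeon_apply_clock_quirks()/radeon_static_clocks_init() pair is deleted outright. A compact sketch of the backfill pattern, using hypothetical probe callbacks in place of the asic hooks:

    #include <stdint.h>

    struct clock_info {
            uint32_t default_sclk;          /* 10 kHz units, 0 = BIOS gave none */
            uint32_t default_mclk;
    };

    typedef uint32_t (*clock_probe_t)(void);

    static void backfill_clock_defaults(struct clock_info *c,
                                        clock_probe_t get_sclk,
                                        clock_probe_t get_mclk)
    {
            if (!c->default_sclk)
                    c->default_sclk = get_sclk();
            if (!c->default_mclk && get_mclk)       /* memory-clock probe may be absent */
                    c->default_mclk = get_mclk();
    }

    /* Made-up probe values, for illustration only. */
    static uint32_t stub_sclk(void) { return 68000; }
    static uint32_t stub_mclk(void) { return 80000; }

    int main(void)
    {
            struct clock_info c = { 0, 0 };
            backfill_clock_defaults(&c, stub_sclk, stub_mclk);
            return (c.default_sclk && c.default_mclk) ? 0 : 1;
    }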
index 885dcfac1838e89367487760aa717d54d7d7f978..bd74e428bd147d0df444cc64858f28acbed0d8ed 100644 (file)
@@ -39,8 +39,8 @@
 
 /* from radeon_encoder.c */
 extern uint32_t
-radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device,
-                     uint8_t dac);
+radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
+                       uint8_t dac);
 extern void radeon_link_encoder_connector(struct drm_device *dev);
 
 /* from radeon_connector.c */
@@ -55,7 +55,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
 
 /* from radeon_legacy_encoder.c */
 extern void
-radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id,
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
                          uint32_t supported_device);
 
 /* old legacy ATI BIOS routines */
@@ -1505,7 +1505,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                        ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
                        hpd.hpd = RADEON_HPD_NONE;
                        radeon_add_legacy_encoder(dev,
-                                                 radeon_get_encoder_id(dev,
+                                                 radeon_get_encoder_enum(dev,
                                                                        ATOM_DEVICE_CRT1_SUPPORT,
                                                                        1),
                                                  ATOM_DEVICE_CRT1_SUPPORT);
@@ -1520,7 +1520,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                        ddc_i2c = combios_setup_i2c_bus(rdev, DDC_NONE_DETECTED, 0, 0);
                        hpd.hpd = RADEON_HPD_NONE;
                        radeon_add_legacy_encoder(dev,
-                                                 radeon_get_encoder_id(dev,
+                                                 radeon_get_encoder_enum(dev,
                                                                        ATOM_DEVICE_LCD1_SUPPORT,
                                                                        0),
                                                  ATOM_DEVICE_LCD1_SUPPORT);
@@ -1535,7 +1535,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                        ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
                        hpd.hpd = RADEON_HPD_NONE;
                        radeon_add_legacy_encoder(dev,
-                                                 radeon_get_encoder_id(dev,
+                                                 radeon_get_encoder_enum(dev,
                                                                        ATOM_DEVICE_CRT1_SUPPORT,
                                                                        1),
                                                  ATOM_DEVICE_CRT1_SUPPORT);
@@ -1550,12 +1550,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                        ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
                        hpd.hpd = RADEON_HPD_1;
                        radeon_add_legacy_encoder(dev,
-                                                 radeon_get_encoder_id(dev,
+                                                 radeon_get_encoder_enum(dev,
                                                                        ATOM_DEVICE_DFP1_SUPPORT,
                                                                        0),
                                                  ATOM_DEVICE_DFP1_SUPPORT);
                        radeon_add_legacy_encoder(dev,
-                                                 radeon_get_encoder_id(dev,
+                                                 radeon_get_encoder_enum(dev,
                                                                        ATOM_DEVICE_CRT2_SUPPORT,
                                                                        2),
                                                  ATOM_DEVICE_CRT2_SUPPORT);
@@ -1571,7 +1571,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                        ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
                        hpd.hpd = RADEON_HPD_NONE;
                        radeon_add_legacy_encoder(dev,
-                                                 radeon_get_encoder_id(dev,
+                                                 radeon_get_encoder_enum(dev,
                                                                        ATOM_DEVICE_CRT1_SUPPORT,
                                                                        1),
                                                  ATOM_DEVICE_CRT1_SUPPORT);
@@ -1588,7 +1588,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                        ddc_i2c.valid = false;
                        hpd.hpd = RADEON_HPD_NONE;
                        radeon_add_legacy_encoder(dev,
-                                                 radeon_get_encoder_id(dev,
+                                                 radeon_get_encoder_enum(dev,
                                                                        ATOM_DEVICE_TV1_SUPPORT,
                                                                        2),
                                                  ATOM_DEVICE_TV1_SUPPORT);
@@ -1607,7 +1607,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
                hpd.hpd = RADEON_HPD_NONE;
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_LCD1_SUPPORT,
                                                                0),
                                          ATOM_DEVICE_LCD1_SUPPORT);
@@ -1619,7 +1619,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
                hpd.hpd = RADEON_HPD_NONE;
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_CRT2_SUPPORT,
                                                                2),
                                          ATOM_DEVICE_CRT2_SUPPORT);
@@ -1631,7 +1631,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c.valid = false;
                hpd.hpd = RADEON_HPD_NONE;
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_TV1_SUPPORT,
                                                                2),
                                          ATOM_DEVICE_TV1_SUPPORT);
@@ -1648,7 +1648,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
                hpd.hpd = RADEON_HPD_NONE;
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_LCD1_SUPPORT,
                                                                0),
                                          ATOM_DEVICE_LCD1_SUPPORT);
@@ -1660,12 +1660,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
                hpd.hpd = RADEON_HPD_2; /* ??? */
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_DFP2_SUPPORT,
                                                                0),
                                          ATOM_DEVICE_DFP2_SUPPORT);
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_CRT1_SUPPORT,
                                                                1),
                                          ATOM_DEVICE_CRT1_SUPPORT);
@@ -1680,7 +1680,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c.valid = false;
                hpd.hpd = RADEON_HPD_NONE;
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_TV1_SUPPORT,
                                                                2),
                                          ATOM_DEVICE_TV1_SUPPORT);
@@ -1697,7 +1697,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
                hpd.hpd = RADEON_HPD_NONE;
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_LCD1_SUPPORT,
                                                                0),
                                          ATOM_DEVICE_LCD1_SUPPORT);
@@ -1709,12 +1709,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
                hpd.hpd = RADEON_HPD_1; /* ??? */
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_DFP1_SUPPORT,
                                                                0),
                                          ATOM_DEVICE_DFP1_SUPPORT);
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_CRT1_SUPPORT,
                                                                1),
                                          ATOM_DEVICE_CRT1_SUPPORT);
@@ -1728,7 +1728,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c.valid = false;
                hpd.hpd = RADEON_HPD_NONE;
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_TV1_SUPPORT,
                                                                2),
                                          ATOM_DEVICE_TV1_SUPPORT);
@@ -1745,7 +1745,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
                hpd.hpd = RADEON_HPD_NONE;
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_LCD1_SUPPORT,
                                                                0),
                                          ATOM_DEVICE_LCD1_SUPPORT);
@@ -1757,7 +1757,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
                hpd.hpd = RADEON_HPD_NONE;
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_CRT1_SUPPORT,
                                                                1),
                                          ATOM_DEVICE_CRT1_SUPPORT);
@@ -1769,7 +1769,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c.valid = false;
                hpd.hpd = RADEON_HPD_NONE;
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_TV1_SUPPORT,
                                                                2),
                                          ATOM_DEVICE_TV1_SUPPORT);
@@ -1786,12 +1786,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
                hpd.hpd = RADEON_HPD_2; /* ??? */
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_DFP2_SUPPORT,
                                                                0),
                                          ATOM_DEVICE_DFP2_SUPPORT);
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_CRT2_SUPPORT,
                                                                2),
                                          ATOM_DEVICE_CRT2_SUPPORT);
@@ -1806,7 +1806,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c.valid = false;
                hpd.hpd = RADEON_HPD_NONE;
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_TV1_SUPPORT,
                                                                2),
                                          ATOM_DEVICE_TV1_SUPPORT);
@@ -1823,12 +1823,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
                hpd.hpd = RADEON_HPD_1; /* ??? */
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_DFP1_SUPPORT,
                                                                0),
                                          ATOM_DEVICE_DFP1_SUPPORT);
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_CRT2_SUPPORT,
                                                                2),
                                          ATOM_DEVICE_CRT2_SUPPORT);
@@ -1842,7 +1842,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c.valid = false;
                hpd.hpd = RADEON_HPD_NONE;
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_TV1_SUPPORT,
                                                                2),
                                          ATOM_DEVICE_TV1_SUPPORT);
@@ -1859,7 +1859,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
                hpd.hpd = RADEON_HPD_1; /* ??? */
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_DFP1_SUPPORT,
                                                                0),
                                          ATOM_DEVICE_DFP1_SUPPORT);
@@ -1871,7 +1871,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
                hpd.hpd = RADEON_HPD_NONE;
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_CRT2_SUPPORT,
                                                                2),
                                          ATOM_DEVICE_CRT2_SUPPORT);
@@ -1883,7 +1883,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c.valid = false;
                hpd.hpd = RADEON_HPD_NONE;
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_TV1_SUPPORT,
                                                                2),
                                          ATOM_DEVICE_TV1_SUPPORT);
@@ -1900,7 +1900,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
                hpd.hpd = RADEON_HPD_NONE;
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_CRT1_SUPPORT,
                                                                1),
                                          ATOM_DEVICE_CRT1_SUPPORT);
@@ -1912,7 +1912,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
                hpd.hpd = RADEON_HPD_NONE;
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_CRT2_SUPPORT,
                                                                2),
                                          ATOM_DEVICE_CRT2_SUPPORT);
@@ -1924,7 +1924,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c.valid = false;
                hpd.hpd = RADEON_HPD_NONE;
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_TV1_SUPPORT,
                                                                2),
                                          ATOM_DEVICE_TV1_SUPPORT);
@@ -1941,7 +1941,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
                hpd.hpd = RADEON_HPD_NONE;
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_CRT1_SUPPORT,
                                                                1),
                                          ATOM_DEVICE_CRT1_SUPPORT);
@@ -1952,7 +1952,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
                hpd.hpd = RADEON_HPD_NONE;
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_CRT2_SUPPORT,
                                                                2),
                                          ATOM_DEVICE_CRT2_SUPPORT);
@@ -2109,7 +2109,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
                                else
                                        devices = ATOM_DEVICE_DFP1_SUPPORT;
                                radeon_add_legacy_encoder(dev,
-                                                         radeon_get_encoder_id
+                                                         radeon_get_encoder_enum
                                                          (dev, devices, 0),
                                                          devices);
                                radeon_add_legacy_connector(dev, i, devices,
@@ -2123,7 +2123,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
                                if (tmp & 0x1) {
                                        devices = ATOM_DEVICE_CRT2_SUPPORT;
                                        radeon_add_legacy_encoder(dev,
-                                                                 radeon_get_encoder_id
+                                                                 radeon_get_encoder_enum
                                                                  (dev,
                                                                   ATOM_DEVICE_CRT2_SUPPORT,
                                                                   2),
@@ -2131,7 +2131,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
                                } else {
                                        devices = ATOM_DEVICE_CRT1_SUPPORT;
                                        radeon_add_legacy_encoder(dev,
-                                                                 radeon_get_encoder_id
+                                                                 radeon_get_encoder_enum
                                                                  (dev,
                                                                   ATOM_DEVICE_CRT1_SUPPORT,
                                                                   1),
@@ -2151,7 +2151,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
                                if (tmp & 0x1) {
                                        devices |= ATOM_DEVICE_CRT2_SUPPORT;
                                        radeon_add_legacy_encoder(dev,
-                                                                 radeon_get_encoder_id
+                                                                 radeon_get_encoder_enum
                                                                  (dev,
                                                                   ATOM_DEVICE_CRT2_SUPPORT,
                                                                   2),
@@ -2159,7 +2159,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
                                } else {
                                        devices |= ATOM_DEVICE_CRT1_SUPPORT;
                                        radeon_add_legacy_encoder(dev,
-                                                                 radeon_get_encoder_id
+                                                                 radeon_get_encoder_enum
                                                                  (dev,
                                                                   ATOM_DEVICE_CRT1_SUPPORT,
                                                                   1),
@@ -2168,7 +2168,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
                                if ((tmp >> 4) & 0x1) {
                                        devices |= ATOM_DEVICE_DFP2_SUPPORT;
                                        radeon_add_legacy_encoder(dev,
-                                                                 radeon_get_encoder_id
+                                                                 radeon_get_encoder_enum
                                                                  (dev,
                                                                   ATOM_DEVICE_DFP2_SUPPORT,
                                                                   0),
@@ -2177,7 +2177,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
                                } else {
                                        devices |= ATOM_DEVICE_DFP1_SUPPORT;
                                        radeon_add_legacy_encoder(dev,
-                                                                 radeon_get_encoder_id
+                                                                 radeon_get_encoder_enum
                                                                  (dev,
                                                                   ATOM_DEVICE_DFP1_SUPPORT,
                                                                   0),
@@ -2202,7 +2202,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
                                        connector_object_id = CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
                                }
                                radeon_add_legacy_encoder(dev,
-                                                         radeon_get_encoder_id
+                                                         radeon_get_encoder_enum
                                                          (dev, devices, 0),
                                                          devices);
                                radeon_add_legacy_connector(dev, i, devices,
@@ -2215,7 +2215,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
                        case CONNECTOR_CTV_LEGACY:
                        case CONNECTOR_STV_LEGACY:
                                radeon_add_legacy_encoder(dev,
-                                                         radeon_get_encoder_id
+                                                         radeon_get_encoder_enum
                                                          (dev,
                                                           ATOM_DEVICE_TV1_SUPPORT,
                                                           2),
@@ -2242,12 +2242,12 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
                        DRM_DEBUG_KMS("Found DFP table, assuming DVI connector\n");
 
                        radeon_add_legacy_encoder(dev,
-                                                 radeon_get_encoder_id(dev,
+                                                 radeon_get_encoder_enum(dev,
                                                                        ATOM_DEVICE_CRT1_SUPPORT,
                                                                        1),
                                                  ATOM_DEVICE_CRT1_SUPPORT);
                        radeon_add_legacy_encoder(dev,
-                                                 radeon_get_encoder_id(dev,
+                                                 radeon_get_encoder_enum(dev,
                                                                        ATOM_DEVICE_DFP1_SUPPORT,
                                                                        0),
                                                  ATOM_DEVICE_DFP1_SUPPORT);
@@ -2268,7 +2268,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
                        DRM_DEBUG_KMS("Found CRT table, assuming VGA connector\n");
                        if (crt_info) {
                                radeon_add_legacy_encoder(dev,
-                                                         radeon_get_encoder_id(dev,
+                                                         radeon_get_encoder_enum(dev,
                                                                                ATOM_DEVICE_CRT1_SUPPORT,
                                                                                1),
                                                          ATOM_DEVICE_CRT1_SUPPORT);
@@ -2297,7 +2297,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
                                                     COMBIOS_LCD_DDC_INFO_TABLE);
 
                        radeon_add_legacy_encoder(dev,
-                                                 radeon_get_encoder_id(dev,
+                                                 radeon_get_encoder_enum(dev,
                                                                        ATOM_DEVICE_LCD1_SUPPORT,
                                                                        0),
                                                  ATOM_DEVICE_LCD1_SUPPORT);
@@ -2351,7 +2351,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
                                        hpd.hpd = RADEON_HPD_NONE;
                                        ddc_i2c.valid = false;
                                        radeon_add_legacy_encoder(dev,
-                                                                 radeon_get_encoder_id
+                                                                 radeon_get_encoder_enum
                                                                  (dev,
                                                                   ATOM_DEVICE_TV1_SUPPORT,
                                                                   2),
index 47c4b276d30c5fc4c0cd1fdbb9b7cdd5aa7bd634..a9dd7847d96ed673e4548efc343e9112390e7c75 100644 (file)
@@ -977,27 +977,29 @@ static enum drm_connector_status radeon_dp_detect(struct drm_connector *connecto
        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
        enum drm_connector_status ret = connector_status_disconnected;
        struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
-       u8 sink_type;
 
        if (radeon_connector->edid) {
                kfree(radeon_connector->edid);
                radeon_connector->edid = NULL;
        }
 
-       sink_type = radeon_dp_getsinktype(radeon_connector);
-       if ((sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
-           (sink_type == CONNECTOR_OBJECT_ID_eDP)) {
-               if (radeon_dp_getdpcd(radeon_connector)) {
-                       radeon_dig_connector->dp_sink_type = sink_type;
+       if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+               /* eDP is always DP */
+               radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
+               if (radeon_dp_getdpcd(radeon_connector))
                        ret = connector_status_connected;
-               }
        } else {
-               if (radeon_ddc_probe(radeon_connector)) {
-                       radeon_dig_connector->dp_sink_type = sink_type;
-                       ret = connector_status_connected;
+               radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
+               if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+                       if (radeon_dp_getdpcd(radeon_connector))
+                               ret = connector_status_connected;
+               } else {
+                       if (radeon_ddc_probe(radeon_connector))
+                               ret = connector_status_connected;
                }
        }
 
+       radeon_connector_update_scratch_regs(connector, ret);
        return ret;
 }
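The reworked radeon_dp_detect() above treats eDP connectors as DisplayPort unconditionally (no sink-type query), asks the sink what it is for every other DP-capable connector, and only falls back to a plain DDC probe when the sink is not DP; it also updates the scratch registers with the result before returning. Reduced to its decision order (a hypothetical helper taking the probe results as flags, not the driver function):

    #include <stdio.h>

    /* Returns 1 for connected, 0 for disconnected. */
    static int detect_dp_connector(int is_edp, int sink_is_dp,
                                   int dpcd_ok, int ddc_ok)
    {
            if (is_edp || sink_is_dp)
                    return dpcd_ok;         /* DP/eDP sinks: trust the DPCD read */
            return ddc_ok;                  /* non-DP sink behind the port */
    }

    int main(void)
    {
            printf("%d\n", detect_dp_connector(1, 0, 1, 0));  /* eDP, DPCD ok -> 1 */
            return 0;
    }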
 
@@ -1037,7 +1039,6 @@ radeon_add_atom_connector(struct drm_device *dev,
                          uint32_t supported_device,
                          int connector_type,
                          struct radeon_i2c_bus_rec *i2c_bus,
-                         bool linkb,
                          uint32_t igp_lane_info,
                          uint16_t connector_object_id,
                          struct radeon_hpd *hpd,
@@ -1050,10 +1051,16 @@ radeon_add_atom_connector(struct drm_device *dev,
        uint32_t subpixel_order = SubPixelNone;
        bool shared_ddc = false;
 
-       /* fixme - tv/cv/din */
        if (connector_type == DRM_MODE_CONNECTOR_Unknown)
                return;
 
+       /* if the user selected tv=0 don't try and add the connector */
+       if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
+            (connector_type == DRM_MODE_CONNECTOR_Composite) ||
+            (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) &&
+           (radeon_tv == 0))
+               return;
+
        /* see if we already added it */
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                radeon_connector = to_radeon_connector(connector);
@@ -1128,7 +1135,6 @@ radeon_add_atom_connector(struct drm_device *dev,
                radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
                if (!radeon_dig_connector)
                        goto failed;
-               radeon_dig_connector->linkb = linkb;
                radeon_dig_connector->igp_lane_info = igp_lane_info;
                radeon_connector->con_priv = radeon_dig_connector;
                drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
@@ -1158,7 +1164,6 @@ radeon_add_atom_connector(struct drm_device *dev,
                radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
                if (!radeon_dig_connector)
                        goto failed;
-               radeon_dig_connector->linkb = linkb;
                radeon_dig_connector->igp_lane_info = igp_lane_info;
                radeon_connector->con_priv = radeon_dig_connector;
                drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
@@ -1182,7 +1187,6 @@ radeon_add_atom_connector(struct drm_device *dev,
                radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
                if (!radeon_dig_connector)
                        goto failed;
-               radeon_dig_connector->linkb = linkb;
                radeon_dig_connector->igp_lane_info = igp_lane_info;
                radeon_connector->con_priv = radeon_dig_connector;
                drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
@@ -1211,25 +1215,22 @@ radeon_add_atom_connector(struct drm_device *dev,
        case DRM_MODE_CONNECTOR_SVIDEO:
        case DRM_MODE_CONNECTOR_Composite:
        case DRM_MODE_CONNECTOR_9PinDIN:
-               if (radeon_tv == 1) {
-                       drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
-                       drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
-                       radeon_connector->dac_load_detect = true;
-                       drm_connector_attach_property(&radeon_connector->base,
-                                                     rdev->mode_info.load_detect_property,
-                                                     1);
-                       drm_connector_attach_property(&radeon_connector->base,
-                                                     rdev->mode_info.tv_std_property,
-                                                     radeon_atombios_get_tv_info(rdev));
-                       /* no HPD on analog connectors */
-                       radeon_connector->hpd.hpd = RADEON_HPD_NONE;
-               }
+               drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
+               drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
+               radeon_connector->dac_load_detect = true;
+               drm_connector_attach_property(&radeon_connector->base,
+                                             rdev->mode_info.load_detect_property,
+                                             1);
+               drm_connector_attach_property(&radeon_connector->base,
+                                             rdev->mode_info.tv_std_property,
+                                             radeon_atombios_get_tv_info(rdev));
+               /* no HPD on analog connectors */
+               radeon_connector->hpd.hpd = RADEON_HPD_NONE;
                break;
        case DRM_MODE_CONNECTOR_LVDS:
                radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
                if (!radeon_dig_connector)
                        goto failed;
-               radeon_dig_connector->linkb = linkb;
                radeon_dig_connector->igp_lane_info = igp_lane_info;
                radeon_connector->con_priv = radeon_dig_connector;
                drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
@@ -1275,10 +1276,16 @@ radeon_add_legacy_connector(struct drm_device *dev,
        struct radeon_connector *radeon_connector;
        uint32_t subpixel_order = SubPixelNone;
 
-       /* fixme - tv/cv/din */
        if (connector_type == DRM_MODE_CONNECTOR_Unknown)
                return;
 
+       /* if the user selected tv=0 don't try and add the connector */
+       if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
+            (connector_type == DRM_MODE_CONNECTOR_Composite) ||
+            (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) &&
+           (radeon_tv == 0))
+               return;
+
        /* see if we already added it */
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                radeon_connector = to_radeon_connector(connector);
@@ -1350,26 +1357,24 @@ radeon_add_legacy_connector(struct drm_device *dev,
        case DRM_MODE_CONNECTOR_SVIDEO:
        case DRM_MODE_CONNECTOR_Composite:
        case DRM_MODE_CONNECTOR_9PinDIN:
-               if (radeon_tv == 1) {
-                       drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
-                       drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
-                       radeon_connector->dac_load_detect = true;
-                       /* RS400,RC410,RS480 chipset seems to report a lot
-                        * of false positive on load detect, we haven't yet
-                        * found a way to make load detect reliable on those
-                        * chipset, thus just disable it for TV.
-                        */
-                       if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480)
-                               radeon_connector->dac_load_detect = false;
-                       drm_connector_attach_property(&radeon_connector->base,
-                                                     rdev->mode_info.load_detect_property,
-                                                     radeon_connector->dac_load_detect);
-                       drm_connector_attach_property(&radeon_connector->base,
-                                                     rdev->mode_info.tv_std_property,
-                                                     radeon_combios_get_tv_info(rdev));
-                       /* no HPD on analog connectors */
-                       radeon_connector->hpd.hpd = RADEON_HPD_NONE;
-               }
+               drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
+               drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
+               radeon_connector->dac_load_detect = true;
+               /* RS400,RC410,RS480 chipset seems to report a lot
+                * of false positive on load detect, we haven't yet
+                * found a way to make load detect reliable on those
+                * chipset, thus just disable it for TV.
+                */
+               if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480)
+                       radeon_connector->dac_load_detect = false;
+               drm_connector_attach_property(&radeon_connector->base,
+                                             rdev->mode_info.load_detect_property,
+                                             radeon_connector->dac_load_detect);
+               drm_connector_attach_property(&radeon_connector->base,
+                                             rdev->mode_info.tv_std_property,
+                                             radeon_combios_get_tv_info(rdev));
+               /* no HPD on analog connectors */
+               radeon_connector->hpd.hpd = RADEON_HPD_NONE;
                break;
        case DRM_MODE_CONNECTOR_LVDS:
                drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
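Both radeon_add_atom_connector() and radeon_add_legacy_connector() now refuse TV-class connectors up front when the radeon_tv module parameter is 0, which is why the per-case "if (radeon_tv == 1)" wrappers around the SVIDEO/Composite/9-pin DIN setup could be dropped and the TV initialisation runs unconditionally once a connector gets that far. A sketch of the early-exit guard (the connector-type names here are illustrative, not the DRM constants):

    #include <stdio.h>

    enum ctype { SVIDEO, COMPOSITE, DIN9, OTHER };

    static int skip_tv_connector(enum ctype type, int radeon_tv)
    {
            int is_tv = (type == SVIDEO || type == COMPOSITE || type == DIN9);
            return is_tv && radeon_tv == 0;         /* user disabled TV-out */
    }

    int main(void)
    {
            printf("%d\n", skip_tv_connector(SVIDEO, 0));   /* -> 1, skipped */
            return 0;
    }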
index 4f7a170d15663a3f3030f2e02e517718e6998968..256d204a6d24226a93f68b5d4d097fc4b52739c7 100644 (file)
@@ -199,7 +199,7 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64
                mc->mc_vram_size = mc->aper_size;
        }
        mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
-       if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_end <= mc->gtt_end) {
+       if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
                dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
                mc->real_vram_size = mc->aper_size;
                mc->mc_vram_size = mc->aper_size;
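The one-token change above fixes the AGP aperture check in radeon_vram_location(): the old condition (vram_end > gtt_start && vram_end <= gtt_end) only fired when the end of VRAM landed inside the GTT window and missed layouts where VRAM starts inside or spans the whole window; testing vram_start <= gtt_end together with vram_end > gtt_start catches any intersection of the two ranges. A standalone illustration with made-up addresses:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the corrected driver check. */
    static int vram_overlaps_gtt(uint64_t vram_start, uint64_t vram_end,
                                 uint64_t gtt_start, uint64_t gtt_end)
    {
            return vram_end > gtt_start && vram_start <= gtt_end;
    }

    int main(void)
    {
            /* VRAM spanning past the GTT window: the old test missed this case. */
            printf("%d\n", vram_overlaps_gtt(0x00000000ULL, 0x20000000ULL,
                                             0x10000000ULL, 0x18000000ULL));
            return 0;
    }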
@@ -293,30 +293,20 @@ bool radeon_card_posted(struct radeon_device *rdev)
 void radeon_update_bandwidth_info(struct radeon_device *rdev)
 {
        fixed20_12 a;
-       u32 sclk, mclk;
+       u32 sclk = rdev->pm.current_sclk;
+       u32 mclk = rdev->pm.current_mclk;
 
-       if (rdev->flags & RADEON_IS_IGP) {
-               sclk = radeon_get_engine_clock(rdev);
-               mclk = rdev->clock.default_mclk;
-
-               a.full = dfixed_const(100);
-               rdev->pm.sclk.full = dfixed_const(sclk);
-               rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
-               rdev->pm.mclk.full = dfixed_const(mclk);
-               rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
+       /* sclk/mclk in Mhz */
+       a.full = dfixed_const(100);
+       rdev->pm.sclk.full = dfixed_const(sclk);
+       rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
+       rdev->pm.mclk.full = dfixed_const(mclk);
+       rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
 
+       if (rdev->flags & RADEON_IS_IGP) {
                a.full = dfixed_const(16);
                /* core_bandwidth = sclk(Mhz) * 16 */
                rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
-       } else {
-               sclk = radeon_get_engine_clock(rdev);
-               mclk = radeon_get_memory_clock(rdev);
-
-               a.full = dfixed_const(100);
-               rdev->pm.sclk.full = dfixed_const(sclk);
-               rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
-               rdev->pm.mclk.full = dfixed_const(mclk);
-               rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
        }
 }
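radeon_update_bandwidth_info() above now takes sclk/mclk from pm.current_sclk/pm.current_mclk (seeded by the radeon_get_clock_info() change earlier in this commit) for IGP and discrete parts alike, converts them from 10 kHz units to MHz with a fixed-point divide by 100, and keeps only the core_bandwidth computation inside the IGP branch. The unit conversion, sketched without the kernel's fixed20_12 helpers (plain floating point, illustration only):

    #include <stdint.h>
    #include <stdio.h>

    /* Clock values are kept in 10 kHz units; dividing by 100 yields MHz. */
    static double clk_10khz_to_mhz(uint32_t clk)
    {
            return clk / 100.0;
    }

    int main(void)
    {
            uint32_t sclk = 68000;          /* made-up value: 680 MHz */
            printf("sclk = %.1f MHz\n", clk_10khz_to_mhz(sclk));
            return 0;
    }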
 
index 5764f4d3b4f1a0804caf56f39e6a75c6c9afaf57..6dd434ad2429b9d9689ed861d33a2b1c0b09f092 100644 (file)
@@ -1094,6 +1094,18 @@ void radeon_modeset_fini(struct radeon_device *rdev)
        radeon_i2c_fini(rdev);
 }
 
+static bool is_hdtv_mode(struct drm_display_mode *mode)
+{
+       /* try and guess if this is a tv or a monitor */
+       if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
+           (mode->vdisplay == 576) || /* 576p */
+           (mode->vdisplay == 720) || /* 720p */
+           (mode->vdisplay == 1080)) /* 1080p */
+               return true;
+       else
+               return false;
+}
+
 bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
                                struct drm_display_mode *mode,
                                struct drm_display_mode *adjusted_mode)
@@ -1141,7 +1153,8 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
                        if (ASIC_IS_AVIVO(rdev) &&
                            ((radeon_encoder->underscan_type == UNDERSCAN_ON) ||
                             ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
-                             drm_detect_hdmi_monitor(radeon_connector->edid)))) {
+                             drm_detect_hdmi_monitor(radeon_connector->edid) &&
+                             is_hdtv_mode(mode)))) {
                                radeon_crtc->h_border = (mode->hdisplay >> 5) + 16;
                                radeon_crtc->v_border = (mode->vdisplay >> 5) + 16;
                                radeon_crtc->rmx_type = RMX_FULL;
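The new is_hdtv_mode() helper above is a deliberately coarse guess: a mode with a 720x480, 576-line, 720-line or 1080-line vertical resolution is treated as a TV timing, and AVIVO parts now apply automatic underscan borders only when the sink is an HDMI monitor and the requested mode looks like one of those timings. The borders themselves stay as before, roughly 1/32 of each dimension plus 16 pixels. A standalone sketch of the heuristic and the border arithmetic:

    #include <stdio.h>

    struct mode { int hdisplay, vdisplay; };

    /* Same guess the driver makes: common HDTV resolutions. */
    static int looks_like_hdtv(const struct mode *m)
    {
            return (m->vdisplay == 480 && m->hdisplay == 720) ||   /* 480p */
                   m->vdisplay == 576 ||                           /* 576p */
                   m->vdisplay == 720 ||                           /* 720p */
                   m->vdisplay == 1080;                            /* 1080p */
    }

    int main(void)
    {
            struct mode m = { 1920, 1080 };
            int h_border = (m.hdisplay >> 5) + 16;  /* width/32 + 16 */
            int v_border = (m.vdisplay >> 5) + 16;  /* height/32 + 16 */
            printf("hdtv=%d borders=%dx%d\n", looks_like_hdtv(&m),
                   h_border, v_border);
            return 0;
    }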
index 263c8098d7dd279447ebba20e408c25f791f0b7d..2c293e8304d657d047144708bff75c6474d560f0 100644 (file)
@@ -81,7 +81,7 @@ void radeon_setup_encoder_clones(struct drm_device *dev)
 }
 
 uint32_t
-radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
+radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
 {
        struct radeon_device *rdev = dev->dev_private;
        uint32_t ret = 0;
@@ -97,59 +97,59 @@ radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t
                        if ((rdev->family == CHIP_RS300) ||
                            (rdev->family == CHIP_RS400) ||
                            (rdev->family == CHIP_RS480))
-                               ret = ENCODER_OBJECT_ID_INTERNAL_DAC2;
+                               ret = ENCODER_INTERNAL_DAC2_ENUM_ID1;
                        else if (ASIC_IS_AVIVO(rdev))
-                               ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1;
+                               ret = ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1;
                        else
-                               ret = ENCODER_OBJECT_ID_INTERNAL_DAC1;
+                               ret = ENCODER_INTERNAL_DAC1_ENUM_ID1;
                        break;
                case 2: /* dac b */
                        if (ASIC_IS_AVIVO(rdev))
-                               ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2;
+                               ret = ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1;
                        else {
                                /*if (rdev->family == CHIP_R200)
-                                 ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
+                                 ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
                                  else*/
-                               ret = ENCODER_OBJECT_ID_INTERNAL_DAC2;
+                               ret = ENCODER_INTERNAL_DAC2_ENUM_ID1;
                        }
                        break;
                case 3: /* external dac */
                        if (ASIC_IS_AVIVO(rdev))
-                               ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1;
+                               ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1;
                        else
-                               ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
+                               ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
                        break;
                }
                break;
        case ATOM_DEVICE_LCD1_SUPPORT:
                if (ASIC_IS_AVIVO(rdev))
-                       ret = ENCODER_OBJECT_ID_INTERNAL_LVTM1;
+                       ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1;
                else
-                       ret = ENCODER_OBJECT_ID_INTERNAL_LVDS;
+                       ret = ENCODER_INTERNAL_LVDS_ENUM_ID1;
                break;
        case ATOM_DEVICE_DFP1_SUPPORT:
                if ((rdev->family == CHIP_RS300) ||
                    (rdev->family == CHIP_RS400) ||
                    (rdev->family == CHIP_RS480))
-                       ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
+                       ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
                else if (ASIC_IS_AVIVO(rdev))
-                       ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1;
+                       ret = ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1;
                else
-                       ret = ENCODER_OBJECT_ID_INTERNAL_TMDS1;
+                       ret = ENCODER_INTERNAL_TMDS1_ENUM_ID1;
                break;
        case ATOM_DEVICE_LCD2_SUPPORT:
        case ATOM_DEVICE_DFP2_SUPPORT:
                if ((rdev->family == CHIP_RS600) ||
                    (rdev->family == CHIP_RS690) ||
                    (rdev->family == CHIP_RS740))
-                       ret = ENCODER_OBJECT_ID_INTERNAL_DDI;
+                       ret = ENCODER_INTERNAL_DDI_ENUM_ID1;
                else if (ASIC_IS_AVIVO(rdev))
-                       ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1;
+                       ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1;
                else
-                       ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
+                       ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
                break;
        case ATOM_DEVICE_DFP3_SUPPORT:
-               ret = ENCODER_OBJECT_ID_INTERNAL_LVTM1;
+               ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1;
                break;
        }
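The hunk above is the core of the renaming seen throughout this commit: radeon_get_encoder_id() becomes radeon_get_encoder_enum() and hands back full ATOM object enums (ENCODER_INTERNAL_LVDS_ENUM_ID1 and friends) rather than bare encoder object ids, so radeon_add_atom_encoder()/radeon_add_legacy_encoder() receive the encoder type and its instance in one value. Assuming the customary ObjectID.h packing (object id in the low byte, enum id at bit 8, object type at bit 12; these shifts are assumptions, not quoted from the diff), the relationship looks like:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed ObjectID.h-style packing, for illustration. */
    #define OBJECT_ID_SHIFT   0x00
    #define ENUM_ID_SHIFT     0x08
    #define OBJECT_TYPE_SHIFT 0x0C
    #define OBJECT_ID_MASK    0x00FF
    #define ENUM_ID_MASK      0x0700

    static uint16_t make_object_enum(uint8_t object_id, uint8_t enum_id,
                                     uint8_t object_type)
    {
            return (uint16_t)((object_type << OBJECT_TYPE_SHIFT) |
                              (enum_id << ENUM_ID_SHIFT) |
                              (object_id << OBJECT_ID_SHIFT));
    }

    int main(void)
    {
            /* 0x1E is a hypothetical encoder object id; 2 = encoder object type. */
            uint16_t e = make_object_enum(0x1E, 1, 2);
            printf("enum=0x%04x id=0x%02x instance=%u\n", e,
                   e & OBJECT_ID_MASK,
                   (unsigned)((e & ENUM_ID_MASK) >> ENUM_ID_SHIFT));
            return 0;
    }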
 
@@ -228,32 +228,6 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
        return NULL;
 }
 
-static struct radeon_connector_atom_dig *
-radeon_get_atom_connector_priv_from_encoder(struct drm_encoder *encoder)
-{
-       struct drm_device *dev = encoder->dev;
-       struct radeon_device *rdev = dev->dev_private;
-       struct drm_connector *connector;
-       struct radeon_connector *radeon_connector;
-       struct radeon_connector_atom_dig *dig_connector;
-
-       if (!rdev->is_atom_bios)
-               return NULL;
-
-       connector = radeon_get_connector_for_encoder(encoder);
-       if (!connector)
-               return NULL;
-
-       radeon_connector = to_radeon_connector(connector);
-
-       if (!radeon_connector->con_priv)
-               return NULL;
-
-       dig_connector = radeon_connector->con_priv;
-
-       return dig_connector;
-}
-
 void radeon_panel_mode_fixup(struct drm_encoder *encoder,
                             struct drm_display_mode *adjusted_mode)
 {
@@ -512,14 +486,12 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-       struct radeon_connector_atom_dig *dig_connector =
-               radeon_get_atom_connector_priv_from_encoder(encoder);
        union lvds_encoder_control args;
        int index = 0;
        int hdmi_detected = 0;
        uint8_t frev, crev;
 
-       if (!dig || !dig_connector)
+       if (!dig)
                return;
 
        if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
@@ -562,7 +534,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
                                if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
                                        args.v1.ucMisc |= (1 << 1);
                        } else {
-                               if (dig_connector->linkb)
+                               if (dig->linkb)
                                        args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
                                if (radeon_encoder->pixel_clock > 165000)
                                        args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
@@ -601,7 +573,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
                                                args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
                                }
                        } else {
-                               if (dig_connector->linkb)
+                               if (dig->linkb)
                                        args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
                                if (radeon_encoder->pixel_clock > 165000)
                                        args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
@@ -623,6 +595,8 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
 int
 atombios_get_encoder_mode(struct drm_encoder *encoder)
 {
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
        struct drm_connector *connector;
        struct radeon_connector *radeon_connector;
        struct radeon_connector_atom_dig *dig_connector;
@@ -636,9 +610,13 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
        switch (connector->connector_type) {
        case DRM_MODE_CONNECTOR_DVII:
        case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
-               if (drm_detect_hdmi_monitor(radeon_connector->edid))
-                       return ATOM_ENCODER_MODE_HDMI;
-               else if (radeon_connector->use_digital)
+               if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+                       /* fix me */
+                       if (ASIC_IS_DCE4(rdev))
+                               return ATOM_ENCODER_MODE_DVI;
+                       else
+                               return ATOM_ENCODER_MODE_HDMI;
+               } else if (radeon_connector->use_digital)
                        return ATOM_ENCODER_MODE_DVI;
                else
                        return ATOM_ENCODER_MODE_CRT;
@@ -646,9 +624,13 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
        case DRM_MODE_CONNECTOR_DVID:
        case DRM_MODE_CONNECTOR_HDMIA:
        default:
-               if (drm_detect_hdmi_monitor(radeon_connector->edid))
-                       return ATOM_ENCODER_MODE_HDMI;
-               else
+               if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+                       /* fix me */
+                       if (ASIC_IS_DCE4(rdev))
+                               return ATOM_ENCODER_MODE_DVI;
+                       else
+                               return ATOM_ENCODER_MODE_HDMI;
+               } else
                        return ATOM_ENCODER_MODE_DVI;
                break;
        case DRM_MODE_CONNECTOR_LVDS:
@@ -660,9 +642,13 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
                if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
                    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
                        return ATOM_ENCODER_MODE_DP;
-               else if (drm_detect_hdmi_monitor(radeon_connector->edid))
-                       return ATOM_ENCODER_MODE_HDMI;
-               else
+               else if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+                       /* fix me */
+                       if (ASIC_IS_DCE4(rdev))
+                               return ATOM_ENCODER_MODE_DVI;
+                       else
+                               return ATOM_ENCODER_MODE_HDMI;
+               } else
                        return ATOM_ENCODER_MODE_DVI;
                break;
        case DRM_MODE_CONNECTOR_DVIA:
@@ -729,13 +715,24 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-       struct radeon_connector_atom_dig *dig_connector =
-               radeon_get_atom_connector_priv_from_encoder(encoder);
+       struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
        union dig_encoder_control args;
        int index = 0;
        uint8_t frev, crev;
+       int dp_clock = 0;
+       int dp_lane_count = 0;
+
+       if (connector) {
+               struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+               struct radeon_connector_atom_dig *dig_connector =
+                       radeon_connector->con_priv;
 
-       if (!dig || !dig_connector)
+               dp_clock = dig_connector->dp_clock;
+               dp_lane_count = dig_connector->dp_lane_count;
+       }
+
+       /* no dig encoder assigned */
+       if (dig->dig_encoder == -1)
                return;
 
        memset(&args, 0, sizeof(args));
@@ -757,9 +754,9 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
        args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
 
        if (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
-               if (dig_connector->dp_clock == 270000)
+               if (dp_clock == 270000)
                        args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
-               args.v1.ucLaneNum = dig_connector->dp_lane_count;
+               args.v1.ucLaneNum = dp_lane_count;
        } else if (radeon_encoder->pixel_clock > 165000)
                args.v1.ucLaneNum = 8;
        else
@@ -781,7 +778,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
                        args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
                        break;
                }
-               if (dig_connector->linkb)
+               if (dig->linkb)
                        args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
                else
                        args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
@@ -804,38 +801,47 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-       struct radeon_connector_atom_dig *dig_connector =
-               radeon_get_atom_connector_priv_from_encoder(encoder);
-       struct drm_connector *connector;
-       struct radeon_connector *radeon_connector;
+       struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
        union dig_transmitter_control args;
        int index = 0;
        uint8_t frev, crev;
        bool is_dp = false;
        int pll_id = 0;
+       int dp_clock = 0;
+       int dp_lane_count = 0;
+       int connector_object_id = 0;
+       int igp_lane_info = 0;
 
-       if (!dig || !dig_connector)
-               return;
+       if (connector) {
+               struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+               struct radeon_connector_atom_dig *dig_connector =
+                       radeon_connector->con_priv;
 
-       connector = radeon_get_connector_for_encoder(encoder);
-       radeon_connector = to_radeon_connector(connector);
+               dp_clock = dig_connector->dp_clock;
+               dp_lane_count = dig_connector->dp_lane_count;
+               connector_object_id =
+                       (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+               igp_lane_info = dig_connector->igp_lane_info;
+       }
+
+       /* no dig encoder assigned */
+       if (dig->dig_encoder == -1)
+               return;
 
        if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)
                is_dp = true;
 
        memset(&args, 0, sizeof(args));
 
-       if (ASIC_IS_DCE32(rdev) || ASIC_IS_DCE4(rdev))
+       switch (radeon_encoder->encoder_id) {
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
                index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
-       else {
-               switch (radeon_encoder->encoder_id) {
-               case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
-                       index = GetIndexIntoMasterTable(COMMAND, DIG1TransmitterControl);
-                       break;
-               case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
-                       index = GetIndexIntoMasterTable(COMMAND, DIG2TransmitterControl);
-                       break;
-               }
+               break;
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+               index = GetIndexIntoMasterTable(COMMAND, LVTMATransmitterControl);
+               break;
        }
 
        if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
@@ -843,14 +849,14 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
 
        args.v1.ucAction = action;
        if (action == ATOM_TRANSMITTER_ACTION_INIT) {
-               args.v1.usInitInfo = radeon_connector->connector_object_id;
+               args.v1.usInitInfo = connector_object_id;
        } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
                args.v1.asMode.ucLaneSel = lane_num;
                args.v1.asMode.ucLaneSet = lane_set;
        } else {
                if (is_dp)
                        args.v1.usPixelClock =
-                               cpu_to_le16(dig_connector->dp_clock / 10);
+                               cpu_to_le16(dp_clock / 10);
                else if (radeon_encoder->pixel_clock > 165000)
                        args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
                else
@@ -858,13 +864,13 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
        }
        if (ASIC_IS_DCE4(rdev)) {
                if (is_dp)
-                       args.v3.ucLaneNum = dig_connector->dp_lane_count;
+                       args.v3.ucLaneNum = dp_lane_count;
                else if (radeon_encoder->pixel_clock > 165000)
                        args.v3.ucLaneNum = 8;
                else
                        args.v3.ucLaneNum = 4;
 
-               if (dig_connector->linkb) {
+               if (dig->linkb) {
                        args.v3.acConfig.ucLinkSel = 1;
                        args.v3.acConfig.ucEncoderSel = 1;
                }
@@ -904,7 +910,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
                }
        } else if (ASIC_IS_DCE32(rdev)) {
                args.v2.acConfig.ucEncoderSel = dig->dig_encoder;
-               if (dig_connector->linkb)
+               if (dig->linkb)
                        args.v2.acConfig.ucLinkSel = 1;
 
                switch (radeon_encoder->encoder_id) {
@@ -938,23 +944,23 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
                if ((rdev->flags & RADEON_IS_IGP) &&
                    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
                        if (is_dp || (radeon_encoder->pixel_clock <= 165000)) {
-                               if (dig_connector->igp_lane_info & 0x1)
+                               if (igp_lane_info & 0x1)
                                        args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
-                               else if (dig_connector->igp_lane_info & 0x2)
+                               else if (igp_lane_info & 0x2)
                                        args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
-                               else if (dig_connector->igp_lane_info & 0x4)
+                               else if (igp_lane_info & 0x4)
                                        args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
-                               else if (dig_connector->igp_lane_info & 0x8)
+                               else if (igp_lane_info & 0x8)
                                        args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
                        } else {
-                               if (dig_connector->igp_lane_info & 0x3)
+                               if (igp_lane_info & 0x3)
                                        args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
-                               else if (dig_connector->igp_lane_info & 0xc)
+                               else if (igp_lane_info & 0xc)
                                        args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
                        }
                }
 
-               if (dig_connector->linkb)
+               if (dig->linkb)
                        args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
                else
                        args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
@@ -1072,8 +1078,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
        if (is_dig) {
                switch (mode) {
                case DRM_MODE_DPMS_ON:
-                       if (!ASIC_IS_DCE4(rdev))
-                               atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+                       atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
                        if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
                                struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 
@@ -1085,8 +1090,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
                case DRM_MODE_DPMS_STANDBY:
                case DRM_MODE_DPMS_SUSPEND:
                case DRM_MODE_DPMS_OFF:
-                       if (!ASIC_IS_DCE4(rdev))
-                               atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
+                       atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
                        if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
                                if (ASIC_IS_DCE4(rdev))
                                        atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF);
@@ -1290,24 +1294,22 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
        uint32_t dig_enc_in_use = 0;
 
        if (ASIC_IS_DCE4(rdev)) {
-               struct radeon_connector_atom_dig *dig_connector =
-                       radeon_get_atom_connector_priv_from_encoder(encoder);
-
+               dig = radeon_encoder->enc_priv;
                switch (radeon_encoder->encoder_id) {
                case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
-                       if (dig_connector->linkb)
+                       if (dig->linkb)
                                return 1;
                        else
                                return 0;
                        break;
                case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
-                       if (dig_connector->linkb)
+                       if (dig->linkb)
                                return 3;
                        else
                                return 2;
                        break;
                case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
-                       if (dig_connector->linkb)
+                       if (dig->linkb)
                                return 5;
                        else
                                return 4;
@@ -1641,6 +1643,7 @@ radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder)
 struct radeon_encoder_atom_dig *
 radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
 {
+       int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
        struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
 
        if (!dig)
@@ -1650,11 +1653,16 @@ radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
        dig->coherent_mode = true;
        dig->dig_encoder = -1;
 
+       if (encoder_enum == 2)
+               dig->linkb = true;
+       else
+               dig->linkb = false;
+
        return dig;
 }
 
 void
-radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device)
+radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device)
 {
        struct radeon_device *rdev = dev->dev_private;
        struct drm_encoder *encoder;
@@ -1663,7 +1671,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
        /* see if we already added it */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                radeon_encoder = to_radeon_encoder(encoder);
-               if (radeon_encoder->encoder_id == encoder_id) {
+               if (radeon_encoder->encoder_enum == encoder_enum) {
                        radeon_encoder->devices |= supported_device;
                        return;
                }
@@ -1691,7 +1699,8 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
 
        radeon_encoder->enc_priv = NULL;
 
-       radeon_encoder->encoder_id = encoder_id;
+       radeon_encoder->encoder_enum = encoder_enum;
+       radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
        radeon_encoder->devices = supported_device;
        radeon_encoder->rmx_type = RMX_OFF;
        radeon_encoder->underscan_type = UNDERSCAN_OFF;
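
Not part of the patch: a minimal sketch of the encoder_enum decoding the hunks above rely on, assuming OBJECT_ID_MASK/OBJECT_ID_SHIFT and ENUM_ID_MASK/ENUM_ID_SHIFT are the same ATOM object-ID packing macros used in the diff.

/* Hypothetical helpers, for illustration only: split an ATOM encoder enum
 * into its object id and enum id, and treat enum id 2 as link B, matching
 * what radeon_atombios_set_dig_info() now does with dig->linkb.
 */
static inline uint32_t radeon_encoder_enum_to_id(uint32_t encoder_enum)
{
        return (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
}

static inline bool radeon_encoder_enum_is_linkb(uint32_t encoder_enum)
{
        return ((encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT) == 2;
}
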
index dbf86962bdd16a5741c3de1369c4ebe7d6c36d29..c74a8b20d9413e921bc6a03cd92578146a9ee8ef 100644 (file)
@@ -118,7 +118,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
        aligned_size = ALIGN(size, PAGE_SIZE);
        ret = radeon_gem_object_create(rdev, aligned_size, 0,
                                       RADEON_GEM_DOMAIN_VRAM,
-                                      false, ttm_bo_type_kernel,
+                                      false, true,
                                       &gobj);
        if (ret) {
                printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
index bfd2ce5f53728dd322767ec186cda77993215f24..6a13ee38a5b9fc71201a0a9a38dd93b3ab5957eb 100644 (file)
@@ -99,6 +99,13 @@ static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
                }
        }
 
+       /* switch the pads to ddc mode */
+       if (ASIC_IS_DCE3(rdev) && rec->hw_capable) {
+               temp = RREG32(rec->mask_clk_reg);
+               temp &= ~(1 << 16);
+               WREG32(rec->mask_clk_reg, temp);
+       }
+
        /* clear the output pin values */
        temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask;
        WREG32(rec->a_clk_reg, temp);
@@ -206,7 +213,7 @@ static void post_xfer(struct i2c_adapter *i2c_adap)
 
 static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
 {
-       u32 sclk = radeon_get_engine_clock(rdev);
+       u32 sclk = rdev->pm.current_sclk;
        u32 prescale = 0;
        u32 nm;
        u8 n, m, loop;
index 059bfa4098d7a5990034cf00c0d3bd84b8b09270..a108c7ed14f5941a4954839a583bc2f48f7cf44e 100644 (file)
@@ -121,11 +121,12 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
         * chips.  Disable MSI on them for now.
         */
        if ((rdev->family >= CHIP_RV380) &&
-           (!(rdev->flags & RADEON_IS_IGP))) {
+           (!(rdev->flags & RADEON_IS_IGP)) &&
+           (!(rdev->flags & RADEON_IS_AGP))) {
                int ret = pci_enable_msi(rdev->pdev);
                if (!ret) {
                        rdev->msi_enabled = 1;
-                       DRM_INFO("radeon: using MSI.\n");
+                       dev_info(rdev->dev, "radeon: using MSI.\n");
                }
        }
        rdev->irq.installed = true;
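
Illustration only, not in the patch: the MSI gating condition after this hunk, written as a single predicate: RV380-or-newer parts that are neither IGP nor AGP.

/* Sketch of the test added above; radeon_irq_kms_init() now enables MSI
 * only when this would return true.
 */
static inline bool radeon_msi_ok_sketch(struct radeon_device *rdev)
{
        return (rdev->family >= CHIP_RV380) &&
               !(rdev->flags & RADEON_IS_IGP) &&
               !(rdev->flags & RADEON_IS_AGP);
}
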
index b1c8ace5f0802c908ce653022bbe2921470c8e90..5eee3c41d124bf49fbd5dfbc7264fb062699e961 100644 (file)
@@ -161,6 +161,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                        DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
                        return -EINVAL;
                }
+               break;
        case RADEON_INFO_WANT_HYPERZ:
                /* The "value" here is both an input and output parameter.
                 * If the input value is 1, filp requests hyper-z access.
@@ -323,45 +324,45 @@ KMS_INVALID_IOCTL(radeon_surface_free_kms)
 
 
 struct drm_ioctl_desc radeon_ioctls_kms[] = {
-       DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
        /* KMS */
-       DRM_IOCTL_DEF(DRM_RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
 };
 int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
index 989df519a1e453b618e2599f0eeced23e637dc91..305049afde15235e558dc22f3992543508f362e6 100644 (file)
@@ -272,7 +272,7 @@ static uint8_t radeon_compute_pll_gain(uint16_t ref_freq, uint16_t ref_div,
        if (!ref_div)
                return 1;
 
-       vcoFreq = ((unsigned)ref_freq & fb_div) / ref_div;
+       vcoFreq = ((unsigned)ref_freq * fb_div) / ref_div;
 
        /*
         * This is horribly crude: the VCO frequency range is divided into
index b8149cbc0c70ca3511f748d13e05cb85c16073e9..0b8397000f4c68ae98e80df717a652cb2ddf6253 100644 (file)
@@ -1345,7 +1345,7 @@ static struct radeon_encoder_ext_tmds *radeon_legacy_get_ext_tmds_info(struct ra
 }
 
 void
-radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device)
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device)
 {
        struct radeon_device *rdev = dev->dev_private;
        struct drm_encoder *encoder;
@@ -1354,7 +1354,7 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t
        /* see if we already added it */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                radeon_encoder = to_radeon_encoder(encoder);
-               if (radeon_encoder->encoder_id == encoder_id) {
+               if (radeon_encoder->encoder_enum == encoder_enum) {
                        radeon_encoder->devices |= supported_device;
                        return;
                }
@@ -1374,7 +1374,8 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t
 
        radeon_encoder->enc_priv = NULL;
 
-       radeon_encoder->encoder_id = encoder_id;
+       radeon_encoder->encoder_enum = encoder_enum;
+       radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
        radeon_encoder->devices = supported_device;
        radeon_encoder->rmx_type = RMX_OFF;
 
index 5bbc086b9267bb16e6d324de5af25977cdf76dae..efbe975312dc42342c2add89c417b753b0e92791 100644 (file)
@@ -342,6 +342,7 @@ struct radeon_atom_ss {
 };
 
 struct radeon_encoder_atom_dig {
+       bool linkb;
        /* atom dig */
        bool coherent_mode;
        int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB */
@@ -360,6 +361,7 @@ struct radeon_encoder_atom_dac {
 
 struct radeon_encoder {
        struct drm_encoder base;
+       uint32_t encoder_enum;
        uint32_t encoder_id;
        uint32_t devices;
        uint32_t active_device;
@@ -378,7 +380,6 @@ struct radeon_encoder {
 
 struct radeon_connector_atom_dig {
        uint32_t igp_lane_info;
-       bool linkb;
        /* displayport */
        struct radeon_i2c_chan *dp_i2c_bus;
        u8 dpcd[8];
@@ -599,7 +600,6 @@ extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct d
 void radeon_enc_destroy(struct drm_encoder *encoder);
 void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
 void radeon_combios_asic_init(struct drm_device *dev);
-extern int radeon_static_clocks_init(struct drm_device *dev);
 bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
                                        struct drm_display_mode *mode,
                                        struct drm_display_mode *adjusted_mode);
index 58038f5cab38d4b0c31ac7c2abfbad9a18c533b2..f87efec76236966ac790ff0709f5cf2ae3c57563 100644 (file)
@@ -226,6 +226,11 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
 {
        int i;
 
+       /* no need to take locks, etc. if nothing's going to change */
+       if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
+           (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
+               return;
+
        mutex_lock(&rdev->ddev->struct_mutex);
        mutex_lock(&rdev->vram_mutex);
        mutex_lock(&rdev->cp.mutex);
@@ -632,8 +637,6 @@ void radeon_pm_fini(struct radeon_device *rdev)
        }
 
        radeon_hwmon_fini(rdev);
-       if (rdev->pm.i2c_bus)
-               radeon_i2c_destroy(rdev->pm.i2c_bus);
 }
 
 void radeon_pm_compute_clocks(struct radeon_device *rdev)
index b3ba44c0a81801a9f528edf833be726a05a55c40..4ae5a3d1074e19c9a203f7433cfc12e1c7724042 100644 (file)
@@ -3228,34 +3228,34 @@ void radeon_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
 }
 
 struct drm_ioctl_desc radeon_ioctls[] = {
-       DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_CS, r600_cs_legacy_ioctl, DRM_AUTH)
+       DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_CS, r600_cs_legacy_ioctl, DRM_AUTH)
 };
 
 int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls);
index f1c796810117fdb9dfb0b66aa5a89eee5e1da1e9..bfa59db374d23d3c4a06877a6e9a37aec59904e0 100644 (file)
@@ -905,6 +905,54 @@ static void rv770_gpu_init(struct radeon_device *rdev)
 
 }
 
+static int rv770_vram_scratch_init(struct radeon_device *rdev)
+{
+       int r;
+       u64 gpu_addr;
+
+       if (rdev->vram_scratch.robj == NULL) {
+               r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE,
+                                       true, RADEON_GEM_DOMAIN_VRAM,
+                                       &rdev->vram_scratch.robj);
+               if (r) {
+                       return r;
+               }
+       }
+
+       r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
+       if (unlikely(r != 0))
+               return r;
+       r = radeon_bo_pin(rdev->vram_scratch.robj,
+                         RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
+       if (r) {
+               radeon_bo_unreserve(rdev->vram_scratch.robj);
+               return r;
+       }
+       r = radeon_bo_kmap(rdev->vram_scratch.robj,
+                               (void **)&rdev->vram_scratch.ptr);
+       if (r)
+               radeon_bo_unpin(rdev->vram_scratch.robj);
+       radeon_bo_unreserve(rdev->vram_scratch.robj);
+
+       return r;
+}
+
+static void rv770_vram_scratch_fini(struct radeon_device *rdev)
+{
+       int r;
+
+       if (rdev->vram_scratch.robj == NULL) {
+               return;
+       }
+       r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
+       if (likely(r == 0)) {
+               radeon_bo_kunmap(rdev->vram_scratch.robj);
+               radeon_bo_unpin(rdev->vram_scratch.robj);
+               radeon_bo_unreserve(rdev->vram_scratch.robj);
+       }
+       radeon_bo_unref(&rdev->vram_scratch.robj);
+}
+
 int rv770_mc_init(struct radeon_device *rdev)
 {
        u32 tmp;
@@ -970,6 +1018,9 @@ static int rv770_startup(struct radeon_device *rdev)
                if (r)
                        return r;
        }
+       r = rv770_vram_scratch_init(rdev);
+       if (r)
+               return r;
        rv770_gpu_init(rdev);
        r = r600_blit_init(rdev);
        if (r) {
@@ -1023,11 +1074,6 @@ int rv770_resume(struct radeon_device *rdev)
         */
        /* post card */
        atom_asic_init(rdev->mode_info.atom_context);
-       /* Initialize clocks */
-       r = radeon_clocks_init(rdev);
-       if (r) {
-               return r;
-       }
 
        r = rv770_startup(rdev);
        if (r) {
@@ -1118,9 +1164,6 @@ int rv770_init(struct radeon_device *rdev)
        radeon_surface_init(rdev);
        /* Initialize clocks */
        radeon_get_clock_info(rdev->ddev);
-       r = radeon_clocks_init(rdev);
-       if (r)
-               return r;
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
        if (r)
@@ -1195,9 +1238,9 @@ void rv770_fini(struct radeon_device *rdev)
        r600_irq_fini(rdev);
        radeon_irq_kms_fini(rdev);
        rv770_pcie_gart_fini(rdev);
+       rv770_vram_scratch_fini(rdev);
        radeon_gem_fini(rdev);
        radeon_fence_driver_fini(rdev);
-       radeon_clocks_fini(rdev);
        radeon_agp_fini(rdev);
        radeon_bo_fini(rdev);
        radeon_atombios_fini(rdev);
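
Illustration only, not from the patch: the intended lifetime of the two new scratch-buffer helpers, which the hunks above wire into rv770_startup() and rv770_fini() respectively. The function name here is made up.

static int rv770_vram_scratch_example(struct radeon_device *rdev)
{
        int r;

        /* create, pin and kmap one GPU page of VRAM */
        r = rv770_vram_scratch_init(rdev);
        if (r)
                return r;

        /* rdev->vram_scratch.ptr is now CPU-visible scratch space */

        /* kunmap, unpin and drop the reference again */
        rv770_vram_scratch_fini(rdev);
        return 0;
}
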
index 976dc8d25280c749798f1c1f124219190c10ca84..bf5f83ea14fe19819874311b66618957859286f2 100644 (file)
@@ -1082,10 +1082,10 @@ void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
 }
 
 struct drm_ioctl_desc savage_ioctls[] = {
-       DRM_IOCTL_DEF(DRM_SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
 };
 
 int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);
index 07d0f2979cac1158f4db86a90e28bf4184d2b1fa..7fe2b63412ce96aa2c0829127c25261a7bb7aa6d 100644 (file)
@@ -320,12 +320,12 @@ void sis_reclaim_buffers_locked(struct drm_device *dev,
 }
 
 struct drm_ioctl_desc sis_ioctls[] = {
-       DRM_IOCTL_DEF(DRM_SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_SIS_FB_FREE, sis_drm_free, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_SIS_AGP_FREE, sis_drm_free, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(SIS_FB_FREE, sis_drm_free, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(SIS_AGP_FREE, sis_drm_free, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
 };
 
 int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls);
index 68dda74a50ae54c8273eb0e076d7ff151f9f7231..cc0ffa9abd00da2ff0504679bad6f32b04f0d561 100644 (file)
@@ -722,20 +722,20 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *
 }
 
 struct drm_ioctl_desc via_ioctls[] = {
-       DRM_IOCTL_DEF(DRM_VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_VIA_FREEMEM, via_mem_free, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
-       DRM_IOCTL_DEF(DRM_VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER),
-       DRM_IOCTL_DEF(DRM_VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER),
-       DRM_IOCTL_DEF(DRM_VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_VIA_DMA_INIT, via_dma_init, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_VIA_FLUSH, via_flush_ioctl, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_VIA_DMA_BLIT, via_dma_blit, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH)
+       DRM_IOCTL_DEF_DRV(VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(VIA_FREEMEM, via_mem_free, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
+       DRM_IOCTL_DEF_DRV(VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER),
+       DRM_IOCTL_DEF_DRV(VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER),
+       DRM_IOCTL_DEF_DRV(VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(VIA_DMA_INIT, via_dma_init, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(VIA_FLUSH, via_flush_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(VIA_DMA_BLIT, via_dma_blit, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH)
 };
 
 int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls);
index 9dd395b90216b17c54598c9309502785a2684220..72ec2e2b6e9787196ca1de65f28e4c6a0f090051 100644 (file)
  */
 
 #define VMW_IOCTL_DEF(ioctl, func, flags) \
-       [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func}
+  [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}
 
 /**
  * Ioctl definitions.
  */
 
 static struct drm_ioctl_desc vmw_ioctls[] = {
-       VMW_IOCTL_DEF(DRM_IOCTL_VMW_GET_PARAM, vmw_getparam_ioctl,
+       VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
-       VMW_IOCTL_DEF(DRM_IOCTL_VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
+       VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
-       VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
+       VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
-       VMW_IOCTL_DEF(DRM_IOCTL_VMW_CURSOR_BYPASS,
+       VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
                      vmw_kms_cursor_bypass_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
 
-       VMW_IOCTL_DEF(DRM_IOCTL_VMW_CONTROL_STREAM, vmw_overlay_ioctl,
+       VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
-       VMW_IOCTL_DEF(DRM_IOCTL_VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
+       VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
-       VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
+       VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
 
-       VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
+       VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
-       VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
+       VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
-       VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
+       VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
-       VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
+       VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
-       VMW_IOCTL_DEF(DRM_IOCTL_VMW_REF_SURFACE, vmw_surface_reference_ioctl,
+       VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
-       VMW_IOCTL_DEF(DRM_IOCTL_VMW_EXECBUF, vmw_execbuf_ioctl,
+       VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
-       VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
+       VMW_IOCTL_DEF(VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
                      DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED),
-       VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
+       VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
-       VMW_IOCTL_DEF(DRM_IOCTL_VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl,
+       VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED)
 };
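
For reference, the rewritten VMW_IOCTL_DEF above is a plain textual substitution; the first entry

        VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, DRM_AUTH | DRM_UNLOCKED)

expands to

        [DRM_IOCTL_NR(DRM_IOCTL_VMW_GET_PARAM) - DRM_COMMAND_BASE] =
                {DRM_VMW_GET_PARAM, DRM_AUTH | DRM_UNLOCKED,
                 vmw_getparam_ioctl, DRM_IOCTL_VMW_GET_PARAM},

so each descriptor now carries the full DRM_IOCTL_* number alongside the driver command, flags and handler.
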
 
index b300a2048af153f9aa09cbd546a06fcadb3a4588..52319340e182da8f189eae518d80bd3674d26425 100644 (file)
@@ -160,30 +160,12 @@ static const struct attribute_group ads7871_group = {
 
 static int __devinit ads7871_probe(struct spi_device *spi)
 {
-       int status, ret, err = 0;
+       int ret, err;
        uint8_t val;
        struct ads7871_data *pdata;
 
        dev_dbg(&spi->dev, "probe\n");
 
-       pdata = kzalloc(sizeof(struct ads7871_data), GFP_KERNEL);
-       if (!pdata) {
-               err = -ENOMEM;
-               goto exit;
-       }
-
-       status = sysfs_create_group(&spi->dev.kobj, &ads7871_group);
-       if (status < 0)
-               goto error_free;
-
-       pdata->hwmon_dev = hwmon_device_register(&spi->dev);
-       if (IS_ERR(pdata->hwmon_dev)) {
-               err = PTR_ERR(pdata->hwmon_dev);
-               goto error_remove;
-       }
-
-       spi_set_drvdata(spi, pdata);
-
        /* Configure the SPI bus */
        spi->mode = (SPI_MODE_0);
        spi->bits_per_word = 8;
@@ -201,6 +183,24 @@ static int __devinit ads7871_probe(struct spi_device *spi)
        we need to make sure we really have a chip*/
        if (val != ret) {
                err = -ENODEV;
+               goto exit;
+       }
+
+       pdata = kzalloc(sizeof(struct ads7871_data), GFP_KERNEL);
+       if (!pdata) {
+               err = -ENOMEM;
+               goto exit;
+       }
+
+       err = sysfs_create_group(&spi->dev.kobj, &ads7871_group);
+       if (err < 0)
+               goto error_free;
+
+       spi_set_drvdata(spi, pdata);
+
+       pdata->hwmon_dev = hwmon_device_register(&spi->dev);
+       if (IS_ERR(pdata->hwmon_dev)) {
+               err = PTR_ERR(pdata->hwmon_dev);
                goto error_remove;
        }
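
Ordering summary (the error labels' cleanup bodies are not shown in this diff, so their actions below are assumed): after this hunk the probe detects the chip before allocating anything, then unwinds in reverse order on failure.

/*
 *      configure the SPI bus, read back the ID register
 *              -> -ENODEV and a plain return (exit) if it does not match
 *      kzalloc() the driver state              -> exit on failure
 *      sysfs_create_group()                    -> error_free (presumably kfree) on failure
 *      spi_set_drvdata()
 *      hwmon_device_register()                 -> error_remove (presumably group removal + kfree) on failure
 */
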
 
index c070c9714cbe2fa84646b41626655b4c51a7157e..de8111114f469ec21567a5781349bb0d7f7cbc98 100644 (file)
@@ -518,7 +518,6 @@ static struct notifier_block coretemp_cpu_notifier __refdata = {
 static int __init coretemp_init(void)
 {
        int i, err = -ENODEV;
-       struct pdev_entry *p, *n;
 
        /* quick check if we run Intel */
        if (cpu_data(0).x86_vendor != X86_VENDOR_INTEL)
index 7580f55e67e3cf1560437b428d9fb1e5b8e411d0..36e95753223059ab0e1b5ed8490fe7364b39673e 100644 (file)
@@ -221,6 +221,8 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = {
        AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left),
        AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
        AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
+       AXIS_DMI_MATCH("HPB532x", "HP ProBook 532", y_inverted),
+       AXIS_DMI_MATCH("Mini5102", "HP Mini 5102", xy_rotated_left_usd),
        { NULL, }
 /* Laptop models without axis info (yet):
  * "NC6910" "HP Compaq 6910"
index b9bb3e0ca53083bd152332be698c58dbd0ff66bd..39ead2a4d3c50bfa34acb37651739920c22c76ee 100644 (file)
@@ -143,6 +143,37 @@ static const struct pci_device_id k8temp_ids[] = {
 
 MODULE_DEVICE_TABLE(pci, k8temp_ids);
 
+static int __devinit is_rev_g_desktop(u8 model)
+{
+       u32 brandidx;
+
+       if (model < 0x69)
+               return 0;
+
+       if (model == 0xc1 || model == 0x6c || model == 0x7c)
+               return 0;
+
+       /*
+        * Differentiate between AM2 and ASB1.
+        * See "Constructing the processor Name String" in "Revision
+        * Guide for AMD NPT Family 0Fh Processors" (33610).
+        */
+       brandidx = cpuid_ebx(0x80000001);
+       brandidx = (brandidx >> 9) & 0x1f;
+
+       /* Single core */
+       if ((model == 0x6f || model == 0x7f) &&
+           (brandidx == 0x7 || brandidx == 0x9 || brandidx == 0xc))
+               return 0;
+
+       /* Dual core */
+       if (model == 0x6b &&
+           (brandidx == 0xb || brandidx == 0xc))
+               return 0;
+
+       return 1;
+}
+
 static int __devinit k8temp_probe(struct pci_dev *pdev,
                                  const struct pci_device_id *id)
 {
@@ -179,9 +210,7 @@ static int __devinit k8temp_probe(struct pci_dev *pdev,
                                 "wrong - check erratum #141\n");
                }
 
-               if ((model >= 0x69) &&
-                   !(model == 0xc1 || model == 0x6c || model == 0x7c ||
-                     model == 0x6b || model == 0x6f || model == 0x7f)) {
+               if (is_rev_g_desktop(model)) {
                        /*
                         * RevG desktop CPUs (i.e. no socket S1G1 or
                         * ASB1 parts) need additional offset,
index d0dc1db80b295adc31ad88eb38bb82a3106f6175..50815022cff1dccc4a285d70a9b58d5760315175 100644 (file)
@@ -1106,7 +1106,7 @@ static int ohci_iso_recv_init(struct hpsb_iso *iso)
                if (recv->block_irq_interval * 4 > iso->buf_packets)
                        recv->block_irq_interval = iso->buf_packets / 4;
                if (recv->block_irq_interval < 1)
-               recv->block_irq_interval = 1;
+                       recv->block_irq_interval = 1;
 
                /* choose a buffer stride */
                /* must be a power of 2, and <= PAGE_SIZE */
index 8f0caf7d4482079ef45aa9ea3b8a66d6a37500d4..78fbe9ffe7f024f3f4e1ca486bcbeb5976087e27 100644 (file)
@@ -53,7 +53,7 @@
 #define T3_MAX_PBL_SIZE 256
 #define T3_MAX_RQ_SIZE 1024
 #define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1)
-#define T3_MAX_CQ_DEPTH 262144
+#define T3_MAX_CQ_DEPTH 65536
 #define T3_MAX_NUM_STAG (1<<15)
 #define T3_MAX_MR_SIZE 0x100000000ULL
 #define T3_PAGESIZE_MASK 0xffff000  /* 4KB-128MB */
index 443cea55daac5973469cf43fabe2a388abeadb55..61e0efd4ccfb5d9d4d6f6bdc50f765d365c07d0e 100644 (file)
@@ -502,7 +502,9 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
 static void nes_retrans_expired(struct nes_cm_node *cm_node)
 {
        struct iw_cm_id *cm_id = cm_node->cm_id;
-       switch (cm_node->state) {
+       enum nes_cm_node_state state = cm_node->state;
+       cm_node->state = NES_CM_STATE_CLOSED;
+       switch (state) {
        case NES_CM_STATE_SYN_RCVD:
        case NES_CM_STATE_CLOSING:
                rem_ref_cm_node(cm_node->cm_core, cm_node);
@@ -511,7 +513,6 @@ static void nes_retrans_expired(struct nes_cm_node *cm_node)
        case NES_CM_STATE_FIN_WAIT1:
                if (cm_node->cm_id)
                        cm_id->rem_ref(cm_id);
-               cm_node->state = NES_CM_STATE_CLOSED;
                send_reset(cm_node, NULL);
                break;
        default:
@@ -1439,9 +1440,6 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
                break;
        case NES_CM_STATE_MPAREQ_RCVD:
                passive_state = atomic_add_return(1, &cm_node->passive_state);
-               if (passive_state ==  NES_SEND_RESET_EVENT)
-                       create_event(cm_node, NES_CM_EVENT_RESET);
-               cm_node->state = NES_CM_STATE_CLOSED;
                dev_kfree_skb_any(skb);
                break;
        case NES_CM_STATE_ESTABLISHED:
@@ -1456,6 +1454,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
        case NES_CM_STATE_CLOSED:
                drop_packet(skb);
                break;
+       case NES_CM_STATE_FIN_WAIT2:
        case NES_CM_STATE_FIN_WAIT1:
        case NES_CM_STATE_LAST_ACK:
                cm_node->cm_id->rem_ref(cm_node->cm_id);
@@ -2777,6 +2776,12 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
                return -EINVAL;
        }
 
+       passive_state = atomic_add_return(1, &cm_node->passive_state);
+       if (passive_state == NES_SEND_RESET_EVENT) {
+               rem_ref_cm_node(cm_node->cm_core, cm_node);
+               return -ECONNRESET;
+       }
+
        /* associate the node with the QP */
        nesqp->cm_node = (void *)cm_node;
        cm_node->nesqp = nesqp;
@@ -2979,9 +2984,6 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
                printk(KERN_ERR "%s[%u] OFA CM event_handler returned, "
                        "ret=%d\n", __func__, __LINE__, ret);
 
-       passive_state = atomic_add_return(1, &cm_node->passive_state);
-       if (passive_state == NES_SEND_RESET_EVENT)
-               create_event(cm_node, NES_CM_EVENT_RESET);
        return 0;
 }
 
index f8233c851c694862d76861e515b93183b8d387a5..1980a461c49904e93102e02e655e5b033c5f92be 100644 (file)
@@ -3468,6 +3468,19 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
                                return; /* Ignore it, wait for close complete */
 
                        if (atomic_inc_return(&nesqp->close_timer_started) == 1) {
+                               if ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) &&
+                                       (nesqp->ibqp_state == IB_QPS_RTS) &&
+                                       ((nesadapter->eeprom_version >> 16) != NES_A0)) {
+                                       spin_lock_irqsave(&nesqp->lock, flags);
+                                       nesqp->hw_iwarp_state = iwarp_state;
+                                       nesqp->hw_tcp_state = tcp_state;
+                                       nesqp->last_aeq = async_event_id;
+                                       next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING;
+                                       nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
+                                       spin_unlock_irqrestore(&nesqp->lock, flags);
+                                       nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
+                                       nes_cm_disconn(nesqp);
+                               }
                                nesqp->cm_id->add_ref(nesqp->cm_id);
                                schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp,
                                                NES_TIMER_TYPE_CLOSE, 1, 0);
@@ -3477,7 +3490,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
                                                nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
                                                async_event_id, nesqp->last_aeq, tcp_state);
                        }
-
                        break;
                case NES_AEQE_AEID_LLP_CLOSE_COMPLETE:
                        if (nesqp->term_flags) {
index aa9183db32b104aaaa7bfad081c3c969699cec72..1204c3432b6322f23518c42d550746f19d9e4ce6 100644 (file)
@@ -45,6 +45,7 @@
 #define NES_PHY_TYPE_KR               9
 
 #define NES_MULTICAST_PF_MAX 8
+#define NES_A0 3
 
 enum pci_regs {
        NES_INT_STAT = 0x0000,
index 6dfdd49cdbcf36ef5cd68aee3caf46dbdaeb77f0..10560c796fd6c0ffc591601c610579d3e1b6e8ca 100644 (file)
@@ -1446,14 +1446,14 @@ static int nes_netdev_set_pauseparam(struct net_device *netdev,
                                NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
                u32temp |= NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
                nes_write_indexed(nesdev,
-                               NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE + (nesdev->mac_index*0x200), u32temp);
+                               NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
                nesdev->disable_tx_flow_control = 0;
        } else if ((et_pauseparam->tx_pause == 0) && (nesdev->disable_tx_flow_control == 0)) {
                u32temp = nes_read_indexed(nesdev,
                                NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
                u32temp &= ~NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
                nes_write_indexed(nesdev,
-                               NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE + (nesdev->mac_index*0x200), u32temp);
+                               NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
                nesdev->disable_tx_flow_control = 1;
        }
        if ((et_pauseparam->rx_pause == 1) && (nesdev->disable_rx_flow_control == 1)) {
index a9b025f4147a0692845d2407b6efbd9220f9837e..ab6982056518e3c086c57738f360574b211108aa 100644 (file)
@@ -1599,11 +1599,14 @@ EXPORT_SYMBOL(input_free_device);
  * @dev: input device supporting MT events and finger tracking
  * @num_slots: number of slots used by the device
  *
- * This function allocates all necessary memory for MT slot handling
- * in the input device, and adds ABS_MT_SLOT to the device capabilities.
+ * This function allocates all necessary memory for MT slot handling in the
+ * input device, and adds ABS_MT_SLOT to the device capabilities. All slots
+ * are initially marked as unused by setting ABS_MT_TRACKING_ID to -1.
  */
 int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots)
 {
+       int i;
+
        if (!num_slots)
                return 0;
 
@@ -1614,6 +1617,10 @@ int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots)
        dev->mtsize = num_slots;
        input_set_abs_params(dev, ABS_MT_SLOT, 0, num_slots - 1, 0, 0);
 
+       /* Mark slots as 'unused' */
+       for (i = 0; i < num_slots; i++)
+               dev->mt[i].abs[ABS_MT_TRACKING_ID - ABS_MT_FIRST] = -1;
+
        return 0;
 }
 EXPORT_SYMBOL(input_mt_create_slots);
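The updated kerneldoc above spells out the input_mt_create_slots() contract: it allocates the per-slot state, advertises ABS_MT_SLOT, and (with this patch) pre-marks every slot unused by setting its ABS_MT_TRACKING_ID to -1. A rough sketch of how a driver's probe path would consume that API in this kernel generation follows; the device name, axis ranges and slot count are illustrative, and error handling is trimmed to the essentials:

#include <linux/input.h>

#define EXAMPLE_NUM_SLOTS 10            /* illustrative: ten trackable contacts */

/* Hypothetical helper showing the registration order a slots-based
 * multitouch driver would use. */
static struct input_dev *example_register_mt_device(void)
{
        struct input_dev *dev = input_allocate_device();
        int err;

        if (!dev)
                return NULL;

        dev->name = "example-mt-touchscreen";
        input_set_abs_params(dev, ABS_MT_POSITION_X, 0, 1023, 0, 0);
        input_set_abs_params(dev, ABS_MT_POSITION_Y, 0, 767, 0, 0);

        /* Allocates dev->mt, advertises ABS_MT_SLOT and, after this patch,
         * marks every slot unused (ABS_MT_TRACKING_ID == -1). */
        err = input_mt_create_slots(dev, EXAMPLE_NUM_SLOTS);
        if (err)
                goto err_free;

        err = input_register_device(dev);
        if (err)
                goto err_free;

        return dev;

err_free:
        input_free_device(dev);
        return NULL;
}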
index dcc86b97a1535c6a3ba098543d691ee249d56cf5..19fa94af207a6fa37fbadc7a3d39a65ea2010482 100644 (file)
@@ -232,13 +232,13 @@ static void hil_dev_handle_ptr_events(struct hil_dev *ptr)
                if (absdev) {
                        val = lo + (hi << 8);
 #ifdef TABLET_AUTOADJUST
-                       if (val < input_abs_min(dev, ABS_X + i))
+                       if (val < input_abs_get_min(dev, ABS_X + i))
                                input_abs_set_min(dev, ABS_X + i, val);
-                       if (val > input_abs_max(dev, ABS_X + i))
+                       if (val > input_abs_get_max(dev, ABS_X + i))
                                input_abs_set_max(dev, ABS_X + i, val);
 #endif
                        if (i % 3)
-                               val = input_abs_max(dev, ABS_X + i) - val;
+                               val = input_abs_get_max(dev, ABS_X + i) - val;
                        input_report_abs(dev, ABS_X + i, val);
                } else {
                        val = (int) (((int8_t) lo) | ((int8_t) hi << 8));
@@ -388,11 +388,11 @@ static void hil_dev_pointer_setup(struct hil_dev *ptr)
 
 #ifdef TABLET_AUTOADJUST
                for (i = 0; i < ABS_MAX; i++) {
-                       int diff = input_abs_max(input_dev, ABS_X + i) / 10;
+                       int diff = input_abs_get_max(input_dev, ABS_X + i) / 10;
                        input_abs_set_min(input_dev, ABS_X + i,
-                               input_abs_min(input_dev, ABS_X + i) + diff)
+                               input_abs_get_min(input_dev, ABS_X + i) + diff);
                        input_abs_set_max(input_dev, ABS_X + i,
-                               input_abs_max(input_dev, ABS_X + i) - diff)
+                               input_abs_get_max(input_dev, ABS_X + i) - diff);
                }
 #endif
 
index 0e53b3bc39afe4d7925d4aec0cea66174dfc1e5e..f32404f991893ef4584ab5540d213945cbb0c738 100644 (file)
@@ -567,8 +567,6 @@ static int __devexit pxa27x_keypad_remove(struct platform_device *pdev)
        clk_put(keypad->clk);
 
        input_unregister_device(keypad->input_dev);
-       input_free_device(keypad->input_dev);
-
        iounmap(keypad->mmio_base);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
index bb53fd33cd1cb64e5d3c108b84eea4e1f11ccff4..0d4266a533a524564adcc85cc546736248753628 100644 (file)
@@ -811,6 +811,8 @@ static struct miscdevice uinput_misc = {
        .minor          = UINPUT_MINOR,
        .name           = UINPUT_NAME,
 };
+MODULE_ALIAS_MISCDEV(UINPUT_MINOR);
+MODULE_ALIAS("devname:" UINPUT_NAME);
 
 static int __init uinput_init(void)
 {
index ea67c49146a3a03280ee8719c362c41d8033c743..b952317639116f2f18a7bc1f41ff5887c17f2a49 100644 (file)
@@ -337,10 +337,14 @@ static void report_finger_data(struct input_dev *input,
                               const struct bcm5974_config *cfg,
                               const struct tp_finger *f)
 {
-       input_report_abs(input, ABS_MT_TOUCH_MAJOR, raw2int(f->force_major));
-       input_report_abs(input, ABS_MT_TOUCH_MINOR, raw2int(f->force_minor));
-       input_report_abs(input, ABS_MT_WIDTH_MAJOR, raw2int(f->size_major));
-       input_report_abs(input, ABS_MT_WIDTH_MINOR, raw2int(f->size_minor));
+       input_report_abs(input, ABS_MT_TOUCH_MAJOR,
+                        raw2int(f->force_major) << 1);
+       input_report_abs(input, ABS_MT_TOUCH_MINOR,
+                        raw2int(f->force_minor) << 1);
+       input_report_abs(input, ABS_MT_WIDTH_MAJOR,
+                        raw2int(f->size_major) << 1);
+       input_report_abs(input, ABS_MT_WIDTH_MINOR,
+                        raw2int(f->size_minor) << 1);
        input_report_abs(input, ABS_MT_ORIENTATION,
                         MAX_FINGER_ORIENTATION - raw2int(f->orientation));
        input_report_abs(input, ABS_MT_POSITION_X, raw2int(f->abs_x));
index 83c24cca234a5849545adeda916b0e4a136769fb..d528a2dba06418eb5e80097552463e9abec5db05 100644 (file)
@@ -138,8 +138,8 @@ static void mousedev_touchpad_event(struct input_dev *dev,
 
                fx(0) = value;
                if (mousedev->touch && mousedev->pkt_count >= 2) {
-                       size = input_abs_get_min(dev, ABS_X) -
-                                       input_abs_get_max(dev, ABS_X);
+                       size = input_abs_get_max(dev, ABS_X) -
+                                       input_abs_get_min(dev, ABS_X);
                        if (size == 0)
                                size = 256 * 2;
 
@@ -155,8 +155,8 @@ static void mousedev_touchpad_event(struct input_dev *dev,
                fy(0) = value;
                if (mousedev->touch && mousedev->pkt_count >= 2) {
                        /* use X size for ABS_Y to keep the same scale */
-                       size = input_abs_get_min(dev, ABS_X) -
-                                       input_abs_get_max(dev, ABS_X);
+                       size = input_abs_get_max(dev, ABS_X) -
+                                       input_abs_get_min(dev, ABS_X);
                        if (size == 0)
                                size = 256 * 2;
 
index 46e4ba0b92463184d5e398345e50ca68809628b2..f585131604806f531f91c48ebb867919339fd80f 100644 (file)
@@ -1485,8 +1485,8 @@ static int __init i8042_init(void)
 
 static void __exit i8042_exit(void)
 {
-       platform_driver_unregister(&i8042_driver);
        platform_device_unregister(i8042_platform_device);
+       platform_driver_unregister(&i8042_driver);
        i8042_platform_exit();
 
        panic_blink = NULL;
index 40d77ba8fdc138ff98b0320b877358a5302d7a28..6e29badb969e44192be85080caf801851c3d06fa 100644 (file)
@@ -243,10 +243,10 @@ static int wacom_graphire_irq(struct wacom_wac *wacom)
                        if (features->type == WACOM_G4 ||
                                        features->type == WACOM_MO) {
                                input_report_abs(input, ABS_DISTANCE, data[6] & 0x3f);
-                               rw = (signed)(data[7] & 0x04) - (data[7] & 0x03);
+                               rw = (data[7] & 0x04) - (data[7] & 0x03);
                        } else {
                                input_report_abs(input, ABS_DISTANCE, data[7] & 0x3f);
-                               rw = -(signed)data[6];
+                               rw = -(signed char)data[6];
                        }
                        input_report_rel(input, REL_WHEEL, rw);
                }
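The Graphire wheel fix above is a sign-extension issue: data[6] is an unsigned byte, so the old -(signed)data[6] merely negated a value in 0..255, whereas the intent is to treat the byte as a signed 8-bit delta before negating. A small standalone illustration with a made-up report byte:

#include <stdio.h>

int main(void)
{
        unsigned char data6 = 0xff;          /* device encodes a wheel delta of -1 */

        int old_rw = -(signed)data6;         /* -(255) == -255: wildly off     */
        int new_rw = -(signed char)data6;    /* -(-1)  ==    1: intended value */

        printf("old rw = %d, new rw = %d\n", old_rw, new_rw);
        return 0;
}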
index 5dbcbe3a54a610d81d61b6e0b4d227af7751636d..b99b906ea9b1bfef709bec43e61237a74b177949 100644 (file)
@@ -36,12 +36,13 @@ config ISDN_DRV_AVMB1_T1ISA
 
 config ISDN_DRV_AVMB1_B1PCMCIA
        tristate "AVM B1/M1/M2 PCMCIA support"
+       depends on PCMCIA
        help
          Enable support for the PCMCIA version of the AVM B1 card.
 
 config ISDN_DRV_AVMB1_AVM_CS
        tristate "AVM B1/M1/M2 PCMCIA cs module"
-       depends on ISDN_DRV_AVMB1_B1PCMCIA && PCMCIA
+       depends on ISDN_DRV_AVMB1_B1PCMCIA
        help
          Enable the PCMCIA client driver for the AVM B1/M1/M2
          PCMCIA cards.
index 35bc2737412fddaa3323e99ea34a3dd3de762355..2d17e76066bd0b640890d8a0e0ddc5ac7170ec85 100644 (file)
@@ -45,6 +45,7 @@
 #include <linux/syscalls.h>
 #include <linux/suspend.h>
 #include <linux/cpu.h>
+#include <linux/compat.h>
 #include <asm/prom.h>
 #include <asm/machdep.h>
 #include <asm/io.h>
@@ -2349,11 +2350,52 @@ static long pmu_unlocked_ioctl(struct file *filp,
        return ret;
 }
 
+#ifdef CONFIG_COMPAT
+#define PMU_IOC_GET_BACKLIGHT32        _IOR('B', 1, compat_size_t)
+#define PMU_IOC_SET_BACKLIGHT32        _IOW('B', 2, compat_size_t)
+#define PMU_IOC_GET_MODEL32    _IOR('B', 3, compat_size_t)
+#define PMU_IOC_HAS_ADB32      _IOR('B', 4, compat_size_t)
+#define PMU_IOC_CAN_SLEEP32    _IOR('B', 5, compat_size_t)
+#define PMU_IOC_GRAB_BACKLIGHT32 _IOR('B', 6, compat_size_t)
+
+static long compat_pmu_ioctl (struct file *filp, u_int cmd, u_long arg)
+{
+       switch (cmd) {
+       case PMU_IOC_SLEEP:
+               break;
+       case PMU_IOC_GET_BACKLIGHT32:
+               cmd = PMU_IOC_GET_BACKLIGHT;
+               break;
+       case PMU_IOC_SET_BACKLIGHT32:
+               cmd = PMU_IOC_SET_BACKLIGHT;
+               break;
+       case PMU_IOC_GET_MODEL32:
+               cmd = PMU_IOC_GET_MODEL;
+               break;
+       case PMU_IOC_HAS_ADB32:
+               cmd = PMU_IOC_HAS_ADB;
+               break;
+       case PMU_IOC_CAN_SLEEP32:
+               cmd = PMU_IOC_CAN_SLEEP;
+               break;
+       case PMU_IOC_GRAB_BACKLIGHT32:
+               cmd = PMU_IOC_GRAB_BACKLIGHT;
+               break;
+       default:
+               return -ENOIOCTLCMD;
+       }
+       return pmu_unlocked_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
 static const struct file_operations pmu_device_fops = {
        .read           = pmu_read,
        .write          = pmu_write,
        .poll           = pmu_fpoll,
        .unlocked_ioctl = pmu_unlocked_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl   = compat_pmu_ioctl,
+#endif
        .open           = pmu_open,
        .release        = pmu_release,
 };
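The via-pmu hunk above needs a compat handler because these ioctl numbers encode the size of their argument type: the native PMU commands are built with size_t, so a 32-bit process (where compat_size_t is 4 bytes) computes a different command value than the 64-bit kernel expects, and compat_pmu_ioctl() translates the 32-bit numbers back before forwarding to pmu_unlocked_ioctl(). A tiny user-space illustration of why the two encodings differ, using the same _IOR() macro (the printed values depend on the host's ioctl encoding):

#include <stdio.h>
#include <stdint.h>
#include <linux/ioctl.h>   /* _IOR(): type, nr and argument size packed into one number */

int main(void)
{
        /* Same 'B'/1 ioctl pair, different argument-size field. */
        unsigned long as_32bit_app = _IOR('B', 1, uint32_t);    /* compat_size_t is 4 bytes */
        unsigned long as_64bit_kernel = _IOR('B', 1, uint64_t); /* size_t is 8 bytes */

        printf("command computed by a 32-bit caller: %#lx\n", as_32bit_app);
        printf("command expected by a 64-bit kernel: %#lx\n", as_64bit_kernel);
        /* compat_pmu_ioctl() maps the first form onto the second. */
        return 0;
}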
diff --git a/drivers/md/.gitignore b/drivers/md/.gitignore
deleted file mode 100644 (file)
index a7afec6..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-mktables
-raid6altivec*.c
-raid6int*.c
-raid6tables.c
index 1ba1e122e948931bf16a504ebe662e16e1404a8f..ed4900ade93a4d80b84784e76aafc406689020b2 100644 (file)
@@ -1542,8 +1542,7 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector)
                   atomic_read(&bitmap->mddev->recovery_active) == 0);
 
        bitmap->mddev->curr_resync_completed = bitmap->mddev->curr_resync;
-       if (bitmap->mddev->persistent)
-               set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
+       set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
        sector &= ~((1ULL << CHUNK_BLOCK_SHIFT(bitmap)) - 1);
        s = 0;
        while (s < sector && s < bitmap->mddev->resync_max_sectors) {
index c148b630215484f9689bf9257d6acf286685c37a..43cf9cc9c1df3650c228ce01920645fb474f105a 100644 (file)
@@ -2167,9 +2167,9 @@ repeat:
                                rdev->recovery_offset = mddev->curr_resync_completed;
 
        }       
-       if (mddev->external || !mddev->persistent) {
-               clear_bit(MD_CHANGE_DEVS, &mddev->flags);
+       if (!mddev->persistent) {
                clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
+               clear_bit(MD_CHANGE_DEVS, &mddev->flags);
                wake_up(&mddev->sb_wait);
                return;
        }
@@ -2178,7 +2178,6 @@ repeat:
 
        mddev->utime = get_seconds();
 
-       set_bit(MD_CHANGE_PENDING, &mddev->flags);
        if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
                force_change = 1;
        if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
@@ -3371,7 +3370,7 @@ array_state_show(mddev_t *mddev, char *page)
                case 0:
                        if (mddev->in_sync)
                                st = clean;
-                       else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
+                       else if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
                                st = write_pending;
                        else if (mddev->safemode)
                                st = active_idle;
@@ -3452,9 +3451,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
                                        mddev->in_sync = 1;
                                        if (mddev->safemode == 1)
                                                mddev->safemode = 0;
-                                       if (mddev->persistent)
-                                               set_bit(MD_CHANGE_CLEAN,
-                                                       &mddev->flags);
+                                       set_bit(MD_CHANGE_CLEAN, &mddev->flags);
                                }
                                err = 0;
                        } else
@@ -3466,8 +3463,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
        case active:
                if (mddev->pers) {
                        restart_array(mddev);
-                       if (mddev->external)
-                               clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
+                       clear_bit(MD_CHANGE_PENDING, &mddev->flags);
                        wake_up(&mddev->sb_wait);
                        err = 0;
                } else {
@@ -6572,6 +6568,7 @@ void md_write_start(mddev_t *mddev, struct bio *bi)
                if (mddev->in_sync) {
                        mddev->in_sync = 0;
                        set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+                       set_bit(MD_CHANGE_PENDING, &mddev->flags);
                        md_wakeup_thread(mddev->thread);
                        did_change = 1;
                }
@@ -6580,7 +6577,6 @@ void md_write_start(mddev_t *mddev, struct bio *bi)
        if (did_change)
                sysfs_notify_dirent_safe(mddev->sysfs_state);
        wait_event(mddev->sb_wait,
-                  !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
                   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
 }
 
@@ -6616,6 +6612,7 @@ int md_allow_write(mddev_t *mddev)
        if (mddev->in_sync) {
                mddev->in_sync = 0;
                set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+               set_bit(MD_CHANGE_PENDING, &mddev->flags);
                if (mddev->safemode_delay &&
                    mddev->safemode == 0)
                        mddev->safemode = 1;
@@ -6625,7 +6622,7 @@ int md_allow_write(mddev_t *mddev)
        } else
                spin_unlock_irq(&mddev->write_lock);
 
-       if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
+       if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
                return -EAGAIN;
        else
                return 0;
@@ -6823,8 +6820,7 @@ void md_do_sync(mddev_t *mddev)
                                   atomic_read(&mddev->recovery_active) == 0);
                        mddev->curr_resync_completed =
                                mddev->curr_resync;
-                       if (mddev->persistent)
-                               set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+                       set_bit(MD_CHANGE_CLEAN, &mddev->flags);
                        sysfs_notify(&mddev->kobj, NULL, "sync_completed");
                }
 
@@ -7103,8 +7099,7 @@ void md_check_recovery(mddev_t *mddev)
                            mddev->recovery_cp == MaxSector) {
                                mddev->in_sync = 1;
                                did_change = 1;
-                               if (mddev->persistent)
-                                       set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+                               set_bit(MD_CHANGE_CLEAN, &mddev->flags);
                        }
                        if (mddev->safemode == 1)
                                mddev->safemode = 0;
index a953fe2808ae7ef17b1046b6cff6bc6276c1b8c1..3931299788dcefe14385980c584321289cedc8db 100644 (file)
@@ -140,7 +140,7 @@ struct mddev_s
        unsigned long                   flags;
 #define MD_CHANGE_DEVS 0       /* Some device status has changed */
 #define MD_CHANGE_CLEAN 1      /* transition to or from 'clean' */
-#define MD_CHANGE_PENDING 2    /* superblock update in progress */
+#define MD_CHANGE_PENDING 2    /* switch from 'clean' to 'active' in progress */
 
        int                             suspended;
        atomic_t                        active_io;
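The md hunks above untangle two meanings that used to share MD_CHANGE_CLEAN: the bit now only records that the clean/active state must reach the superblock, while MD_CHANGE_PENDING marks a clean-to-active transition that is still in flight, which is why md_write_start() and md_allow_write() set both bits but wait (or return -EAGAIN) on MD_CHANGE_PENDING alone. Reduced to its shape, the handshake looks roughly like this; it is a condensed paraphrase of the hunks above, not compilable on its own, and the release side mirrors the array_state_store() 'active' branch:

/* Writer about to dirty the array (cf. md_write_start above). */
if (mddev->in_sync) {
        mddev->in_sync = 0;
        set_bit(MD_CHANGE_CLEAN, &mddev->flags);    /* superblock is now stale    */
        set_bit(MD_CHANGE_PENDING, &mddev->flags);  /* transition still in flight */
        md_wakeup_thread(mddev->thread);            /* request a metadata update  */
}
wait_event(mddev->sb_wait,
           !test_bit(MD_CHANGE_PENDING, &mddev->flags));

/* Whoever completes the transition (the metadata update path, or the
 * array_state_store() 'active' branch above) releases the waiters. */
clear_bit(MD_CHANGE_PENDING, &mddev->flags);
wake_up(&mddev->sb_wait);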
index decdeda840d01158f070a2dcf8778c1c749d41a8..fd0830ed10d81cccb53b574336daa0942353b96c 100644 (file)
@@ -1,6 +1,6 @@
 config MANTIS_CORE
        tristate "Mantis/Hopper PCI bridge based devices"
-       depends on PCI && I2C && INPUT
+       depends on PCI && I2C && INPUT && IR_CORE
 
        help
          Support for PCI cards based on the Mantis and Hopper PCi bridge.
index bd2755e8d9a3d327f7ae54bedd0e7820f85ea5fe..f332c52968b75d7528ee8c5f21eaf561a76d373d 100644 (file)
@@ -362,9 +362,8 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
                goto err;
        }
 
-       err = mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid);
-
-       if (!err) {
+       if (ocr & R4_MEMORY_PRESENT
+           && mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid) == 0) {
                card->type = MMC_TYPE_SD_COMBO;
 
                if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO ||
index 5f3a599ead07bbdfae11f7abc0198285e743db81..87226cd202a5086f7d90699f0a43d6d4e99725a1 100644 (file)
@@ -66,6 +66,7 @@
 #include <linux/clk.h>
 #include <linux/atmel_pdc.h>
 #include <linux/gfp.h>
+#include <linux/highmem.h>
 
 #include <linux/mmc/host.h>
 
index 9a68ff4353a2e83878fce5429afe9351140daf57..5a950b16d9e629dc3d08041bda547b165cadee76 100644 (file)
@@ -148,11 +148,12 @@ static int imxmci_start_clock(struct imxmci_host *host)
 
                while (delay--) {
                        reg = readw(host->base + MMC_REG_STATUS);
-                       if (reg & STATUS_CARD_BUS_CLK_RUN)
+                       if (reg & STATUS_CARD_BUS_CLK_RUN) {
                                /* Check twice before cut */
                                reg = readw(host->base + MMC_REG_STATUS);
                                if (reg & STATUS_CARD_BUS_CLK_RUN)
                                        return 0;
+                       }
 
                        if (test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events))
                                return 0;
index 4a8776f8afdd690048c69de91e755d35d2c884a5..4526d2791f2990229acbe9ef0f5c88286819807f 100644 (file)
@@ -2305,7 +2305,6 @@ static int omap_hsmmc_suspend(struct device *dev)
        int ret = 0;
        struct platform_device *pdev = to_platform_device(dev);
        struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
-       pm_message_t state = PMSG_SUSPEND; /* unused by MMC core */
 
        if (host && host->suspended)
                return 0;
@@ -2324,8 +2323,8 @@ static int omap_hsmmc_suspend(struct device *dev)
                        }
                }
                cancel_work_sync(&host->mmc_carddetect_work);
-               mmc_host_enable(host->mmc);
                ret = mmc_suspend_host(host->mmc);
+               mmc_host_enable(host->mmc);
                if (ret == 0) {
                        omap_hsmmc_disable_irq(host);
                        OMAP_HSMMC_WRITE(host->base, HCTL,
index 2e16e0a90a5e1a5d8d3d7487727baa39d701b8fb..976330de379ecc78cbe91f19c4bd9495d4722dae 100644 (file)
@@ -1600,7 +1600,7 @@ static int __devinit s3cmci_probe(struct platform_device *pdev)
        host->pio_active        = XFER_NONE;
 
 #ifdef CONFIG_MMC_S3C_PIODMA
-       host->dodma             = host->pdata->dma;
+       host->dodma             = host->pdata->use_dma;
 #endif
 
        host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
index ee7d0a5a51c496cb92b04e7b9bc8172f8a233875..69d98e3bf6abaa3c784d1d70f171387b3142eb64 100644 (file)
@@ -164,6 +164,7 @@ tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
 static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
 {
        struct mmc_data *data = host->data;
+       void *sg_virt;
        unsigned short *buf;
        unsigned int count;
        unsigned long flags;
@@ -173,8 +174,8 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
                return;
        }
 
-       buf = (unsigned short *)(tmio_mmc_kmap_atomic(host, &flags) +
-             host->sg_off);
+       sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
+       buf = (unsigned short *)(sg_virt + host->sg_off);
 
        count = host->sg_ptr->length - host->sg_off;
        if (count > data->blksz)
@@ -191,7 +192,7 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
 
        host->sg_off += count;
 
-       tmio_mmc_kunmap_atomic(host, &flags);
+       tmio_mmc_kunmap_atomic(sg_virt, &flags);
 
        if (host->sg_off == host->sg_ptr->length)
                tmio_mmc_next_sg(host);
index 64f7d5dfc106ac7b39e37842c5eca9a898dbbb06..0fedc78e3ea5c4613767d7d31e534143b4bf1780 100644 (file)
 
 #define ack_mmc_irqs(host, i) \
        do { \
-               u32 mask;\
-               mask  = sd_ctrl_read32((host), CTL_STATUS); \
-               mask &= ~((i) & TMIO_MASK_IRQ); \
-               sd_ctrl_write32((host), CTL_STATUS, mask); \
+               sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
        } while (0)
 
 
@@ -177,19 +174,17 @@ static inline int tmio_mmc_next_sg(struct tmio_mmc_host *host)
        return --host->sg_len;
 }
 
-static inline char *tmio_mmc_kmap_atomic(struct tmio_mmc_host *host,
+static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
        unsigned long *flags)
 {
-       struct scatterlist *sg = host->sg_ptr;
-
        local_irq_save(*flags);
        return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
 }
 
-static inline void tmio_mmc_kunmap_atomic(struct tmio_mmc_host *host,
+static inline void tmio_mmc_kunmap_atomic(void *virt,
        unsigned long *flags)
 {
-       kunmap_atomic(sg_page(host->sg_ptr), KM_BIO_SRC_IRQ);
+       kunmap_atomic(virt, KM_BIO_SRC_IRQ);
        local_irq_restore(*flags);
 }
 
index 2246f154e2f7a6f57317cf2232070f39ada5d4dc..61f6e5e404582cb26cae8cebd563f0a61869d9f2 100644 (file)
@@ -6,7 +6,7 @@ config MTD_UBI_DEBUG
        depends on SYSFS
        depends on MTD_UBI
        select DEBUG_FS
-       select KALLSYMS_ALL
+       select KALLSYMS_ALL if KALLSYMS && DEBUG_KERNEL
        help
          This option enables UBI debugging.
 
index 4dfa6b90c21c30566d35939d7d3837327495f208..3d2d1a69e9a084b01c43ed2b23c75679cdebcaef 100644 (file)
@@ -798,18 +798,18 @@ static int rename_volumes(struct ubi_device *ubi,
                        goto out_free;
                }
 
-               re = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
-               if (!re) {
+               re1 = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
+               if (!re1) {
                        err = -ENOMEM;
                        ubi_close_volume(desc);
                        goto out_free;
                }
 
-               re->remove = 1;
-               re->desc = desc;
-               list_add(&re->list, &rename_list);
+               re1->remove = 1;
+               re1->desc = desc;
+               list_add(&re1->list, &rename_list);
                dbg_msg("will remove volume %d, name \"%s\"",
-                       re->desc->vol->vol_id, re->desc->vol->name);
+                       re1->desc->vol->vol_id, re1->desc->vol->name);
        }
 
        mutex_lock(&ubi->device_mutex);
index 372a15ac9995a64c7fc8f8084c75cb59ba4251d1..69b52e9c9489f961a8c11245b7fe90258078837e 100644 (file)
@@ -843,7 +843,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
                case UBI_COMPAT_DELETE:
                        ubi_msg("\"delete\" compatible internal volume %d:%d"
                                " found, will remove it", vol_id, lnum);
-                       err = add_to_list(si, pnum, ec, &si->corr);
+                       err = add_to_list(si, pnum, ec, &si->erase);
                        if (err)
                                return err;
                        return 0;
index ee7b1d8fbb92c9d03fbe8026100378e100b4ffc1..97a435672eafac554bd76eeb0a8efe4422602a5e 100644 (file)
@@ -1212,7 +1212,8 @@ int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
 retry:
        spin_lock(&ubi->wl_lock);
        e = ubi->lookuptbl[pnum];
-       if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub)) {
+       if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
+                                  in_wl_tree(e, &ubi->erroneous)) {
                spin_unlock(&ubi->wl_lock);
                return 0;
        }
index c754d88e5ec92d0af82d80094a4576256a50dc61..a045559c81cf09e37bce9dbef9c56f1325523269 100644 (file)
@@ -633,7 +633,8 @@ struct vortex_private {
                open:1,
                medialock:1,
                must_free_region:1,                             /* Flag: if zero, Cardbus owns the I/O region */
-               large_frames:1;                 /* accept large frames */
+               large_frames:1,                 /* accept large frames */
+               handling_irq:1;                 /* private in_irq indicator */
        int drv_flags;
        u16 status_enable;
        u16 intr_enable;
@@ -646,7 +647,7 @@ struct vortex_private {
        u16 io_size;                                            /* Size of PCI region (for release_region) */
 
        /* Serialises access to hardware other than MII and variables below.
-        * The lock hierarchy is rtnl_lock > lock > mii_lock > window_lock. */
+        * The lock hierarchy is rtnl_lock > {lock, mii_lock} > window_lock. */
        spinlock_t lock;
 
        spinlock_t mii_lock;            /* Serialises access to MII */
@@ -2133,6 +2134,15 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
                           dev->name, vp->cur_tx);
        }
 
+       /*
+        * We can't allow a recursion from our interrupt handler back into the
+        * tx routine, as they take the same spin lock, and that causes
+        * deadlock.  Just return NETDEV_TX_BUSY and let the stack try again in
+        * a bit
+        */
+       if (vp->handling_irq)
+               return NETDEV_TX_BUSY;
+
        if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
                if (vortex_debug > 0)
                        pr_warning("%s: BUG! Tx Ring full, refusing to send buffer.\n",
@@ -2335,11 +2345,13 @@ boomerang_interrupt(int irq, void *dev_id)
 
        ioaddr = vp->ioaddr;
 
+
        /*
         * It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout
         * and boomerang_start_xmit
         */
        spin_lock(&vp->lock);
+       vp->handling_irq = 1;
 
        status = ioread16(ioaddr + EL3_STATUS);
 
@@ -2447,6 +2459,7 @@ boomerang_interrupt(int irq, void *dev_id)
                pr_debug("%s: exiting interrupt, status %4.4x.\n",
                           dev->name, status);
 handler_exit:
+       vp->handling_irq = 0;
        spin_unlock(&vp->lock);
        return IRQ_HANDLED;
 }
@@ -2971,7 +2984,6 @@ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
        int err;
        struct vortex_private *vp = netdev_priv(dev);
-       unsigned long flags;
        pci_power_t state = 0;
 
        if(VORTEX_PCI(vp))
@@ -2981,9 +2993,7 @@ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 
        if(state != 0)
                pci_set_power_state(VORTEX_PCI(vp), PCI_D0);
-       spin_lock_irqsave(&vp->lock, flags);
        err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL);
-       spin_unlock_irqrestore(&vp->lock, flags);
        if(state != 0)
                pci_set_power_state(VORTEX_PCI(vp), state);
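The 3c59x hunks above explain their own motivation: boomerang_interrupt() and boomerang_start_xmit() share vp->lock, and a transmit attempted from inside the interrupt path would deadlock on it, so the handler now sets a handling_irq flag around its critical section and the xmit path backs off with NETDEV_TX_BUSY while the flag is set. Below is a stripped-down, runnable analogue of that guard, with plain ints standing in for the spinlock-protected driver state and invented function names:

#include <stdio.h>

#define NETDEV_TX_OK   0
#define NETDEV_TX_BUSY 1

/* Stand-in for the handling_irq bit that 3c59x keeps in vortex_private. */
static int handling_irq;

static int example_start_xmit(void)
{
        if (handling_irq)               /* the IRQ path already holds the lock */
                return NETDEV_TX_BUSY;  /* let the stack retry shortly */
        /* ... take the lock, queue the frame, drop the lock ... */
        return NETDEV_TX_OK;
}

static void example_interrupt(void)
{
        /* spin_lock(&vp->lock) in the real handler */
        handling_irq = 1;
        /* ... servicing completions may call back into the xmit path ... */
        if (example_start_xmit() == NETDEV_TX_BUSY)
                printf("xmit deferred: IRQ handler owns the lock\n");
        handling_irq = 0;
        /* spin_unlock(&vp->lock) */
}

int main(void)
{
        example_interrupt();
        printf("xmit outside the IRQ path returns %d\n", example_start_xmit());
        return 0;
}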
 
index 5a6895320b48a8d7774acfb4e6b239713cc6bd9d..2cc81a54cbf322a49ccbf474f5d41f654faf109d 100644 (file)
@@ -928,6 +928,16 @@ config SMC91X
          The module will be called smc91x.  If you want to compile it as a
          module, say M here and read <file:Documentation/kbuild/modules.txt>.
 
+config PXA168_ETH
+       tristate "Marvell pxa168 ethernet support"
+       depends on CPU_PXA168
+       select PHYLIB
+       help
+         This driver supports the pxa168 Ethernet ports.
+
+         To compile this driver as a module, choose M here. The module
+         will be called pxa168_eth.
+
 config NET_NETX
        tristate "NetX Ethernet support"
        select MII
index 56e8c27f77cebe9ef3b9ac3c1284f7e2975f8448..3e8f150c4b14b0034edb3632b9de33b1338b9635 100644 (file)
@@ -244,6 +244,7 @@ obj-$(CONFIG_MYRI10GE) += myri10ge/
 obj-$(CONFIG_SMC91X) += smc91x.o
 obj-$(CONFIG_SMC911X) += smc911x.o
 obj-$(CONFIG_SMSC911X) += smsc911x.o
+obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
 obj-$(CONFIG_BFIN_MAC) += bfin_mac.o
 obj-$(CONFIG_DM9000) += dm9000.o
 obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o
index 53af9c93e75c3bca661abfb8dce7aabe96589f52..0c2d96ed561c46ebd63f3c03c119c0751b5204e1 100644 (file)
@@ -20,8 +20,8 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION      "1.52.53-3"
-#define DRV_MODULE_RELDATE      "2010/18/04"
+#define DRV_MODULE_VERSION      "1.52.53-4"
+#define DRV_MODULE_RELDATE      "2010/16/08"
 #define BNX2X_BC_VER            0x040200
 
 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
index b4ec2b02a465cf7d822f23700c119d6e93a2121b..f8c3f08e4ce73fb4d5d37739fa7dd18cfc557da7 100644 (file)
@@ -4328,10 +4328,12 @@ static int bnx2x_init_port(struct bnx2x *bp)
                val |= aeu_gpio_mask;
                REG_WR(bp, offset, val);
                }
+               bp->port.need_hw_lock = 1;
                break;
 
-       case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+               bp->port.need_hw_lock = 1;
+       case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
                /* add SPIO 5 to group 0 */
                {
                u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
@@ -4341,7 +4343,10 @@ static int bnx2x_init_port(struct bnx2x *bp)
                REG_WR(bp, reg_addr, val);
                }
                break;
-
+       case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
+       case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
+               bp->port.need_hw_lock = 1;
+               break;
        default:
                break;
        }
index 631a6242b0112aec566b9df0916eb9b1d1552be5..75bfc3a9d95f3f118e6eae1cdc649fd0a442baeb 100644 (file)
@@ -15,7 +15,7 @@ config CAIF_TTY
 
 config CAIF_SPI_SLAVE
        tristate "CAIF SPI transport driver for slave interface"
-       depends on CAIF
+       depends on CAIF && HAS_DMA
        default n
        ---help---
        The CAIF Link layer SPI Protocol driver for Slave SPI interface.
index a4a0d2b6eb1c60e116811b31378a426ad00a44e2..d3d4a57e24505f9c36af9ef139bfc9133788a4bd 100644 (file)
@@ -936,12 +936,14 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
        ew32(IMC, 0xffffffff);
        icr = er32(ICR);
 
-       /* Install any alternate MAC address into RAR0 */
-       ret_val = e1000_check_alt_mac_addr_generic(hw);
-       if (ret_val)
-               return ret_val;
+       if (hw->mac.type == e1000_82571) {
+               /* Install any alternate MAC address into RAR0 */
+               ret_val = e1000_check_alt_mac_addr_generic(hw);
+               if (ret_val)
+                       return ret_val;
 
-       e1000e_set_laa_state_82571(hw, true);
+               e1000e_set_laa_state_82571(hw, true);
+       }
 
        /* Reinitialize the 82571 serdes link state machine */
        if (hw->phy.media_type == e1000_media_type_internal_serdes)
@@ -1618,14 +1620,16 @@ static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
 {
        s32 ret_val = 0;
 
-       /*
-        * If there's an alternate MAC address place it in RAR0
-        * so that it will override the Si installed default perm
-        * address.
-        */
-       ret_val = e1000_check_alt_mac_addr_generic(hw);
-       if (ret_val)
-               goto out;
+       if (hw->mac.type == e1000_82571) {
+               /*
+                * If there's an alternate MAC address place it in RAR0
+                * so that it will override the Si installed default perm
+                * address.
+                */
+               ret_val = e1000_check_alt_mac_addr_generic(hw);
+               if (ret_val)
+                       goto out;
+       }
 
        ret_val = e1000_read_mac_addr_generic(hw);
 
@@ -1833,6 +1837,7 @@ struct e1000_info e1000_82573_info = {
                                  | FLAG_HAS_SMART_POWER_DOWN
                                  | FLAG_HAS_AMT
                                  | FLAG_HAS_SWSM_ON_LOAD,
+       .flags2                 = FLAG2_DISABLE_ASPM_L1,
        .pba                    = 20,
        .max_hw_frame_size      = ETH_FRAME_LEN + ETH_FCS_LEN,
        .get_variants           = e1000_get_variants_82571,
index 307a72f483ee644fb1199776e4b0521739badb46..93b3bedae8d2b2457a88f52b4664fe1b4b6b7dd8 100644 (file)
 #define E1000_FLASH_UPDATES  2000
 
 /* NVM Word Offsets */
+#define NVM_COMPAT                 0x0003
 #define NVM_ID_LED_SETTINGS        0x0004
 #define NVM_INIT_CONTROL2_REG      0x000F
 #define NVM_INIT_CONTROL3_PORT_B   0x0014
 /* Mask bits for fields in Word 0x1a of the NVM */
 #define NVM_WORD1A_ASPM_MASK  0x000C
 
+/* Mask bits for fields in Word 0x03 of the EEPROM */
+#define NVM_COMPAT_LOM    0x0800
+
 /* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
 #define NVM_SUM                    0xBABA
 
index df4a2792293123eba7b1b6f3a589220ce1185ea1..0fd4eb5ac5fb9241f57061ee5eb76dff06e8afbf 100644 (file)
@@ -183,6 +183,16 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
        u16 offset, nvm_alt_mac_addr_offset, nvm_data;
        u8 alt_mac_addr[ETH_ALEN];
 
+       ret_val = e1000_read_nvm(hw, NVM_COMPAT, 1, &nvm_data);
+       if (ret_val)
+               goto out;
+
+       /* Check for LOM (vs. NIC) or one of two valid mezzanine cards */
+       if (!((nvm_data & NVM_COMPAT_LOM) ||
+             (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) ||
+             (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD)))
+               goto out;
+
        ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
                                 &nvm_alt_mac_addr_offset);
        if (ret_val) {
index 99a929964e3cebde15667e59d913debd6d205b20..1846623c6ae65d18b6223ada9be4531809f64a99 100644 (file)
@@ -40,7 +40,7 @@
 #include <asm/io.h>
 
 #define DRV_NAME       "ehea"
-#define DRV_VERSION    "EHEA_0105"
+#define DRV_VERSION    "EHEA_0106"
 
 /* eHEA capability flags */
 #define DLPAR_PORT_ADD_REM 1
@@ -400,6 +400,7 @@ struct ehea_port_res {
        u32 poll_counter;
        struct net_lro_mgr lro_mgr;
        struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS];
+       int sq_restart_flag;
 };
 
 
index 897719b49f96859b11a1f01f4f45e46c03c4cd71..a333b42111b8c2ba20b92eca94bf5648704c4f9a 100644 (file)
@@ -776,6 +776,53 @@ static int ehea_proc_rwqes(struct net_device *dev,
        return processed;
 }
 
+#define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull
+
+static void reset_sq_restart_flag(struct ehea_port *port)
+{
+       int i;
+
+       for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+               struct ehea_port_res *pr = &port->port_res[i];
+               pr->sq_restart_flag = 0;
+       }
+}
+
+static void check_sqs(struct ehea_port *port)
+{
+       struct ehea_swqe *swqe;
+       int swqe_index;
+       int i, k;
+
+       for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+               struct ehea_port_res *pr = &port->port_res[i];
+               k = 0;
+               swqe = ehea_get_swqe(pr->qp, &swqe_index);
+               memset(swqe, 0, SWQE_HEADER_SIZE);
+               atomic_dec(&pr->swqe_avail);
+
+               swqe->tx_control |= EHEA_SWQE_PURGE;
+               swqe->wr_id = SWQE_RESTART_CHECK;
+               swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
+               swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT;
+               swqe->immediate_data_length = 80;
+
+               ehea_post_swqe(pr->qp, swqe);
+
+               while (pr->sq_restart_flag == 0) {
+                       msleep(5);
+                       if (++k == 100) {
+                               ehea_error("HW/SW queues out of sync");
+                               ehea_schedule_port_reset(pr->port);
+                               return;
+                       }
+               }
+       }
+
+       return;
+}
+
+
 static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
 {
        struct sk_buff *skb;
@@ -793,6 +840,13 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
 
                cqe_counter++;
                rmb();
+
+               if (cqe->wr_id == SWQE_RESTART_CHECK) {
+                       pr->sq_restart_flag = 1;
+                       swqe_av++;
+                       break;
+               }
+
                if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
                        ehea_error("Bad send completion status=0x%04X",
                                   cqe->status);
@@ -2675,8 +2729,10 @@ static void ehea_flush_sq(struct ehea_port *port)
                int k = 0;
                while (atomic_read(&pr->swqe_avail) < swqe_max) {
                        msleep(5);
-                       if (++k == 20)
+                       if (++k == 20) {
+                               ehea_error("WARNING: sq not flushed completely");
                                break;
+                       }
                }
        }
 }
@@ -2917,6 +2973,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
                                        port_napi_disable(port);
                                        mutex_unlock(&port->port_lock);
                                }
+                               reset_sq_restart_flag(port);
                        }
 
                        /* Unregister old memory region */
@@ -2951,6 +3008,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
                                                mutex_lock(&port->port_lock);
                                                port_napi_enable(port);
                                                ret = ehea_restart_qps(dev);
+                                               check_sqs(port);
                                                if (!ret)
                                                        netif_wake_queue(dev);
                                                mutex_unlock(&port->port_lock);
index 3995fafc1e08bb3adb8658bd4d0098c4aff09f60..8c6c1e2a87503e2f647fedd44c77dbb2ffcfb3c9 100644 (file)
@@ -238,7 +238,7 @@ void emac_dbg_dump_all(void)
 }
 
 #if defined(CONFIG_MAGIC_SYSRQ)
-static void emac_sysrq_handler(int key, struct tty_struct *tty)
+static void emac_sysrq_handler(int key)
 {
        emac_dbg_dump_all();
 }
index 2602852cc55a6037c5160575f3620632397fcd2c..4734c939ad03574a63e6dd0bc9ccf6d8cf29c06a 100644 (file)
@@ -1113,7 +1113,8 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
        struct ibmveth_adapter *adapter = netdev_priv(dev);
        struct vio_dev *viodev = adapter->vdev;
        int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
-       int i;
+       int i, rc;
+       int need_restart = 0;
 
        if (new_mtu < IBMVETH_MAX_MTU)
                return -EINVAL;
@@ -1127,35 +1128,32 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
 
        /* Deactivate all the buffer pools so that the next loop can activate
           only the buffer pools necessary to hold the new MTU */
-       for (i = 0; i < IbmVethNumBufferPools; i++)
-               if (adapter->rx_buff_pool[i].active) {
-                       ibmveth_free_buffer_pool(adapter,
-                                                &adapter->rx_buff_pool[i]);
-                       adapter->rx_buff_pool[i].active = 0;
-               }
+       if (netif_running(adapter->netdev)) {
+               need_restart = 1;
+               adapter->pool_config = 1;
+               ibmveth_close(adapter->netdev);
+               adapter->pool_config = 0;
+       }
 
        /* Look for an active buffer pool that can hold the new MTU */
        for(i = 0; i<IbmVethNumBufferPools; i++) {
                adapter->rx_buff_pool[i].active = 1;
 
                if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
-                       if (netif_running(adapter->netdev)) {
-                               adapter->pool_config = 1;
-                               ibmveth_close(adapter->netdev);
-                               adapter->pool_config = 0;
-                               dev->mtu = new_mtu;
-                               vio_cmo_set_dev_desired(viodev,
-                                               ibmveth_get_desired_dma
-                                               (viodev));
-                               return ibmveth_open(adapter->netdev);
-                       }
                        dev->mtu = new_mtu;
                        vio_cmo_set_dev_desired(viodev,
                                                ibmveth_get_desired_dma
                                                (viodev));
+                       if (need_restart) {
+                               return ibmveth_open(adapter->netdev);
+                       }
                        return 0;
                }
        }
+
+       if (need_restart && (rc = ibmveth_open(adapter->netdev)))
+               return rc;
+
        return -EINVAL;
 }
 
index c7b624711f5ecfc51981845857bda27ef264fa3e..bdf2149e529689b1135603904da6343fcbbbebc2 100644 (file)
@@ -902,8 +902,8 @@ temac_poll_controller(struct net_device *ndev)
        disable_irq(lp->tx_irq);
        disable_irq(lp->rx_irq);
 
-       ll_temac_rx_irq(lp->tx_irq, lp);
-       ll_temac_tx_irq(lp->rx_irq, lp);
+       ll_temac_rx_irq(lp->tx_irq, ndev);
+       ll_temac_tx_irq(lp->rx_irq, ndev);
 
        enable_irq(lp->tx_irq);
        enable_irq(lp->rx_irq);
index ffa1b9ce1cc5a8f4c474136940df4de36d853c55..6dca3574e35507a94ca7e5b1b518a06199e99e03 100644 (file)
@@ -53,8 +53,8 @@
 
 #define _NETXEN_NIC_LINUX_MAJOR 4
 #define _NETXEN_NIC_LINUX_MINOR 0
-#define _NETXEN_NIC_LINUX_SUBVERSION 73
-#define NETXEN_NIC_LINUX_VERSIONID  "4.0.73"
+#define _NETXEN_NIC_LINUX_SUBVERSION 74
+#define NETXEN_NIC_LINUX_VERSIONID  "4.0.74"
 
 #define NETXEN_VERSION_CODE(a, b, c)   (((a) << 24) + ((b) << 16) + (c))
 #define _major(v)      (((v) >> 24) & 0xff)
index c865dda2adf15f8b59a579640d416099e9fabc0c..cabae7bb1fc6777d3366c5a8728feadcd53d0aa3 100644 (file)
@@ -1805,8 +1805,6 @@ netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
        netxen_ctx_msg msg = 0;
        struct list_head *head;
 
-       spin_lock(&rds_ring->lock);
-
        producer = rds_ring->producer;
 
        head = &rds_ring->free_list;
@@ -1853,8 +1851,6 @@ netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
                                        NETXEN_RCV_PRODUCER_OFFSET), msg);
                }
        }
-
-       spin_unlock(&rds_ring->lock);
 }
 
 static void
index fd86e18604e636a5b1ede55a077d56dd2ba06713..73d31459223098c8e3727a0565ed12805d58e388 100644 (file)
@@ -2032,8 +2032,6 @@ struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
        struct netxen_adapter *adapter = netdev_priv(netdev);
        struct net_device_stats *stats = &netdev->stats;
 
-       memset(stats, 0, sizeof(*stats));
-
        stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
        stats->tx_packets = adapter->stats.xmitfinished;
        stats->rx_bytes = adapter->stats.rxbytes;
@@ -2133,9 +2131,16 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void netxen_nic_poll_controller(struct net_device *netdev)
 {
+       int ring;
+       struct nx_host_sds_ring *sds_ring;
        struct netxen_adapter *adapter = netdev_priv(netdev);
+       struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
        disable_irq(adapter->irq);
-       netxen_intr(adapter->irq, adapter);
+       for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+               sds_ring = &recv_ctx->sds_rings[ring];
+               netxen_intr(adapter->irq, sds_ring);
+       }
        enable_irq(adapter->irq);
 }
 #endif
index c3edfe4c26516fc14780ee19183fc6db95fbf273..49279b0ee526a54a534c6f8c04e7c48aa4aba0d0 100644 (file)
@@ -1637,6 +1637,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
        PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCETTX", 0x547e66dc, 0x6fc5459b),
        PCMCIA_DEVICE_PROD_ID12("iPort", "10/100 Ethernet Card", 0x56c538d2, 0x11b0ffc0),
        PCMCIA_DEVICE_PROD_ID12("KANSAI ELECTRIC CO.,LTD", "KLA-PCM/T", 0xb18dc3b4, 0xcc51a956),
+       PCMCIA_DEVICE_PROD_ID12("KENTRONICS", "KEP-230", 0xaf8144c9, 0x868f6616),
        PCMCIA_DEVICE_PROD_ID12("KCI", "PE520 PCMCIA Ethernet Adapter", 0xa89b87d3, 0x1eb88e64),
        PCMCIA_DEVICE_PROD_ID12("KINGMAX", "EN10T2T", 0x7bcb459a, 0xa5c81fa5),
        PCMCIA_DEVICE_PROD_ID12("Kingston", "KNE-PC2", 0x1128e633, 0xce2a89b3),
index c0761197c07e6a94e67e3d0d4f417d888e2ec68f..16ddc77313cb08348b3cdbdb7016bd5c56fb9304 100644 (file)
@@ -466,6 +466,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
 
        phydev->interface = interface;
 
+       phydev->state = PHY_READY;
+
        /* Do initial configuration here, now that
         * we have certain key parameters
         * (dev_flags and interface) */
diff --git a/drivers/net/pxa168_eth.c b/drivers/net/pxa168_eth.c
new file mode 100644 (file)
index 0000000..85eddda
--- /dev/null
@@ -0,0 +1,1666 @@
+/*
+ * PXA168 ethernet driver.
+ * Most of the code is derived from mv643xx ethernet driver.
+ *
+ * Copyright (C) 2010 Marvell International Ltd.
+ *             Sachin Sanap <ssanap@marvell.com>
+ *             Philip Rakity <prakity@marvell.com>
+ *             Mark Brown <markb@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/etherdevice.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/workqueue.h>
+#include <linux/clk.h>
+#include <linux/phy.h>
+#include <linux/io.h>
+#include <linux/types.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+#include <linux/pxa168_eth.h>
+
+#define DRIVER_NAME    "pxa168-eth"
+#define DRIVER_VERSION "0.3"
+
+/*
+ * Registers
+ */
+
+#define PHY_ADDRESS            0x0000
+#define SMI                    0x0010
+#define PORT_CONFIG            0x0400
+#define PORT_CONFIG_EXT                0x0408
+#define PORT_COMMAND           0x0410
+#define PORT_STATUS            0x0418
+#define HTPR                   0x0428
+#define SDMA_CONFIG            0x0440
+#define SDMA_CMD               0x0448
+#define INT_CAUSE              0x0450
+#define INT_W_CLEAR            0x0454
+#define INT_MASK               0x0458
+#define ETH_F_RX_DESC_0                0x0480
+#define ETH_C_RX_DESC_0                0x04A0
+#define ETH_C_TX_DESC_1                0x04E4
+
+/* smi register */
+#define SMI_BUSY               (1 << 28)       /* 0 - Write, 1 - Read  */
+#define SMI_R_VALID            (1 << 27)       /* 0 - Write, 1 - Read  */
+#define SMI_OP_W               (0 << 26)       /* Write operation      */
+#define SMI_OP_R               (1 << 26)       /* Read operation */
+
+#define PHY_WAIT_ITERATIONS    10
+
+#define PXA168_ETH_PHY_ADDR_DEFAULT    0
+/* RX & TX descriptor command */
+#define BUF_OWNED_BY_DMA       (1 << 31)
+
+/* RX descriptor status */
+#define RX_EN_INT              (1 << 23)
+#define RX_FIRST_DESC          (1 << 17)
+#define RX_LAST_DESC           (1 << 16)
+#define RX_ERROR               (1 << 15)
+
+/* TX descriptor command */
+#define TX_EN_INT              (1 << 23)
+#define TX_GEN_CRC             (1 << 22)
+#define TX_ZERO_PADDING                (1 << 18)
+#define TX_FIRST_DESC          (1 << 17)
+#define TX_LAST_DESC           (1 << 16)
+#define TX_ERROR               (1 << 15)
+
+/* SDMA_CMD */
+#define SDMA_CMD_AT            (1 << 31)
+#define SDMA_CMD_TXDL          (1 << 24)
+#define SDMA_CMD_TXDH          (1 << 23)
+#define SDMA_CMD_AR            (1 << 15)
+#define SDMA_CMD_ERD           (1 << 7)
+
+/* Bit definitions of the Port Config Reg */
+#define PCR_HS                 (1 << 12)
+#define PCR_EN                 (1 << 7)
+#define PCR_PM                 (1 << 0)
+
+/* Bit definitions of the Port Config Extend Reg */
+#define PCXR_2BSM              (1 << 28)
+#define PCXR_DSCP_EN           (1 << 21)
+#define PCXR_MFL_1518          (0 << 14)
+#define PCXR_MFL_1536          (1 << 14)
+#define PCXR_MFL_2048          (2 << 14)
+#define PCXR_MFL_64K           (3 << 14)
+#define PCXR_FLP               (1 << 11)
+#define PCXR_PRIO_TX_OFF       3
+#define PCXR_TX_HIGH_PRI       (7 << PCXR_PRIO_TX_OFF)
+
+/* Bit definitions of the SDMA Config Reg */
+#define SDCR_BSZ_OFF           12
+#define SDCR_BSZ8              (3 << SDCR_BSZ_OFF)
+#define SDCR_BSZ4              (2 << SDCR_BSZ_OFF)
+#define SDCR_BSZ2              (1 << SDCR_BSZ_OFF)
+#define SDCR_BSZ1              (0 << SDCR_BSZ_OFF)
+#define SDCR_BLMR              (1 << 6)
+#define SDCR_BLMT              (1 << 7)
+#define SDCR_RIFB              (1 << 9)
+#define SDCR_RC_OFF            2
+#define SDCR_RC_MAX_RETRANS    (0xf << SDCR_RC_OFF)
+
+/*
+ * Bit definitions of the Interrupt Cause Reg
+ * and Interrupt MASK Reg is the same
+ */
+#define ICR_RXBUF              (1 << 0)
+#define ICR_TXBUF_H            (1 << 2)
+#define ICR_TXBUF_L            (1 << 3)
+#define ICR_TXEND_H            (1 << 6)
+#define ICR_TXEND_L            (1 << 7)
+#define ICR_RXERR              (1 << 8)
+#define ICR_TXERR_H            (1 << 10)
+#define ICR_TXERR_L            (1 << 11)
+#define ICR_TX_UDR             (1 << 13)
+#define ICR_MII_CH             (1 << 28)
+
+#define ALL_INTS (ICR_TXBUF_H  | ICR_TXBUF_L  | ICR_TX_UDR |\
+                               ICR_TXERR_H  | ICR_TXERR_L |\
+                               ICR_TXEND_H  | ICR_TXEND_L |\
+                               ICR_RXBUF | ICR_RXERR  | ICR_MII_CH)
+
+#define ETH_HW_IP_ALIGN                2       /* hw aligns IP header */
+
+#define NUM_RX_DESCS           64
+#define NUM_TX_DESCS           64
+
+#define HASH_ADD               0
+#define HASH_DELETE            1
+#define HASH_ADDR_TABLE_SIZE   0x4000  /* 16K (1/2K address - PCR_HS == 1) */
+#define HOP_NUMBER             12
+
+/* Bit definitions for Port status */
+#define PORT_SPEED_100         (1 << 0)
+#define FULL_DUPLEX            (1 << 1)
+#define FLOW_CONTROL_ENABLED   (1 << 2)
+#define LINK_UP                        (1 << 3)
+
+/* Bit definitions for work to be done */
+#define WORK_LINK              (1 << 0)
+#define WORK_TX_DONE           (1 << 1)
+
+/*
+ * Misc definitions.
+ */
+#define SKB_DMA_REALIGN                ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
+
+struct rx_desc {
+       u32 cmd_sts;            /* Descriptor command status            */
+       u16 byte_cnt;           /* Descriptor buffer byte count         */
+       u16 buf_size;           /* Buffer size                          */
+       u32 buf_ptr;            /* Descriptor buffer pointer            */
+       u32 next_desc_ptr;      /* Next descriptor pointer              */
+};
+
+struct tx_desc {
+       u32 cmd_sts;            /* Command/status field                 */
+       u16 reserved;
+       u16 byte_cnt;           /* buffer byte count                    */
+       u32 buf_ptr;            /* pointer to buffer for this descriptor */
+       u32 next_desc_ptr;      /* Pointer to next descriptor           */
+};
+
+struct pxa168_eth_private {
+       int port_num;           /* User Ethernet port number    */
+
+       int rx_resource_err;    /* Rx ring resource error flag */
+
+       /* Next available and first returning Rx resource */
+       int rx_curr_desc_q, rx_used_desc_q;
+
+       /* Next available and first returning Tx resource */
+       int tx_curr_desc_q, tx_used_desc_q;
+
+       struct rx_desc *p_rx_desc_area;
+       dma_addr_t rx_desc_dma;
+       int rx_desc_area_size;
+       struct sk_buff **rx_skb;
+
+       struct tx_desc *p_tx_desc_area;
+       dma_addr_t tx_desc_dma;
+       int tx_desc_area_size;
+       struct sk_buff **tx_skb;
+
+       struct work_struct tx_timeout_task;
+
+       struct net_device *dev;
+       struct napi_struct napi;
+       u8 work_todo;
+       int skb_size;
+
+       struct net_device_stats stats;
+       /* Size of Tx Ring per queue */
+       int tx_ring_size;
+       /* Number of tx descriptors in use */
+       int tx_desc_count;
+       /* Size of Rx Ring per queue */
+       int rx_ring_size;
+       /* Number of rx descriptors in use */
+       int rx_desc_count;
+
+       /*
+        * Used in case RX Ring is empty, which can occur when
+        * system does not have resources (skb's)
+        */
+       struct timer_list timeout;
+       struct mii_bus *smi_bus;
+       struct phy_device *phy;
+
+       /* clock */
+       struct clk *clk;
+       struct pxa168_eth_platform_data *pd;
+       /*
+        * Ethernet controller base address.
+        */
+       void __iomem *base;
+
+       /* Pointer to the hardware address filter table */
+       void *htpr;
+       dma_addr_t htpr_dma;
+};
+
+struct addr_table_entry {
+       __le32 lo;
+       __le32 hi;
+};
+
+/* Bit fields of a Hash Table Entry */
+enum hash_table_entry {
+       HASH_ENTRY_VALID = 1,
+       SKIP = 2,
+       HASH_ENTRY_RECEIVE_DISCARD = 4,
+       HASH_ENTRY_RECEIVE_DISCARD_BIT = 2
+};
+
+static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
+static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd);
+static int pxa168_init_hw(struct pxa168_eth_private *pep);
+static void eth_port_reset(struct net_device *dev);
+static void eth_port_start(struct net_device *dev);
+static int pxa168_eth_open(struct net_device *dev);
+static int pxa168_eth_stop(struct net_device *dev);
+static int ethernet_phy_setup(struct net_device *dev);
+
+static inline u32 rdl(struct pxa168_eth_private *pep, int offset)
+{
+       return readl(pep->base + offset);
+}
+
+static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data)
+{
+       writel(data, pep->base + offset);
+}
+
+static void abort_dma(struct pxa168_eth_private *pep)
+{
+       int delay;
+       int max_retries = 40;
+
+       do {
+               wrl(pep, SDMA_CMD, SDMA_CMD_AR | SDMA_CMD_AT);
+               udelay(100);
+
+               delay = 10;
+               while ((rdl(pep, SDMA_CMD) & (SDMA_CMD_AR | SDMA_CMD_AT))
+                      && delay-- > 0) {
+                       udelay(10);
+               }
+       } while (max_retries-- > 0 && delay <= 0);
+
+       if (max_retries <= 0)
+               printk(KERN_ERR "%s : DMA Stuck\n", __func__);
+}
+
+static int ethernet_phy_get(struct pxa168_eth_private *pep)
+{
+       unsigned int reg_data;
+
+       reg_data = rdl(pep, PHY_ADDRESS);
+
+       return (reg_data >> (5 * pep->port_num)) & 0x1f;
+}
+
+static void ethernet_phy_set_addr(struct pxa168_eth_private *pep, int phy_addr)
+{
+       u32 reg_data;
+       int addr_shift = 5 * pep->port_num;
+
+       reg_data = rdl(pep, PHY_ADDRESS);
+       reg_data &= ~(0x1f << addr_shift);
+       reg_data |= (phy_addr & 0x1f) << addr_shift;
+       wrl(pep, PHY_ADDRESS, reg_data);
+}
+
+static void ethernet_phy_reset(struct pxa168_eth_private *pep)
+{
+       int data;
+
+       data = phy_read(pep->phy, MII_BMCR);
+       if (data < 0)
+               return;
+
+       data |= BMCR_RESET;
+       if (phy_write(pep->phy, MII_BMCR, data) < 0)
+               return;
+
+       do {
+               data = phy_read(pep->phy, MII_BMCR);
+       } while (data >= 0 && data & BMCR_RESET);
+}
+
+static void rxq_refill(struct net_device *dev)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       struct sk_buff *skb;
+       struct rx_desc *p_used_rx_desc;
+       int used_rx_desc;
+
+       while (pep->rx_desc_count < pep->rx_ring_size) {
+               int size;
+
+               skb = dev_alloc_skb(pep->skb_size);
+               if (!skb)
+                       break;
+               if (SKB_DMA_REALIGN)
+                       skb_reserve(skb, SKB_DMA_REALIGN);
+               pep->rx_desc_count++;
+               /* Get 'used' Rx descriptor */
+               used_rx_desc = pep->rx_used_desc_q;
+               p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc];
+               size = skb->end - skb->data;
+               p_used_rx_desc->buf_ptr = dma_map_single(NULL,
+                                                        skb->data,
+                                                        size,
+                                                        DMA_FROM_DEVICE);
+               p_used_rx_desc->buf_size = size;
+               pep->rx_skb[used_rx_desc] = skb;
+
+               /* Return the descriptor to DMA ownership */
+               wmb();
+               p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT;
+               wmb();
+
+               /* Move the used descriptor pointer to the next descriptor */
+               pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size;
+
+               /* Any Rx return cancels the Rx resource error status */
+               pep->rx_resource_err = 0;
+
+               skb_reserve(skb, ETH_HW_IP_ALIGN);
+       }
+
+       /*
+        * If the RX ring has run out of skbs, set a timer to try
+        * allocating again at a later time.
+        */
+       if (pep->rx_desc_count == 0) {
+               pep->timeout.expires = jiffies + (HZ / 10);
+               add_timer(&pep->timeout);
+       }
+}
+
+static inline void rxq_refill_timer_wrapper(unsigned long data)
+{
+       struct pxa168_eth_private *pep = (void *)data;
+       napi_schedule(&pep->napi);
+}
+
+static inline u8 flip_8_bits(u8 x)
+{
+       return (((x) & 0x01) << 3) | (((x) & 0x02) << 1)
+           | (((x) & 0x04) >> 1) | (((x) & 0x08) >> 3)
+           | (((x) & 0x10) << 3) | (((x) & 0x20) << 1)
+           | (((x) & 0x40) >> 1) | (((x) & 0x80) >> 3);
+}
+
+static void nibble_swap_every_byte(unsigned char *mac_addr)
+{
+       int i;
+       for (i = 0; i < ETH_ALEN; i++) {
+               mac_addr[i] = ((mac_addr[i] & 0x0f) << 4) |
+                               ((mac_addr[i] & 0xf0) >> 4);
+       }
+}
+
+static void inverse_every_nibble(unsigned char *mac_addr)
+{
+       int i;
+       for (i = 0; i < ETH_ALEN; i++)
+               mac_addr[i] = flip_8_bits(mac_addr[i]);
+}
+
+/*
+ * ----------------------------------------------------------------------------
+ * This function calculates the hash table entry index for the given address.
+ * Inputs
+ * mac_addr_orig    - MAC address.
+ * Outputs
+ * returns the calculated entry index.
+ */
+static u32 hash_function(unsigned char *mac_addr_orig)
+{
+       u32 hash_result;
+       u32 addr0;
+       u32 addr1;
+       u32 addr2;
+       u32 addr3;
+       unsigned char mac_addr[ETH_ALEN];
+
+       /* Make a copy of the MAC address since we are going to perform bit
+        * operations on it
+        */
+       memcpy(mac_addr, mac_addr_orig, ETH_ALEN);
+
+       nibble_swap_every_byte(mac_addr);
+       inverse_every_nibble(mac_addr);
+
+       addr0 = (mac_addr[5] >> 2) & 0x3f;
+       addr1 = (mac_addr[5] & 0x03) | (((mac_addr[4] & 0x7f)) << 2);
+       addr2 = ((mac_addr[4] & 0x80) >> 7) | mac_addr[3] << 1;
+       addr3 = (mac_addr[2] & 0xff) | ((mac_addr[1] & 1) << 8);
+
+       hash_result = (addr0 << 9) | (addr1 ^ addr2 ^ addr3);
+       hash_result = hash_result & 0x07ff;
+       return hash_result;
+}
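
The index computation above can be checked outside the driver. Below is a minimal standalone sketch (not part of this patch; the MAC address is an arbitrary example) that mirrors the nibble swap, per-nibble bit reversal and fold steps and prints the resulting 11-bit table index:

#include <stdio.h>
#include <stdint.h>

static uint8_t flip8(uint8_t x)                 /* same per-nibble bit reversal */
{
        return ((x & 0x01) << 3) | ((x & 0x02) << 1)
             | ((x & 0x04) >> 1) | ((x & 0x08) >> 3)
             | ((x & 0x10) << 3) | ((x & 0x20) << 1)
             | ((x & 0x40) >> 1) | ((x & 0x80) >> 3);
}

int main(void)
{
        uint8_t mac[6] = { 0x00, 0x50, 0x43, 0x12, 0x34, 0x56 };   /* example only */
        uint32_t a0, a1, a2, a3;
        int i;

        for (i = 0; i < 6; i++)                 /* nibble swap, then bit flip */
                mac[i] = flip8((uint8_t)((mac[i] << 4) | (mac[i] >> 4)));

        a0 = (mac[5] >> 2) & 0x3f;
        a1 = (mac[5] & 0x03) | ((mac[4] & 0x7f) << 2);
        a2 = ((mac[4] & 0x80) >> 7) | (mac[3] << 1);
        a3 = (mac[2] & 0xff) | ((mac[1] & 1) << 8);

        printf("index = 0x%03x\n",
               (unsigned)(((a0 << 9) | (a1 ^ a2 ^ a3)) & 0x07ff));
        return 0;
}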
+
+/*
+ * ----------------------------------------------------------------------------
+ * This function adds or deletes an entry in the address table.
+ * Inputs
+ * pep - driver private data.
+ * mac_addr - MAC address.
+ * skip - if 1, mark this address as skipped. Used when deleting an entry
+ *       that is part of a chain in the hash table. We can't simply delete
+ *       the entry, since that would break the chain; the tables need to be
+ *       defragmented from time to time instead.
+ * rd   - 0 Discard packet upon match.
+ *      - 1 Receive packet upon match.
+ * Outputs
+ * address table entry is added/deleted.
+ * 0 on success.
+ * -ENOSPC if the table is full.
+ */
+static int add_del_hash_entry(struct pxa168_eth_private *pep,
+                             unsigned char *mac_addr,
+                             u32 rd, u32 skip, int del)
+{
+       struct addr_table_entry *entry, *start;
+       u32 new_high;
+       u32 new_low;
+       u32 i;
+
+       new_low = (((mac_addr[1] >> 4) & 0xf) << 15)
+           | (((mac_addr[1] >> 0) & 0xf) << 11)
+           | (((mac_addr[0] >> 4) & 0xf) << 7)
+           | (((mac_addr[0] >> 0) & 0xf) << 3)
+           | (((mac_addr[3] >> 4) & 0x1) << 31)
+           | (((mac_addr[3] >> 0) & 0xf) << 27)
+           | (((mac_addr[2] >> 4) & 0xf) << 23)
+           | (((mac_addr[2] >> 0) & 0xf) << 19)
+           | (skip << SKIP) | (rd << HASH_ENTRY_RECEIVE_DISCARD_BIT)
+           | HASH_ENTRY_VALID;
+
+       new_high = (((mac_addr[5] >> 4) & 0xf) << 15)
+           | (((mac_addr[5] >> 0) & 0xf) << 11)
+           | (((mac_addr[4] >> 4) & 0xf) << 7)
+           | (((mac_addr[4] >> 0) & 0xf) << 3)
+           | (((mac_addr[3] >> 5) & 0x7) << 0);
+
+       /*
+        * Pick the appropriate table, start scanning for free/reusable
+        * entries at the index obtained by hashing the specified MAC address
+        */
+       start = (struct addr_table_entry *)(pep->htpr);
+       entry = start + hash_function(mac_addr);
+       for (i = 0; i < HOP_NUMBER; i++) {
+               if (!(le32_to_cpu(entry->lo) & HASH_ENTRY_VALID)) {
+                       break;
+               } else {
+                       /* if same address put in same position */
+                       if (((le32_to_cpu(entry->lo) & 0xfffffff8) ==
+                               (new_low & 0xfffffff8)) &&
+                               (le32_to_cpu(entry->hi) == new_high)) {
+                               break;
+                       }
+               }
+               if (entry == start + 0x7ff)
+                       entry = start;
+               else
+                       entry++;
+       }
+
+       if (((le32_to_cpu(entry->lo) & 0xfffffff8) != (new_low & 0xfffffff8)) &&
+           (le32_to_cpu(entry->hi) != new_high) && del)
+               return 0;
+
+       if (i == HOP_NUMBER) {
+               if (!del) {
+                       printk(KERN_INFO "%s: table section is full, need to "
+                                       "move to 16kB implementation?\n",
+                                        __FILE__);
+                       return -ENOSPC;
+               } else
+                       return 0;
+       }
+
+       /*
+        * Update the selected entry
+        */
+       if (del) {
+               entry->hi = 0;
+               entry->lo = 0;
+       } else {
+               entry->hi = cpu_to_le32(new_high);
+               entry->lo = cpu_to_le32(new_low);
+       }
+
+       return 0;
+}
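
The scan above is bounded linear probing: at most HOP_NUMBER slots are examined, wrapping from the last slot (0x7ff) back to slot 0. A small standalone sketch (not from the patch; the start index is an arbitrary example) of just that walk:

#include <stdio.h>

#define HOP_NUMBER      12
#define TABLE_SLOTS     0x800   /* 2048 entries in the 1/2kB-address table */

int main(void)
{
        unsigned int slot = 0x7fa;      /* example hash index near the end */
        int i;

        for (i = 0; i < HOP_NUMBER; i++) {
                printf("probe %2d -> slot 0x%03x\n", i, slot);
                slot = (slot == TABLE_SLOTS - 1) ? 0 : slot + 1;
        }
        return 0;       /* the driver gives up (-ENOSPC on add) after 12 probes */
}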
+
+/*
+ * ----------------------------------------------------------------------------
+ *  Create an address table entry from the MAC address info
+ *  found in the specified net_device struct
+ *
+ *  Input : pointer to ethernet interface network device structure
+ *  Output : N/A
+ */
+static void update_hash_table_mac_address(struct pxa168_eth_private *pep,
+                                         unsigned char *oaddr,
+                                         unsigned char *addr)
+{
+       /* Delete old entry */
+       if (oaddr)
+               add_del_hash_entry(pep, oaddr, 1, 0, HASH_DELETE);
+       /* Add new entry */
+       add_del_hash_entry(pep, addr, 1, 0, HASH_ADD);
+}
+
+static int init_hash_table(struct pxa168_eth_private *pep)
+{
+       /*
+        * The hardware expects the CPU to build a hash table based on a
+        * predefined hash function and to populate it based on the hardware
+        * address. The location of the hash table is identified by a 32-bit
+        * pointer stored in the HTPR internal register. Two possible sizes
+        * exist for the hash table: 8kB (256kB of DRAM required (4 x 64 kB
+        * banks)) and 1/2kB (16kB of DRAM required (4 x 4 kB banks)). We
+        * currently only support the 1/2kB size.
+        */
+       /* TODO: Add support for the 8kB hash table and the alternative hash
+        * function. The driver can then dynamically switch to them if the
+        * 1/2kB hash table is full.
+        */
+       if (pep->htpr == NULL) {
+               pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
+                                             HASH_ADDR_TABLE_SIZE,
+                                             &pep->htpr_dma, GFP_KERNEL);
+               if (pep->htpr == NULL)
+                       return -ENOMEM;
+       }
+       memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
+       wrl(pep, HTPR, pep->htpr_dma);
+       return 0;
+}
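
As a quick cross-check of the sizing above (a sketch, not from the patch, assuming the table entry stays two 32-bit words): the 16kB allocation holds 2048 eight-byte entries, matching the wrap index 0x7ff used by the probe loop.

#include <stdio.h>
#include <stdint.h>

struct addr_table_entry_sketch {        /* stand-in for the driver's __le32 pair */
        uint32_t lo;
        uint32_t hi;
};

int main(void)
{
        size_t entries = 0x4000 / sizeof(struct addr_table_entry_sketch);

        printf("%zu entries, last index 0x%zx\n", entries, entries - 1); /* 2048, 0x7ff */
        return 0;
}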
+
+static void pxa168_eth_set_rx_mode(struct net_device *dev)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       struct netdev_hw_addr *ha;
+       u32 val;
+
+       val = rdl(pep, PORT_CONFIG);
+       if (dev->flags & IFF_PROMISC)
+               val |= PCR_PM;
+       else
+               val &= ~PCR_PM;
+       wrl(pep, PORT_CONFIG, val);
+
+       /*
+        * Remove the old list of MAC addresses and add dev->dev_addr
+        * and the multicast addresses.
+        */
+       memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
+       update_hash_table_mac_address(pep, NULL, dev->dev_addr);
+
+       netdev_for_each_mc_addr(ha, dev)
+               update_hash_table_mac_address(pep, NULL, ha->addr);
+}
+
+static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr)
+{
+       struct sockaddr *sa = addr;
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       unsigned char oldMac[ETH_ALEN];
+
+       if (!is_valid_ether_addr(sa->sa_data))
+               return -EINVAL;
+       memcpy(oldMac, dev->dev_addr, ETH_ALEN);
+       memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
+       netif_addr_lock_bh(dev);
+       update_hash_table_mac_address(pep, oldMac, dev->dev_addr);
+       netif_addr_unlock_bh(dev);
+       return 0;
+}
+
+static void eth_port_start(struct net_device *dev)
+{
+       unsigned int val = 0;
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       int tx_curr_desc, rx_curr_desc;
+
+       /* Perform PHY reset, if there is a PHY. */
+       if (pep->phy != NULL) {
+               struct ethtool_cmd cmd;
+
+               pxa168_get_settings(pep->dev, &cmd);
+               ethernet_phy_reset(pep);
+               pxa168_set_settings(pep->dev, &cmd);
+       }
+
+       /* Assignment of Tx CTRP of given queue */
+       tx_curr_desc = pep->tx_curr_desc_q;
+       wrl(pep, ETH_C_TX_DESC_1,
+           (u32) (pep->tx_desc_dma + tx_curr_desc * sizeof(struct tx_desc)));
+
+       /* Assignment of Rx CRDP of given queue */
+       rx_curr_desc = pep->rx_curr_desc_q;
+       wrl(pep, ETH_C_RX_DESC_0,
+           (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
+
+       wrl(pep, ETH_F_RX_DESC_0,
+           (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
+
+       /* Clear all interrupts */
+       wrl(pep, INT_CAUSE, 0);
+
+       /* Enable all interrupts for receive, transmit and error. */
+       wrl(pep, INT_MASK, ALL_INTS);
+
+       val = rdl(pep, PORT_CONFIG);
+       val |= PCR_EN;
+       wrl(pep, PORT_CONFIG, val);
+
+       /* Start RX DMA engine */
+       val = rdl(pep, SDMA_CMD);
+       val |= SDMA_CMD_ERD;
+       wrl(pep, SDMA_CMD, val);
+}
+
+static void eth_port_reset(struct net_device *dev)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       unsigned int val = 0;
+
+       /* Stop all interrupts for receive, transmit and error. */
+       wrl(pep, INT_MASK, 0);
+
+       /* Clear all interrupts */
+       wrl(pep, INT_CAUSE, 0);
+
+       /* Stop RX DMA */
+       val = rdl(pep, SDMA_CMD);
+       val &= ~SDMA_CMD_ERD;   /* abort dma command */
+
+       /* Abort any transmit and receive operations and put DMA
+        * in idle state.
+        */
+       abort_dma(pep);
+
+       /* Disable port */
+       val = rdl(pep, PORT_CONFIG);
+       val &= ~PCR_EN;
+       wrl(pep, PORT_CONFIG, val);
+}
+
+/*
+ * txq_reclaim - Free the tx desc data for completed descriptors
+ * If force is non-zero, frees uncompleted descriptors as well
+ */
+static int txq_reclaim(struct net_device *dev, int force)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       struct tx_desc *desc;
+       u32 cmd_sts;
+       struct sk_buff *skb;
+       int tx_index;
+       dma_addr_t addr;
+       int count;
+       int released = 0;
+
+       netif_tx_lock(dev);
+
+       pep->work_todo &= ~WORK_TX_DONE;
+       while (pep->tx_desc_count > 0) {
+               tx_index = pep->tx_used_desc_q;
+               desc = &pep->p_tx_desc_area[tx_index];
+               cmd_sts = desc->cmd_sts;
+               if (!force && (cmd_sts & BUF_OWNED_BY_DMA)) {
+                       if (released > 0) {
+                               goto txq_reclaim_end;
+                       } else {
+                               released = -1;
+                               goto txq_reclaim_end;
+                       }
+               }
+               pep->tx_used_desc_q = (tx_index + 1) % pep->tx_ring_size;
+               pep->tx_desc_count--;
+               addr = desc->buf_ptr;
+               count = desc->byte_cnt;
+               skb = pep->tx_skb[tx_index];
+               if (skb)
+                       pep->tx_skb[tx_index] = NULL;
+
+               if (cmd_sts & TX_ERROR) {
+                       if (net_ratelimit())
+                               printk(KERN_ERR "%s: Error in TX\n", dev->name);
+                       dev->stats.tx_errors++;
+               }
+               dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
+               if (skb)
+                       dev_kfree_skb_irq(skb);
+               released++;
+       }
+txq_reclaim_end:
+       netif_tx_unlock(dev);
+       return released;
+}
+
+static void pxa168_eth_tx_timeout(struct net_device *dev)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+
+       printk(KERN_INFO "%s: TX timeout  desc_count %d\n",
+              dev->name, pep->tx_desc_count);
+
+       schedule_work(&pep->tx_timeout_task);
+}
+
+static void pxa168_eth_tx_timeout_task(struct work_struct *work)
+{
+       struct pxa168_eth_private *pep = container_of(work,
+                                                struct pxa168_eth_private,
+                                                tx_timeout_task);
+       struct net_device *dev = pep->dev;
+       pxa168_eth_stop(dev);
+       pxa168_eth_open(dev);
+}
+
+static int rxq_process(struct net_device *dev, int budget)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       struct net_device_stats *stats = &dev->stats;
+       unsigned int received_packets = 0;
+       struct sk_buff *skb;
+
+       while (budget-- > 0) {
+               int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
+               struct rx_desc *rx_desc;
+               unsigned int cmd_sts;
+
+               /* Do not process Rx ring in case of Rx ring resource error */
+               if (pep->rx_resource_err)
+                       break;
+               rx_curr_desc = pep->rx_curr_desc_q;
+               rx_used_desc = pep->rx_used_desc_q;
+               rx_desc = &pep->p_rx_desc_area[rx_curr_desc];
+               cmd_sts = rx_desc->cmd_sts;
+               rmb();
+               if (cmd_sts & (BUF_OWNED_BY_DMA))
+                       break;
+               skb = pep->rx_skb[rx_curr_desc];
+               pep->rx_skb[rx_curr_desc] = NULL;
+
+               rx_next_curr_desc = (rx_curr_desc + 1) % pep->rx_ring_size;
+               pep->rx_curr_desc_q = rx_next_curr_desc;
+
+               /* Rx descriptors exhausted. */
+               /* Set the Rx ring resource error flag */
+               if (rx_next_curr_desc == rx_used_desc)
+                       pep->rx_resource_err = 1;
+               pep->rx_desc_count--;
+               dma_unmap_single(NULL, rx_desc->buf_ptr,
+                                rx_desc->buf_size,
+                                DMA_FROM_DEVICE);
+               received_packets++;
+               /*
+                * Update statistics.
+                * Note byte count includes 4 byte CRC count
+                */
+               stats->rx_packets++;
+               stats->rx_bytes += rx_desc->byte_cnt;
+               /*
+                * If a packet was received without the first/last bits set, or
+                * with the error summary bit set, the packet needs to be dropped.
+                */
+               if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
+                    (RX_FIRST_DESC | RX_LAST_DESC))
+                   || (cmd_sts & RX_ERROR)) {
+
+                       stats->rx_dropped++;
+                       if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
+                           (RX_FIRST_DESC | RX_LAST_DESC)) {
+                               if (net_ratelimit())
+                                       printk(KERN_ERR
+                                              "%s: Rx pkt on multiple desc\n",
+                                              dev->name);
+                       }
+                       if (cmd_sts & RX_ERROR)
+                               stats->rx_errors++;
+                       dev_kfree_skb_irq(skb);
+               } else {
+                       /*
+                        * The -4 is for the CRC in the trailer of the
+                        * received packet
+                        */
+                       skb_put(skb, rx_desc->byte_cnt - 4);
+                       skb->protocol = eth_type_trans(skb, dev);
+                       netif_receive_skb(skb);
+               }
+               dev->last_rx = jiffies;
+       }
+       /* Fill RX ring with skb's */
+       rxq_refill(dev);
+       return received_packets;
+}
+
+static int pxa168_eth_collect_events(struct pxa168_eth_private *pep,
+                                    struct net_device *dev)
+{
+       u32 icr;
+       int ret = 0;
+
+       icr = rdl(pep, INT_CAUSE);
+       if (icr == 0)
+               return IRQ_NONE;
+
+       wrl(pep, INT_CAUSE, ~icr);
+       if (icr & (ICR_TXBUF_H | ICR_TXBUF_L)) {
+               pep->work_todo |= WORK_TX_DONE;
+               ret = 1;
+       }
+       if (icr & ICR_RXBUF)
+               ret = 1;
+       if (icr & ICR_MII_CH) {
+               pep->work_todo |= WORK_LINK;
+               ret = 1;
+       }
+       return ret;
+}
+
+static void handle_link_event(struct pxa168_eth_private *pep)
+{
+       struct net_device *dev = pep->dev;
+       u32 port_status;
+       int speed;
+       int duplex;
+       int fc;
+
+       port_status = rdl(pep, PORT_STATUS);
+       if (!(port_status & LINK_UP)) {
+               if (netif_carrier_ok(dev)) {
+                       printk(KERN_INFO "%s: link down\n", dev->name);
+                       netif_carrier_off(dev);
+                       txq_reclaim(dev, 1);
+               }
+               return;
+       }
+       if (port_status & PORT_SPEED_100)
+               speed = 100;
+       else
+               speed = 10;
+
+       duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
+       fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;
+       printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
+              "flow control %sabled\n", dev->name,
+              speed, duplex ? "full" : "half", fc ? "en" : "dis");
+       if (!netif_carrier_ok(dev))
+               netif_carrier_on(dev);
+}
+
+static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id)
+{
+       struct net_device *dev = (struct net_device *)dev_id;
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+
+       if (unlikely(!pxa168_eth_collect_events(pep, dev)))
+               return IRQ_NONE;
+       /* Disable interrupts */
+       wrl(pep, INT_MASK, 0);
+       napi_schedule(&pep->napi);
+       return IRQ_HANDLED;
+}
+
+static void pxa168_eth_recalc_skb_size(struct pxa168_eth_private *pep)
+{
+       int skb_size;
+
+       /*
+        * Reserve 2+14 bytes for an ethernet header (the hardware
+        * automatically prepends 2 bytes of dummy data to each
+        * received packet), 16 bytes for up to four VLAN tags, and
+        * 4 bytes for the trailing FCS -- 36 bytes total.
+        */
+       skb_size = pep->dev->mtu + 36;
+
+       /*
+        * Make sure that the skb size is a multiple of 8 bytes, as
+        * the lower three bits of the receive descriptor's buffer
+        * size field are ignored by the hardware.
+        */
+       pep->skb_size = (skb_size + 7) & ~7;
+
+       /*
+        * If NET_SKB_PAD is smaller than a cache line,
+        * netdev_alloc_skb() will cause skb->data to be misaligned
+        * to a cache line boundary.  If this is the case, include
+        * some extra space to allow re-aligning the data area.
+        */
+       pep->skb_size += SKB_DMA_REALIGN;
+
+}
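
A worked instance of the sizing above (a sketch, not from the patch), assuming the default MTU of 1500 and NET_SKB_PAD == SMP_CACHE_BYTES == 32 so that SKB_DMA_REALIGN is zero on that configuration:

#include <stdio.h>

int main(void)
{
        int mtu = 1500;
        int skb_size = mtu + 36;                /* 2 + 14 header, 16 VLAN, 4 FCS */
        int realign = (4096 - 32) % 32;         /* (PAGE_SIZE - NET_SKB_PAD) %
                                                   SMP_CACHE_BYTES, assumed values */

        skb_size = (skb_size + 7) & ~7;         /* round up: low 3 bits ignored */
        printf("skb_size = %d\n", skb_size + realign);  /* prints 1536 */
        return 0;
}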
+
+static int set_port_config_ext(struct pxa168_eth_private *pep)
+{
+       int skb_size;
+
+       pxa168_eth_recalc_skb_size(pep);
+       if  (pep->skb_size <= 1518)
+               skb_size = PCXR_MFL_1518;
+       else if (pep->skb_size <= 1536)
+               skb_size = PCXR_MFL_1536;
+       else if (pep->skb_size <= 2048)
+               skb_size = PCXR_MFL_2048;
+       else
+               skb_size = PCXR_MFL_64K;
+
+       /* Extended Port Configuration */
+       wrl(pep,
+           PORT_CONFIG_EXT, PCXR_2BSM | /* Two byte prefix aligns IP hdr */
+           PCXR_DSCP_EN |               /* Enable DSCP in IP */
+           skb_size | PCXR_FLP |        /* do not force link pass */
+           PCXR_TX_HIGH_PRI);           /* Transmit - high priority queue */
+
+       return 0;
+}
+
+static int pxa168_init_hw(struct pxa168_eth_private *pep)
+{
+       int err = 0;
+
+       /* Disable interrupts */
+       wrl(pep, INT_MASK, 0);
+       wrl(pep, INT_CAUSE, 0);
+       /* Write to ICR to clear interrupts. */
+       wrl(pep, INT_W_CLEAR, 0);
+       /* Abort any transmit and receive operations and put DMA
+        * in idle state.
+        */
+       abort_dma(pep);
+       /* Initialize address hash table */
+       err = init_hash_table(pep);
+       if (err)
+               return err;
+       /* SDMA configuration */
+       wrl(pep, SDMA_CONFIG, SDCR_BSZ8 |       /* Burst size = 32 bytes */
+           SDCR_RIFB |                         /* Rx interrupt on frame */
+           SDCR_BLMT |                         /* Little endian transmit */
+           SDCR_BLMR |                         /* Little endian receive */
+           SDCR_RC_MAX_RETRANS);               /* Max retransmit count */
+       /* Port Configuration */
+       wrl(pep, PORT_CONFIG, PCR_HS);          /* Hash size is 1/2kb */
+       set_port_config_ext(pep);
+
+       return err;
+}
+
+static int rxq_init(struct net_device *dev)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       struct rx_desc *p_rx_desc;
+       int size = 0, i = 0;
+       int rx_desc_num = pep->rx_ring_size;
+
+       /* Allocate RX skb rings */
+       pep->rx_skb = kmalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size,
+                            GFP_KERNEL);
+       if (!pep->rx_skb) {
+               printk(KERN_ERR "%s: Cannot alloc RX skb ring\n", dev->name);
+               return -ENOMEM;
+       }
+       /* Allocate RX ring */
+       pep->rx_desc_count = 0;
+       size = pep->rx_ring_size * sizeof(struct rx_desc);
+       pep->rx_desc_area_size = size;
+       pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
+                                               &pep->rx_desc_dma, GFP_KERNEL);
+       if (!pep->p_rx_desc_area) {
+               printk(KERN_ERR "%s: Cannot alloc RX ring (size %d bytes)\n",
+                      dev->name, size);
+               goto out;
+       }
+       memset((void *)pep->p_rx_desc_area, 0, size);
+       /* initialize the next_desc_ptr links in the Rx descriptors ring */
+       p_rx_desc = (struct rx_desc *)pep->p_rx_desc_area;
+       for (i = 0; i < rx_desc_num; i++) {
+               p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma +
+                   ((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
+       }
+       /* Save Rx desc pointer to driver struct. */
+       pep->rx_curr_desc_q = 0;
+       pep->rx_used_desc_q = 0;
+       pep->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc);
+       return 0;
+out:
+       kfree(pep->rx_skb);
+       return -ENOMEM;
+}
+
+static void rxq_deinit(struct net_device *dev)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       int curr;
+
+       /* Free preallocated skb's on RX rings */
+       for (curr = 0; pep->rx_desc_count && curr < pep->rx_ring_size; curr++) {
+               if (pep->rx_skb[curr]) {
+                       dev_kfree_skb(pep->rx_skb[curr]);
+                       pep->rx_desc_count--;
+               }
+       }
+       if (pep->rx_desc_count)
+               printk(KERN_ERR
+                      "Error freeing Rx ring: %d skbs still allocated\n",
+                      pep->rx_desc_count);
+       /* Free RX ring */
+       if (pep->p_rx_desc_area)
+               dma_free_coherent(pep->dev->dev.parent, pep->rx_desc_area_size,
+                                 pep->p_rx_desc_area, pep->rx_desc_dma);
+       kfree(pep->rx_skb);
+}
+
+static int txq_init(struct net_device *dev)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       struct tx_desc *p_tx_desc;
+       int size = 0, i = 0;
+       int tx_desc_num = pep->tx_ring_size;
+
+       pep->tx_skb = kmalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size,
+                            GFP_KERNEL);
+       if (!pep->tx_skb) {
+               printk(KERN_ERR "%s: Cannot alloc TX skb ring\n", dev->name);
+               return -ENOMEM;
+       }
+       /* Allocate TX ring */
+       pep->tx_desc_count = 0;
+       size = pep->tx_ring_size * sizeof(struct tx_desc);
+       pep->tx_desc_area_size = size;
+       pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
+                                               &pep->tx_desc_dma, GFP_KERNEL);
+       if (!pep->p_tx_desc_area) {
+               printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
+                      dev->name, size);
+               goto out;
+       }
+       memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size);
+       /* Initialize the next_desc_ptr links in the Tx descriptors ring */
+       p_tx_desc = (struct tx_desc *)pep->p_tx_desc_area;
+       for (i = 0; i < tx_desc_num; i++) {
+               p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma +
+                   ((i + 1) % tx_desc_num) * sizeof(struct tx_desc);
+       }
+       pep->tx_curr_desc_q = 0;
+       pep->tx_used_desc_q = 0;
+       pep->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc);
+       return 0;
+out:
+       kfree(pep->tx_skb);
+       return -ENOMEM;
+}
+
+static void txq_deinit(struct net_device *dev)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+
+       /* Free outstanding skb's on TX ring */
+       txq_reclaim(dev, 1);
+       BUG_ON(pep->tx_used_desc_q != pep->tx_curr_desc_q);
+       /* Free TX ring */
+       if (pep->p_tx_desc_area)
+               dma_free_coherent(pep->dev->dev.parent, pep->tx_desc_area_size,
+                                 pep->p_tx_desc_area, pep->tx_desc_dma);
+       kfree(pep->tx_skb);
+}
+
+static int pxa168_eth_open(struct net_device *dev)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       int err;
+
+       err = request_irq(dev->irq, pxa168_eth_int_handler,
+                         IRQF_DISABLED, dev->name, dev);
+       if (err) {
+               dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n");
+               return -EAGAIN;
+       }
+       pep->rx_resource_err = 0;
+       err = rxq_init(dev);
+       if (err != 0)
+               goto out_free_irq;
+       err = txq_init(dev);
+       if (err != 0)
+               goto out_free_rx_skb;
+       pep->rx_used_desc_q = 0;
+       pep->rx_curr_desc_q = 0;
+
+       /* Fill RX ring with skb's */
+       rxq_refill(dev);
+       pep->rx_used_desc_q = 0;
+       pep->rx_curr_desc_q = 0;
+       netif_carrier_off(dev);
+       eth_port_start(dev);
+       napi_enable(&pep->napi);
+       return 0;
+out_free_rx_skb:
+       rxq_deinit(dev);
+out_free_irq:
+       free_irq(dev->irq, dev);
+       return err;
+}
+
+static int pxa168_eth_stop(struct net_device *dev)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       eth_port_reset(dev);
+
+       /* Disable interrupts */
+       wrl(pep, INT_MASK, 0);
+       wrl(pep, INT_CAUSE, 0);
+       /* Write to ICR to clear interrupts. */
+       wrl(pep, INT_W_CLEAR, 0);
+       napi_disable(&pep->napi);
+       del_timer_sync(&pep->timeout);
+       netif_carrier_off(dev);
+       free_irq(dev->irq, dev);
+       rxq_deinit(dev);
+       txq_deinit(dev);
+
+       return 0;
+}
+
+static int pxa168_eth_change_mtu(struct net_device *dev, int mtu)
+{
+       int retval;
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+
+       if ((mtu > 9500) || (mtu < 68))
+               return -EINVAL;
+
+       dev->mtu = mtu;
+       retval = set_port_config_ext(pep);
+
+       if (!netif_running(dev))
+               return 0;
+
+       /*
+        * Stop and then re-open the interface. This will allocate RX
+        * skbs of the new MTU.
+        * Note that the open may not succeed if memory is exhausted
+        * at that point.
+        */
+       pxa168_eth_stop(dev);
+       if (pxa168_eth_open(dev)) {
+               dev_printk(KERN_ERR, &dev->dev,
+                          "fatal error on re-opening device after "
+                          "MTU change\n");
+       }
+
+       return 0;
+}
+
+static int eth_alloc_tx_desc_index(struct pxa168_eth_private *pep)
+{
+       int tx_desc_curr;
+
+       tx_desc_curr = pep->tx_curr_desc_q;
+       pep->tx_curr_desc_q = (tx_desc_curr + 1) % pep->tx_ring_size;
+       BUG_ON(pep->tx_curr_desc_q == pep->tx_used_desc_q);
+       pep->tx_desc_count++;
+
+       return tx_desc_curr;
+}
+
+static int pxa168_rx_poll(struct napi_struct *napi, int budget)
+{
+       struct pxa168_eth_private *pep =
+           container_of(napi, struct pxa168_eth_private, napi);
+       struct net_device *dev = pep->dev;
+       int work_done = 0;
+
+       if (unlikely(pep->work_todo & WORK_LINK)) {
+               pep->work_todo &= ~(WORK_LINK);
+               handle_link_event(pep);
+       }
+       /*
+        * We call txq_reclaim every time, since interrupts are disabled while
+        * in NAPI and because of that we miss the TX_DONE interrupt, which is
+        * not updated in the interrupt status register.
+        */
+       txq_reclaim(dev, 0);
+       if (netif_queue_stopped(dev)
+           && pep->tx_ring_size - pep->tx_desc_count > 1) {
+               netif_wake_queue(dev);
+       }
+       work_done = rxq_process(dev, budget);
+       if (work_done < budget) {
+               napi_complete(napi);
+               wrl(pep, INT_MASK, ALL_INTS);
+       }
+
+       return work_done;
+}
+
+static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       struct net_device_stats *stats = &dev->stats;
+       struct tx_desc *desc;
+       int tx_index;
+       int length;
+
+       tx_index = eth_alloc_tx_desc_index(pep);
+       desc = &pep->p_tx_desc_area[tx_index];
+       length = skb->len;
+       pep->tx_skb[tx_index] = skb;
+       desc->byte_cnt = length;
+       desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
+       wmb();
+       desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC |
+                       TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT;
+       wmb();
+       wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD);
+
+       stats->tx_bytes += skb->len;
+       stats->tx_packets++;
+       dev->trans_start = jiffies;
+       if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
+               /* We handled the current skb, but now we are out of space. */
+               netif_stop_queue(dev);
+       }
+
+       return NETDEV_TX_OK;
+}
+
+static int smi_wait_ready(struct pxa168_eth_private *pep)
+{
+       int i = 0;
+
+       /* wait for the SMI register to become available */
+       for (i = 0; rdl(pep, SMI) & SMI_BUSY; i++) {
+               if (i == PHY_WAIT_ITERATIONS)
+                       return -ETIMEDOUT;
+               msleep(10);
+       }
+
+       return 0;
+}
+
+static int pxa168_smi_read(struct mii_bus *bus, int phy_addr, int regnum)
+{
+       struct pxa168_eth_private *pep = bus->priv;
+       int i = 0;
+       int val;
+
+       if (smi_wait_ready(pep)) {
+               printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n");
+               return -ETIMEDOUT;
+       }
+       wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | SMI_OP_R);
+       /* now wait for the data to be valid */
+       for (i = 0; !((val = rdl(pep, SMI)) & SMI_R_VALID); i++) {
+               if (i == PHY_WAIT_ITERATIONS) {
+                       printk(KERN_WARNING
+                               "pxa168_eth: SMI bus read not valid\n");
+                       return -ENODEV;
+               }
+               msleep(10);
+       }
+
+       return val & 0xffff;
+}
+
+static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum,
+                           u16 value)
+{
+       struct pxa168_eth_private *pep = bus->priv;
+
+       if (smi_wait_ready(pep)) {
+               printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n");
+               return -ETIMEDOUT;
+       }
+
+       wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) |
+           SMI_OP_W | (value & 0xffff));
+
+       if (smi_wait_ready(pep)) {
+               printk(KERN_ERR "pxa168_eth: SMI bus busy timeout\n");
+               return -ETIMEDOUT;
+       }
+
+       return 0;
+}
+
+static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr,
+                              int cmd)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       if (pep->phy != NULL)
+               return phy_mii_ioctl(pep->phy, ifr, cmd);
+
+       return -EOPNOTSUPP;
+}
+
+static struct phy_device *phy_scan(struct pxa168_eth_private *pep, int phy_addr)
+{
+       struct mii_bus *bus = pep->smi_bus;
+       struct phy_device *phydev;
+       int start;
+       int num;
+       int i;
+
+       if (phy_addr == PXA168_ETH_PHY_ADDR_DEFAULT) {
+               /* Scan entire range */
+               start = ethernet_phy_get(pep);
+               num = 32;
+       } else {
+               /* Use phy addr specific to platform */
+               start = phy_addr & 0x1f;
+               num = 1;
+       }
+       phydev = NULL;
+       for (i = 0; i < num; i++) {
+               int addr = (start + i) & 0x1f;
+               if (bus->phy_map[addr] == NULL)
+                       mdiobus_scan(bus, addr);
+
+               if (phydev == NULL) {
+                       phydev = bus->phy_map[addr];
+                       if (phydev != NULL)
+                               ethernet_phy_set_addr(pep, addr);
+               }
+       }
+
+       return phydev;
+}
+
+static void phy_init(struct pxa168_eth_private *pep, int speed, int duplex)
+{
+       struct phy_device *phy = pep->phy;
+       ethernet_phy_reset(pep);
+
+       phy_attach(pep->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_MII);
+
+       if (speed == 0) {
+               phy->autoneg = AUTONEG_ENABLE;
+               phy->speed = 0;
+               phy->duplex = 0;
+               phy->supported &= PHY_BASIC_FEATURES;
+               phy->advertising = phy->supported | ADVERTISED_Autoneg;
+       } else {
+               phy->autoneg = AUTONEG_DISABLE;
+               phy->advertising = 0;
+               phy->speed = speed;
+               phy->duplex = duplex;
+       }
+       phy_start_aneg(phy);
+}
+
+static int ethernet_phy_setup(struct net_device *dev)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+
+       if (pep->pd->init)
+               pep->pd->init();
+       pep->phy = phy_scan(pep, pep->pd->phy_addr & 0x1f);
+       if (pep->phy != NULL)
+               phy_init(pep, pep->pd->speed, pep->pd->duplex);
+       update_hash_table_mac_address(pep, NULL, dev->dev_addr);
+
+       return 0;
+}
+
+static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       int err;
+
+       err = phy_read_status(pep->phy);
+       if (err == 0)
+               err = phy_ethtool_gset(pep->phy, cmd);
+
+       return err;
+}
+
+static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+
+       return phy_ethtool_sset(pep->phy, cmd);
+}
+
+static void pxa168_get_drvinfo(struct net_device *dev,
+                              struct ethtool_drvinfo *info)
+{
+       strncpy(info->driver, DRIVER_NAME, 32);
+       strncpy(info->version, DRIVER_VERSION, 32);
+       strncpy(info->fw_version, "N/A", 32);
+       strncpy(info->bus_info, "N/A", 32);
+}
+
+static u32 pxa168_get_link(struct net_device *dev)
+{
+       return !!netif_carrier_ok(dev);
+}
+
+static const struct ethtool_ops pxa168_ethtool_ops = {
+       .get_settings = pxa168_get_settings,
+       .set_settings = pxa168_set_settings,
+       .get_drvinfo = pxa168_get_drvinfo,
+       .get_link = pxa168_get_link,
+};
+
+static const struct net_device_ops pxa168_eth_netdev_ops = {
+       .ndo_open = pxa168_eth_open,
+       .ndo_stop = pxa168_eth_stop,
+       .ndo_start_xmit = pxa168_eth_start_xmit,
+       .ndo_set_rx_mode = pxa168_eth_set_rx_mode,
+       .ndo_set_mac_address = pxa168_eth_set_mac_address,
+       .ndo_validate_addr = eth_validate_addr,
+       .ndo_do_ioctl = pxa168_eth_do_ioctl,
+       .ndo_change_mtu = pxa168_eth_change_mtu,
+       .ndo_tx_timeout = pxa168_eth_tx_timeout,
+};
+
+static int pxa168_eth_probe(struct platform_device *pdev)
+{
+       struct pxa168_eth_private *pep = NULL;
+       struct net_device *dev = NULL;
+       struct resource *res;
+       struct clk *clk;
+       int err;
+
+       printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n");
+
+       clk = clk_get(&pdev->dev, "MFUCLK");
+       if (IS_ERR(clk)) {
+               printk(KERN_ERR "%s: Fast Ethernet failed to get clock\n",
+                       DRIVER_NAME);
+               return -ENODEV;
+       }
+       clk_enable(clk);
+
+       dev = alloc_etherdev(sizeof(struct pxa168_eth_private));
+       if (!dev) {
+               err = -ENOMEM;
+               goto err_clk;
+       }
+
+       platform_set_drvdata(pdev, dev);
+       pep = netdev_priv(dev);
+       pep->dev = dev;
+       pep->clk = clk;
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (res == NULL) {
+               err = -ENODEV;
+               goto err_netdev;
+       }
+       pep->base = ioremap(res->start, res->end - res->start + 1);
+       if (pep->base == NULL) {
+               err = -ENOMEM;
+               goto err_netdev;
+       }
+       res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+       BUG_ON(!res);
+       dev->irq = res->start;
+       dev->netdev_ops = &pxa168_eth_netdev_ops;
+       dev->watchdog_timeo = 2 * HZ;
+       dev->base_addr = 0;
+       SET_ETHTOOL_OPS(dev, &pxa168_ethtool_ops);
+
+       INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);
+
+       printk(KERN_INFO "%s: Using random MAC address\n", DRIVER_NAME);
+       random_ether_addr(dev->dev_addr);
+
+       pep->pd = pdev->dev.platform_data;
+       pep->rx_ring_size = NUM_RX_DESCS;
+       if (pep->pd->rx_queue_size)
+               pep->rx_ring_size = pep->pd->rx_queue_size;
+
+       pep->tx_ring_size = NUM_TX_DESCS;
+       if (pep->pd->tx_queue_size)
+               pep->tx_ring_size = pep->pd->tx_queue_size;
+
+       pep->port_num = pep->pd->port_number;
+       /* Hardware supports only 3 ports */
+       BUG_ON(pep->port_num > 2);
+       netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size);
+
+       memset(&pep->timeout, 0, sizeof(struct timer_list));
+       init_timer(&pep->timeout);
+       pep->timeout.function = rxq_refill_timer_wrapper;
+       pep->timeout.data = (unsigned long)pep;
+
+       pep->smi_bus = mdiobus_alloc();
+       if (pep->smi_bus == NULL) {
+               err = -ENOMEM;
+               goto err_base;
+       }
+       pep->smi_bus->priv = pep;
+       pep->smi_bus->name = "pxa168_eth smi";
+       pep->smi_bus->read = pxa168_smi_read;
+       pep->smi_bus->write = pxa168_smi_write;
+       snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
+       pep->smi_bus->parent = &pdev->dev;
+       pep->smi_bus->phy_mask = 0xffffffff;
+       err = mdiobus_register(pep->smi_bus);
+       if (err)
+               goto err_free_mdio;
+
+       pxa168_init_hw(pep);
+       err = ethernet_phy_setup(dev);
+       if (err)
+               goto err_mdiobus;
+       SET_NETDEV_DEV(dev, &pdev->dev);
+       err = register_netdev(dev);
+       if (err)
+               goto err_mdiobus;
+       return 0;
+
+err_mdiobus:
+       mdiobus_unregister(pep->smi_bus);
+err_free_mdio:
+       mdiobus_free(pep->smi_bus);
+err_base:
+       iounmap(pep->base);
+err_netdev:
+       free_netdev(dev);
+err_clk:
+       clk_disable(clk);
+       clk_put(clk);
+       return err;
+}
+
+static int pxa168_eth_remove(struct platform_device *pdev)
+{
+       struct net_device *dev = platform_get_drvdata(pdev);
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+
+       if (pep->htpr) {
+               dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE,
+                                 pep->htpr, pep->htpr_dma);
+               pep->htpr = NULL;
+       }
+       if (pep->clk) {
+               clk_disable(pep->clk);
+               clk_put(pep->clk);
+               pep->clk = NULL;
+       }
+       if (pep->phy != NULL)
+               phy_detach(pep->phy);
+
+       iounmap(pep->base);
+       pep->base = NULL;
+       mdiobus_unregister(pep->smi_bus);
+       mdiobus_free(pep->smi_bus);
+       unregister_netdev(dev);
+       flush_scheduled_work();
+       free_netdev(dev);
+       platform_set_drvdata(pdev, NULL);
+       return 0;
+}
+
+static void pxa168_eth_shutdown(struct platform_device *pdev)
+{
+       struct net_device *dev = platform_get_drvdata(pdev);
+       eth_port_reset(dev);
+}
+
+#ifdef CONFIG_PM
+static int pxa168_eth_resume(struct platform_device *pdev)
+{
+       return -ENOSYS;
+}
+
+static int pxa168_eth_suspend(struct platform_device *pdev, pm_message_t state)
+{
+       return -ENOSYS;
+}
+
+#else
+#define pxa168_eth_resume NULL
+#define pxa168_eth_suspend NULL
+#endif
+
+static struct platform_driver pxa168_eth_driver = {
+       .probe = pxa168_eth_probe,
+       .remove = pxa168_eth_remove,
+       .shutdown = pxa168_eth_shutdown,
+       .resume = pxa168_eth_resume,
+       .suspend = pxa168_eth_suspend,
+       .driver = {
+                  .name = DRIVER_NAME,
+                  },
+};
+
+static int __init pxa168_init_module(void)
+{
+       return platform_driver_register(&pxa168_eth_driver);
+}
+
+static void __exit pxa168_cleanup_module(void)
+{
+       platform_driver_unregister(&pxa168_eth_driver);
+}
+
+module_init(pxa168_init_module);
+module_exit(pxa168_cleanup_module);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168");
+MODULE_ALIAS("platform:pxa168_eth");
index bf6d87adda4fff7a5aa1d1e20728383d08e621f1..66eea59720209f85278cf223b6d723f879337aac 100644 (file)
@@ -1983,8 +1983,6 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        struct net_device_stats *stats = &netdev->stats;
 
-       memset(stats, 0, sizeof(*stats));
-
        stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
        stats->tx_packets = adapter->stats.xmitfinished;
        stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
@@ -2190,9 +2188,16 @@ static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void qlcnic_poll_controller(struct net_device *netdev)
 {
+       int ring;
+       struct qlcnic_host_sds_ring *sds_ring;
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
        disable_irq(adapter->irq);
-       qlcnic_intr(adapter->irq, adapter);
+       for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+               sds_ring = &recv_ctx->sds_rings[ring];
+               qlcnic_intr(adapter->irq, sds_ring);
+       }
        enable_irq(adapter->irq);
 }
 #endif
index 8d63f69b27d912de7dca94d980276963f25c7fe7..5f89e83501f4f5c1892ac15a84681a76ade08c19 100644 (file)
@@ -3919,12 +3919,12 @@ static int ql_adapter_down(struct ql_adapter *qdev)
        for (i = 0; i < qdev->rss_ring_count; i++)
                netif_napi_del(&qdev->rx_ring[i].napi);
 
-       ql_free_rx_buffers(qdev);
-
        status = ql_adapter_reset(qdev);
        if (status)
                netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
                          qdev->func);
+       ql_free_rx_buffers(qdev);
+
        return status;
 }
 
index f5a9eb1df59332f0d105d7ee83380471a0ff48f1..79fd02bc69fd0854f14e0fdac452980cd7fed74b 100644 (file)
@@ -1437,7 +1437,7 @@ static const struct net_device_ops sh_eth_netdev_ops = {
 
 static int sh_eth_drv_probe(struct platform_device *pdev)
 {
-       int ret, i, devno = 0;
+       int ret, devno = 0;
        struct resource *res;
        struct net_device *ndev = NULL;
        struct sh_eth_private *mdp;
index 08e7b6abacdd29f3fc78678834ed54feeabe6c6d..8ed30fa35d0a5d789121eb3042c4b87a5e53a7f2 100644 (file)
@@ -58,6 +58,7 @@
 #define USB_PRODUCT_IPHONE      0x1290
 #define USB_PRODUCT_IPHONE_3G   0x1292
 #define USB_PRODUCT_IPHONE_3GS  0x1294
+#define USB_PRODUCT_IPHONE_4   0x1297
 
 #define IPHETH_USBINTF_CLASS    255
 #define IPHETH_USBINTF_SUBCLASS 253
@@ -92,6 +93,10 @@ static struct usb_device_id ipheth_table[] = {
                USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_3GS,
                IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
                IPHETH_USBINTF_PROTO) },
+       { USB_DEVICE_AND_INTERFACE_INFO(
+               USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4,
+               IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+               IPHETH_USBINTF_PROTO) },
        { }
 };
 MODULE_DEVICE_TABLE(usb, ipheth_table);
index a105087af9639884c1ca773ba96cf096fdde7954..f9aa1bc0a94756465bbd4d8ac7660cb3b95718c7 100644 (file)
@@ -732,7 +732,7 @@ static int adm8211_rf_set_channel(struct ieee80211_hw *dev, unsigned int chan)
 
        /* Nothing to do for ADMtek BBP */
        } else if (priv->bbp_type != ADM8211_TYPE_ADMTEK)
-               wiphy_debug(dev->wiphy, "unsupported bbp type %d\n",
+               wiphy_debug(dev->wiphy, "unsupported BBP type %d\n",
                            priv->bbp_type);
 
        ADM8211_RESTORE();
@@ -1032,7 +1032,7 @@ static int adm8211_hw_init_bbp(struct ieee80211_hw *dev)
                        break;
                }
        } else
-               wiphy_debug(dev->wiphy, "unsupported bbp %d\n", priv->bbp_type);
+               wiphy_debug(dev->wiphy, "unsupported BBP %d\n", priv->bbp_type);
 
        ADM8211_CSR_WRITE(SYNRF, 0);
 
@@ -1525,7 +1525,7 @@ static int adm8211_start(struct ieee80211_hw *dev)
        retval = request_irq(priv->pdev->irq, adm8211_interrupt,
                             IRQF_SHARED, "adm8211", dev);
        if (retval) {
-               wiphy_err(dev->wiphy, "failed to register irq handler\n");
+               wiphy_err(dev->wiphy, "failed to register IRQ handler\n");
                goto fail;
        }
 
@@ -1902,7 +1902,7 @@ static int __devinit adm8211_probe(struct pci_dev *pdev,
                goto err_free_eeprom;
        }
 
-       wiphy_info(dev->wiphy, "hwaddr %pm, rev 0x%02x\n",
+       wiphy_info(dev->wiphy, "hwaddr %pM, Rev 0x%02x\n",
                   dev->wiphy->perm_addr, pdev->revision);
 
        return 0;
index d5140a87f0737d92adde6ce9bb725d679be16538..1128fa8c9ed5ef3d0b9be5191edc4bd7320baf90 100644 (file)
@@ -655,7 +655,7 @@ static int at76_get_hw_config(struct at76_priv *priv)
 exit:
        kfree(hwcfg);
        if (ret < 0)
-               wiphy_err(priv->hw->wiphy, "cannot get hw config (error %d)\n",
+               wiphy_err(priv->hw->wiphy, "cannot get HW Config (error %d)\n",
                          ret);
 
        return ret;
@@ -960,7 +960,7 @@ static void at76_dump_mib_mac_addr(struct at76_priv *priv)
                           sizeof(struct mib_mac_addr));
        if (ret < 0) {
                wiphy_err(priv->hw->wiphy,
-                         "at76_get_mib (mac_addr) failed: %d\n", ret);
+                         "at76_get_mib (MAC_ADDR) failed: %d\n", ret);
                goto exit;
        }
 
@@ -989,7 +989,7 @@ static void at76_dump_mib_mac_wep(struct at76_priv *priv)
                           sizeof(struct mib_mac_wep));
        if (ret < 0) {
                wiphy_err(priv->hw->wiphy,
-                         "at76_get_mib (mac_wep) failed: %d\n", ret);
+                         "at76_get_mib (MAC_WEP) failed: %d\n", ret);
                goto exit;
        }
 
@@ -1026,7 +1026,7 @@ static void at76_dump_mib_mac_mgmt(struct at76_priv *priv)
                           sizeof(struct mib_mac_mgmt));
        if (ret < 0) {
                wiphy_err(priv->hw->wiphy,
-                         "at76_get_mib (mac_mgmt) failed: %d\n", ret);
+                         "at76_get_mib (MAC_MGMT) failed: %d\n", ret);
                goto exit;
        }
 
@@ -1062,7 +1062,7 @@ static void at76_dump_mib_mac(struct at76_priv *priv)
        ret = at76_get_mib(priv->udev, MIB_MAC, m, sizeof(struct mib_mac));
        if (ret < 0) {
                wiphy_err(priv->hw->wiphy,
-                         "at76_get_mib (mac) failed: %d\n", ret);
+                         "at76_get_mib (MAC) failed: %d\n", ret);
                goto exit;
        }
 
@@ -1099,7 +1099,7 @@ static void at76_dump_mib_phy(struct at76_priv *priv)
        ret = at76_get_mib(priv->udev, MIB_PHY, m, sizeof(struct mib_phy));
        if (ret < 0) {
                wiphy_err(priv->hw->wiphy,
-                         "at76_get_mib (phy) failed: %d\n", ret);
+                         "at76_get_mib (PHY) failed: %d\n", ret);
                goto exit;
        }
 
@@ -1132,7 +1132,7 @@ static void at76_dump_mib_local(struct at76_priv *priv)
        ret = at76_get_mib(priv->udev, MIB_LOCAL, m, sizeof(struct mib_local));
        if (ret < 0) {
                wiphy_err(priv->hw->wiphy,
-                         "at76_get_mib (local) failed: %d\n", ret);
+                         "at76_get_mib (LOCAL) failed: %d\n", ret);
                goto exit;
        }
 
@@ -1158,7 +1158,7 @@ static void at76_dump_mib_mdomain(struct at76_priv *priv)
                           sizeof(struct mib_mdomain));
        if (ret < 0) {
                wiphy_err(priv->hw->wiphy,
-                         "at76_get_mib (mdomain) failed: %d\n", ret);
+                         "at76_get_mib (MDOMAIN) failed: %d\n", ret);
                goto exit;
        }
 
@@ -1229,7 +1229,7 @@ static int at76_submit_rx_urb(struct at76_priv *priv)
        struct sk_buff *skb = priv->rx_skb;
 
        if (!priv->rx_urb) {
-               wiphy_err(priv->hw->wiphy, "%s: priv->rx_urb is null\n",
+               wiphy_err(priv->hw->wiphy, "%s: priv->rx_urb is NULL\n",
                          __func__);
                return -EFAULT;
        }
@@ -1792,7 +1792,7 @@ static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
                wiphy_err(priv->hw->wiphy, "error in tx submit urb: %d\n", ret);
                if (ret == -EINVAL)
                        wiphy_err(priv->hw->wiphy,
-                                 "-einval: tx urb %p hcpriv %p complete %p\n",
+                                 "-EINVAL: tx urb %p hcpriv %p complete %p\n",
                                  priv->tx_urb,
                                  priv->tx_urb->hcpriv, priv->tx_urb->complete);
        }
@@ -2310,7 +2310,7 @@ static int at76_init_new_device(struct at76_priv *priv,
 
        priv->mac80211_registered = 1;
 
-       wiphy_info(priv->hw->wiphy, "usb %s, mac %pm, firmware %d.%d.%d-%d\n",
+       wiphy_info(priv->hw->wiphy, "USB %s, MAC %pM, firmware %d.%d.%d-%d\n",
                   dev_name(&interface->dev), priv->mac_addr,
                   priv->fw_version.major, priv->fw_version.minor,
                   priv->fw_version.patch, priv->fw_version.build);
index c67b05f3bcbdcace54e806ccdf69de5793766d0c..debfb0fbc7c5e7316931b2e0ac0c4913ff7428d6 100644 (file)
@@ -245,7 +245,7 @@ static void __ar9170_dump_txstats(struct ar9170 *ar)
 {
        int i;
 
-       wiphy_debug(ar->hw->wiphy, "qos queue stats\n");
+       wiphy_debug(ar->hw->wiphy, "QoS queue stats\n");
 
        for (i = 0; i < __AR9170_NUM_TXQ; i++)
                wiphy_debug(ar->hw->wiphy,
@@ -387,7 +387,7 @@ static struct sk_buff *ar9170_get_queued_skb(struct ar9170 *ar,
                if (mac && compare_ether_addr(ieee80211_get_DA(hdr), mac)) {
 #ifdef AR9170_QUEUE_DEBUG
                        wiphy_debug(ar->hw->wiphy,
-                                   "skip frame => da %pm != %pm\n",
+                                   "skip frame => DA %pM != %pM\n",
                                    mac, ieee80211_get_DA(hdr));
                        ar9170_print_txheader(ar, skb);
 #endif /* AR9170_QUEUE_DEBUG */
index 373dcfec689c40c2bf1be9af2b9bddb7a15dcacb..d77ce9906b6ccb1bf1031b1260dfd2510fd9e116 100644 (file)
@@ -1327,6 +1327,10 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
                        PCI_DMA_TODEVICE);
 
        rate = ieee80211_get_tx_rate(sc->hw, info);
+       if (!rate) {
+               ret = -EINVAL;
+               goto err_unmap;
+       }
 
        if (info->flags & IEEE80211_TX_CTL_NO_ACK)
                flags |= AR5K_TXDESC_NOACK;
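
The ath5k hunk above guards the ieee80211_get_tx_rate() return value against NULL and unwinds the DMA mapping through the function's existing err_unmap label. A minimal userspace sketch of that check-then-unwind pattern (the struct, lookup function and label are illustrative stand-ins, not the driver's API):

#include <stdio.h>
#include <errno.h>

struct rate { int idx; };

/* Stand-in for ieee80211_get_tx_rate(); may return NULL. */
static struct rate *lookup_rate(int have_rate)
{
        static struct rate r = { .idx = 3 };
        return have_rate ? &r : NULL;
}

static int setup_tx(int have_rate)
{
        struct rate *rate;
        int ret = 0;

        /* ... the buffer is already DMA-mapped at this point ... */

        rate = lookup_rate(have_rate);
        if (!rate) {
                ret = -EINVAL;
                goto err_unmap;         /* undo the mapping before returning */
        }

        printf("using rate index %d\n", rate->idx);
        return 0;

err_unmap:
        /* the real driver releases the DMA mapping here */
        return ret;
}

int main(void)
{
        printf("ok: %d, missing rate: %d\n", setup_tx(1), setup_tx(0));
        return 0;
}
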
index b883b174385b822e98cb46e0f83b8024159cf18c..057fb69ddf7fbc56e76b010a1cc21833fdc2b078 100644 (file)
@@ -797,7 +797,7 @@ static bool ar9300_uncompress_block(struct ath_hw *ah,
                length = block[it+1];
                length &= 0xff;
 
-               if (length > 0 && spot >= 0 && spot+length < mdataSize) {
+               if (length > 0 && spot >= 0 && spot+length <= mdataSize) {
                        ath_print(common, ATH_DBG_EEPROM,
                                  "Restore at %d: spot=%d "
                                  "offset=%d length=%d\n",
index 7f48df1e2903008f0a68bf991817bee64edf5b81..0b09db0f8e7d99b1c22c94803884a67043e2187e 100644 (file)
@@ -62,7 +62,7 @@
 
 #define SD_NO_CTL               0xE0
 #define NO_CTL                  0xff
-#define CTL_MODE_M              7
+#define CTL_MODE_M              0xf
 #define CTL_11A                 0
 #define CTL_11B                 1
 #define CTL_11G                 2
index a1c39526161a9d663964638ebdc4206e95928b8a..345dd9721b415972d82d8309ce04aeec6c6bbc37 100644 (file)
@@ -31,7 +31,6 @@ enum ctl_group {
 #define NO_CTL 0xff
 #define SD_NO_CTL               0xE0
 #define NO_CTL                  0xff
-#define CTL_MODE_M              7
 #define CTL_11A                 0
 #define CTL_11B                 1
 #define CTL_11G                 2
index 1189dbb6e2a62abcdc090ca5eea61a9eb5824aa7..996e9d7d7586b240847526bec92e721ef8a20d9c 100644 (file)
@@ -2723,14 +2723,6 @@ static void __ipw2100_rx_process(struct ipw2100_priv *priv)
 
                packet = &priv->rx_buffers[i];
 
-               /* Sync the DMA for the STATUS buffer so CPU is sure to get
-                * the correct values */
-               pci_dma_sync_single_for_cpu(priv->pci_dev,
-                                           sq->nic +
-                                           sizeof(struct ipw2100_status) * i,
-                                           sizeof(struct ipw2100_status),
-                                           PCI_DMA_FROMDEVICE);
-
                /* Sync the DMA for the RX buffer so CPU is sure to get
                 * the correct values */
                pci_dma_sync_single_for_cpu(priv->pci_dev, packet->dma_addr,
index fec026212326dce857220196dd312c3b2d5b6c69..0b779a41a1426b9153a55f30a59a052a9e66f618 100644 (file)
@@ -265,7 +265,7 @@ struct iwl_cfg iwl1000_bgn_cfg = {
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
        .max_event_log_size = 128,
        .ucode_tracing = true,
        .sensitivity_calib_by_driver = true,
@@ -297,7 +297,7 @@ struct iwl_cfg iwl1000_bg_cfg = {
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
        .max_event_log_size = 128,
        .ucode_tracing = true,
        .sensitivity_calib_by_driver = true,
index 6950a783913b83561839f20a08657857ee242a86..8ccfcd08218d894895524c3995fca28f2695aa6c 100644 (file)
@@ -2731,7 +2731,7 @@ static struct iwl_cfg iwl3945_bg_cfg = {
        .led_compensation = 64,
        .broken_powersave = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
        .max_event_log_size = 512,
        .tx_power_by_driver = true,
 };
@@ -2752,7 +2752,7 @@ static struct iwl_cfg iwl3945_abg_cfg = {
        .led_compensation = 64,
        .broken_powersave = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
        .max_event_log_size = 512,
        .tx_power_by_driver = true,
 };
index d6da356608fa46efe34a317bdda60c6680b65d7e..d92b729092336523a5c0a2a541bfbd7cb1b4b640 100644 (file)
@@ -2322,7 +2322,7 @@ struct iwl_cfg iwl4965_agn_cfg = {
        .led_compensation = 61,
        .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
        .temperature_kelvin = true,
        .max_event_log_size = 512,
        .tx_power_by_driver = true,
index aacf3770f075b4abd1de4a474f79cc60eb42c9cc..48bdcd8d2e94c3355c877a55d8e7815249350fdc 100644 (file)
@@ -510,7 +510,7 @@ struct iwl_cfg iwl5300_agn_cfg = {
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
        .max_event_log_size = 512,
        .ucode_tracing = true,
        .sensitivity_calib_by_driver = true,
@@ -541,7 +541,7 @@ struct iwl_cfg iwl5100_bgn_cfg = {
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
        .max_event_log_size = 512,
        .ucode_tracing = true,
        .sensitivity_calib_by_driver = true,
@@ -570,7 +570,7 @@ struct iwl_cfg iwl5100_abg_cfg = {
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
        .max_event_log_size = 512,
        .ucode_tracing = true,
        .sensitivity_calib_by_driver = true,
@@ -601,7 +601,7 @@ struct iwl_cfg iwl5100_agn_cfg = {
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
        .max_event_log_size = 512,
        .ucode_tracing = true,
        .sensitivity_calib_by_driver = true,
@@ -632,7 +632,7 @@ struct iwl_cfg iwl5350_agn_cfg = {
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
        .max_event_log_size = 512,
        .ucode_tracing = true,
        .sensitivity_calib_by_driver = true,
@@ -663,7 +663,7 @@ struct iwl_cfg iwl5150_agn_cfg = {
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
        .max_event_log_size = 512,
        .ucode_tracing = true,
        .sensitivity_calib_by_driver = true,
@@ -693,7 +693,7 @@ struct iwl_cfg iwl5150_abg_cfg = {
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
        .max_event_log_size = 512,
        .ucode_tracing = true,
        .sensitivity_calib_by_driver = true,
index af4fd50f3405db334ce2e44e1ce648c7c510ed7c..cee06b968de807a642adb4db4e08e62cfe2237ab 100644 (file)
@@ -388,7 +388,7 @@ struct iwl_cfg iwl6000g2a_2agn_cfg = {
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
        .max_event_log_size = 512,
        .ucode_tracing = true,
        .sensitivity_calib_by_driver = true,
@@ -424,7 +424,7 @@ struct iwl_cfg iwl6000g2a_2abg_cfg = {
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
        .max_event_log_size = 512,
        .sensitivity_calib_by_driver = true,
        .chain_noise_calib_by_driver = true,
@@ -459,7 +459,7 @@ struct iwl_cfg iwl6000g2a_2bg_cfg = {
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
        .max_event_log_size = 512,
        .sensitivity_calib_by_driver = true,
        .chain_noise_calib_by_driver = true,
@@ -496,7 +496,7 @@ struct iwl_cfg iwl6000g2b_2agn_cfg = {
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
        .max_event_log_size = 512,
        .sensitivity_calib_by_driver = true,
        .chain_noise_calib_by_driver = true,
@@ -532,7 +532,7 @@ struct iwl_cfg iwl6000g2b_2abg_cfg = {
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
        .max_event_log_size = 512,
        .sensitivity_calib_by_driver = true,
        .chain_noise_calib_by_driver = true,
@@ -570,7 +570,7 @@ struct iwl_cfg iwl6000g2b_2bgn_cfg = {
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
        .max_event_log_size = 512,
        .sensitivity_calib_by_driver = true,
        .chain_noise_calib_by_driver = true,
@@ -606,7 +606,7 @@ struct iwl_cfg iwl6000g2b_2bg_cfg = {
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
        .max_event_log_size = 512,
        .sensitivity_calib_by_driver = true,
        .chain_noise_calib_by_driver = true,
@@ -644,7 +644,7 @@ struct iwl_cfg iwl6000g2b_bgn_cfg = {
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
        .max_event_log_size = 512,
        .sensitivity_calib_by_driver = true,
        .chain_noise_calib_by_driver = true,
@@ -680,7 +680,7 @@ struct iwl_cfg iwl6000g2b_bg_cfg = {
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
        .max_event_log_size = 512,
        .sensitivity_calib_by_driver = true,
        .chain_noise_calib_by_driver = true,
@@ -721,7 +721,7 @@ struct iwl_cfg iwl6000i_2agn_cfg = {
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
        .max_event_log_size = 1024,
        .ucode_tracing = true,
        .sensitivity_calib_by_driver = true,
@@ -756,7 +756,7 @@ struct iwl_cfg iwl6000i_2abg_cfg = {
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
        .max_event_log_size = 1024,
        .ucode_tracing = true,
        .sensitivity_calib_by_driver = true,
@@ -791,7 +791,7 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
        .max_event_log_size = 1024,
        .ucode_tracing = true,
        .sensitivity_calib_by_driver = true,
@@ -828,7 +828,7 @@ struct iwl_cfg iwl6050_2agn_cfg = {
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
        .chain_noise_scale = 1500,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
        .max_event_log_size = 1024,
        .ucode_tracing = true,
        .sensitivity_calib_by_driver = true,
@@ -866,7 +866,7 @@ struct iwl_cfg iwl6050g2_bgn_cfg = {
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
        .chain_noise_scale = 1500,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
        .max_event_log_size = 1024,
        .ucode_tracing = true,
        .sensitivity_calib_by_driver = true,
@@ -902,7 +902,7 @@ struct iwl_cfg iwl6050_2abg_cfg = {
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
        .chain_noise_scale = 1500,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
        .max_event_log_size = 1024,
        .ucode_tracing = true,
        .sensitivity_calib_by_driver = true,
@@ -940,7 +940,7 @@ struct iwl_cfg iwl6000_3agn_cfg = {
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
        .max_event_log_size = 1024,
        .ucode_tracing = true,
        .sensitivity_calib_by_driver = true,
index c1882fd8345d43dd630718c1f9475533511e5f7c..10d7b9b7f064f529af9d0cdc6aa2b0a144d9438d 100644 (file)
@@ -3667,6 +3667,49 @@ out_exit:
        IWL_DEBUG_MAC80211(priv, "leave\n");
 }
 
+static void iwlagn_configure_filter(struct ieee80211_hw *hw,
+                                   unsigned int changed_flags,
+                                   unsigned int *total_flags,
+                                   u64 multicast)
+{
+       struct iwl_priv *priv = hw->priv;
+       __le32 filter_or = 0, filter_nand = 0;
+
+#define CHK(test, flag)        do { \
+       if (*total_flags & (test))              \
+               filter_or |= (flag);            \
+       else                                    \
+               filter_nand |= (flag);          \
+       } while (0)
+
+       IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
+                       changed_flags, *total_flags);
+
+       CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+       CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
+       CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
+
+#undef CHK
+
+       mutex_lock(&priv->mutex);
+
+       priv->staging_rxon.filter_flags &= ~filter_nand;
+       priv->staging_rxon.filter_flags |= filter_or;
+
+       iwlcore_commit_rxon(priv);
+
+       mutex_unlock(&priv->mutex);
+
+       /*
+        * Receiving all multicast frames is always enabled by the
+        * default flags setup in iwl_connection_init_rx_config()
+        * since we currently do not support programming multicast
+        * filters into the device.
+        */
+       *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+                       FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
+}
+
 static void iwl_mac_flush(struct ieee80211_hw *hw, bool drop)
 {
        struct iwl_priv *priv = hw->priv;
@@ -3867,7 +3910,7 @@ static struct ieee80211_ops iwl_hw_ops = {
        .add_interface = iwl_mac_add_interface,
        .remove_interface = iwl_mac_remove_interface,
        .config = iwl_mac_config,
-       .configure_filter = iwl_configure_filter,
+       .configure_filter = iwlagn_configure_filter,
        .set_key = iwl_mac_set_key,
        .update_tkip_key = iwl_mac_update_tkip_key,
        .conf_tx = iwl_mac_conf_tx,
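
The new iwlagn_configure_filter() above folds the mac80211 filter flags into two masks: bits to OR into the staging RXON filter and bits to clear (the "nand" mask), applied under the driver mutex before committing. A simplified userspace sketch of that accumulate-then-apply pattern (the flag values are arbitrary placeholders, not the real FIF_*/RXON_FILTER_* bits):

#include <stdio.h>
#include <stdint.h>

/* Placeholder flag values -- the real mac80211/RXON bits differ. */
#define FIF_PROMISC        0x01
#define FIF_CONTROL        0x02
#define FILTER_PROMISC_MSK 0x10
#define FILTER_CTL_MSK     0x20

static uint32_t apply_filter(uint32_t current_flags, uint32_t requested)
{
        uint32_t filter_or = 0, filter_nand = 0;

/* If the caller asked for "test", turn "flag" on; otherwise mark it
 * for clearing.  Same shape as the driver's CHK() macro. */
#define CHK(test, flag) do {                    \
        if (requested & (test))                 \
                filter_or |= (flag);            \
        else                                    \
                filter_nand |= (flag);          \
        } while (0)

        CHK(FIF_PROMISC, FILTER_PROMISC_MSK);
        CHK(FIF_CONTROL, FILTER_CTL_MSK);
#undef CHK

        /* In the driver this update happens under priv->mutex. */
        current_flags &= ~filter_nand;
        current_flags |= filter_or;
        return current_flags;
}

int main(void)
{
        uint32_t flags = FILTER_CTL_MSK;        /* control frames currently on */

        flags = apply_filter(flags, FIF_PROMISC);   /* promisc on, control off */
        printf("filter flags: 0x%02x\n", flags);    /* prints 0x10 */
        return 0;
}
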
index 2c03c6e20a72d73ded70f8c96d37bfce6aa8e263..07dbc27964480eebedb67760029dcadab9584f84 100644 (file)
@@ -1328,51 +1328,6 @@ out:
 EXPORT_SYMBOL(iwl_apm_init);
 
 
-
-void iwl_configure_filter(struct ieee80211_hw *hw,
-                         unsigned int changed_flags,
-                         unsigned int *total_flags,
-                         u64 multicast)
-{
-       struct iwl_priv *priv = hw->priv;
-       __le32 filter_or = 0, filter_nand = 0;
-
-#define CHK(test, flag)        do { \
-       if (*total_flags & (test))              \
-               filter_or |= (flag);            \
-       else                                    \
-               filter_nand |= (flag);          \
-       } while (0)
-
-       IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
-                       changed_flags, *total_flags);
-
-       CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
-       CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
-       CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
-
-#undef CHK
-
-       mutex_lock(&priv->mutex);
-
-       priv->staging_rxon.filter_flags &= ~filter_nand;
-       priv->staging_rxon.filter_flags |= filter_or;
-
-       iwlcore_commit_rxon(priv);
-
-       mutex_unlock(&priv->mutex);
-
-       /*
-        * Receiving all multicast frames is always enabled by the
-        * default flags setup in iwl_connection_init_rx_config()
-        * since we currently do not support programming multicast
-        * filters into the device.
-        */
-       *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
-                       FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
-}
-EXPORT_SYMBOL(iwl_configure_filter);
-
 int iwl_set_hw_params(struct iwl_priv *priv)
 {
        priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
index 4a71dfb10a15d0f6ebf4007944d011d249333902..5e6ee3da6bbf740846033e4a57e44956e326a73c 100644 (file)
@@ -372,9 +372,6 @@ int iwl_set_decrypted_flag(struct iwl_priv *priv,
                           u32 decrypt_res,
                           struct ieee80211_rx_status *stats);
 void iwl_irq_handle_error(struct iwl_priv *priv);
-void iwl_configure_filter(struct ieee80211_hw *hw,
-                         unsigned int changed_flags,
-                         unsigned int *total_flags, u64 multicast);
 int iwl_set_hw_params(struct iwl_priv *priv);
 void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif);
 void iwl_bss_info_changed(struct ieee80211_hw *hw,
index f35bcad56e36bd68779edb3acf930d6b7595359a..2e97cd2fa98a6e0c7d9cdc6d98582ac54fe28459 100644 (file)
@@ -1049,7 +1049,8 @@ struct iwl_event_log {
 #define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
 
 /* timer constants use to monitor and recover stuck tx queues in mSecs */
-#define IWL_MONITORING_PERIOD  (1000)
+#define IWL_DEF_MONITORING_PERIOD      (1000)
+#define IWL_LONG_MONITORING_PERIOD     (5000)
 #define IWL_ONE_HUNDRED_MSECS   (100)
 #define IWL_SIXTY_SECS          (60000)
 
index 70c4b8fba0ee89093c56c056e7152b927da5a37c..59a308b02f95fdc077a84a7d12d71db7451b1816 100644 (file)
@@ -3391,6 +3391,55 @@ static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
 
        return 0;
 }
+
+static void iwl3945_configure_filter(struct ieee80211_hw *hw,
+                                    unsigned int changed_flags,
+                                    unsigned int *total_flags,
+                                    u64 multicast)
+{
+       struct iwl_priv *priv = hw->priv;
+       __le32 filter_or = 0, filter_nand = 0;
+
+#define CHK(test, flag)        do { \
+       if (*total_flags & (test))              \
+               filter_or |= (flag);            \
+       else                                    \
+               filter_nand |= (flag);          \
+       } while (0)
+
+       IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
+                       changed_flags, *total_flags);
+
+       CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+       CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
+       CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
+
+#undef CHK
+
+       mutex_lock(&priv->mutex);
+
+       priv->staging_rxon.filter_flags &= ~filter_nand;
+       priv->staging_rxon.filter_flags |= filter_or;
+
+       /*
+        * Committing directly here breaks for some reason,
+        * but we'll eventually commit the filter flags
+        * change anyway.
+        */
+
+       mutex_unlock(&priv->mutex);
+
+       /*
+        * Receiving all multicast frames is always enabled by the
+        * default flags setup in iwl_connection_init_rx_config()
+        * since we currently do not support programming multicast
+        * filters into the device.
+        */
+       *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+                       FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
+}
+
+
 /*****************************************************************************
  *
  * sysfs attributes
@@ -3796,7 +3845,7 @@ static struct ieee80211_ops iwl3945_hw_ops = {
        .add_interface = iwl_mac_add_interface,
        .remove_interface = iwl_mac_remove_interface,
        .config = iwl_mac_config,
-       .configure_filter = iwl_configure_filter,
+       .configure_filter = iwl3945_configure_filter,
        .set_key = iwl3945_mac_set_key,
        .conf_tx = iwl_mac_conf_tx,
        .reset_tsf = iwl_mac_reset_tsf,
index ba854c70ab9455b8e18a63fa254e18671f447232..87b634978b357797ed6ceb22f7dec9643f6c7054 100644 (file)
@@ -128,7 +128,7 @@ struct if_sdio_card {
        bool                    helper_allocated;
        bool                    firmware_allocated;
 
-       u8                      buffer[65536];
+       u8                      buffer[65536] __attribute__((aligned(4)));
 
        spinlock_t              lock;
        struct if_sdio_packet   *packets;
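
The if_sdio hunk above forces 4-byte alignment on the 64 KiB transfer buffer with __attribute__((aligned(4))); SDIO/DMA transfers generally assume word-aligned buffers, and a char-array member placed after two bools is not otherwise guaranteed to be aligned. A tiny sketch of the attribute plus a compile-time check (the struct and field names are illustrative):

#include <stdio.h>
#include <stddef.h>

struct sdio_card {
        _Bool helper_allocated;
        _Bool firmware_allocated;
        /* Force word alignment so the hardware can transfer straight into it. */
        unsigned char buffer[65536] __attribute__((aligned(4)));
};

/* Fail the build if the alignment ever regresses. */
_Static_assert(offsetof(struct sdio_card, buffer) % 4 == 0,
               "buffer must be 4-byte aligned");

int main(void)
{
        printf("buffer offset: %zu\n", offsetof(struct sdio_card, buffer));
        return 0;
}
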
index 01ad7f77383a8f18a58a69dc6648ba907618e069..86fa8abdd66fda1cba26ef0e2bfe90a249d14128 100644 (file)
@@ -486,7 +486,7 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
        struct ieee80211_rx_status rx_status;
 
        if (data->idle) {
-               wiphy_debug(hw->wiphy, "trying to tx when idle - reject\n");
+               wiphy_debug(hw->wiphy, "Trying to TX when idle - reject\n");
                return false;
        }
 
index d761ed2d8af46b8d8be19eab01101dc511321b00..f152a25be59f7020998d35ceb72ba55100ebff2b 100644 (file)
@@ -910,14 +910,14 @@ static int mwl8k_rxq_init(struct ieee80211_hw *hw, int index)
 
        rxq->rxd = pci_alloc_consistent(priv->pdev, size, &rxq->rxd_dma);
        if (rxq->rxd == NULL) {
-               wiphy_err(hw->wiphy, "failed to alloc rx descriptors\n");
+               wiphy_err(hw->wiphy, "failed to alloc RX descriptors\n");
                return -ENOMEM;
        }
        memset(rxq->rxd, 0, size);
 
        rxq->buf = kmalloc(MWL8K_RX_DESCS * sizeof(*rxq->buf), GFP_KERNEL);
        if (rxq->buf == NULL) {
-               wiphy_err(hw->wiphy, "failed to alloc rx skbuff list\n");
+               wiphy_err(hw->wiphy, "failed to alloc RX skbuff list\n");
                pci_free_consistent(priv->pdev, size, rxq->rxd, rxq->rxd_dma);
                return -ENOMEM;
        }
@@ -1145,14 +1145,14 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index)
 
        txq->txd = pci_alloc_consistent(priv->pdev, size, &txq->txd_dma);
        if (txq->txd == NULL) {
-               wiphy_err(hw->wiphy, "failed to alloc tx descriptors\n");
+               wiphy_err(hw->wiphy, "failed to alloc TX descriptors\n");
                return -ENOMEM;
        }
        memset(txq->txd, 0, size);
 
        txq->skb = kmalloc(MWL8K_TX_DESCS * sizeof(*txq->skb), GFP_KERNEL);
        if (txq->skb == NULL) {
-               wiphy_err(hw->wiphy, "failed to alloc tx skbuff list\n");
+               wiphy_err(hw->wiphy, "failed to alloc TX skbuff list\n");
                pci_free_consistent(priv->pdev, size, txq->txd, txq->txd_dma);
                return -ENOMEM;
        }
@@ -1573,7 +1573,7 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
                                        PCI_DMA_BIDIRECTIONAL);
 
        if (!timeout) {
-               wiphy_err(hw->wiphy, "command %s timeout after %u ms\n",
+               wiphy_err(hw->wiphy, "Command %s timeout after %u ms\n",
                          mwl8k_cmd_name(cmd->code, buf, sizeof(buf)),
                          MWL8K_CMD_TIMEOUT_MS);
                rc = -ETIMEDOUT;
@@ -1584,11 +1584,11 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
 
                rc = cmd->result ? -EINVAL : 0;
                if (rc)
-                       wiphy_err(hw->wiphy, "command %s error 0x%x\n",
+                       wiphy_err(hw->wiphy, "Command %s error 0x%x\n",
                                  mwl8k_cmd_name(cmd->code, buf, sizeof(buf)),
                                  le16_to_cpu(cmd->result));
                else if (ms > 2000)
-                       wiphy_notice(hw->wiphy, "command %s took %d ms\n",
+                       wiphy_notice(hw->wiphy, "Command %s took %d ms\n",
                                     mwl8k_cmd_name(cmd->code,
                                                    buf, sizeof(buf)),
                                     ms);
@@ -3210,7 +3210,7 @@ static int mwl8k_start(struct ieee80211_hw *hw)
        rc = request_irq(priv->pdev->irq, mwl8k_interrupt,
                         IRQF_SHARED, MWL8K_NAME, hw);
        if (rc) {
-               wiphy_err(hw->wiphy, "failed to register irq handler\n");
+               wiphy_err(hw->wiphy, "failed to register IRQ handler\n");
                return -EIO;
        }
 
@@ -3926,7 +3926,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
 
        priv->sram = pci_iomap(pdev, 0, 0x10000);
        if (priv->sram == NULL) {
-               wiphy_err(hw->wiphy, "cannot map device sram\n");
+               wiphy_err(hw->wiphy, "Cannot map device SRAM\n");
                goto err_iounmap;
        }
 
@@ -3938,7 +3938,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
        if (priv->regs == NULL) {
                priv->regs = pci_iomap(pdev, 2, 0x10000);
                if (priv->regs == NULL) {
-                       wiphy_err(hw->wiphy, "cannot map device registers\n");
+                       wiphy_err(hw->wiphy, "Cannot map device registers\n");
                        goto err_iounmap;
                }
        }
@@ -3950,14 +3950,14 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
        /* Ask userland hotplug daemon for the device firmware */
        rc = mwl8k_request_firmware(priv);
        if (rc) {
-               wiphy_err(hw->wiphy, "firmware files not found\n");
+               wiphy_err(hw->wiphy, "Firmware files not found\n");
                goto err_stop_firmware;
        }
 
        /* Load firmware into hardware */
        rc = mwl8k_load_firmware(hw);
        if (rc) {
-               wiphy_err(hw->wiphy, "cannot start firmware\n");
+               wiphy_err(hw->wiphy, "Cannot start firmware\n");
                goto err_stop_firmware;
        }
 
@@ -4047,7 +4047,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
        rc = request_irq(priv->pdev->irq, mwl8k_interrupt,
                         IRQF_SHARED, MWL8K_NAME, hw);
        if (rc) {
-               wiphy_err(hw->wiphy, "failed to register irq handler\n");
+               wiphy_err(hw->wiphy, "failed to register IRQ handler\n");
                goto err_free_queues;
        }
 
@@ -4067,7 +4067,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
                rc = mwl8k_cmd_get_hw_spec_sta(hw);
        }
        if (rc) {
-               wiphy_err(hw->wiphy, "cannot initialise firmware\n");
+               wiphy_err(hw->wiphy, "Cannot initialise firmware\n");
                goto err_free_irq;
        }
 
@@ -4081,14 +4081,14 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
        /* Turn radio off */
        rc = mwl8k_cmd_radio_disable(hw);
        if (rc) {
-               wiphy_err(hw->wiphy, "cannot disable\n");
+               wiphy_err(hw->wiphy, "Cannot disable\n");
                goto err_free_irq;
        }
 
        /* Clear MAC address */
        rc = mwl8k_cmd_set_mac_addr(hw, NULL, "\x00\x00\x00\x00\x00\x00");
        if (rc) {
-               wiphy_err(hw->wiphy, "cannot clear mac address\n");
+               wiphy_err(hw->wiphy, "Cannot clear MAC address\n");
                goto err_free_irq;
        }
 
@@ -4098,7 +4098,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
 
        rc = ieee80211_register_hw(hw);
        if (rc) {
-               wiphy_err(hw->wiphy, "cannot register device\n");
+               wiphy_err(hw->wiphy, "Cannot register device\n");
                goto err_free_queues;
        }
 
index d687cb7f2a599c28d04d6c875f5b4b2796604cdf..78347041ec40a3c51d094328495c8d86cb5c5a3b 100644 (file)
@@ -167,7 +167,7 @@ static int p54_generate_band(struct ieee80211_hw *dev,
        }
 
        if (j == 0) {
-               wiphy_err(dev->wiphy, "disabling totally damaged %d GHz band\n",
+               wiphy_err(dev->wiphy, "Disabling totally damaged %d GHz band\n",
                          (band == IEEE80211_BAND_2GHZ) ? 2 : 5);
 
                ret = -ENODATA;
@@ -695,12 +695,12 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
                u8 perm_addr[ETH_ALEN];
 
                wiphy_warn(dev->wiphy,
-                          "invalid hwaddr! using randomly generated mac addr\n");
+                          "Invalid hwaddr! Using randomly generated MAC addr\n");
                random_ether_addr(perm_addr);
                SET_IEEE80211_PERM_ADDR(dev, perm_addr);
        }
 
-       wiphy_info(dev->wiphy, "hwaddr %pm, mac:isl38%02x rf:%s\n",
+       wiphy_info(dev->wiphy, "hwaddr %pM, MAC:isl38%02x RF:%s\n",
                   dev->wiphy->perm_addr, priv->version,
                   p54_rf_chips[priv->rxhw]);
 
index 47006bca485216609afb0f51a201c4d2ef1ef04a..15b20c29a6042ae6e3dc5c99dae07e8c317127ca 100644 (file)
@@ -125,7 +125,7 @@ int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
 
        if (fw_version)
                wiphy_info(priv->hw->wiphy,
-                          "fw rev %s - softmac protocol %x.%x\n",
+                          "FW rev %s - Softmac protocol %x.%x\n",
                           fw_version, priv->fw_var >> 8, priv->fw_var & 0xff);
 
        if (priv->fw_var < 0x500)
index ea91f5cce6b3f62d31f8c5d45609e650a2cc40a7..3837e1eec5f4a56fdbf80a2b8f13984f3d15742b 100644 (file)
@@ -58,7 +58,7 @@ static void p54_update_leds(struct work_struct *work)
        err = p54_set_leds(priv);
        if (err && net_ratelimit())
                wiphy_err(priv->hw->wiphy,
-                         "failed to update leds (%d).\n", err);
+                         "failed to update LEDs (%d).\n", err);
 
        if (rerun)
                ieee80211_queue_delayed_work(priv->hw, &priv->led_work,
@@ -103,7 +103,7 @@ static int p54_register_led(struct p54_common *priv,
        err = led_classdev_register(wiphy_dev(priv->hw->wiphy), &led->led_dev);
        if (err)
                wiphy_err(priv->hw->wiphy,
-                         "failed to register %s led.\n", name);
+                         "Failed to register %s LED.\n", name);
        else
                led->registered = 1;
 
index 822f8dc26e9c051d3b9ee34e79ef715b0f204067..1eacba4daa5bb1d35edc265ac8862c464eac4387 100644 (file)
@@ -466,7 +466,7 @@ static int p54p_open(struct ieee80211_hw *dev)
        P54P_READ(dev_int);
 
        if (!wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ)) {
-               wiphy_err(dev->wiphy, "cannot boot firmware!\n");
+               wiphy_err(dev->wiphy, "Cannot boot firmware!\n");
                p54p_stop(dev);
                return -ETIMEDOUT;
        }
index 427b46f558ed4f57b0066a2ae312be0d0e82c7ae..0e937dc0c9c41df6cff985c6b538f6cb17cf1091 100644 (file)
@@ -446,7 +446,7 @@ static void p54_rx_frame_sent(struct p54_common *priv, struct sk_buff *skb)
        }
 
        if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
-            (!payload->status))
+            !(payload->status & P54_TX_FAILED))
                info->flags |= IEEE80211_TX_STAT_ACK;
        if (payload->status & P54_TX_PSM_CANCELLED)
                info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
@@ -540,7 +540,7 @@ static void p54_rx_trap(struct p54_common *priv, struct sk_buff *skb)
        case P54_TRAP_BEACON_TX:
                break;
        case P54_TRAP_RADAR:
-               wiphy_info(priv->hw->wiphy, "radar (freq:%d mhz)\n", freq);
+               wiphy_info(priv->hw->wiphy, "radar (freq:%d MHz)\n", freq);
                break;
        case P54_TRAP_NO_BEACON:
                if (priv->vif)
index b50c39aaec0522e63c89b30ad6cc6433747b6853..30107ce78dfb6cc8613c83c5ca525a5e8bf42314 100644 (file)
@@ -445,7 +445,7 @@ static int rtl8180_init_rx_ring(struct ieee80211_hw *dev)
                                             &priv->rx_ring_dma);
 
        if (!priv->rx_ring || (unsigned long)priv->rx_ring & 0xFF) {
-               wiphy_err(dev->wiphy, "cannot allocate rx ring\n");
+               wiphy_err(dev->wiphy, "Cannot allocate RX ring\n");
                return -ENOMEM;
        }
 
@@ -502,7 +502,7 @@ static int rtl8180_init_tx_ring(struct ieee80211_hw *dev,
 
        ring = pci_alloc_consistent(priv->pdev, sizeof(*ring) * entries, &dma);
        if (!ring || (unsigned long)ring & 0xFF) {
-               wiphy_err(dev->wiphy, "cannot allocate tx ring (prio = %d)\n",
+               wiphy_err(dev->wiphy, "Cannot allocate TX ring (prio = %d)\n",
                          prio);
                return -ENOMEM;
        }
@@ -568,7 +568,7 @@ static int rtl8180_start(struct ieee80211_hw *dev)
        ret = request_irq(priv->pdev->irq, rtl8180_interrupt,
                          IRQF_SHARED, KBUILD_MODNAME, dev);
        if (ret) {
-               wiphy_err(dev->wiphy, "failed to register irq handler\n");
+               wiphy_err(dev->wiphy, "failed to register IRQ handler\n");
                goto err_free_rings;
        }
 
index 5738a55c1b06b4c9bd7a8c30323266b0af2151b4..98e0351c1dd6923214b17919161793c873d89967 100644 (file)
@@ -573,7 +573,7 @@ static int rtl8187_cmd_reset(struct ieee80211_hw *dev)
        } while (--i);
 
        if (!i) {
-               wiphy_err(dev->wiphy, "reset timeout!\n");
+               wiphy_err(dev->wiphy, "Reset timeout!\n");
                return -ETIMEDOUT;
        }
 
@@ -1526,7 +1526,7 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
        mutex_init(&priv->conf_mutex);
        skb_queue_head_init(&priv->b_tx_status.queue);
 
-       wiphy_info(dev->wiphy, "hwaddr %pm, %s v%d + %s, rfkill mask %d\n",
+       wiphy_info(dev->wiphy, "hwaddr %pM, %s V%d + %s, rfkill mask %d\n",
                   mac_addr, chip_name, priv->asic_rev, priv->rf->name,
                   priv->rfkill_mask);
 
index fd96f9112322439f7bf3f72716f943459a9c347c..97eebdcf7eb9f432729c18a3ad090fe8934e0171 100644 (file)
@@ -366,7 +366,7 @@ static void rtl8225_rf_init(struct ieee80211_hw *dev)
                rtl8225_write(dev, 0x02, 0x044d);
                msleep(100);
                if (!(rtl8225_read(dev, 6) & (1 << 7)))
-                       wiphy_warn(dev->wiphy, "rf calibration failed! %x\n",
+                       wiphy_warn(dev->wiphy, "RF Calibration Failed! %x\n",
                                   rtl8225_read(dev, 6));
        }
 
@@ -735,7 +735,7 @@ static void rtl8225z2_rf_init(struct ieee80211_hw *dev)
                rtl8225_write(dev, 0x02, 0x044D);
                msleep(100);
                if (!(rtl8225_read(dev, 6) & (1 << 7)))
-                       wiphy_warn(dev->wiphy, "rf calibration failed! %x\n",
+                       wiphy_warn(dev->wiphy, "RF Calibration Failed! %x\n",
                                   rtl8225_read(dev, 6));
        }
 
index a9352b2c7ac430d4e4aafac3d65a1b46005ea505..b7e755f4178ad885332ccaaeeb5eda492e6dfcfd 100644 (file)
@@ -141,16 +141,6 @@ static struct notifier_block module_load_nb = {
        .notifier_call = module_load_notify,
 };
 
-
-static void end_sync(void)
-{
-       end_cpu_work();
-       /* make sure we don't leak task structs */
-       process_task_mortuary();
-       process_task_mortuary();
-}
-
-
 int sync_start(void)
 {
        int err;
@@ -158,7 +148,7 @@ int sync_start(void)
        if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
                return -ENOMEM;
 
-       start_cpu_work();
+       mutex_lock(&buffer_mutex);
 
        err = task_handoff_register(&task_free_nb);
        if (err)
@@ -173,7 +163,10 @@ int sync_start(void)
        if (err)
                goto out4;
 
+       start_cpu_work();
+
 out:
+       mutex_unlock(&buffer_mutex);
        return err;
 out4:
        profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
@@ -182,7 +175,6 @@ out3:
 out2:
        task_handoff_unregister(&task_free_nb);
 out1:
-       end_sync();
        free_cpumask_var(marked_cpus);
        goto out;
 }
@@ -190,11 +182,20 @@ out1:
 
 void sync_stop(void)
 {
+       /* flush buffers */
+       mutex_lock(&buffer_mutex);
+       end_cpu_work();
        unregister_module_notifier(&module_load_nb);
        profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
        profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
        task_handoff_unregister(&task_free_nb);
-       end_sync();
+       mutex_unlock(&buffer_mutex);
+       flush_scheduled_work();
+
+       /* make sure we don't leak task structs */
+       process_task_mortuary();
+       process_task_mortuary();
+
        free_cpumask_var(marked_cpus);
 }
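
The oprofile hunks above re-order shutdown: the per-CPU work is cancelled while buffer_mutex is held, flush_scheduled_work() now runs only after the mutex is dropped (end_cpu_work() no longer flushes), and the task mortuary is drained last. A hedged userspace sketch of the underlying rule -- never wait for work that may itself take the lock you are holding -- using POSIX threads in place of the kernel primitives (all names here are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t buffer_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the deferred work item: it takes buffer_mutex itself,
 * which is why the flush (join) must not run under that mutex. */
static void *worker(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&buffer_mutex);
        puts("worker: draining per-CPU buffer");
        pthread_mutex_unlock(&buffer_mutex);
        return NULL;
}

int main(void)
{
        pthread_t tid;

        pthread_create(&tid, NULL, worker, NULL);

        /* Step 1: under the mutex, stop new work from being queued
         * (the kernel code cancels the delayed work here). */
        pthread_mutex_lock(&buffer_mutex);
        puts("shutdown: work cancelled under buffer_mutex");
        pthread_mutex_unlock(&buffer_mutex);

        /* Step 2: only now wait for in-flight work -- joining while still
         * holding buffer_mutex could deadlock against worker(). */
        pthread_join(tid, NULL);
        puts("shutdown: all work flushed, safe to free buffers");
        return 0;
}
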
 
index 219f79e2210a3fcd561b94456c4960a0a1fbacd9..f179ac2ea80149423034d66a90b1e81251c5069b 100644 (file)
@@ -120,8 +120,6 @@ void end_cpu_work(void)
 
                cancel_delayed_work(&b->work);
        }
-
-       flush_scheduled_work();
 }
 
 /*
index 45fcc1e96df9ac08718696e7ea8a8f487f33072f..3bc72d18b121d3cff5ebcb7c8c05a5a06cdff78a 100644 (file)
@@ -338,9 +338,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
        acpi_handle chandle, handle;
        struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
 
-       flags &= (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
-                 OSC_SHPC_NATIVE_HP_CONTROL |
-                 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
+       flags &= OSC_SHPC_NATIVE_HP_CONTROL;
        if (!flags) {
                err("Invalid flags %u specified!\n", flags);
                return -EINVAL;
@@ -360,7 +358,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
                acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
                dbg("Trying to get hotplug control for %s\n",
                                (char *)string.pointer);
-               status = acpi_pci_osc_control_set(handle, flags);
+               status = acpi_pci_osc_control_set(handle, &flags, flags);
                if (ACPI_SUCCESS(status))
                        goto got_one;
                if (status == AE_SUPPORT)
index 4ed76b47b6dcbd6358e083b11d5f8e823daa9cdb..73d5139892639bf798767ee927c9f6f4c26da013 100644 (file)
@@ -176,19 +176,11 @@ static inline void pciehp_firmware_init(void)
 {
        pciehp_acpi_slot_detection_init();
 }
-
-static inline int pciehp_get_hp_hw_control_from_firmware(struct pci_dev *dev)
-{
-       int retval;
-       u32 flags = (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
-                    OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
-       retval = acpi_get_hp_hw_control_from_firmware(dev, flags);
-       if (retval)
-               return retval;
-       return pciehp_acpi_slot_detection_check(dev);
-}
 #else
 #define pciehp_firmware_init()                         do {} while (0)
-#define pciehp_get_hp_hw_control_from_firmware(dev)    0
+static inline int pciehp_acpi_slot_detection_check(struct pci_dev *dev)
+{
+       return 0;
+}
 #endif                                 /* CONFIG_ACPI */
 #endif                         /* _PCIEHP_H */
index 1f4000a5a108fa8d9f57383ffc03f9313c9fc2c0..2574700db461559717a95e0623c1c58c5821c687 100644 (file)
@@ -85,9 +85,7 @@ static int __init dummy_probe(struct pcie_device *dev)
        acpi_handle handle;
        struct dummy_slot *slot, *tmp;
        struct pci_dev *pdev = dev->port;
-       /* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */
-       if (pciehp_get_hp_hw_control_from_firmware(pdev))
-               return -ENODEV;
+
        pos = pci_pcie_cap(pdev);
        if (!pos)
                return -ENODEV;
index 3588ea61b0dd244df58ac3fdc92adc61cdc512be..aa5f3ff629ff4b798d3b2e473e3e6dfb025be823 100644 (file)
@@ -59,7 +59,7 @@ module_param(pciehp_force, bool, 0644);
 MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not");
 MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not");
 MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds");
-MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if _OSC and OSHP are missing");
+MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if OSHP is missing");
 
 #define PCIE_MODULE_NAME "pciehp"
 
@@ -235,7 +235,7 @@ static int pciehp_probe(struct pcie_device *dev)
                dev_info(&dev->device,
                         "Bypassing BIOS check for pciehp use on %s\n",
                         pci_name(dev->port));
-       else if (pciehp_get_hp_hw_control_from_firmware(dev->port))
+       else if (pciehp_acpi_slot_detection_check(dev->port))
                goto err_out_none;
 
        ctrl = pcie_init(dev);
index 679c39de6a89124e96a36252f32e8ff45991b7aa..7754a678ab15cc77445d396aa59409658bf8006f 100644 (file)
@@ -140,8 +140,10 @@ static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { }
 
 #ifdef CONFIG_PCIEAER
 void pci_no_aer(void);
+bool pci_aer_available(void);
 #else
 static inline void pci_no_aer(void) { }
+static inline bool pci_aer_available(void) { return false; }
 #endif
 
 static inline int pci_no_d1d2(struct pci_dev *dev)
index ea654545e7c468c876e7d7b2a7eb637d53c168b9..00c62df5a9fc4773405c74aca96d894f7515c108 100644 (file)
@@ -6,10 +6,11 @@
 obj-$(CONFIG_PCIEASPM)         += aspm.o
 
 pcieportdrv-y                  := portdrv_core.o portdrv_pci.o portdrv_bus.o
+pcieportdrv-$(CONFIG_ACPI)     += portdrv_acpi.o
 
 obj-$(CONFIG_PCIEPORTBUS)      += pcieportdrv.o
 
 # Build PCI Express AER if needed
 obj-$(CONFIG_PCIEAER)          += aer/
 
-obj-$(CONFIG_PCIE_PME) += pme/
+obj-$(CONFIG_PCIE_PME) += pme.o
index 484cc55194b8841809218cb15d5f2ccccdffd582..f409948e1a9bb614cfe5fedc1dc652285f8e0307 100644 (file)
@@ -72,6 +72,11 @@ void pci_no_aer(void)
        pcie_aer_disable = 1;   /* has priority over 'forceload' */
 }
 
+bool pci_aer_available(void)
+{
+       return !pcie_aer_disable && pci_msi_enabled();
+}
+
 static int set_device_error_reporting(struct pci_dev *dev, void *data)
 {
        bool enable = *((bool *)data);
@@ -411,9 +416,7 @@ static void aer_error_resume(struct pci_dev *dev)
  */
 static int __init aer_service_init(void)
 {
-       if (pcie_aer_disable)
-               return -ENXIO;
-       if (!pci_msi_enabled())
+       if (!pci_aer_available())
                return -ENXIO;
        return pcie_port_service_register(&aerdriver);
 }
index f278d7b0d95d32b636260cddbe763aa109fbfacf..2bb9b8972211073564b9d99b3cc85a148d79e583 100644 (file)
 #include <acpi/apei.h>
 #include "aerdrv.h"
 
-/**
- * aer_osc_setup - run ACPI _OSC method
- * @pciedev: pcie_device which AER is being enabled on
- *
- * @return: Zero on success. Nonzero otherwise.
- *
- * Invoked when PCIe bus loads AER service driver. To avoid conflict with
- * BIOS AER support requires BIOS to yield AER control to OS native driver.
- **/
-int aer_osc_setup(struct pcie_device *pciedev)
-{
-       acpi_status status = AE_NOT_FOUND;
-       struct pci_dev *pdev = pciedev->port;
-       acpi_handle handle = NULL;
-
-       if (acpi_pci_disabled)
-               return -1;
-
-       handle = acpi_find_root_bridge_handle(pdev);
-       if (handle) {
-               status = acpi_pci_osc_control_set(handle,
-                                       OSC_PCI_EXPRESS_AER_CONTROL |
-                                       OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
-       }
-
-       if (ACPI_FAILURE(status)) {
-               dev_printk(KERN_DEBUG, &pciedev->device, "AER service couldn't "
-                          "init device: %s\n",
-                          (status == AE_SUPPORT || status == AE_NOT_FOUND) ?
-                          "no _OSC support" : "_OSC failed");
-               return -1;
-       }
-
-       return 0;
-}
-
 #ifdef CONFIG_ACPI_APEI
 static inline int hest_match_pci(struct acpi_hest_aer_common *p,
                                 struct pci_dev *pci)
index fc0b5a93e1deaf3895ef671ae1f30892980c9352..29e268fadf14cb44d18fac7570e118ce7ba489c9 100644 (file)
@@ -772,22 +772,10 @@ void aer_isr(struct work_struct *work)
  */
 int aer_init(struct pcie_device *dev)
 {
-       if (pcie_aer_get_firmware_first(dev->port)) {
-               dev_printk(KERN_DEBUG, &dev->device,
-                          "PCIe errors handled by platform firmware.\n");
-               goto out;
-       }
-
-       if (aer_osc_setup(dev))
-               goto out;
-
-       return 0;
-out:
        if (forceload) {
                dev_printk(KERN_DEBUG, &dev->device,
                           "aerdrv forceload requested.\n");
                pcie_aer_force_firmware_first(dev->port, 0);
-               return 0;
        }
-       return -ENXIO;
+       return 0;
 }
similarity index 83%
rename from drivers/pci/pcie/pme/pcie_pme.c
rename to drivers/pci/pcie/pme.c
index bbdea18693d94048fc2444dcb388c0a18817fc6e..2f3c904072273fcdaf5c5a660405281720dff7aa 100644 (file)
 #include <linux/pci-acpi.h>
 #include <linux/pm_runtime.h>
 
-#include "../../pci.h"
-#include "pcie_pme.h"
+#include "../pci.h"
+#include "portdrv.h"
 
 #define PCI_EXP_RTSTA_PME      0x10000 /* PME status */
 #define PCI_EXP_RTSTA_PENDING  0x20000 /* PME pending */
 
-/*
- * If set, this switch will prevent the PCIe root port PME service driver from
- * being registered.  Consequently, the interrupt-based PCIe PME signaling will
- * not be used by any PCIe root ports in that case.
- */
-static bool pcie_pme_disabled = true;
-
-/*
- * The PCI Express Base Specification 2.0, Section 6.1.8, states the following:
- * "In order to maintain compatibility with non-PCI Express-aware system
- * software, system power management logic must be configured by firmware to use
- * the legacy mechanism of signaling PME by default.  PCI Express-aware system
- * software must notify the firmware prior to enabling native, interrupt-based
- * PME signaling."  However, if the platform doesn't provide us with a suitable
- * notification mechanism or the notification fails, it is not clear whether or
- * not we are supposed to use the interrupt-based PCIe PME signaling.  The
- * switch below can be used to indicate the desired behaviour.  When set, it
- * will make the kernel use the interrupt-based PCIe PME signaling regardless of
- * the platform notification status, although the kernel will attempt to notify
- * the platform anyway.  When unset, it will prevent the kernel from using the
- * the interrupt-based PCIe PME signaling if the platform notification fails,
- * which is the default.
- */
-static bool pcie_pme_force_enable;
-
 /*
  * If this switch is set, MSI will not be used for PCIe PME signaling.  This
  * causes the PCIe port driver to use INTx interrupts only, but it turns out
@@ -64,38 +39,13 @@ bool pcie_pme_msi_disabled;
 
 static int __init pcie_pme_setup(char *str)
 {
-       if (!strncmp(str, "auto", 4))
-               pcie_pme_disabled = false;
-       else if (!strncmp(str, "force", 5))
-               pcie_pme_force_enable = true;
-
-       str = strchr(str, ',');
-       if (str) {
-               str++;
-               str += strspn(str, " \t");
-               if (*str && !strcmp(str, "nomsi"))
-                       pcie_pme_msi_disabled = true;
-       }
+       if (!strncmp(str, "nomsi", 5))
+               pcie_pme_msi_disabled = true;
 
        return 1;
 }
 __setup("pcie_pme=", pcie_pme_setup);
 
-/**
- * pcie_pme_platform_setup - Ensure that the kernel controls the PCIe PME.
- * @srv: PCIe PME root port service to use for carrying out the check.
- *
- * Notify the platform that the native PCIe PME is going to be used and return
- * 'true' if the control of the PCIe PME registers has been acquired from the
- * platform.
- */
-static bool pcie_pme_platform_setup(struct pcie_device *srv)
-{
-       if (!pcie_pme_platform_notify(srv))
-               return true;
-       return pcie_pme_force_enable;
-}
-
 struct pcie_pme_service_data {
        spinlock_t lock;
        struct pcie_device *srv;
@@ -108,7 +58,7 @@ struct pcie_pme_service_data {
  * @dev: PCIe root port or event collector.
  * @enable: Enable or disable the interrupt.
  */
-static void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable)
+void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable)
 {
        int rtctl_pos;
        u16 rtctl;
@@ -417,9 +367,6 @@ static int pcie_pme_probe(struct pcie_device *srv)
        struct pcie_pme_service_data *data;
        int ret;
 
-       if (!pcie_pme_platform_setup(srv))
-               return -EACCES;
-
        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
@@ -509,8 +456,7 @@ static struct pcie_port_service_driver pcie_pme_driver = {
  */
 static int __init pcie_pme_service_init(void)
 {
-       return pcie_pme_disabled ?
-               -ENODEV : pcie_port_service_register(&pcie_pme_driver);
+       return pcie_port_service_register(&pcie_pme_driver);
 }
 
 module_init(pcie_pme_service_init);
diff --git a/drivers/pci/pcie/pme/Makefile b/drivers/pci/pcie/pme/Makefile
deleted file mode 100644 (file)
index 8b92380..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-#
-# Makefile for PCI-Express Root Port PME signaling driver
-#
-
-obj-$(CONFIG_PCIE_PME) += pmedriver.o
-
-pmedriver-objs := pcie_pme.o
-pmedriver-$(CONFIG_ACPI) += pcie_pme_acpi.o
diff --git a/drivers/pci/pcie/pme/pcie_pme.h b/drivers/pci/pcie/pme/pcie_pme.h
deleted file mode 100644 (file)
index b30d2b7..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * drivers/pci/pcie/pme/pcie_pme.h
- *
- * PCI Express Root Port PME signaling support
- *
- * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
- */
-
-#ifndef _PCIE_PME_H_
-#define _PCIE_PME_H_
-
-struct pcie_device;
-
-#ifdef CONFIG_ACPI
-extern int pcie_pme_acpi_setup(struct pcie_device *srv);
-
-static inline int pcie_pme_platform_notify(struct pcie_device *srv)
-{
-       return pcie_pme_acpi_setup(srv);
-}
-#else /* !CONFIG_ACPI */
-static inline int pcie_pme_platform_notify(struct pcie_device *srv)
-{
-       return 0;
-}
-#endif /* !CONFIG_ACPI */
-
-#endif
diff --git a/drivers/pci/pcie/pme/pcie_pme_acpi.c b/drivers/pci/pcie/pme/pcie_pme_acpi.c
deleted file mode 100644 (file)
index 83ab228..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * PCIe Native PME support, ACPI-related part
- *
- * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License V2.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/acpi.h>
-#include <linux/pci-acpi.h>
-#include <linux/pcieport_if.h>
-
-/**
- * pcie_pme_acpi_setup - Request the ACPI BIOS to release control over PCIe PME.
- * @srv - PCIe PME service for a root port or event collector.
- *
- * Invoked when the PCIe bus type loads PCIe PME service driver.  To avoid
- * conflict with the BIOS PCIe support requires the BIOS to yield PCIe PME
- * control to the kernel.
- */
-int pcie_pme_acpi_setup(struct pcie_device *srv)
-{
-       acpi_status status = AE_NOT_FOUND;
-       struct pci_dev *port = srv->port;
-       acpi_handle handle;
-       int error = 0;
-
-       if (acpi_pci_disabled)
-               return -ENOSYS;
-
-       dev_info(&port->dev, "Requesting control of PCIe PME from ACPI BIOS\n");
-
-       handle = acpi_find_root_bridge_handle(port);
-       if (!handle)
-               return -EINVAL;
-
-       status = acpi_pci_osc_control_set(handle,
-                       OSC_PCI_EXPRESS_PME_CONTROL |
-                       OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
-       if (ACPI_FAILURE(status)) {
-               dev_info(&port->dev,
-                       "Failed to receive control of PCIe PME service: %s\n",
-                       (status == AE_SUPPORT || status == AE_NOT_FOUND) ?
-                       "no _OSC support" : "ACPI _OSC failed");
-               error = -ENODEV;
-       }
-
-       return error;
-}
index 813a5c3427b69261d0d54d5c531ffc9f9973ce64..7b5aba0a3291107a231a8b6ee6a90cacf0c4557c 100644 (file)
@@ -20,6 +20,9 @@
 
 #define get_descriptor_id(type, service) (((type - 4) << 4) | service)
 
+extern bool pcie_ports_disabled;
+extern bool pcie_ports_auto;
+
 extern struct bus_type pcie_port_bus_type;
 extern int pcie_port_device_register(struct pci_dev *dev);
 #ifdef CONFIG_PM
@@ -30,6 +33,8 @@ extern void pcie_port_device_remove(struct pci_dev *dev);
 extern int __must_check pcie_port_bus_register(void);
 extern void pcie_port_bus_unregister(void);
 
+struct pci_dev;
+
 #ifdef CONFIG_PCIE_PME
 extern bool pcie_pme_msi_disabled;
 
@@ -42,9 +47,26 @@ static inline bool pcie_pme_no_msi(void)
 {
        return pcie_pme_msi_disabled;
 }
+
+extern void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable);
 #else /* !CONFIG_PCIE_PME */
 static inline void pcie_pme_disable_msi(void) {}
 static inline bool pcie_pme_no_msi(void) { return false; }
+static inline void pcie_pme_interrupt_enable(struct pci_dev *dev, bool en) {}
 #endif /* !CONFIG_PCIE_PME */
 
+#ifdef CONFIG_ACPI
+extern int pcie_port_acpi_setup(struct pci_dev *port, int *mask);
+
+static inline int pcie_port_platform_notify(struct pci_dev *port, int *mask)
+{
+       return pcie_port_acpi_setup(port, mask);
+}
+#else /* !CONFIG_ACPI */
+static inline int pcie_port_platform_notify(struct pci_dev *port, int *mask)
+{
+       return 0;
+}
+#endif /* !CONFIG_ACPI */
+
 #endif /* _PORTDRV_H_ */
diff --git a/drivers/pci/pcie/portdrv_acpi.c b/drivers/pci/pcie/portdrv_acpi.c
new file mode 100644 (file)
index 0000000..b7c4cb1
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * PCIe Port Native Services Support, ACPI-Related Part
+ *
+ * Copyright (C) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License V2.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/acpi.h>
+#include <linux/pci-acpi.h>
+#include <linux/pcieport_if.h>
+
+#include "aer/aerdrv.h"
+#include "../pci.h"
+
+/**
+ * pcie_port_acpi_setup - Request the BIOS to release control of PCIe services.
+ * @port: PCIe Port service for a root port or event collector.
+ * @srv_mask: Bit mask of services that can be enabled for @port.
+ *
+ * Invoked when @port is identified as a PCIe port device.  To avoid conflicts
+ * with the BIOS PCIe port native services support requires the BIOS to yield
+ * control of these services to the kernel.  The mask of services that the BIOS
+ * allows to be enabled for @port is written to @srv_mask.
+ *
+ * NOTE: It turns out that we cannot do that for individual port services
+ * separately, because that would make some systems work incorrectly.
+ */
+int pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask)
+{
+       acpi_status status;
+       acpi_handle handle;
+       u32 flags;
+
+       if (acpi_pci_disabled)
+               return 0;
+
+       handle = acpi_find_root_bridge_handle(port);
+       if (!handle)
+               return -EINVAL;
+
+       flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL
+               | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL
+               | OSC_PCI_EXPRESS_PME_CONTROL;
+
+       if (pci_aer_available()) {
+               if (pcie_aer_get_firmware_first(port))
+                       dev_dbg(&port->dev, "PCIe errors handled by BIOS.\n");
+               else
+                       flags |= OSC_PCI_EXPRESS_AER_CONTROL;
+       }
+
+       status = acpi_pci_osc_control_set(handle, &flags,
+                                       OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
+       if (ACPI_FAILURE(status)) {
+               dev_dbg(&port->dev, "ACPI _OSC request failed (code %d)\n",
+                       status);
+               return -ENODEV;
+       }
+
+       dev_info(&port->dev, "ACPI _OSC control granted for 0x%02x\n", flags);
+
+       *srv_mask = PCIE_PORT_SERVICE_VC;
+       if (flags & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)
+               *srv_mask |= PCIE_PORT_SERVICE_HP;
+       if (flags & OSC_PCI_EXPRESS_PME_CONTROL)
+               *srv_mask |= PCIE_PORT_SERVICE_PME;
+       if (flags & OSC_PCI_EXPRESS_AER_CONTROL)
+               *srv_mask |= PCIE_PORT_SERVICE_AER;
+
+       return 0;
+}
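[Editor's aside] For orientation, a minimal sketch of how the service mask filled in by pcie_port_acpi_setup() is meant to be consumed. The real consumer is get_port_device_capability() in portdrv_core.c, shown further down in this diff; the function below is hypothetical and assumes <linux/pci.h>, <linux/pcieport_if.h> and the portdrv.h additions earlier in this series.

    /*
     * Illustrative sketch, not part of the patch: consume the mask written
     * by pcie_port_platform_notify()/pcie_port_acpi_setup().  A non-zero
     * return from the notify call means the BIOS did not grant control.
     */
    static int example_query_allowed_services(struct pci_dev *port)
    {
            int mask = 0;

            if (pcie_port_platform_notify(port, &mask))
                    return 0;       /* no native port services for this port */

            if (mask & PCIE_PORT_SERVICE_HP)
                    dev_info(&port->dev, "native hot-plug permitted\n");
            if (mask & PCIE_PORT_SERVICE_PME)
                    dev_info(&port->dev, "native PME permitted\n");
            if (mask & PCIE_PORT_SERVICE_AER)
                    dev_info(&port->dev, "native AER permitted\n");

            return mask;
    }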
index e73effbe402c55e82be4d8a821b3d39e64f96bac..a9c222d79ebc5dc32cd6c4fad37f616ac291ff6a 100644 (file)
@@ -14,6 +14,8 @@
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/pcieport_if.h>
+#include <linux/aer.h>
+#include <linux/pci-aspm.h>
 
 #include "../pci.h"
 #include "portdrv.h"
@@ -236,24 +238,64 @@ static int get_port_device_capability(struct pci_dev *dev)
        int services = 0, pos;
        u16 reg16;
        u32 reg32;
+       int cap_mask;
+       int err;
+
+       err = pcie_port_platform_notify(dev, &cap_mask);
+       if (pcie_ports_auto) {
+               if (err) {
+                       pcie_no_aspm();
+                       return 0;
+               }
+       } else {
+               cap_mask = PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP
+                               | PCIE_PORT_SERVICE_VC;
+               if (pci_aer_available())
+                       cap_mask |= PCIE_PORT_SERVICE_AER;
+       }
 
        pos = pci_pcie_cap(dev);
        pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16);
        /* Hot-Plug Capable */
-       if (reg16 & PCI_EXP_FLAGS_SLOT) {
+       if ((cap_mask & PCIE_PORT_SERVICE_HP) && (reg16 & PCI_EXP_FLAGS_SLOT)) {
                pci_read_config_dword(dev, pos + PCI_EXP_SLTCAP, &reg32);
-               if (reg32 & PCI_EXP_SLTCAP_HPC)
+               if (reg32 & PCI_EXP_SLTCAP_HPC) {
                        services |= PCIE_PORT_SERVICE_HP;
+                       /*
+                        * Disable hot-plug interrupts in case they have been
+                        * enabled by the BIOS and the hot-plug service driver
+                        * is not loaded.
+                        */
+                       pos += PCI_EXP_SLTCTL;
+                       pci_read_config_word(dev, pos, &reg16);
+                       reg16 &= ~(PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE);
+                       pci_write_config_word(dev, pos, reg16);
+               }
        }
        /* AER capable */
-       if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR))
+       if ((cap_mask & PCIE_PORT_SERVICE_AER)
+           && pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)) {
                services |= PCIE_PORT_SERVICE_AER;
+               /*
+                * Disable AER on this port in case it's been enabled by the
+                * BIOS (the AER service driver will enable it when necessary).
+                */
+               pci_disable_pcie_error_reporting(dev);
+       }
        /* VC support */
        if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC))
                services |= PCIE_PORT_SERVICE_VC;
        /* Root ports are capable of generating PME too */
-       if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT)
+       if ((cap_mask & PCIE_PORT_SERVICE_PME)
+           && dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
                services |= PCIE_PORT_SERVICE_PME;
+               /*
+                * Disable PME interrupt on this port in case it's been enabled
+                * by the BIOS (the PME service driver will enable it when
+                * necessary).
+                */
+               pcie_pme_interrupt_enable(dev, false);
+       }
 
        return services;
 }
@@ -494,6 +536,9 @@ static void pcie_port_shutdown_service(struct device *dev) {}
  */
 int pcie_port_service_register(struct pcie_port_service_driver *new)
 {
+       if (pcie_ports_disabled)
+               return -ENODEV;
+
        new->driver.name = (char *)new->name;
        new->driver.bus = &pcie_port_bus_type;
        new->driver.probe = pcie_port_probe_service;
index 3debed25e46bb63f4e70e4a58b0d9abff3fd33be..f9033e190fb62060e701ebf44b0eb1d3dfbefa90 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/pcieport_if.h>
 #include <linux/aer.h>
 #include <linux/dmi.h>
+#include <linux/pci-aspm.h>
 
 #include "portdrv.h"
 #include "aer/aerdrv.h"
@@ -29,6 +30,31 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL");
 
+/* If this switch is set, PCIe port native services should not be enabled. */
+bool pcie_ports_disabled;
+
+/*
+ * If this switch is set, ACPI _OSC will be used to determine whether or not to
+ * enable PCIe port native services.
+ */
+bool pcie_ports_auto = true;
+
+static int __init pcie_port_setup(char *str)
+{
+       if (!strncmp(str, "compat", 6)) {
+               pcie_ports_disabled = true;
+       } else if (!strncmp(str, "native", 6)) {
+               pcie_ports_disabled = false;
+               pcie_ports_auto = false;
+       } else if (!strncmp(str, "auto", 4)) {
+               pcie_ports_disabled = false;
+               pcie_ports_auto = true;
+       }
+
+       return 1;
+}
+__setup("pcie_ports=", pcie_port_setup);
+
 /* global data */
 
 static int pcie_portdrv_restore_config(struct pci_dev *dev)
@@ -301,6 +327,11 @@ static int __init pcie_portdrv_init(void)
 {
        int retval;
 
+       if (pcie_ports_disabled) {
+               pcie_no_aspm();
+               return -EACCES;
+       }
+
        dmi_check_system(pcie_portdrv_dmi_table);
 
        retval = pcie_port_bus_register();
@@ -315,11 +346,4 @@ static int __init pcie_portdrv_init(void)
        return retval;
 }
 
-static void __exit pcie_portdrv_exit(void)
-{
-       pci_unregister_driver(&pcie_portdriver);
-       pcie_port_bus_unregister();
-}
-
 module_init(pcie_portdrv_init);
-module_exit(pcie_portdrv_exit);
index 659eaa0fc48facf81dad9c0b5c430498e01398e3..968cfea04f749ea76ffcb33bb62f114ecfd9a6ef 100644 (file)
@@ -49,7 +49,7 @@ static ssize_t address_read_file(struct pci_slot *slot, char *buf)
 }
 
 /* these strings match up with the values in pci_bus_speed */
-static char *pci_bus_speed_strings[] = {
+static const char *pci_bus_speed_strings[] = {
        "33 MHz PCI",           /* 0x00 */
        "66 MHz PCI",           /* 0x01 */
        "66 MHz PCI-X",         /* 0x02 */
index 044f430f3b4324eb031f32b3e9aeaf1307309a7d..cff7cc2c1f025295746416e86e88a07859e29154 100644 (file)
@@ -486,10 +486,12 @@ config TOPSTAR_LAPTOP
 config ACPI_TOSHIBA
        tristate "Toshiba Laptop Extras"
        depends on ACPI
+       depends on LEDS_CLASS
+       depends on NEW_LEDS
+       depends on BACKLIGHT_CLASS_DEVICE
        depends on INPUT
        depends on RFKILL || RFKILL = n
        select INPUT_POLLDEV
-       select BACKLIGHT_CLASS_DEVICE
        ---help---
          This driver adds support for access to certain system settings
          on "legacy free" Toshiba laptops.  These laptops can be recognized by
index f15516374987cef2cf0bae21383259759c4095a0..c1741142a4cb64c3cf1c705bbd1385c03d89778c 100644 (file)
@@ -79,12 +79,13 @@ struct bios_args {
        u32 command;
        u32 commandtype;
        u32 datasize;
-       char *data;
+       u32 data;
 };
 
 struct bios_return {
        u32 sigpass;
        u32 return_code;
+       u32 value;
 };
 
 struct key_entry {
@@ -148,7 +149,7 @@ static struct platform_driver hp_wmi_driver = {
  *       buffer = kzalloc(128, GFP_KERNEL);
  *       ret = hp_wmi_perform_query(0x7, 0, buffer, 128)
  */
-static int hp_wmi_perform_query(int query, int write, char *buffer,
+static int hp_wmi_perform_query(int query, int write, u32 *buffer,
                                int buffersize)
 {
        struct bios_return bios_return;
@@ -159,7 +160,7 @@ static int hp_wmi_perform_query(int query, int write, char *buffer,
                .command = write ? 0x2 : 0x1,
                .commandtype = query,
                .datasize = buffersize,
-               .data = buffer,
+               .data = *buffer,
        };
        struct acpi_buffer input = { sizeof(struct bios_args), &args };
        struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -177,29 +178,14 @@ static int hp_wmi_perform_query(int query, int write, char *buffer,
 
        bios_return = *((struct bios_return *)obj->buffer.pointer);
 
-       if (bios_return.return_code) {
-               printk(KERN_WARNING PREFIX "Query %d returned %d\n", query,
-                      bios_return.return_code);
-               kfree(obj);
-               return bios_return.return_code;
-       }
-       if (obj->buffer.length - sizeof(bios_return) > buffersize) {
-               kfree(obj);
-               return -EINVAL;
-       }
-
-       memset(buffer, 0, buffersize);
-       memcpy(buffer,
-              ((char *)obj->buffer.pointer) + sizeof(struct bios_return),
-              obj->buffer.length - sizeof(bios_return));
-       kfree(obj);
+       memcpy(buffer, &bios_return.value, sizeof(bios_return.value));
        return 0;
 }
 
 static int hp_wmi_display_state(void)
 {
-       int state;
-       int ret = hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, (char *)&state,
+       int state = 0;
+       int ret = hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, &state,
                                       sizeof(state));
        if (ret)
                return -EINVAL;
@@ -208,8 +194,8 @@ static int hp_wmi_display_state(void)
 
 static int hp_wmi_hddtemp_state(void)
 {
-       int state;
-       int ret = hp_wmi_perform_query(HPWMI_HDDTEMP_QUERY, 0, (char *)&state,
+       int state = 0;
+       int ret = hp_wmi_perform_query(HPWMI_HDDTEMP_QUERY, 0, &state,
                                       sizeof(state));
        if (ret)
                return -EINVAL;
@@ -218,8 +204,8 @@ static int hp_wmi_hddtemp_state(void)
 
 static int hp_wmi_als_state(void)
 {
-       int state;
-       int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 0, (char *)&state,
+       int state = 0;
+       int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 0, &state,
                                       sizeof(state));
        if (ret)
                return -EINVAL;
@@ -228,8 +214,8 @@ static int hp_wmi_als_state(void)
 
 static int hp_wmi_dock_state(void)
 {
-       int state;
-       int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, (char *)&state,
+       int state = 0;
+       int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state,
                                       sizeof(state));
 
        if (ret)
@@ -240,8 +226,8 @@ static int hp_wmi_dock_state(void)
 
 static int hp_wmi_tablet_state(void)
 {
-       int state;
-       int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, (char *)&state,
+       int state = 0;
+       int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state,
                                       sizeof(state));
        if (ret)
                return ret;
@@ -256,7 +242,7 @@ static int hp_wmi_set_block(void *data, bool blocked)
        int ret;
 
        ret = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1,
-                                  (char *)&query, sizeof(query));
+                                  &query, sizeof(query));
        if (ret)
                return -EINVAL;
        return 0;
@@ -268,10 +254,10 @@ static const struct rfkill_ops hp_wmi_rfkill_ops = {
 
 static bool hp_wmi_get_sw_state(enum hp_wmi_radio r)
 {
-       int wireless;
+       int wireless = 0;
        int mask;
        hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0,
-                            (char *)&wireless, sizeof(wireless));
+                            &wireless, sizeof(wireless));
        /* TBD: Pass error */
 
        mask = 0x200 << (r * 8);
@@ -284,10 +270,10 @@ static bool hp_wmi_get_sw_state(enum hp_wmi_radio r)
 
 static bool hp_wmi_get_hw_state(enum hp_wmi_radio r)
 {
-       int wireless;
+       int wireless = 0;
        int mask;
        hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0,
-                            (char *)&wireless, sizeof(wireless));
+                            &wireless, sizeof(wireless));
        /* TBD: Pass error */
 
        mask = 0x800 << (r * 8);
@@ -347,7 +333,7 @@ static ssize_t set_als(struct device *dev, struct device_attribute *attr,
                       const char *buf, size_t count)
 {
        u32 tmp = simple_strtoul(buf, NULL, 10);
-       int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 1, (char *)&tmp,
+       int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 1, &tmp,
                                       sizeof(tmp));
        if (ret)
                return -EINVAL;
@@ -421,7 +407,7 @@ static void hp_wmi_notify(u32 value, void *context)
        static struct key_entry *key;
        union acpi_object *obj;
        u32 event_id, event_data;
-       int key_code, ret;
+       int key_code = 0, ret;
        u32 *location;
        acpi_status status;
 
@@ -475,7 +461,7 @@ static void hp_wmi_notify(u32 value, void *context)
                break;
        case HPWMI_BEZEL_BUTTON:
                ret = hp_wmi_perform_query(HPWMI_HOTKEY_QUERY, 0,
-                                          (char *)&key_code,
+                                          &key_code,
                                           sizeof(key_code));
                if (ret)
                        break;
@@ -578,9 +564,9 @@ static void cleanup_sysfs(struct platform_device *device)
 static int __devinit hp_wmi_bios_setup(struct platform_device *device)
 {
        int err;
-       int wireless;
+       int wireless = 0;
 
-       err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, (char *)&wireless,
+       err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, &wireless,
                                   sizeof(wireless));
        if (err)
                return err;
index 73f8e6d726694ad918ba89fa6da7e5507885a2a7..2b11a33325e6a93bf872e67e7d1ab549b98ccc9c 100644 (file)
@@ -145,7 +145,7 @@ static void free_rar_device(struct rar_device *rar)
  */
 static struct rar_device *_rar_to_device(int rar, int *off)
 {
-       if (rar >= 0 && rar <= 3) {
+       if (rar >= 0 && rar < MRST_NUM_RAR) {
                *off = rar;
                return &my_rar_device;
        }
index 943f9084dcb11734e73064332f903320478c3c1e..6abe18e638e976f325ba92c9892bd2cc1501f4a8 100644 (file)
@@ -487,7 +487,7 @@ int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data)
                mdelay(1);
                *data = readl(ipcdev.i2c_base + I2C_DATA_ADDR);
        } else if (cmd == IPC_I2C_WRITE) {
-               writel(addr, ipcdev.i2c_base + I2C_DATA_ADDR);
+               writel(*data, ipcdev.i2c_base + I2C_DATA_ADDR);
                mdelay(1);
                writel(addr, ipcdev.i2c_base + IPC_I2C_CNTRL_ADDR);
        } else {
index 72b2bcc2c22413b1a63e465e355ea65084ec7b8e..d4fb82d85e9b36ab61e1626236cb98bf76364ea2 100644 (file)
@@ -426,7 +426,7 @@ static int bfin_rtc_suspend(struct platform_device *pdev, pm_message_t state)
                enable_irq_wake(IRQ_RTC);
                bfin_rtc_sync_pending(&pdev->dev);
        } else
-               bfin_rtc_int_clear(-1);
+               bfin_rtc_int_clear(0);
 
        return 0;
 }
@@ -435,8 +435,17 @@ static int bfin_rtc_resume(struct platform_device *pdev)
 {
        if (device_may_wakeup(&pdev->dev))
                disable_irq_wake(IRQ_RTC);
-       else
-               bfin_write_RTC_ISTAT(-1);
+
+       /*
+        * Since only some of the RTC bits are maintained externally in the
+        * Vbat domain, we need to wait for the RTC MMRs to be synced into
+        * the core after waking up.  This happens every RTC 1HZ.  Once that
+        * has happened, we can go ahead and re-enable the important write
+        * complete interrupt event.
+        */
+       while (!(bfin_read_RTC_ISTAT() & RTC_ISTAT_SEC))
+               continue;
+       bfin_rtc_int_set(RTC_ISTAT_WRITE_COMPLETE);
 
        return 0;
 }
index 66377f3e28b851eaa908c6057a9646a639e9c229..d60557cae8ef4fdadadb10b343125f174cb7508d 100644 (file)
@@ -364,7 +364,7 @@ static int m41t80_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *t)
        t->time.tm_isdst = -1;
        t->enabled = !!(reg[M41T80_REG_ALARM_MON] & M41T80_ALMON_AFE);
        t->pending = !!(reg[M41T80_REG_FLAGS] & M41T80_FLAGS_AF);
-       return rtc_valid_tm(t);
+       return 0;
 }
 
 static struct rtc_class_ops m41t80_rtc_ops = {
index 6c418fe7f288ae2deaa9f44080a749a9eaafad88..b7a6690e5b35e8744295bf212a8e0d75e0d8dd6f 100644 (file)
@@ -403,7 +403,7 @@ static int pl031_probe(struct amba_device *adev, struct amba_id *id)
        }
 
        if (request_irq(adev->irq[0], pl031_interrupt,
-                       IRQF_DISABLED | IRQF_SHARED, "rtc-pl031", ldata)) {
+                       IRQF_DISABLED, "rtc-pl031", ldata)) {
                ret = -EIO;
                goto out_no_irq;
        }
index c6cbcb3f925e094445d1935a6e44df791ea61f10..0e9a309b96691d6ec1c2209bb01a6f351a3a04d6 100644 (file)
 
 #ifdef CONFIG_MAGIC_SYSRQ
 static int ctrlchar_sysrq_key;
-static struct tty_struct *sysrq_tty;
 
 static void
 ctrlchar_handle_sysrq(struct work_struct *work)
 {
-       handle_sysrq(ctrlchar_sysrq_key, sysrq_tty);
+       handle_sysrq(ctrlchar_sysrq_key);
 }
 
 static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq);
@@ -54,7 +53,6 @@ ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty)
        /* racy */
        if (len == 3 && buf[1] == '-') {
                ctrlchar_sysrq_key = buf[2];
-               sysrq_tty = tty;
                schedule_work(&ctrlchar_work);
                return CTRLCHAR_SYSRQ;
        }
index 18d9a497863bd5c2cdc3b4ba2160bdd450a19e4e..8cd58e412b5eae2cfe3188a03f616190146f8db0 100644 (file)
@@ -305,7 +305,7 @@ kbd_keycode(struct kbd_data *kbd, unsigned int keycode)
                if (kbd->sysrq) {
                        if (kbd->sysrq == K(KT_LATIN, '-')) {
                                kbd->sysrq = 0;
-                               handle_sysrq(value, kbd->tty);
+                               handle_sysrq(value);
                                return;
                        }
                        if (value == '-') {
index 7356a56ac458f697c54bdd8d1335b763a47f000b..be0ebce36e54906fce49dd9c1993d184ebaddbb9 100644 (file)
@@ -869,7 +869,9 @@ static int get_serial_info(struct m68k_serial * info,
        tmp.close_delay = info->close_delay;
        tmp.closing_wait = info->closing_wait;
        tmp.custom_divisor = info->custom_divisor;
-       copy_to_user(retinfo,&tmp,sizeof(*retinfo));
+       if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
+               return -EFAULT;
+
        return 0;
 }
 
@@ -882,7 +884,8 @@ static int set_serial_info(struct m68k_serial * info,
 
        if (!new_info)
                return -EFAULT;
-       copy_from_user(&new_serial,new_info,sizeof(new_serial));
+       if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
+               return -EFAULT;
        old_info = *info;
 
        if (!capable(CAP_SYS_ADMIN)) {
@@ -943,8 +946,7 @@ static int get_lsr_info(struct m68k_serial * info, unsigned int *value)
        status = 0;
 #endif
        local_irq_restore(flags);
-       put_user(status,value);
-       return 0;
+       return put_user(status, value);
 }
 
 /*
@@ -999,27 +1001,18 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file,
                        send_break(info, arg ? arg*(100) : 250);
                        return 0;
                case TIOCGSERIAL:
-                       if (access_ok(VERIFY_WRITE, (void *) arg,
-                                               sizeof(struct serial_struct)))
-                               return get_serial_info(info,
-                                              (struct serial_struct *) arg);
-                       return -EFAULT;
+                       return get_serial_info(info,
+                                      (struct serial_struct *) arg);
                case TIOCSSERIAL:
                        return set_serial_info(info,
                                               (struct serial_struct *) arg);
                case TIOCSERGETLSR: /* Get line status register */
-                       if (access_ok(VERIFY_WRITE, (void *) arg,
-                                               sizeof(unsigned int)))
-                               return get_lsr_info(info, (unsigned int *) arg);
-                       return -EFAULT;
+                       return get_lsr_info(info, (unsigned int *) arg);
                case TIOCSERGSTRUCT:
-                       if (!access_ok(VERIFY_WRITE, (void *) arg,
-                                               sizeof(struct m68k_serial)))
+                       if (copy_to_user((struct m68k_serial *) arg,
+                                   info, sizeof(struct m68k_serial)))
                                return -EFAULT;
-                       copy_to_user((struct m68k_serial *) arg,
-                                   info, sizeof(struct m68k_serial));
                        return 0;
-                       
                default:
                        return -ENOIOCTLCMD;
                }
index b745792ec25a08e9981d22bbdd2897c99b6f738e..eaafb98debed89e2f1b2e5e83ac33cd79301792e 100644 (file)
@@ -203,13 +203,13 @@ static int __init parse_options(struct early_serial8250_device *device,
 
        if (mmio || mmio32)
                printk(KERN_INFO
-                      "Early serial console at MMIO%s 0x%llu (options '%s')\n",
+                      "Early serial console at MMIO%s 0x%llx (options '%s')\n",
                        mmio32 ? "32" : "",
                        (unsigned long long)port->mapbase,
                        device->options);
        else
                printk(KERN_INFO
-                     "Early serial console at I/O port 0x%lu (options '%s')\n",
+                     "Early serial console at I/O port 0x%lx (options '%s')\n",
                        port->iobase,
                        device->options);
 
index e57fb3d228e2dd0881ab10f3410a457b46e0e405..5318dd3774ae156dc17f482a73e80f8c8a3ec77b 100644 (file)
@@ -121,7 +121,7 @@ static int sport_uart_setup(struct sport_uart_port *up, int size, int baud_rate)
        unsigned int sclk = get_sclk();
 
        /* Set TCR1 and TCR2, TFSR is not enabled for uart */
-       SPORT_PUT_TCR1(up, (ITFS | TLSBIT | ITCLK));
+       SPORT_PUT_TCR1(up, (LATFS | ITFS | TFSR | TLSBIT | ITCLK));
        SPORT_PUT_TCR2(up, size + 1);
        pr_debug("%s TCR1:%x, TCR2:%x\n", __func__, SPORT_GET_TCR1(up), SPORT_GET_TCR2(up));
 
index 7e5e5efea4e27659ef564f28652e07377fcf19db..cff9a306660faede7827d705b5cf5d98fdef9067 100644 (file)
@@ -492,7 +492,7 @@ sn_receive_chars(struct sn_cons_port *port, unsigned long flags)
                         sysrq_requested = 0;
                         if (ch && time_before(jiffies, sysrq_timeout)) {
                                 spin_unlock_irqrestore(&port->sc_port.lock, flags);
-                                handle_sysrq(ch, NULL);
+                                handle_sysrq(ch);
                                 spin_lock_irqsave(&port->sc_port.lock, flags);
                                 /* ignore actual sysrq command char */
                                 continue;
index 4a7a7a7f11b67b6efd6b559023824dadeefdff07..335311a98fdcb0d4451c8fa2bfd604f937bae637 100644 (file)
@@ -113,8 +113,6 @@ source "drivers/staging/vme/Kconfig"
 
 source "drivers/staging/memrar/Kconfig"
 
-source "drivers/staging/sep/Kconfig"
-
 source "drivers/staging/iio/Kconfig"
 
 source "drivers/staging/zram/Kconfig"
index ca5c03eb3ce36e233280f7f4bb00ceae5697e4e9..e3f1e1b6095e5e790db3b9af1de06e7e29d4f9b7 100644 (file)
@@ -38,7 +38,6 @@ obj-$(CONFIG_FB_UDL)          += udlfb/
 obj-$(CONFIG_HYPERV)           += hv/
 obj-$(CONFIG_VME_BUS)          += vme/
 obj-$(CONFIG_MRST_RAR_HANDLER) += memrar/
-obj-$(CONFIG_DX_SEP)           += sep/
 obj-$(CONFIG_IIO)              += iio/
 obj-$(CONFIG_ZRAM)             += zram/
 obj-$(CONFIG_WLAGS49_H2)       += wlags49_h2/
index b4a8d5eb64fabc4879bd11e27577c2ed12f5b00a..05ca15a6c9f8b7f919f587e35e856d6de1310c2a 100644 (file)
@@ -267,6 +267,10 @@ static ssize_t store_log_level(struct kobject *kobj, struct attribute *attr,
        if (atomic_read(&bat_priv->log_level) == log_level_tmp)
                return count;
 
+       bat_info(net_dev, "Changing log level from: %i to: %li\n",
+                atomic_read(&bat_priv->log_level),
+                log_level_tmp);
+
        atomic_set(&bat_priv->log_level, (unsigned)log_level_tmp);
        return count;
 }
index 92c216a568856b4592e6a38427163bcec386aab3..baa8b05b9e8d70a49ba2dcb63e5e8c3cea7ed341 100644 (file)
@@ -129,6 +129,9 @@ static bool hardif_is_iface_up(struct batman_if *batman_if)
 
 static void update_mac_addresses(struct batman_if *batman_if)
 {
+       if (!batman_if || !batman_if->packet_buff)
+               return;
+
        addr_to_string(batman_if->addr_str, batman_if->net_dev->dev_addr);
 
        memcpy(((struct batman_packet *)(batman_if->packet_buff))->orig,
@@ -194,8 +197,6 @@ static void hardif_activate_interface(struct net_device *net_dev,
        if (batman_if->if_status != IF_INACTIVE)
                return;
 
-       dev_hold(batman_if->net_dev);
-
        update_mac_addresses(batman_if);
        batman_if->if_status = IF_TO_BE_ACTIVATED;
 
@@ -222,8 +223,6 @@ static void hardif_deactivate_interface(struct net_device *net_dev,
           (batman_if->if_status != IF_TO_BE_ACTIVATED))
                return;
 
-       dev_put(batman_if->net_dev);
-
        batman_if->if_status = IF_INACTIVE;
 
        bat_info(net_dev, "Interface deactivated: %s\n", batman_if->dev);
@@ -318,11 +317,13 @@ static struct batman_if *hardif_add_interface(struct net_device *net_dev)
        if (ret != 1)
                goto out;
 
+       dev_hold(net_dev);
+
        batman_if = kmalloc(sizeof(struct batman_if), GFP_ATOMIC);
        if (!batman_if) {
                pr_err("Can't add interface (%s): out of memory\n",
                       net_dev->name);
-               goto out;
+               goto release_dev;
        }
 
        batman_if->dev = kstrdup(net_dev->name, GFP_ATOMIC);
@@ -336,6 +337,7 @@ static struct batman_if *hardif_add_interface(struct net_device *net_dev)
        batman_if->if_num = -1;
        batman_if->net_dev = net_dev;
        batman_if->if_status = IF_NOT_IN_USE;
+       batman_if->packet_buff = NULL;
        INIT_LIST_HEAD(&batman_if->list);
 
        check_known_mac_addr(batman_if->net_dev->dev_addr);
@@ -346,6 +348,8 @@ free_dev:
        kfree(batman_if->dev);
 free_if:
        kfree(batman_if);
+release_dev:
+       dev_put(net_dev);
 out:
        return NULL;
 }
@@ -374,6 +378,7 @@ static void hardif_remove_interface(struct batman_if *batman_if)
        batman_if->if_status = IF_TO_BE_REMOVED;
        list_del_rcu(&batman_if->list);
        sysfs_del_hardif(&batman_if->hardif_obj);
+       dev_put(batman_if->net_dev);
        call_rcu(&batman_if->rcu, hardif_free_interface);
 }
 
@@ -393,15 +398,13 @@ static int hard_if_event(struct notifier_block *this,
        /* FIXME: each batman_if will be attached to a softif */
        struct bat_priv *bat_priv = netdev_priv(soft_device);
 
-       if (!batman_if)
-               batman_if = hardif_add_interface(net_dev);
+       if (!batman_if && event == NETDEV_REGISTER)
+                       batman_if = hardif_add_interface(net_dev);
 
        if (!batman_if)
                goto out;
 
        switch (event) {
-       case NETDEV_REGISTER:
-               break;
        case NETDEV_UP:
                hardif_activate_interface(soft_device, bat_priv, batman_if);
                break;
@@ -442,8 +445,6 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
        struct bat_priv *bat_priv = netdev_priv(soft_device);
        struct batman_packet *batman_packet;
        struct batman_if *batman_if;
-       struct net_device_stats *stats;
-       struct rtnl_link_stats64 temp;
        int ret;
 
        skb = skb_share_check(skb, GFP_ATOMIC);
@@ -479,12 +480,6 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
        if (batman_if->if_status != IF_ACTIVE)
                goto err_free;
 
-       stats = (struct net_device_stats *)dev_get_stats(skb->dev, &temp);
-       if (stats) {
-               stats->rx_packets++;
-               stats->rx_bytes += skb->len;
-       }
-
        batman_packet = (struct batman_packet *)skb->data;
 
        if (batman_packet->version != COMPAT_VERSION) {
index fc3d32c127292ab5dc717b59e169d6e19f52f10b..3ae7dd2d2d4d9fbea848b7b57a41a4e1d78b2b18 100644 (file)
@@ -67,6 +67,7 @@ static int bat_socket_open(struct inode *inode, struct file *file)
        INIT_LIST_HEAD(&socket_client->queue_list);
        socket_client->queue_len = 0;
        socket_client->index = i;
+       socket_client->bat_priv = inode->i_private;
        spin_lock_init(&socket_client->lock);
        init_waitqueue_head(&socket_client->queue_wait);
 
@@ -151,9 +152,8 @@ static ssize_t bat_socket_read(struct file *file, char __user *buf,
 static ssize_t bat_socket_write(struct file *file, const char __user *buff,
                                size_t len, loff_t *off)
 {
-       /* FIXME: each orig_node->batman_if will be attached to a softif */
-       struct bat_priv *bat_priv = netdev_priv(soft_device);
        struct socket_client *socket_client = file->private_data;
+       struct bat_priv *bat_priv = socket_client->bat_priv;
        struct icmp_packet_rr icmp_packet;
        struct orig_node *orig_node;
        struct batman_if *batman_if;
@@ -168,6 +168,9 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
                return -EINVAL;
        }
 
+       if (!bat_priv->primary_if)
+               return -EFAULT;
+
        if (len >= sizeof(struct icmp_packet_rr))
                packet_len = sizeof(struct icmp_packet_rr);
 
@@ -223,7 +226,8 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
        if (batman_if->if_status != IF_ACTIVE)
                goto dst_unreach;
 
-       memcpy(icmp_packet.orig, batman_if->net_dev->dev_addr, ETH_ALEN);
+       memcpy(icmp_packet.orig,
+              bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
 
        if (packet_len == sizeof(struct icmp_packet_rr))
                memcpy(icmp_packet.rr, batman_if->net_dev->dev_addr, ETH_ALEN);
@@ -271,7 +275,7 @@ int bat_socket_setup(struct bat_priv *bat_priv)
                goto err;
 
        d = debugfs_create_file(ICMP_SOCKET, S_IFREG | S_IWUSR | S_IRUSR,
-                               bat_priv->debug_dir, NULL, &fops);
+                               bat_priv->debug_dir, bat_priv, &fops);
        if (d)
                goto err;
 
index 2686019fe4e133f60e445cf2f2b5eddcf211cd73..ef7c20ae7979af145b0dcf592bf1c226acdf9f71 100644 (file)
@@ -250,10 +250,13 @@ int choose_orig(void *data, int32_t size)
 int is_my_mac(uint8_t *addr)
 {
        struct batman_if *batman_if;
+
        rcu_read_lock();
        list_for_each_entry_rcu(batman_if, &if_list, list) {
-               if ((batman_if->net_dev) &&
-                   (compare_orig(batman_if->net_dev->dev_addr, addr))) {
+               if (batman_if->if_status != IF_ACTIVE)
+                       continue;
+
+               if (compare_orig(batman_if->net_dev->dev_addr, addr)) {
                        rcu_read_unlock();
                        return 1;
                }
index 28bb627ffa135ceb9022621d33362e8611887b46..de5a8c1a810462da1d2f7e8779afd582eb779784 100644 (file)
@@ -391,11 +391,12 @@ static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
 int orig_hash_add_if(struct batman_if *batman_if, int max_if_num)
 {
        struct orig_node *orig_node;
+       unsigned long flags;
        HASHIT(hashit);
 
        /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
         * if_num */
-       spin_lock(&orig_hash_lock);
+       spin_lock_irqsave(&orig_hash_lock, flags);
 
        while (hash_iterate(orig_hash, &hashit)) {
                orig_node = hashit.bucket->data;
@@ -404,11 +405,11 @@ int orig_hash_add_if(struct batman_if *batman_if, int max_if_num)
                        goto err;
        }
 
-       spin_unlock(&orig_hash_lock);
+       spin_unlock_irqrestore(&orig_hash_lock, flags);
        return 0;
 
 err:
-       spin_unlock(&orig_hash_lock);
+       spin_unlock_irqrestore(&orig_hash_lock, flags);
        return -ENOMEM;
 }
 
@@ -468,12 +469,13 @@ int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
 {
        struct batman_if *batman_if_tmp;
        struct orig_node *orig_node;
+       unsigned long flags;
        HASHIT(hashit);
        int ret;
 
        /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
         * if_num */
-       spin_lock(&orig_hash_lock);
+       spin_lock_irqsave(&orig_hash_lock, flags);
 
        while (hash_iterate(orig_hash, &hashit)) {
                orig_node = hashit.bucket->data;
@@ -500,10 +502,10 @@ int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
        rcu_read_unlock();
 
        batman_if->if_num = -1;
-       spin_unlock(&orig_hash_lock);
+       spin_unlock_irqrestore(&orig_hash_lock, flags);
        return 0;
 
 err:
-       spin_unlock(&orig_hash_lock);
+       spin_unlock_irqrestore(&orig_hash_lock, flags);
        return -ENOMEM;
 }
index 066cc9149bf1be3c38cb1984ea39c8a3201f1a68..032195e6de94a5eed40831c116246628fb2458f9 100644 (file)
@@ -783,6 +783,8 @@ int recv_bat_packet(struct sk_buff *skb,
 
 static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len)
 {
+       /* FIXME: each batman_if will be attached to a softif */
+       struct bat_priv *bat_priv = netdev_priv(soft_device);
        struct orig_node *orig_node;
        struct icmp_packet_rr *icmp_packet;
        struct ethhdr *ethhdr;
@@ -801,6 +803,9 @@ static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len)
                return NET_RX_DROP;
        }
 
+       if (!bat_priv->primary_if)
+               return NET_RX_DROP;
+
        /* answer echo request (ping) */
        /* get routing information */
        spin_lock_irqsave(&orig_hash_lock, flags);
@@ -830,7 +835,8 @@ static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len)
                }
 
                memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
-               memcpy(icmp_packet->orig, ethhdr->h_dest, ETH_ALEN);
+               memcpy(icmp_packet->orig,
+                      bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
                icmp_packet->msg_type = ECHO_REPLY;
                icmp_packet->ttl = TTL;
 
@@ -845,6 +851,8 @@ static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len)
 
 static int recv_icmp_ttl_exceeded(struct sk_buff *skb, size_t icmp_len)
 {
+       /* FIXME: each batman_if will be attached to a softif */
+       struct bat_priv *bat_priv = netdev_priv(soft_device);
        struct orig_node *orig_node;
        struct icmp_packet *icmp_packet;
        struct ethhdr *ethhdr;
@@ -865,6 +873,9 @@ static int recv_icmp_ttl_exceeded(struct sk_buff *skb, size_t icmp_len)
                return NET_RX_DROP;
        }
 
+       if (!bat_priv->primary_if)
+               return NET_RX_DROP;
+
        /* get routing information */
        spin_lock_irqsave(&orig_hash_lock, flags);
        orig_node = ((struct orig_node *)
@@ -892,7 +903,8 @@ static int recv_icmp_ttl_exceeded(struct sk_buff *skb, size_t icmp_len)
                }
 
                memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
-               memcpy(icmp_packet->orig, ethhdr->h_dest, ETH_ALEN);
+               memcpy(icmp_packet->orig,
+                      bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
                icmp_packet->msg_type = TTL_EXCEEDED;
                icmp_packet->ttl = TTL;
 
index 21d0717afb09ef7d7f4c6d066a6218c3085392bf..9aa9d369c7522cb9c1db35edab0af15e9e39419a 100644 (file)
@@ -126,6 +126,7 @@ struct socket_client {
        unsigned char index;
        spinlock_t lock;
        wait_queue_head_t queue_wait;
+       struct bat_priv *bat_priv;
 };
 
 struct socket_packet {
index c6aa52f8dcee3c5f97ea3df1a3f9374cf9a04f04..48d9fb1227df0d9d9d79c2848cc8615a6c52b837 100644 (file)
@@ -222,7 +222,6 @@ static int das08_pcmcia_config_loop(struct pcmcia_device *p_dev,
                p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
                p_dev->resource[0]->flags |=
                        pcmcia_io_cfg_data_width(io->flags);
-               p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
                p_dev->resource[0]->start = io->win[0].base;
                p_dev->resource[0]->end = io->win[0].len;
                if (io->nwin > 1) {
index 56e11575c97763f1747a35ae67db81b49cdd6325..64a01147ecaeffc17863bd0002df415da56da8f2 100644 (file)
@@ -327,6 +327,9 @@ static const struct net_device_ops device_ops = {
        .ndo_stop =                     netvsc_close,
        .ndo_start_xmit =               netvsc_start_xmit,
        .ndo_set_multicast_list =       netvsc_set_multicast_list,
+       .ndo_change_mtu =               eth_change_mtu,
+       .ndo_validate_addr =            eth_validate_addr,
+       .ndo_set_mac_address =          eth_mac_addr,
 };
 
 static int netvsc_probe(struct device *device)
index 17bc7626f70a71d991aa2bdc9c50a960657e4f90..d78c569ac94a2aa7ae4ba6b2325dbf6d2b93c45c 100644 (file)
@@ -193,8 +193,7 @@ Description:
 static inline u64
 GetRingBufferIndices(struct hv_ring_buffer_info *RingInfo)
 {
-       return ((u64)RingInfo->RingBuffer->WriteIndex << 32)
-       || RingInfo->RingBuffer->ReadIndex;
+       return (u64)RingInfo->RingBuffer->WriteIndex << 32;
 }
 
 
index 0063bde9a4b23ac22bbaad19d1912518a855c008..8505a1c5f9ee6bab72259539fa0d1c845fbba1c9 100644 (file)
 #include "vmbus_api.h"
 
 /* Defines */
-#define STORVSC_RING_BUFFER_SIZE                       (10*PAGE_SIZE)
+#define STORVSC_RING_BUFFER_SIZE                       (20*PAGE_SIZE)
 #define BLKVSC_RING_BUFFER_SIZE                                (20*PAGE_SIZE)
 
-#define STORVSC_MAX_IO_REQUESTS                                64
+#define STORVSC_MAX_IO_REQUESTS                                128
 
 /*
  * In Hyper-V, each port/path/target maps to 1 scsi host adapter.  In
index 075b61bd492f19918c1ee3027994362b2b6aa900..62882a437aa45abb072fede360ffa3672dc37b9d 100644 (file)
@@ -495,7 +495,7 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
 
                /* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */
 
-               if (j == 0)
+               if (bounce_addr == 0)
                        bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
 
                while (srclen) {
@@ -556,7 +556,7 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
                destlen = orig_sgl[i].length;
                /* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */
 
-               if (j == 0)
+               if (bounce_addr == 0)
                        bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
 
                while (destlen) {
@@ -615,6 +615,7 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd,
        unsigned int request_size = 0;
        int i;
        struct scatterlist *sgl;
+       unsigned int sg_count = 0;
 
        DPRINT_DBG(STORVSC_DRV, "scmnd %p dir %d, use_sg %d buf %p len %d "
                   "queue depth %d tagged %d", scmnd, scmnd->sc_data_direction,
@@ -697,6 +698,7 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd,
        request->DataBuffer.Length = scsi_bufflen(scmnd);
        if (scsi_sg_count(scmnd)) {
                sgl = (struct scatterlist *)scsi_sglist(scmnd);
+               sg_count = scsi_sg_count(scmnd);
 
                /* check if we need to bounce the sgl */
                if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) {
@@ -731,15 +733,16 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd,
                                              scsi_sg_count(scmnd));
 
                        sgl = cmd_request->bounce_sgl;
+                       sg_count = cmd_request->bounce_sgl_count;
                }
 
                request->DataBuffer.Offset = sgl[0].offset;
 
-               for (i = 0; i < scsi_sg_count(scmnd); i++) {
+               for (i = 0; i < sg_count; i++) {
                        DPRINT_DBG(STORVSC_DRV, "sgl[%d] len %d offset %d\n",
                                   i, sgl[i].length, sgl[i].offset);
                        request->DataBuffer.PfnArray[i] =
-                                       page_to_pfn(sg_page((&sgl[i])));
+                               page_to_pfn(sg_page((&sgl[i])));
                }
        } else if (scsi_sglist(scmnd)) {
                /* ASSERT(scsi_bufflen(scmnd) <= PAGE_SIZE); */
index 638ad6b3589107d6f6f146e64eb6f4011d425597..9493128e5fd2d7a743521781eddcd507622310a2 100644 (file)
@@ -1,6 +1,6 @@
 config OCTEON_ETHERNET
        tristate "Cavium Networks Octeon Ethernet support"
-       depends on CPU_CAVIUM_OCTEON
+       depends on CPU_CAVIUM_OCTEON && NETDEVICES
        select PHYLIB
        select MDIO_OCTEON
        help
index a0fe31de0a6d1299c802062cff4007b971d6a594..ebf9074a90838367fc7dde6aac3cba0fe6a3e131 100644 (file)
@@ -44,6 +44,7 @@ struct usb_device_id rtusb_usb_id[] = {
        {USB_DEVICE(0x07B8, 0x2870)},   /* AboCom */
        {USB_DEVICE(0x07B8, 0x2770)},   /* AboCom */
        {USB_DEVICE(0x0DF6, 0x0039)},   /* Sitecom 2770 */
+       {USB_DEVICE(0x0DF6, 0x003F)},   /* Sitecom 2770 */
        {USB_DEVICE(0x083A, 0x7512)},   /* Arcadyan 2770 */
        {USB_DEVICE(0x0789, 0x0162)},   /* Logitec 2870 */
        {USB_DEVICE(0x0789, 0x0163)},   /* Logitec 2870 */
@@ -95,7 +96,8 @@ struct usb_device_id rtusb_usb_id[] = {
        {USB_DEVICE(0x050d, 0x815c)},
        {USB_DEVICE(0x1482, 0x3C09)},   /* Abocom */
        {USB_DEVICE(0x14B2, 0x3C09)},   /* Alpha */
-       {USB_DEVICE(0x04E8, 0x2018)},   /* samsung */
+       {USB_DEVICE(0x04E8, 0x2018)},   /* samsung linkstick2 */
+       {USB_DEVICE(0x1690, 0x0740)},   /* Askey */
        {USB_DEVICE(0x5A57, 0x0280)},   /* Zinwell */
        {USB_DEVICE(0x5A57, 0x0282)},   /* Zinwell */
        {USB_DEVICE(0x7392, 0x7718)},
@@ -105,21 +107,34 @@ struct usb_device_id rtusb_usb_id[] = {
        {USB_DEVICE(0x1737, 0x0071)},   /* Linksys WUSB600N */
        {USB_DEVICE(0x0411, 0x00e8)},   /* Buffalo WLI-UC-G300N */
        {USB_DEVICE(0x050d, 0x815c)},   /* Belkin F5D8053 */
+       {USB_DEVICE(0x100D, 0x9031)},   /* Motorola 2770 */
 #endif /* RT2870 // */
 #ifdef RT3070
        {USB_DEVICE(0x148F, 0x3070)},   /* Ralink 3070 */
        {USB_DEVICE(0x148F, 0x3071)},   /* Ralink 3071 */
        {USB_DEVICE(0x148F, 0x3072)},   /* Ralink 3072 */
        {USB_DEVICE(0x0DB0, 0x3820)},   /* Ralink 3070 */
+       {USB_DEVICE(0x0DB0, 0x871C)},   /* Ralink 3070 */
+       {USB_DEVICE(0x0DB0, 0x822C)},   /* Ralink 3070 */
+       {USB_DEVICE(0x0DB0, 0x871B)},   /* Ralink 3070 */
+       {USB_DEVICE(0x0DB0, 0x822B)},   /* Ralink 3070 */
        {USB_DEVICE(0x0DF6, 0x003E)},   /* Sitecom 3070 */
        {USB_DEVICE(0x0DF6, 0x0042)},   /* Sitecom 3072 */
+       {USB_DEVICE(0x0DF6, 0x0048)},   /* Sitecom 3070 */
+       {USB_DEVICE(0x0DF6, 0x0047)},   /* Sitecom 3071 */
        {USB_DEVICE(0x14B2, 0x3C12)},   /* AL 3070 */
        {USB_DEVICE(0x18C5, 0x0012)},   /* Corega 3070 */
        {USB_DEVICE(0x083A, 0x7511)},   /* Arcadyan 3070 */
+       {USB_DEVICE(0x083A, 0xA701)},   /* SMC 3070 */
+       {USB_DEVICE(0x083A, 0xA702)},   /* SMC 3072 */
        {USB_DEVICE(0x1740, 0x9703)},   /* EnGenius 3070 */
        {USB_DEVICE(0x1740, 0x9705)},   /* EnGenius 3071 */
        {USB_DEVICE(0x1740, 0x9706)},   /* EnGenius 3072 */
+       {USB_DEVICE(0x1740, 0x9707)},   /* EnGenius 3070 */
+       {USB_DEVICE(0x1740, 0x9708)},   /* EnGenius 3071 */
+       {USB_DEVICE(0x1740, 0x9709)},   /* EnGenius 3072 */
        {USB_DEVICE(0x13D3, 0x3273)},   /* AzureWave 3070 */
+       {USB_DEVICE(0x13D3, 0x3305)},   /* AzureWave 3070*/
        {USB_DEVICE(0x1044, 0x800D)},   /* Gigabyte GN-WB32L 3070 */
        {USB_DEVICE(0x2019, 0xAB25)},   /* Planex Communications, Inc. RT3070 */
        {USB_DEVICE(0x07B8, 0x3070)},   /* AboCom 3070 */
@@ -132,14 +147,36 @@ struct usb_device_id rtusb_usb_id[] = {
        {USB_DEVICE(0x07D1, 0x3C0D)},   /* D-Link 3070 */
        {USB_DEVICE(0x07D1, 0x3C0E)},   /* D-Link 3070 */
        {USB_DEVICE(0x07D1, 0x3C0F)},   /* D-Link 3070 */
+       {USB_DEVICE(0x07D1, 0x3C16)},   /* D-Link 3070 */
+       {USB_DEVICE(0x07D1, 0x3C17)},   /* D-Link 8070 */
        {USB_DEVICE(0x1D4D, 0x000C)},   /* Pegatron Corporation 3070 */
        {USB_DEVICE(0x1D4D, 0x000E)},   /* Pegatron Corporation 3070 */
        {USB_DEVICE(0x5A57, 0x5257)},   /* Zinwell 3070 */
        {USB_DEVICE(0x5A57, 0x0283)},   /* Zinwell 3072 */
        {USB_DEVICE(0x04BB, 0x0945)},   /* I-O DATA 3072 */
+       {USB_DEVICE(0x04BB, 0x0947)},   /* I-O DATA 3070 */
+       {USB_DEVICE(0x04BB, 0x0948)},   /* I-O DATA 3072 */
        {USB_DEVICE(0x203D, 0x1480)},   /* Encore 3070 */
+       {USB_DEVICE(0x20B8, 0x8888)},   /* PARA INDUSTRIAL 3070 */
+       {USB_DEVICE(0x0B05, 0x1784)},   /* Asus 3072 */
+       {USB_DEVICE(0x203D, 0x14A9)},   /* Encore 3070*/
+       {USB_DEVICE(0x0DB0, 0x899A)},   /* MSI 3070*/
+       {USB_DEVICE(0x0DB0, 0x3870)},   /* MSI 3070*/
+       {USB_DEVICE(0x0DB0, 0x870A)},   /* MSI 3070*/
+       {USB_DEVICE(0x0DB0, 0x6899)},   /* MSI 3070 */
+       {USB_DEVICE(0x0DB0, 0x3822)},   /* MSI 3070 */
+       {USB_DEVICE(0x0DB0, 0x3871)},   /* MSI 3070 */
+       {USB_DEVICE(0x0DB0, 0x871A)},   /* MSI 3070 */
+       {USB_DEVICE(0x0DB0, 0x822A)},   /* MSI 3070 */
+       {USB_DEVICE(0x0DB0, 0x3821)},   /* Ralink 3070 */
+       {USB_DEVICE(0x0DB0, 0x821A)},   /* Ralink 3070 */
+       {USB_DEVICE(0x083A, 0xA703)},   /* IO-MAGIC */
+       {USB_DEVICE(0x13D3, 0x3307)},   /* Azurewave */
+       {USB_DEVICE(0x13D3, 0x3321)},   /* Azurewave */
+       {USB_DEVICE(0x07FA, 0x7712)},   /* Edimax */
+       {USB_DEVICE(0x0789, 0x0166)},   /* Edimax */
+       {USB_DEVICE(0x148F, 0x2070)},   /* Edimax */
 #endif /* RT3070 // */
-       {USB_DEVICE(0x0DF6, 0x003F)},   /* Sitecom WL-608 */
        {USB_DEVICE(0x1737, 0x0077)},   /* Linksys WUSB54GC-EU v3 */
        {USB_DEVICE(0x2001, 0x3C09)},   /* D-Link */
        {USB_DEVICE(0x2001, 0x3C0A)},   /* D-Link 3072 */
diff --git a/drivers/staging/sep/Kconfig b/drivers/staging/sep/Kconfig
deleted file mode 100644 (file)
index 0a9c39c..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-config DX_SEP
-       tristate "Discretix SEP driver"
-#      depends on MRST
-       depends on RAR_REGISTER && PCI
-       default y
-       help
-         Discretix SEP driver
-
-         If unsure say M. The compiled module will be
-         called sep_driver.ko
diff --git a/drivers/staging/sep/Makefile b/drivers/staging/sep/Makefile
deleted file mode 100644 (file)
index 628d5f9..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-obj-$(CONFIG_DX_SEP) := sep_driver.o
-
diff --git a/drivers/staging/sep/TODO b/drivers/staging/sep/TODO
deleted file mode 100644 (file)
index ff0e931..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-Todo's so far (from Alan Cox)
-- Fix firmware loading
-- Get firmware into firmware git tree
-- Review and tidy each algorithm function
-- Check whether it can be plugged into any of the kernel crypto API
-  interfaces
-- Do something about the magic shared memory interface and replace it
-  with something saner (in Linux terms)
diff --git a/drivers/staging/sep/sep_dev.h b/drivers/staging/sep/sep_dev.h
deleted file mode 100644 (file)
index 9200524..0000000
+++ /dev/null
@@ -1,110 +0,0 @@
-#ifndef __SEP_DEV_H__
-#define __SEP_DEV_H__
-
-/*
- *
- *  sep_dev.h - Security Processor Device Structures
- *
- *  Copyright(c) 2009 Intel Corporation. All rights reserved.
- *  Copyright(c) 2009 Discretix. All rights reserved.
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  This program is distributed in the hope that it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- *  more details.
- *
- *  You should have received a copy of the GNU General Public License along with
- *  this program; if not, write to the Free Software Foundation, Inc., 59
- *  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
- *
- *  CONTACTS:
- *
- *  Alan Cox           alan@linux.intel.com
- *
- */
-
-struct sep_device {
-       /* pointer to pci dev */
-       struct pci_dev *pdev;
-
-       unsigned long in_use;
-
-       /* address of the shared memory allocated during init for SEP driver
-          (coherent alloc) */
-       void *shared_addr;
-       /* the physical address of the shared area */
-       dma_addr_t shared_bus;
-
-       /* restricted access region (coherent alloc) */
-       dma_addr_t rar_bus;
-       void *rar_addr;
-       /* firmware regions: cache is at rar_addr */
-       unsigned long cache_size;
-
-       /* follows the cache */
-       dma_addr_t resident_bus;
-       unsigned long resident_size;
-       void *resident_addr;
-
-       /* start address of the access to the SEP registers from driver */
-       void __iomem *reg_addr;
-       /* transaction counter that coordinates the transactions between SEP and HOST */
-       unsigned long send_ct;
-       /* counter for the messages from sep */
-       unsigned long reply_ct;
-       /* counter for the number of bytes allocated in the pool for the current
-          transaction */
-       unsigned long data_pool_bytes_allocated;
-
-       /* array of pointers to the pages that represent input data for the synchronic
-          DMA action */
-       struct page **in_page_array;
-
-       /* array of pointers to the pages that represent out data for the synchronic
-          DMA action */
-       struct page **out_page_array;
-
-       /* number of pages in the sep_in_page_array */
-       unsigned long in_num_pages;
-
-       /* number of pages in the sep_out_page_array */
-       unsigned long out_num_pages;
-
-       /* global data for every flow */
-       struct sep_flow_context_t flows[SEP_DRIVER_NUM_FLOWS];
-
-       /* pointer to the workqueue that handles the flow done interrupts */
-       struct workqueue_struct *flow_wq;
-
-};
-
-static struct sep_device *sep_dev;
-
-static inline void sep_write_reg(struct sep_device *dev, int reg, u32 value)
-{
-       void __iomem *addr = dev->reg_addr + reg;
-       writel(value, addr);
-}
-
-static inline u32 sep_read_reg(struct sep_device *dev, int reg)
-{
-       void __iomem *addr = dev->reg_addr + reg;
-       return readl(addr);
-}
-
-/* wait for SRAM write complete(indirect write */
-static inline void sep_wait_sram_write(struct sep_device *dev)
-{
-       u32 reg_val;
-       do
-               reg_val = sep_read_reg(dev, HW_SRAM_DATA_READY_REG_ADDR);
-       while (!(reg_val & 1));
-}
-
-
-#endif
diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
deleted file mode 100644 (file)
index ecbde34..0000000
+++ /dev/null
@@ -1,2742 +0,0 @@
-/*
- *
- *  sep_driver.c - Security Processor Driver main group of functions
- *
- *  Copyright(c) 2009 Intel Corporation. All rights reserved.
- *  Copyright(c) 2009 Discretix. All rights reserved.
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  This program is distributed in the hope that it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- *  more details.
- *
- *  You should have received a copy of the GNU General Public License along with
- *  this program; if not, write to the Free Software Foundation, Inc., 59
- *  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
- *
- *  CONTACTS:
- *
- *  Mark Allyn         mark.a.allyn@intel.com
- *
- *  CHANGES:
- *
- *  2009.06.26 Initial publish
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/cdev.h>
-#include <linux/kdev_t.h>
-#include <linux/mutex.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/poll.h>
-#include <linux/wait.h>
-#include <linux/pci.h>
-#include <linux/firmware.h>
-#include <linux/slab.h>
-#include <asm/ioctl.h>
-#include <linux/ioport.h>
-#include <asm/io.h>
-#include <linux/interrupt.h>
-#include <linux/pagemap.h>
-#include <asm/cacheflush.h>
-#include "sep_driver_hw_defs.h"
-#include "sep_driver_config.h"
-#include "sep_driver_api.h"
-#include "sep_dev.h"
-
-#if SEP_DRIVER_ARM_DEBUG_MODE
-
-#define  CRYS_SEP_ROM_length                  0x4000
-#define  CRYS_SEP_ROM_start_address           0x8000C000UL
-#define  CRYS_SEP_ROM_start_address_offset    0xC000UL
-#define  SEP_ROM_BANK_register                0x80008420UL
-#define  SEP_ROM_BANK_register_offset         0x8420UL
-#define SEP_RAR_IO_MEM_REGION_START_ADDRESS   0x82000000
-
-/*
- * THESE 2 definitions are specific to the board - must be
- * defined during integration
- */
-#define SEP_RAR_IO_MEM_REGION_START_ADDRESS   0xFF0D0000
-
-/* 2M size */
-
-static void sep_load_rom_code(struct sep_device *sep)
-{
-       /* Index variables */
-       unsigned long i, k, j;
-       u32 reg;
-       u32 error;
-       u32 warning;
-
-       /* Loading ROM from SEP_ROM_image.h file */
-       k = sizeof(CRYS_SEP_ROM);
-
-       edbg("SEP Driver: DX_CC_TST_SepRomLoader start\n");
-
-       edbg("SEP Driver: k is %lu\n", k);
-       edbg("SEP Driver: sep->reg_addr is %p\n", sep->reg_addr);
-       edbg("SEP Driver: CRYS_SEP_ROM_start_address_offset is %p\n", CRYS_SEP_ROM_start_address_offset);
-
-       for (i = 0; i < 4; i++) {
-               /* write bank */
-               sep_write_reg(sep, SEP_ROM_BANK_register_offset, i);
-
-               for (j = 0; j < CRYS_SEP_ROM_length / 4; j++) {
-                       sep_write_reg(sep, CRYS_SEP_ROM_start_address_offset + 4 * j, CRYS_SEP_ROM[i * 0x1000 + j]);
-
-                       k = k - 4;
-
-                       if (k == 0) {
-                               j = CRYS_SEP_ROM_length;
-                               i = 4;
-                       }
-               }
-       }
-
-       /* reset the SEP */
-       sep_write_reg(sep, HW_HOST_SEP_SW_RST_REG_ADDR, 0x1);
-
-       /* poll for SEP ROM boot finish */
-       do
-               reg = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
-       while (!reg);
-
-       edbg("SEP Driver: ROM polling ended\n");
-
-       switch (reg) {
-       case 0x1:
-               /* fatal error - read error status from GPR0 */
-               error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
-               edbg("SEP Driver: ROM polling case 1\n");
-               break;
-       case 0x4:
-               /* Cold boot ended successfully  */
-       case 0x8:
-               /* Warmboot ended successfully */
-       case 0x10:
-               /* ColdWarm boot ended successfully */
-               error = 0;
-               break;
-       case 0x2:
-               /* Boot First Phase ended */
-               warning = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
-               break;
-       case 0x20:
-               edbg("SEP Driver: ROM polling case %d\n", reg);
-               break;
-       }
-
-}
-
-#else
-static void sep_load_rom_code(struct sep_device *sep) { }
-#endif                         /* SEP_DRIVER_ARM_DEBUG_MODE */
-
-
-
-/*----------------------------------------
-       DEFINES
------------------------------------------*/
-
-#define BASE_ADDRESS_FOR_SYSTEM 0xfffc0000
-#define SEP_RAR_IO_MEM_REGION_SIZE 0x40000
-
-/*--------------------------------------------
-       GLOBAL variables
---------------------------------------------*/
-
-/* debug messages level */
-static int debug;
-module_param(debug, int, 0);
-MODULE_PARM_DESC(debug, "Flag to enable SEP debug messages");
-
-/* Keep this a single static object for now to keep the conversion easy */
-
-static struct sep_device sep_instance;
-static struct sep_device *sep_dev = &sep_instance;
-
-/*
-  mutex for the access to the internals of the sep driver
-*/
-static DEFINE_MUTEX(sep_mutex);
-
-
-/* wait queue head (event) of the driver */
-static DECLARE_WAIT_QUEUE_HEAD(sep_event);
-
-/**
- *     sep_load_firmware       -       copy firmware cache/resident
- *     @sep: device we are loading
- *
- *     This function copies the cache and resident images from their
- *     source location into the destination shared memory.
- */
-
-static int sep_load_firmware(struct sep_device *sep)
-{
-       const struct firmware *fw;
-       char *cache_name = "sep/cache.image.bin";
-       char *res_name = "sep/resident.image.bin";
-       int error;
-
-       edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
-       edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus);
-
-       /* load cache */
-       error = request_firmware(&fw, cache_name, &sep->pdev->dev);
-       if (error) {
-               edbg("SEP Driver:cant request cache fw\n");
-               return error;
-       }
-       edbg("SEP Driver:cache %08Zx@%p\n", fw->size, (void *) fw->data);
-
-       memcpy(sep->rar_addr, (void *)fw->data, fw->size);
-       sep->cache_size = fw->size;
-       release_firmware(fw);
-
-       sep->resident_bus = sep->rar_bus + sep->cache_size;
-       sep->resident_addr = sep->rar_addr + sep->cache_size;
-
-       /* load resident */
-       error = request_firmware(&fw, res_name, &sep->pdev->dev);
-       if (error) {
-               edbg("SEP Driver:cant request res fw\n");
-               return error;
-       }
-       edbg("sep: res %08Zx@%p\n", fw->size, (void *)fw->data);
-
-       memcpy(sep->resident_addr, (void *) fw->data, fw->size);
-       sep->resident_size = fw->size;
-       release_firmware(fw);
-
-       edbg("sep: resident v %p b %08llx cache v %p b %08llx\n",
-               sep->resident_addr, (unsigned long long)sep->resident_bus,
-               sep->rar_addr, (unsigned long long)sep->rar_bus);
-       return 0;
-}
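-
-/*
- * Resulting layout of the RAR region (follows directly from the
- * arithmetic above; no additional driver state is implied):
- *
- *   rar_addr / rar_bus                  rar_addr + cache_size
- *   |<---------- cache image ---------->|<------- resident image ------->|
- *
- * resident_addr / resident_bus simply begin where the cache image ends.
- */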
-
-MODULE_FIRMWARE("sep/cache.image.bin");
-MODULE_FIRMWARE("sep/resident.image.bin");
-
-/**
- *     sep_map_and_alloc_shared_area   -       allocate shared block
- *     @sep: security processor
- *     @size: size of shared area
- *
- *     Allocate a shared buffer in host memory that can be used by both the
- *     kernel and also the hardware interface via DMA.
- */
-
-static int sep_map_and_alloc_shared_area(struct sep_device *sep,
-                                                       unsigned long size)
-{
-       /* shared_addr = ioremap_nocache(0xda00000,shared_area_size); */
-       sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev, size,
-                                       &sep->shared_bus, GFP_KERNEL);
-
-       if (!sep->shared_addr) {
-               edbg("sep_driver :shared memory dma_alloc_coherent failed\n");
-               return -ENOMEM;
-       }
-       /* set the bus address of the shared area */
-       edbg("sep: shared_addr %ld bytes @%p (bus %08llx)\n",
-               size, sep->shared_addr, (unsigned long long)sep->shared_bus);
-       return 0;
-}
-
-/**
- *     sep_unmap_and_free_shared_area  -       free shared block
- *     @sep: security processor
- *
- *     Free the shared area allocated to the security processor. The
- *     processor must have finished with this and any final posted
- *     writes cleared before we do so.
- */
-static void sep_unmap_and_free_shared_area(struct sep_device *sep, int size)
-{
-       dma_free_coherent(&sep->pdev->dev, size,
-                               sep->shared_addr, sep->shared_bus);
-}
-
-/**
- *     sep_shared_virt_to_bus  -       convert a shared area virtual address to a bus address
- *
- *     Returns the bus address inside the shared area according
- *     to the virtual address.
- */
-
-static dma_addr_t sep_shared_virt_to_bus(struct sep_device *sep,
-                                               void *virt_address)
-{
-       dma_addr_t pa = sep->shared_bus + (virt_address - sep->shared_addr);
-       edbg("sep: virt to bus b %08llx v %p\n", (unsigned long long) pa,
-                                                               virt_address);
-       return pa;
-}
-
-/**
- *     sep_shared_bus_to_virt  -       convert a shared area bus address to a virtual address
- *
- *     Returns virtual address inside the shared area according
- *     to the bus address.
- */
-
-static void *sep_shared_bus_to_virt(struct sep_device *sep,
-                                               dma_addr_t bus_address)
-{
-       return sep->shared_addr + (bus_address - sep->shared_bus);
-}
-
-
-/**
- *     sep_try_open            -       attempt to open a SEP device
- *     @sep: device to attempt to open
- *
- *     Atomically attempt to get ownership of a SEP device.
- *     Returns 1 if the device was opened, 0 on failure.
- */
-
-static int sep_try_open(struct sep_device *sep)
-{
-       if (!test_and_set_bit(0, &sep->in_use))
-               return 1;
-       return 0;
-}
-
-/**
- *     sep_open                -       device open method
- *     @inode: inode of sep device
- *     @filp: file handle to sep device
- *
- *     Open method for the SEP device. Called when userspace opens
- *     the SEP device node. It also resets the memory data pool
- *     allocations.
- *
- *     Returns zero on success otherwise an error code.
- */
-
-static int sep_open(struct inode *inode, struct file *filp)
-{
-       if (sep_dev == NULL)
-               return -ENODEV;
-
-       /* check the blocking mode */
-       if (filp->f_flags & O_NDELAY) {
-               if (sep_try_open(sep_dev) == 0)
-                       return -EAGAIN;
-       } else
-               if (wait_event_interruptible(sep_event, sep_try_open(sep_dev)) < 0)
-                       return -EINTR;
-
-       /* Bind to the device, we only have one which makes it easy */
-       filp->private_data = sep_dev;
-       /* release data pool allocations */
-       sep_dev->data_pool_bytes_allocated = 0;
-       return 0;
-}
-
-
-/**
- *     sep_release             -       close a SEP device
- *     @inode: inode of SEP device
- *     @filp: file handle being closed
- *
- *     Called on the final close of a SEP device. As the open protects against
- *     multiple simultaneous opens, this method is called when the
- *     final reference to the open handle is dropped.
- */
-
-static int sep_release(struct inode *inode, struct file *filp)
-{
-       struct sep_device *sep =  filp->private_data;
-#if 0                          /*!SEP_DRIVER_POLLING_MODE */
-       /* close IMR */
-       sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
-       /* release IRQ line */
-       free_irq(SEP_DIRVER_IRQ_NUM, sep);
-
-#endif
-       /* Ensure any blocked open progresses */
-       clear_bit(0, &sep->in_use);
-       wake_up(&sep_event);
-       return 0;
-}
-
-/*---------------------------------------------------------------
-  map function - this function maps the message shared area
------------------------------------------------------------------*/
-static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
-{
-       dma_addr_t bus_addr;
-       struct sep_device *sep = filp->private_data;
-
-       dbg("-------->SEP Driver: mmap start\n");
-
-       /* check that the size of the mapped range does not exceed the size
-          of the message shared area */
-       if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
-               edbg("SEP Driver mmap requested size is more than allowed\n");
-               printk(KERN_WARNING "SEP Driver mmap requested size is more than allowed\n");
-               printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_end);
-               printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_start);
-               return -EAGAIN;
-       }
-
-       edbg("SEP Driver:sep->shared_addr is %p\n", sep->shared_addr);
-
-       /* get bus address */
-       bus_addr = sep->shared_bus;
-
-       edbg("SEP Driver: phys_addr is %08llx\n", (unsigned long long)bus_addr);
-
-       if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
-               edbg("SEP Driver remap_page_range failed\n");
-               printk(KERN_WARNING "SEP Driver remap_page_range failed\n");
-               return -EAGAIN;
-       }
-
-       dbg("SEP Driver:<-------- mmap end\n");
-
-       return 0;
-}
-
-
-/*-----------------------------------------------
-  poll function
-*----------------------------------------------*/
-static unsigned int sep_poll(struct file *filp, poll_table * wait)
-{
-       unsigned long count;
-       unsigned int mask = 0;
-       unsigned long retval = 0;       /* GPR2 register value */
-       struct sep_device *sep = filp->private_data;
-
-       dbg("---------->SEP Driver poll: start\n");
-
-
-#if SEP_DRIVER_POLLING_MODE
-
-       while (sep->send_ct != (retval & 0x7FFFFFFF)) {
-               retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
-
-               for (count = 0; count < 10 * 4; count += 4)
-                       edbg("Poll Debug Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES + count)));
-       }
-
-       sep->reply_ct++;
-#else
-       /* add the event to the polling wait table */
-       poll_wait(filp, &sep_event, wait);
-
-#endif
-
-       edbg("sep->send_ct is %lu\n", sep->send_ct);
-       edbg("sep->reply_ct is %lu\n", sep->reply_ct);
-
-       /* check if the data is ready */
-       if (sep->send_ct == sep->reply_ct) {
-               for (count = 0; count < 12 * 4; count += 4)
-                       edbg("Sep Mesg Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + count)));
-
-               for (count = 0; count < 10 * 4; count += 4)
-                       edbg("Debug Data Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + 0x1800 + count)));
-
-               retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
-               edbg("retval is %lu\n", retval);
-               /* check if this is a sep reply or a request */
-               if (retval >> 31) {
-                       edbg("SEP Driver: sep request in\n");
-                       /* request */
-                       mask |= POLLOUT | POLLWRNORM;
-               } else {
-                       edbg("SEP Driver: sep reply in\n");
-                       mask |= POLLIN | POLLRDNORM;
-               }
-       }
-       dbg("SEP Driver:<-------- poll exit\n");
-       return mask;
-}
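-
-/*
- * Note on the GPR2 handshake used by sep_poll() above (inferred from the
- * code, not from separate hardware documentation): bit 31 of
- * HW_HOST_SEP_HOST_GPR2_REG_ADDR acts as a direction flag - set means a
- * request coming from the SEP (reported as POLLOUT), clear means a reply
- * to the host (reported as POLLIN). In polling mode the low 31 bits are
- * compared against send_ct to detect completion.
- */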
-
-/**
- *     sep_time_address        -       address in SEP memory of time
- *     @sep: SEP device we want the address from
- *
- *     Return the address of the two dwords in memory used for time
- *     setting.
- */
-
-static u32 *sep_time_address(struct sep_device *sep)
-{
-       return sep->shared_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
-}
-
-/**
- *     sep_set_time            -       set the SEP time
- *     @sep: the SEP we are setting the time for
- *
- *     Calculates time and sets it at the predefined address.
- *     Called with the sep mutex held.
- */
-static unsigned long sep_set_time(struct sep_device *sep)
-{
-       struct timeval time;
-       u32 *time_addr; /* address of time as seen by the kernel */
-
-
-       dbg("sep:sep_set_time start\n");
-
-       do_gettimeofday(&time);
-
-       /* set value in the SYSTEM MEMORY offset */
-       time_addr = sep_time_address(sep);
-
-       time_addr[0] = SEP_TIME_VAL_TOKEN;
-       time_addr[1] = time.tv_sec;
-
-       edbg("SEP Driver:time.tv_sec is %lu\n", time.tv_sec);
-       edbg("SEP Driver:time_addr is %p\n", time_addr);
-       edbg("SEP Driver:sep->shared_addr is %p\n", sep->shared_addr);
-
-       return time.tv_sec;
-}
-
-/**
- *     sep_dump_message        - dump the message that is pending
- *     @sep: sep device
- *
- *     Dump out the message pending in the shared message area
- */
-
-static void sep_dump_message(struct sep_device *sep)
-{
-       int count;
-       for (count = 0; count < 12 * 4; count += 4)
-               edbg("Word %d of the message is %u\n", count, *((u32 *) (sep->shared_addr + count)));
-}
-
-/**
- *     sep_send_command_handler        -       kick off a command
- *     @sep: sep being signalled
- *
- *     This function raises an interrupt to the SEP, signalling that it
- *     has a new command from the host.
- */
-
-static void sep_send_command_handler(struct sep_device *sep)
-{
-       dbg("sep:sep_send_command_handler start\n");
-
-       mutex_lock(&sep_mutex);
-       sep_set_time(sep);
-
-       /* FIXME: flush cache */
-       flush_cache_all();
-
-       sep_dump_message(sep);
-       /* update counter */
-       sep->send_ct++;
-       /* send interrupt to SEP */
-       sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
-       dbg("SEP Driver:<-------- sep_send_command_handler end\n");
-       mutex_unlock(&sep_mutex);
-       return;
-}
-
-/**
- *     sep_send_reply_command_handler  -       kick off a command reply
- *     @sep: sep being signalled
- *
- *     This function raises an interrupt to the SEP, signalling that the
- *     host has placed a new reply for it.
- */
-
-static void sep_send_reply_command_handler(struct sep_device *sep)
-{
-       dbg("sep:sep_send_reply_command_handler start\n");
-
-       /* flush cache */
-       flush_cache_all();
-
-       sep_dump_message(sep);
-
-       mutex_lock(&sep_mutex);
-       sep->send_ct++;         /* update counter */
-       /* send the interrupt to SEP */
-       sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep->send_ct);
-       /* update both counters */
-       sep->send_ct++;
-       sep->reply_ct++;
-       mutex_unlock(&sep_mutex);
-       dbg("sep: sep_send_reply_command_handler end\n");
-}
-
-/*
-  This function handles the allocate data pool memory request.
-  It calculates the bus address of the allocated memory and the
-  offset of this area from the mapped address, so that the FVOs in
-  user space can calculate the exact virtual address of this
-  allocated memory.
-*/
-static int sep_allocate_data_pool_memory_handler(struct sep_device *sep,
-                                                       unsigned long arg)
-{
-       int error;
-       struct sep_driver_alloc_t command_args;
-
-       dbg("SEP Driver:--------> sep_allocate_data_pool_memory_handler start\n");
-
-       error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_alloc_t));
-       if (error) {
-               error = -EFAULT;
-               goto end_function;
-       }
-
-       /* allocate memory */
-       if ((sep->data_pool_bytes_allocated + command_args.num_bytes) > SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
-               error = -ENOMEM;
-               goto end_function;
-       }
-
-       /* set the virtual and bus address */
-       command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;
-       command_args.phys_address = sep->shared_bus + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;
-
-       /* write the memory back to the user space */
-       error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_alloc_t));
-       if (error) {
-               error = -EFAULT;
-               goto end_function;
-       }
-
-       /* set the allocation */
-       sep->data_pool_bytes_allocated += command_args.num_bytes;
-
-end_function:
-       dbg("SEP Driver:<-------- sep_allocate_data_pool_memory_handler end\n");
-       return error;
-}
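-
-/*
- * Hypothetical user space sketch (not part of this driver) of how the
- * offset returned above is meant to be used: the caller mmap()s the
- * shared area via sep_mmap() and adds the returned offset to the mapping
- * base. The ioctl command macro lives in sep_driver_api.h and is named
- * here only as alloc_ioctl_cmd; fd and len are illustrative.
- *
- *     struct sep_driver_alloc_t args = { .num_bytes = len };
- *     void *base = mmap(NULL, SEP_DRIVER_MMMAP_AREA_SIZE,
- *                       PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
- *     ioctl(fd, alloc_ioctl_cmd, &args);
- *     void *buf = (char *)base + args.offset;
- */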
-
-/*
-  This function handles the write into allocated data pool command
-*/
-static int sep_write_into_data_pool_handler(struct sep_device *sep, unsigned long arg)
-{
-       int error;
-       void *virt_address;
-       unsigned long va;
-       unsigned long app_in_address;
-       unsigned long num_bytes;
-       void *data_pool_area_addr;
-
-       dbg("SEP Driver:--------> sep_write_into_data_pool_handler start\n");
-
-       /* get the application address */
-       error = get_user(app_in_address, &(((struct sep_driver_write_t *) arg)->app_address));
-       if (error)
-               goto end_function;
-
-       /* get the kernel virtual address */
-       error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
-       if (error)
-               goto end_function;
-       virt_address = (void *)va;
-
-       /* get the number of bytes */
-       error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
-       if (error)
-               goto end_function;
-
-       /* calculate the start of the data pool */
-       data_pool_area_addr = sep->shared_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
-
-
-       /* check that the range of the virtual kernel address is correct */
-       if (virt_address < data_pool_area_addr || virt_address > (data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES)) {
-               error = -EINVAL;
-               goto end_function;
-       }
-       /* copy the application data */
-       error = copy_from_user(virt_address, (void *) app_in_address, num_bytes);
-       if (error)
-               error = -EFAULT;
-end_function:
-       dbg("SEP Driver:<-------- sep_write_into_data_pool_handler end\n");
-       return error;
-}
-
-/*
-  this function handles the read from data pool command
-*/
-static int sep_read_from_data_pool_handler(struct sep_device *sep, unsigned long arg)
-{
-       int error;
-       /* virtual address of dest application buffer */
-       unsigned long app_out_address;
-       /* virtual address of the data pool */
-       unsigned long va;
-       void *virt_address;
-       unsigned long num_bytes;
-       void *data_pool_area_addr;
-
-       dbg("SEP Driver:--------> sep_read_from_data_pool_handler start\n");
-
-       /* get the application address */
-       error = get_user(app_out_address, &(((struct sep_driver_write_t *) arg)->app_address));
-       if (error)
-               goto end_function;
-
-       /* get the kernel virtual address */
-       error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
-       if (error)
-               goto end_function;
-       virt_address = (void *)va;
-
-       /* get the number of bytes */
-       error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
-       if (error)
-               goto end_function;
-
-       /* calculate the start of the data pool */
-       data_pool_area_addr = sep->shared_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
-
-       /* FIXME: These are incomplete all over the driver: what about + len
-          and when doing that also overflows */
-       /* check that the range of the virtual kernel address is correct */
-       if (virt_address < data_pool_area_addr || virt_address > data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
-               error = -EINVAL;
-               goto end_function;
-       }
-
-       /* copy the application data */
-       error = copy_to_user((void *) app_out_address, virt_address, num_bytes);
-       if (error)
-               error = -EFAULT;
-end_function:
-       dbg("SEP Driver:<-------- sep_read_from_data_pool_handler end\n");
-       return error;
-}
-
-/*
-  This function releases all the physical pages of the application
-  virtual buffer that were previously locked
-*/
-static int sep_free_dma_pages(struct page **page_array_ptr, unsigned long num_pages, unsigned long dirtyFlag)
-{
-       unsigned long count;
-
-       if (dirtyFlag) {
-               for (count = 0; count < num_pages; count++) {
-                       /* the out array was written, therefore the data was changed */
-                       if (!PageReserved(page_array_ptr[count]))
-                               SetPageDirty(page_array_ptr[count]);
-                       page_cache_release(page_array_ptr[count]);
-               }
-       } else {
-               /* free in pages - the data was only read, therefore no update was done
-                  on those pages */
-               for (count = 0; count < num_pages; count++)
-                       page_cache_release(page_array_ptr[count]);
-       }
-
-       /* free the array; kfree() tolerates NULL */
-       kfree(page_array_ptr);
-
-       return 0;
-}
-
-/*
-  This function locks all the physical pages of the kernel virtual buffer
-  and constructs a basic lli array, where each entry holds the physical
-  page address and the size of the application data held in that page
-*/
-static int sep_lock_kernel_pages(struct sep_device *sep,
-                                unsigned long kernel_virt_addr,
-                                unsigned long data_size,
-                                unsigned long *num_pages_ptr,
-                                struct sep_lli_entry_t **lli_array_ptr,
-                                struct page ***page_array_ptr)
-{
-       int error = 0;
-       /* the page of the end address of the kernel buffer */
-       unsigned long end_page;
-       /* the page of the start address of the kernel buffer */
-       unsigned long start_page;
-       /* the range in pages */
-       unsigned long num_pages;
-       struct sep_lli_entry_t *lli_array;
-       /* next kernel address to map */
-       unsigned long next_kernel_address;
-       unsigned long count;
-
-       dbg("SEP Driver:--------> sep_lock_kernel_pages start\n");
-
-       /* set start and end pages  and num pages */
-       end_page = (kernel_virt_addr + data_size - 1) >> PAGE_SHIFT;
-       start_page = kernel_virt_addr >> PAGE_SHIFT;
-       num_pages = end_page - start_page + 1;
-
-       edbg("SEP Driver: kernel_virt_addr is %08lx\n", kernel_virt_addr);
-       edbg("SEP Driver: data_size is %lu\n", data_size);
-       edbg("SEP Driver: start_page is %lx\n", start_page);
-       edbg("SEP Driver: end_page is %lx\n", end_page);
-       edbg("SEP Driver: num_pages is %lu\n", num_pages);
-
-       lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
-       if (!lli_array) {
-               edbg("SEP Driver: kmalloc for lli_array failed\n");
-               error = -ENOMEM;
-               goto end_function;
-       }
-
-       /* set the start address of the first page - app data may not start
-          at the beginning of the page */
-       lli_array[0].physical_address = (unsigned long) virt_to_phys((unsigned long *) kernel_virt_addr);
-
-       /* check that not all the data is in the first page only */
-       if ((PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK))) >= data_size)
-               lli_array[0].block_size = data_size;
-       else
-               lli_array[0].block_size = PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK));
-
-       /* debug print */
-       dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
-
-       /* advance the address to the start of the next page */
-       next_kernel_address = (kernel_virt_addr & PAGE_MASK) + PAGE_SIZE;
-
-       /* go from the second page to the one before the last */
-       for (count = 1; count < (num_pages - 1); count++) {
-               lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
-               lli_array[count].block_size = PAGE_SIZE;
-
-               edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
-               next_kernel_address += PAGE_SIZE;
-       }
-
-       /* if more than one page was locked - update the size of the last page */
-       if (num_pages > 1) {
-               /* update the address of the last page */
-               lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
-
-               /* set the size of the last page */
-               lli_array[count].block_size = (kernel_virt_addr + data_size) & (~PAGE_MASK);
-
-               if (lli_array[count].block_size == 0) {
-                       dbg("app_virt_addr is %08lx\n", kernel_virt_addr);
-                       dbg("data_size is %lu\n", data_size);
-                       while (1);
-               }
-
-               edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
-       }
-       /* set output params */
-       *lli_array_ptr = lli_array;
-       *num_pages_ptr = num_pages;
-       *page_array_ptr = 0;
-end_function:
-       dbg("SEP Driver:<-------- sep_lock_kernel_pages end\n");
-       return error;
-}
-
-/*
-  This function locks all the physical pages of the application virtual buffer
-  and constructs a basic lli array, where each entry holds the physical page
-  address and the size of the application data held in that page
-*/
-static int sep_lock_user_pages(struct sep_device *sep,
-                       unsigned long app_virt_addr,
-                       unsigned long data_size,
-                       unsigned long *num_pages_ptr,
-                       struct sep_lli_entry_t **lli_array_ptr,
-                       struct page ***page_array_ptr)
-{
-       int error = 0;
-       /* the page of the end address of the user space buffer */
-       unsigned long end_page;
-       /* the page of the start address of the user space buffer */
-       unsigned long start_page;
-       /* the range in pages */
-       unsigned long num_pages;
-       struct page **page_array;
-       struct sep_lli_entry_t *lli_array;
-       unsigned long count;
-       int result;
-
-       dbg("SEP Driver:--------> sep_lock_user_pages start\n");
-
-       /* set start and end pages  and num pages */
-       end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
-       start_page = app_virt_addr >> PAGE_SHIFT;
-       num_pages = end_page - start_page + 1;
-
-       edbg("SEP Driver: app_virt_addr is %08lx\n", app_virt_addr);
-       edbg("SEP Driver: data_size is %lu\n", data_size);
-       edbg("SEP Driver: start_page is %lu\n", start_page);
-       edbg("SEP Driver: end_page is %lu\n", end_page);
-       edbg("SEP Driver: num_pages is %lu\n", num_pages);
-
-       /* allocate array of pages structure pointers */
-       page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
-       if (!page_array) {
-               edbg("SEP Driver: kmalloc for page_array failed\n");
-
-               error = -ENOMEM;
-               goto end_function;
-       }
-
-       lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
-       if (!lli_array) {
-               edbg("SEP Driver: kmalloc for lli_array failed\n");
-
-               error = -ENOMEM;
-               goto end_function_with_error1;
-       }
-
-       /* convert the application virtual address into a set of physical */
-       down_read(&current->mm->mmap_sem);
-       result = get_user_pages(current, current->mm, app_virt_addr, num_pages, 1, 0, page_array, 0);
-       up_read(&current->mm->mmap_sem);
-
-       /* check the number of pages locked - if not all then exit with error */
-       if (result != num_pages) {
-               dbg("SEP Driver: not all pages locked by get_user_pages\n");
-
-               error = -ENOMEM;
-               goto end_function_with_error2;
-       }
-
-       /* flush the cache */
-       for (count = 0; count < num_pages; count++)
-               flush_dcache_page(page_array[count]);
-
-       /* set the start address of the first page - app data may not start
-          at the beginning of the page */
-       lli_array[0].physical_address = ((unsigned long) page_to_phys(page_array[0])) + (app_virt_addr & (~PAGE_MASK));
-
-       /* check that not all the data is in the first page only */
-       if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
-               lli_array[0].block_size = data_size;
-       else
-               lli_array[0].block_size = PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
-
-       /* debug print */
-       dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
-
-       /* go from the second page to the one before the last */
-       for (count = 1; count < (num_pages - 1); count++) {
-               lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
-               lli_array[count].block_size = PAGE_SIZE;
-
-               edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
-       }
-
-       /* if more than one page was locked - update the size of the last page */
-       if (num_pages > 1) {
-               /* update the address of the last page */
-               lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
-
-               /* set the size of the last page */
-               lli_array[count].block_size = (app_virt_addr + data_size) & (~PAGE_MASK);
-
-               if (lli_array[count].block_size == 0) {
-                       dbg("app_virt_addr is %08lx\n", app_virt_addr);
-                       dbg("data_size is %lu\n", data_size);
-                       while (1);
-               }
-               edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n",
-                    count, lli_array[count].physical_address,
-                    count, lli_array[count].block_size);
-       }
-
-       /* set output params */
-       *lli_array_ptr = lli_array;
-       *num_pages_ptr = num_pages;
-       *page_array_ptr = page_array;
-       goto end_function;
-
-end_function_with_error2:
-       /* release the pages */
-       for (count = 0; count < num_pages; count++)
-               page_cache_release(page_array[count]);
-       kfree(lli_array);
-end_function_with_error1:
-       kfree(page_array);
-end_function:
-       dbg("SEP Driver:<-------- sep_lock_user_pages end\n");
-       return error;
-}
-
-
-/*
-  this function calculates the size of data that can be inserted into the lli
-  table from this array; the condition is that either the table is full
-  (all entries are filled) or there are no more entries in the lli array
-*/
-static unsigned long sep_calculate_lli_table_max_size(struct sep_lli_entry_t *lli_in_array_ptr, unsigned long num_array_entries)
-{
-       unsigned long table_data_size = 0;
-       unsigned long counter;
-
-       /* accumulate data for the lli table until either the whole
-          table is filled or the data has ended */
-       for (counter = 0; (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) && (counter < num_array_entries); counter++)
-               table_data_size += lli_in_array_ptr[counter].block_size;
-       return table_data_size;
-}
-
-/*
-  this function builds one lli table from the lli_array according to
-  the given size of data
-*/
-static void sep_build_lli_table(struct sep_lli_entry_t *lli_array_ptr, struct sep_lli_entry_t *lli_table_ptr, unsigned long *num_processed_entries_ptr, unsigned long *num_table_entries_ptr, unsigned long table_data_size)
-{
-       unsigned long curr_table_data_size;
-       /* counter of lli array entry */
-       unsigned long array_counter;
-
-       dbg("SEP Driver:--------> sep_build_lli_table start\n");
-
-       /* init current table data size and lli array entry counter */
-       curr_table_data_size = 0;
-       array_counter = 0;
-       *num_table_entries_ptr = 1;
-
-       edbg("SEP Driver:table_data_size is %lu\n", table_data_size);
-
-       /* fill the table till table size reaches the needed amount */
-       while (curr_table_data_size < table_data_size) {
-               /* update the number of entries in table */
-               (*num_table_entries_ptr)++;
-
-               lli_table_ptr->physical_address = lli_array_ptr[array_counter].physical_address;
-               lli_table_ptr->block_size = lli_array_ptr[array_counter].block_size;
-               curr_table_data_size += lli_table_ptr->block_size;
-
-               edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
-               edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
-               edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
-
-               /* check for overflow of the table data */
-               if (curr_table_data_size > table_data_size) {
-                       edbg("SEP Driver:curr_table_data_size > table_data_size\n");
-
-                       /* update the size of block in the table */
-                       lli_table_ptr->block_size -= (curr_table_data_size - table_data_size);
-
-                       /* update the physical address in the lli array */
-                       lli_array_ptr[array_counter].physical_address += lli_table_ptr->block_size;
-
-                       /* update the block size left in the lli array */
-                       lli_array_ptr[array_counter].block_size = (curr_table_data_size - table_data_size);
-               } else
-                       /* advance to the next entry in the lli_array */
-                       array_counter++;
-
-               edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
-               edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
-
-               /* move to the next entry in table */
-               lli_table_ptr++;
-       }
-
-       /* set the info entry to default */
-       lli_table_ptr->physical_address = 0xffffffff;
-       lli_table_ptr->block_size = 0;
-
-       edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
-       edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
-       edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
-
-       /* set the output parameter */
-       *num_processed_entries_ptr += array_counter;
-
-       edbg("SEP Driver:*num_processed_entries_ptr is %lu\n", *num_processed_entries_ptr);
-       dbg("SEP Driver:<-------- sep_build_lli_table end\n");
-       return;
-}
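-
-/*
- * Layout note (derived from the packing above and the decode in
- * sep_debug_print_lli_tables() below): the last ("info") entry of each
- * lli table points at the next table and packs that table's entry count
- * into the top byte of block_size and its data size into the low 24 bits;
- * the final table keeps the 0xffffffff/0 terminator set above.
- * Illustrative helpers only - the driver itself open-codes the shifts:
- *
- *     #define SEP_LLI_INFO_NUM_ENTRIES(bs)    (((bs) >> 24) & 0xff)
- *     #define SEP_LLI_INFO_DATA_SIZE(bs)      ((bs) & 0xffffff)
- */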
-
-/*
-  this function goes over the list of the created tables and
-  prints all the data
-*/
-static void sep_debug_print_lli_tables(struct sep_device *sep, struct sep_lli_entry_t *lli_table_ptr, unsigned long num_table_entries, unsigned long table_data_size)
-{
-       unsigned long table_count;
-       unsigned long entries_count;
-
-       dbg("SEP Driver:--------> sep_debug_print_lli_tables start\n");
-
-       table_count = 1;
-       while ((unsigned long) lli_table_ptr != 0xffffffff) {
-               edbg("SEP Driver: lli table %08lx, table_data_size is %lu\n", table_count, table_data_size);
-               edbg("SEP Driver: num_table_entries is %lu\n", num_table_entries);
-
-               /* print entries of the table (without info entry) */
-               for (entries_count = 0; entries_count < num_table_entries; entries_count++, lli_table_ptr++) {
-                       edbg("SEP Driver:lli_table_ptr address is %08lx\n", (unsigned long) lli_table_ptr);
-                       edbg("SEP Driver:phys address is %08lx block size is %lu\n", lli_table_ptr->physical_address, lli_table_ptr->block_size);
-               }
-
-               /* point to the info entry */
-               lli_table_ptr--;
-
-               edbg("SEP Driver:phys lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
-               edbg("SEP Driver:phys lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
-
-
-               table_data_size = lli_table_ptr->block_size & 0xffffff;
-               num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
-               lli_table_ptr = (struct sep_lli_entry_t *)
-                   (lli_table_ptr->physical_address);
-
-               edbg("SEP Driver:phys table_data_size is %lu num_table_entries is %lu lli_table_ptr is%lu\n", table_data_size, num_table_entries, (unsigned long) lli_table_ptr);
-
-               if ((unsigned long) lli_table_ptr != 0xffffffff)
-                       lli_table_ptr = (struct sep_lli_entry_t *) sep_shared_bus_to_virt(sep, (unsigned long) lli_table_ptr);
-
-               table_count++;
-       }
-       dbg("SEP Driver:<-------- sep_debug_print_lli_tables end\n");
-}
-
-
-/*
-  This function prepares only the input DMA table for synchronous
-  symmetric operations (HASH)
-*/
-static int sep_prepare_input_dma_table(struct sep_device *sep,
-                               unsigned long app_virt_addr,
-                               unsigned long data_size,
-                               unsigned long block_size,
-                               unsigned long *lli_table_ptr,
-                               unsigned long *num_entries_ptr,
-                               unsigned long *table_data_size_ptr,
-                               bool isKernelVirtualAddress)
-{
-       /* pointer to the info entry of the table - the last entry */
-       struct sep_lli_entry_t *info_entry_ptr;
-       /* array of lli entries built from the locked pages */
-       struct sep_lli_entry_t *lli_array_ptr;
-       /* points to the first entry to be processed in the lli_in_array */
-       unsigned long current_entry;
-       /* num entries in the virtual buffer */
-       unsigned long sep_lli_entries;
-       /* lli table pointer */
-       struct sep_lli_entry_t *in_lli_table_ptr;
-       /* the total data in one table */
-       unsigned long table_data_size;
-       /* number of entries in lli table */
-       unsigned long num_entries_in_table;
-       /* next table address */
-       void *lli_table_alloc_addr;
-       unsigned long result;
-
-       dbg("SEP Driver:--------> sep_prepare_input_dma_table start\n");
-
-       edbg("SEP Driver:data_size is %lu\n", data_size);
-       edbg("SEP Driver:block_size is %lu\n", block_size);
-
-       /* initialize the pages pointers */
-       sep->in_page_array = 0;
-       sep->in_num_pages = 0;
-
-       if (data_size == 0) {
-               /* special case - create a 2-entry table with zero data */
-               in_lli_table_ptr = (struct sep_lli_entry_t *) (sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES);
-               /* FIXME: Should the entry below not be for _bus */
-               in_lli_table_ptr->physical_address = (unsigned long)sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
-               in_lli_table_ptr->block_size = 0;
-
-               in_lli_table_ptr++;
-               in_lli_table_ptr->physical_address = 0xFFFFFFFF;
-               in_lli_table_ptr->block_size = 0;
-
-               *lli_table_ptr = sep->shared_bus + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
-               *num_entries_ptr = 2;
-               *table_data_size_ptr = 0;
-
-               goto end_function;
-       }
-
-       /* check if the pages are in Kernel Virtual Address layout */
-       if (isKernelVirtualAddress == true)
-               /* lock the pages of the kernel buffer and translate them to pages */
-               result = sep_lock_kernel_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
-       else
-               /* lock the pages of the user buffer and translate them to pages */
-               result = sep_lock_user_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
-
-       if (result)
-               return result;
-
-       edbg("SEP Driver:output sep->in_num_pages is %lu\n", sep->in_num_pages);
-
-       current_entry = 0;
-       info_entry_ptr = 0;
-       sep_lli_entries = sep->in_num_pages;
-
-       /* initialize to point after the message area */
-       lli_table_alloc_addr = sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
-
-       /* loop until all the entries in the in array are processed */
-       while (current_entry < sep_lli_entries) {
-               /* set the new input and output tables */
-               in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
-
-               lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
-
-               /* calculate the maximum size of data for input table */
-               table_data_size = sep_calculate_lli_table_max_size(&lli_array_ptr[current_entry], (sep_lli_entries - current_entry));
-
-               /* now round the table size down to a multiple of the block size */
-               table_data_size = (table_data_size / block_size) * block_size;
-
-               edbg("SEP Driver:output table_data_size is %lu\n", table_data_size);
-
-               /* construct input lli table */
-               sep_build_lli_table(&lli_array_ptr[current_entry], in_lli_table_ptr, &current_entry, &num_entries_in_table, table_data_size);
-
-               if (info_entry_ptr == 0) {
-                       /* set the output parameters to physical addresses */
-                       *lli_table_ptr = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
-                       *num_entries_ptr = num_entries_in_table;
-                       *table_data_size_ptr = table_data_size;
-
-                       edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_ptr);
-               } else {
-                       /* update the info entry of the previous in table */
-                       info_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
-                       info_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
-               }
-
-               /* save the pointer to the info entry of the current tables */
-               info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
-       }
-
-       /* print input tables */
-       sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
-                                  sep_shared_bus_to_virt(sep, *lli_table_ptr), *num_entries_ptr, *table_data_size_ptr);
-
-       /* free the lli array */
-       kfree(lli_array_ptr);
-end_function:
-       dbg("SEP Driver:<-------- sep_prepare_input_dma_table end\n");
-       return 0;
-
-}
-
-/*
- This function creates the input and output dma tables for
- symmetric operations (AES/DES) according to the block size from LLI arrays
-*/
-static int sep_construct_dma_tables_from_lli(struct sep_device *sep,
-                                     struct sep_lli_entry_t *lli_in_array,
-                                     unsigned long sep_in_lli_entries,
-                                     struct sep_lli_entry_t *lli_out_array,
-                                     unsigned long sep_out_lli_entries,
-                                     unsigned long block_size, unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr)
-{
-       /* points to the area where next lli table can be allocated: keep void *
-          as there is pointer scaling to fix otherwise */
-       void *lli_table_alloc_addr;
-       /* input lli table */
-       struct sep_lli_entry_t *in_lli_table_ptr;
-       /* output lli table */
-       struct sep_lli_entry_t *out_lli_table_ptr;
-       /* pointer to the info entry of the table - the last entry */
-       struct sep_lli_entry_t *info_in_entry_ptr;
-       /* pointer to the info entry of the table - the last entry */
-       struct sep_lli_entry_t *info_out_entry_ptr;
-       /* points to the first entry to be processed in the lli_in_array */
-       unsigned long current_in_entry;
-       /* points to the first entry to be processed in the lli_out_array */
-       unsigned long current_out_entry;
-       /* max size of the input table */
-       unsigned long in_table_data_size;
-       /* max size of the output table */
-       unsigned long out_table_data_size;
-       /* flag that signifies whether these are the first tables built from the arrays */
-       unsigned long first_table_flag;
-       /* the data size that should be in table */
-       unsigned long table_data_size;
-       /* number of entries in the input table */
-       unsigned long num_entries_in_table;
-       /* number of entries in the output table */
-       unsigned long num_entries_out_table;
-
-       dbg("SEP Driver:--------> sep_construct_dma_tables_from_lli start\n");
-
-       /* initialize to point after the message area */
-       lli_table_alloc_addr = sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
-
-       current_in_entry = 0;
-       current_out_entry = 0;
-       first_table_flag = 1;
-       info_in_entry_ptr = 0;
-       info_out_entry_ptr = 0;
-
-       /* loop until all the entries in the in array are processed */
-       while (current_in_entry < sep_in_lli_entries) {
-               /* set the new input and output tables */
-               in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
-
-               lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
-
-               /* set the first output tables */
-               out_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
-
-               lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
-
-               /* calculate the maximum size of data for input table */
-               in_table_data_size = sep_calculate_lli_table_max_size(&lli_in_array[current_in_entry], (sep_in_lli_entries - current_in_entry));
-
-               /* calculate the maximum size of data for output table */
-               out_table_data_size = sep_calculate_lli_table_max_size(&lli_out_array[current_out_entry], (sep_out_lli_entries - current_out_entry));
-
-               edbg("SEP Driver:in_table_data_size is %lu\n", in_table_data_size);
-               edbg("SEP Driver:out_table_data_size is %lu\n", out_table_data_size);
-
-               /* check where the data is smallest */
-               table_data_size = in_table_data_size;
-               if (table_data_size > out_table_data_size)
-                       table_data_size = out_table_data_size;
-
-               /* now round the table size down to a multiple of the block size */
-               table_data_size = (table_data_size / block_size) * block_size;
-
-               dbg("SEP Driver:table_data_size is %lu\n", table_data_size);
-
-               /* construct input lli table */
-               sep_build_lli_table(&lli_in_array[current_in_entry], in_lli_table_ptr, &current_in_entry, &num_entries_in_table, table_data_size);
-
-               /* construct output lli table */
-               sep_build_lli_table(&lli_out_array[current_out_entry], out_lli_table_ptr, &current_out_entry, &num_entries_out_table, table_data_size);
-
-               /* if info entry is null - this is the first table built */
-               if (info_in_entry_ptr == 0) {
-                       /* set the output parameters to physical addresses */
-                       *lli_table_in_ptr = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
-                       *in_num_entries_ptr = num_entries_in_table;
-                       *lli_table_out_ptr = sep_shared_virt_to_bus(sep, out_lli_table_ptr);
-                       *out_num_entries_ptr = num_entries_out_table;
-                       *table_data_size_ptr = table_data_size;
-
-                       edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_in_ptr);
-                       edbg("SEP Driver:output lli_table_out_ptr is %08lx\n", *lli_table_out_ptr);
-               } else {
-                       /* update the info entry of the previous in table */
-                       info_in_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
-                       info_in_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
-
-                       /* update the info entry of the previous out table */
-                       info_out_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, out_lli_table_ptr);
-                       info_out_entry_ptr->block_size = ((num_entries_out_table) << 24) | (table_data_size);
-               }
-
-               /* save the pointer to the info entry of the current tables */
-               info_in_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
-               info_out_entry_ptr = out_lli_table_ptr + num_entries_out_table - 1;
-
-               edbg("SEP Driver:output num_entries_out_table is %lu\n", (unsigned long) num_entries_out_table);
-               edbg("SEP Driver:output info_in_entry_ptr is %lu\n", (unsigned long) info_in_entry_ptr);
-               edbg("SEP Driver:output info_out_entry_ptr is %lu\n", (unsigned long) info_out_entry_ptr);
-       }
-
-       /* print input tables */
-       sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
-                                  sep_shared_bus_to_virt(sep, *lli_table_in_ptr), *in_num_entries_ptr, *table_data_size_ptr);
-       /* print output tables */
-       sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
-                                  sep_shared_bus_to_virt(sep, *lli_table_out_ptr), *out_num_entries_ptr, *table_data_size_ptr);
-       dbg("SEP Driver:<-------- sep_construct_dma_tables_from_lli end\n");
-       return 0;
-}
-
-
-/*
-  This function builds input and output DMA tables for synchronous
-  symmetric operations (AES, DES). It also checks that each table
-  holds a multiple of the block size
-*/
-static int sep_prepare_input_output_dma_table(struct sep_device *sep,
-                                      unsigned long app_virt_in_addr,
-                                      unsigned long app_virt_out_addr,
-                                      unsigned long data_size,
-                                      unsigned long block_size,
-                                      unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr, bool isKernelVirtualAddress)
-{
-       /* array of pointers of page */
-       struct sep_lli_entry_t *lli_in_array;
-       /* array of pointers of page */
-       struct sep_lli_entry_t *lli_out_array;
-       int result = 0;
-
-       dbg("SEP Driver:--------> sep_prepare_input_output_dma_table start\n");
-
-       /* initialize the pages pointers */
-       sep->in_page_array = 0;
-       sep->out_page_array = 0;
-
-       /* check if the pages are in Kernel Virtual Address layout */
-       if (isKernelVirtualAddress == true) {
-               /* lock the pages of the kernel buffer and translate them to pages */
-               result = sep_lock_kernel_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
-               if (result) {
-                       edbg("SEP Driver: sep_lock_kernel_pages for input virtual buffer failed\n");
-                       goto end_function;
-               }
-       } else {
-               /* lock the pages of the user buffer and translate them to pages */
-               result = sep_lock_user_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
-               if (result) {
-                       edbg("SEP Driver: sep_lock_user_pages for input virtual buffer failed\n");
-                       goto end_function;
-               }
-       }
-
-       if (isKernelVirtualAddress == true) {
-               result = sep_lock_kernel_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
-               if (result) {
-                       edbg("SEP Driver: sep_lock_kernel_pages for output virtual buffer failed\n");
-                       goto end_function_with_error1;
-               }
-       } else {
-               result = sep_lock_user_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
-               if (result) {
-                       edbg("SEP Driver: sep_lock_user_pages for output virtual buffer failed\n");
-                       goto end_function_with_error1;
-               }
-       }
-       edbg("sep->in_num_pages is %lu\n", sep->in_num_pages);
-       edbg("sep->out_num_pages is %lu\n", sep->out_num_pages);
-       edbg("SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n", SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
-
-
-       /* call the function that creates the tables from the lli arrays */
-       result = sep_construct_dma_tables_from_lli(sep, lli_in_array, sep->in_num_pages, lli_out_array, sep->out_num_pages, block_size, lli_table_in_ptr, lli_table_out_ptr, in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
-       if (result) {
-               edbg("SEP Driver: sep_construct_dma_tables_from_lli failed\n");
-               goto end_function_with_error2;
-       }
-
-       /* fall through - free the lli entry arrays */
-       dbg("in_num_entries_ptr is %08lx\n", *in_num_entries_ptr);
-       dbg("out_num_entries_ptr is %08lx\n", *out_num_entries_ptr);
-       dbg("table_data_size_ptr is %08lx\n", *table_data_size_ptr);
-end_function_with_error2:
-       kfree(lli_out_array);
-end_function_with_error1:
-       kfree(lli_in_array);
-end_function:
-       dbg("SEP Driver:<-------- sep_prepare_input_output_dma_table end result = %d\n", (int) result);
-       return result;
-
-}
-
-/*
-  this function handles the request for creation of the DMA tables
-  for the synchronous symmetric operations (AES, DES)
-*/
-static int sep_create_sync_dma_tables_handler(struct sep_device *sep,
-                                               unsigned long arg)
-{
-       int error;
-       /* command arguments */
-       struct sep_driver_build_sync_table_t command_args;
-
-       dbg("SEP Driver:--------> sep_create_sync_dma_tables_handler start\n");
-
-       error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_sync_table_t));
-       if (error) {
-               error = -EFAULT;
-               goto end_function;
-       }
-
-       edbg("app_in_address is %08lx\n", command_args.app_in_address);
-       edbg("app_out_address is %08lx\n", command_args.app_out_address);
-       edbg("data_size is %lu\n", command_args.data_in_size);
-       edbg("block_size is %lu\n", command_args.block_size);
-
-       /* check if we need to build only input table or input/output */
-       if (command_args.app_out_address)
-               /* prepare input and output tables */
-               error = sep_prepare_input_output_dma_table(sep,
-                                                          command_args.app_in_address,
-                                                          command_args.app_out_address,
-                                                          command_args.data_in_size,
-                                                          command_args.block_size,
-                                                          &command_args.in_table_address,
-                                                          &command_args.out_table_address, &command_args.in_table_num_entries, &command_args.out_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
-       else
-               /* prepare input tables */
-               error = sep_prepare_input_dma_table(sep,
-                                                   command_args.app_in_address,
-                                                   command_args.data_in_size, command_args.block_size, &command_args.in_table_address, &command_args.in_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
-
-       if (error)
-               goto end_function;
-       /* copy to user */
-       if (copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_build_sync_table_t)))
-               error = -EFAULT;
-end_function:
-       dbg("SEP Driver:<-------- sep_create_sync_dma_tables_handler end\n");
-       return error;
-}
-
-/*
-  this function handles the request for freeing the DMA tables for synchronous actions
-*/
-static int sep_free_dma_table_data_handler(struct sep_device *sep)
-{
-       dbg("SEP Driver:--------> sep_free_dma_table_data_handler start\n");
-
-       /* free input pages array */
-       sep_free_dma_pages(sep->in_page_array, sep->in_num_pages, 0);
-
-       /* free output pages array if needed */
-       if (sep->out_page_array)
-               sep_free_dma_pages(sep->out_page_array, sep->out_num_pages, 1);
-
-       /* reset all the values */
-       sep->in_page_array = NULL;
-       sep->out_page_array = NULL;
-       sep->in_num_pages = 0;
-       sep->out_num_pages = 0;
-       dbg("SEP Driver:<-------- sep_free_dma_table_data_handler end\n");
-       return 0;
-}
-
-/*
-  this function finds a space for the new flow dma table
-*/
-static int sep_find_free_flow_dma_table_space(struct sep_device *sep,
-                                       unsigned long **table_address_ptr)
-{
-       int error = 0;
-       /* pointer to the id field of the flow dma table */
-       unsigned long *start_table_ptr;
-       /* Do not make start_addr unsigned long * unless fixing the offset
-          computations ! */
-       void *flow_dma_area_start_addr;
-       unsigned long *flow_dma_area_end_addr;
-       /* maximum table size in words */
-       unsigned long table_size_in_words;
-
-       /* find the start address of the flow DMA table area */
-       flow_dma_area_start_addr = sep->shared_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES;
-
-       /* set end address of the flow table area */
-       flow_dma_area_end_addr = flow_dma_area_start_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES;
-
-       /* set table size in words */
-       table_size_in_words = SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE * (sizeof(struct sep_lli_entry_t) / sizeof(long)) + 2;
-
-       /* set the pointer to the start address of DMA area */
-       start_table_ptr = flow_dma_area_start_addr;
-
-       /* find the space for the next table */
-       while (((*start_table_ptr & 0x7FFFFFFF) != 0) && start_table_ptr < flow_dma_area_end_addr)
-               start_table_ptr += table_size_in_words;
-
-       /* check if we reached the end of the flow tables area */
-       if (start_table_ptr >= flow_dma_area_end_addr)
-               error = -1;
-       else
-               *table_address_ptr = start_table_ptr;
-
-       return error;
-}
-
-/*
-  This function creates one DMA table for flow and returns its data,
-  and pointer to its info entry
-*/
-static int sep_prepare_one_flow_dma_table(struct sep_device *sep,
-                                       unsigned long virt_buff_addr,
-                                       unsigned long virt_buff_size,
-                                       struct sep_lli_entry_t *table_data,
-                                       struct sep_lli_entry_t **info_entry_ptr,
-                                       struct sep_flow_context_t *flow_data_ptr,
-                                       bool isKernelVirtualAddress)
-{
-       int error;
-       /* the range in pages */
-       unsigned long lli_array_size;
-       struct sep_lli_entry_t *lli_array;
-       struct sep_lli_entry_t *flow_dma_table_entry_ptr;
-       unsigned long *start_dma_table_ptr;
-       /* total table data counter */
-       unsigned long dma_table_data_count;
-       /* pointer that will keep the pointer to the pages of the virtual buffer */
-       struct page **page_array_ptr;
-       unsigned long entry_count;
-
-       /* find the space for the new table */
-       error = sep_find_free_flow_dma_table_space(sep, &start_dma_table_ptr);
-       if (error)
-               goto end_function;
-
-       /* check if the pages are in Kernel Virtual Address layout */
-       if (isKernelVirtualAddress == true)
-               /* lock kernel buffer in the memory */
-               error = sep_lock_kernel_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
-       else
-               /* lock user buffer in the memory */
-               error = sep_lock_user_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
-
-       if (error)
-               goto end_function;
-
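-       /*
-        * Flow DMA table layout: word 0 holds the number of pages, word 1
-        * holds the pointer to the page array, the LLI entries follow and
-        * the last entry is the info entry that links to the next table.
-        */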
-       /* store the number of pages at the beginning of the table - this table is
-          now considered taken */
-       *start_dma_table_ptr = lli_array_size;
-
-       /* point to the place of the pages pointers of the table */
-       start_dma_table_ptr++;
-
-       /* set the pages pointer */
-       *start_dma_table_ptr = (unsigned long) page_array_ptr;
-
-       /* set the pointer to the first entry */
-       flow_dma_table_entry_ptr = (struct sep_lli_entry_t *) (++start_dma_table_ptr);
-
-       /* now create the entries for table */
-       for (dma_table_data_count = entry_count = 0; entry_count < lli_array_size; entry_count++) {
-               flow_dma_table_entry_ptr->physical_address = lli_array[entry_count].physical_address;
-
-               flow_dma_table_entry_ptr->block_size = lli_array[entry_count].block_size;
-
-               /* set the total data of a table */
-               dma_table_data_count += lli_array[entry_count].block_size;
-
-               flow_dma_table_entry_ptr++;
-       }
-
-       /* set the physical address */
-       table_data->physical_address = virt_to_phys(start_dma_table_ptr);
-
-       /* set the num_entries and total data size */
-       table_data->block_size = ((lli_array_size + 1) << SEP_NUM_ENTRIES_OFFSET_IN_BITS) | (dma_table_data_count);
-
-       /* set the info entry */
-       flow_dma_table_entry_ptr->physical_address = 0xffffffff;
-       flow_dma_table_entry_ptr->block_size = 0;
-
-       /* set the pointer to info entry */
-       *info_entry_ptr = flow_dma_table_entry_ptr;
-
-       /* the array of the lli entries */
-       kfree(lli_array);
-end_function:
-       return error;
-}
-
-
-
-/*
-  This function creates a list of tables for flow and returns the data for
-       the first and last tables of the list
-*/
-static int sep_prepare_flow_dma_tables(struct sep_device *sep,
-                                       unsigned long num_virtual_buffers,
-                                       unsigned long first_buff_addr, struct sep_flow_context_t *flow_data_ptr, struct sep_lli_entry_t *first_table_data_ptr, struct sep_lli_entry_t *last_table_data_ptr, bool isKernelVirtualAddress)
-{
-       int error;
-       unsigned long virt_buff_addr;
-       unsigned long virt_buff_size;
-       struct sep_lli_entry_t table_data;
-       struct sep_lli_entry_t *info_entry_ptr;
-       struct sep_lli_entry_t *prev_info_entry_ptr;
-       unsigned long i;
-
-       /* init vars */
-       error = 0;
-       prev_info_entry_ptr = NULL;
-
-       /* init the first table to default */
-       table_data.physical_address = 0xffffffff;
-       first_table_data_ptr->physical_address = 0xffffffff;
-       table_data.block_size = 0;
-
-       for (i = 0; i < num_virtual_buffers; i++) {
-               /* get the virtual buffer address */
-               error = get_user(virt_buff_addr, &first_buff_addr);
-               if (error)
-                       goto end_function;
-
-               /* get the virtual buffer size */
-               first_buff_addr++;
-               error = get_user(virt_buff_size, &first_buff_addr);
-               if (error)
-                       goto end_function;
-
-               /* advance the address to point to the next pair of address|size */
-               first_buff_addr++;
-
-               /* now prepare the one flow LLI table from the data */
-               error = sep_prepare_one_flow_dma_table(sep, virt_buff_addr, virt_buff_size, &table_data, &info_entry_ptr, flow_data_ptr, isKernelVirtualAddress);
-               if (error)
-                       goto end_function;
-
-               if (i == 0) {
-                       /* if this is the first table - save it to return to the user
-                          application */
-                       *first_table_data_ptr = table_data;
-
-                       /* set the pointer to info entry */
-                       prev_info_entry_ptr = info_entry_ptr;
-               } else {
-                       /* not first table - the previous table info entry should
-                          be updated */
-                       prev_info_entry_ptr->block_size = (0x1 << SEP_INT_FLAG_OFFSET_IN_BITS) | (table_data.block_size);
-
-                       /* set the pointer to info entry */
-                       prev_info_entry_ptr = info_entry_ptr;
-               }
-       }
-
-       /* set the last table data */
-       *last_table_data_ptr = table_data;
-end_function:
-       return error;
-}
-
-/*
-  this function goes over all the flow tables connected to the given
-       table and deallocates them
-*/
-static void sep_deallocated_flow_tables(struct sep_lli_entry_t *first_table_ptr)
-{
-       /* id pointer */
-       unsigned long *table_ptr;
-       /* end address of the flow dma area */
-       unsigned long num_entries;
-       unsigned long num_pages;
-       struct page **pages_ptr;
-       /* maximum table size in words */
-       struct sep_lli_entry_t *info_entry_ptr;
-
-       /* set the pointer to the first table */
-       table_ptr = (unsigned long *) first_table_ptr->physical_address;
-
-       /* set the num of entries */
-       num_entries = (first_table_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS)
-           & SEP_NUM_ENTRIES_MASK;
-
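-       /*
-        * table_ptr points at the first LLI entry of a flow table; the page
-        * count and the page array pointer are stored in the two words just
-        * before it, hence the (table_ptr - 2) and (table_ptr - 1) reads
-        * below.
-        */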
-       /* go over all the connected tables */
-       while (*table_ptr != 0xffffffff) {
-               /* get number of pages */
-               num_pages = *(table_ptr - 2);
-
-               /* get the pointer to the pages */
-               pages_ptr = (struct page **) (*(table_ptr - 1));
-
-               /* free the pages */
-               sep_free_dma_pages(pages_ptr, num_pages, 1);
-
-               /* go to the info entry */
-               info_entry_ptr = ((struct sep_lli_entry_t *) table_ptr) + (num_entries - 1);
-
-               table_ptr = (unsigned long *) info_entry_ptr->physical_address;
-               num_entries = (info_entry_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
-       }
-
-       return;
-}
-
-/**
- *     sep_find_flow_context   -       find a flow
- *     @sep: the SEP we are working with
- *     @flow_id: flow identifier
- *
- *     Returns a pointer to the matching flow, or NULL if the flow does not
- *     exist.
- */
-
-static struct sep_flow_context_t *sep_find_flow_context(struct sep_device *sep,
-                               unsigned long flow_id)
-{
-       int count;
-       /*
-        *  the default (free) flow id is searched for like any other id;
-        *  once we have started working on a flow there can never be two
-        *  flows carrying the default flag at the same time
-        */
-       for (count = 0; count < SEP_DRIVER_NUM_FLOWS; count++) {
-               if (sep->flows[count].flow_id == flow_id)
-                       return &sep->flows[count];
-       }
-       return NULL;
-}
-
-
-/*
-  this function handles the request to create the DMA tables for flow
-*/
-static int sep_create_flow_dma_tables_handler(struct sep_device *sep,
-                                                       unsigned long arg)
-{
-       int error = -ENOENT;
-       struct sep_driver_build_flow_table_t command_args;
-       /* first table - output */
-       struct sep_lli_entry_t first_table_data;
-       /* dma table data */
-       struct sep_lli_entry_t last_table_data;
-       /* pointer to the info entry of the previous DMA table */
-       struct sep_lli_entry_t *prev_info_entry_ptr;
-       /* pointer to the flow data structure */
-       struct sep_flow_context_t *flow_context_ptr;
-
-       dbg("SEP Driver:--------> sep_create_flow_dma_tables_handler start\n");
-
-       /* init variables */
-       prev_info_entry_ptr = NULL;
-       first_table_data.physical_address = 0xffffffff;
-
-       /* find the free structure for flow data */
-       error = -EINVAL;
-       flow_context_ptr = sep_find_flow_context(sep, SEP_FREE_FLOW_ID);
-       if (flow_context_ptr == NULL)
-               goto end_function;
-
-       error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_flow_table_t));
-       if (error) {
-               error = -EFAULT;
-               goto end_function;
-       }
-
-       /* create flow tables */
-       error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
-       if (error)
-               goto end_function_with_error;
-
-       /* check if flow is static */
-       if (!command_args.flow_type)
-               /* point the info entry of the last to the info entry of the first */
-               last_table_data = first_table_data;
-
-       /* set output params */
-       command_args.first_table_addr = first_table_data.physical_address;
-       command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
-       command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
-
-       /* send the parameters to user application */
-       error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_build_flow_table_t));
-       if (error) {
-               error = -EFAULT;
-               goto end_function_with_error;
-       }
-
-       /* all the flow created  - update the flow entry with temp id */
-       flow_context_ptr->flow_id = SEP_TEMP_FLOW_ID;
-
-       /* set the processing tables data in the context */
-       if (command_args.input_output_flag == SEP_DRIVER_IN_FLAG)
-               flow_context_ptr->input_tables_in_process = first_table_data;
-       else
-               flow_context_ptr->output_tables_in_process = first_table_data;
-
-       goto end_function;
-
-end_function_with_error:
-       /* free the allocated tables */
-       sep_deallocated_flow_tables(&first_table_data);
-end_function:
-       dbg("SEP Driver:<-------- sep_create_flow_dma_tables_handler end\n");
-       return error;
-}
-
-/*
-  this function handles add tables to flow
-*/
-static int sep_add_flow_tables_handler(struct sep_device *sep, unsigned long arg)
-{
-       int error;
-       unsigned long num_entries;
-       struct sep_driver_add_flow_table_t command_args;
-       struct sep_flow_context_t *flow_context_ptr;
-       /* first dma table data */
-       struct sep_lli_entry_t first_table_data;
-       /* last dma table data */
-       struct sep_lli_entry_t last_table_data;
-       /* pointer to the info entry of the current DMA table */
-       struct sep_lli_entry_t *info_entry_ptr;
-
-       dbg("SEP Driver:--------> sep_add_flow_tables_handler start\n");
-
-       /* get input parameters */
-       error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_flow_table_t));
-       if (error) {
-               error = -EFAULT;
-               goto end_function;
-       }
-
-       /* find the flow structure for the flow id */
-       flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
-       if (flow_context_ptr == NULL) {
-               error = -EINVAL;
-               goto end_function;
-       }
-
-       /* prepare the flow dma tables */
-       error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
-       if (error)
-               goto end_function_with_error;
-
-       /* now check if there is already an existing add table for this flow */
-       if (command_args.inputOutputFlag == SEP_DRIVER_IN_FLAG) {
-               /* this buffer was for input buffers */
-               if (flow_context_ptr->input_tables_flag) {
-                       /* add table already exists - add the new tables to the end
-                          of the previous */
-                       num_entries = (flow_context_ptr->last_input_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
-
-                       info_entry_ptr = (struct sep_lli_entry_t *)
-                           (flow_context_ptr->last_input_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
-
-                       /* connect to list of tables */
-                       *info_entry_ptr = first_table_data;
-
-                       /* set the first table data */
-                       first_table_data = flow_context_ptr->first_input_table;
-               } else {
-                       /* set the input flag */
-                       flow_context_ptr->input_tables_flag = 1;
-
-                       /* set the first table data */
-                       flow_context_ptr->first_input_table = first_table_data;
-               }
-               /* set the last table data */
-               flow_context_ptr->last_input_table = last_table_data;
-       } else {                /* this is the output tables case */
-
-               /* this buffer was for output buffers */
-               if (flow_context_ptr->output_tables_flag) {
-                       /* add table already exists - add the new tables to
-                          the end of the previous */
-                       num_entries = (flow_context_ptr->last_output_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
-
-                       info_entry_ptr = (struct sep_lli_entry_t *)
-                           (flow_context_ptr->last_output_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
-
-                       /* connect to list of tables */
-                       *info_entry_ptr = first_table_data;
-
-                       /* set the first table data */
-                       first_table_data = flow_context_ptr->first_output_table;
-               } else {
-                       /* set the output flag */
-                       flow_context_ptr->output_tables_flag = 1;
-
-                       /* set the first table data */
-                       flow_context_ptr->first_output_table = first_table_data;
-               }
-               /* set the last table data */
-               flow_context_ptr->last_output_table = last_table_data;
-       }
-
-       /* set output params */
-       command_args.first_table_addr = first_table_data.physical_address;
-       command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
-       command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
-
-       /* send the parameters to user application */
-       error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_add_flow_table_t));
-       if (error)
-               error = -EFAULT;
-end_function_with_error:
-       /* free the allocated tables */
-       sep_deallocated_flow_tables(&first_table_data);
-end_function:
-       dbg("SEP Driver:<-------- sep_add_flow_tables_handler end\n");
-       return error;
-}
-
-/*
-  this function adds the flow add message to the specific flow
-*/
-static int sep_add_flow_tables_message_handler(struct sep_device *sep, unsigned long arg)
-{
-       int error;
-       struct sep_driver_add_message_t command_args;
-       struct sep_flow_context_t *flow_context_ptr;
-
-       dbg("SEP Driver:--------> sep_add_flow_tables_message_handler start\n");
-
-       error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_message_t));
-       if (error) {
-               error = -EFAULT;
-               goto end_function;
-       }
-
-       /* check input */
-       if (command_args.message_size_in_bytes > SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES) {
-               error = -ENOMEM;
-               goto end_function;
-       }
-
-       /* find the flow context */
-       flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
-       if (flow_context_ptr == NULL) {
-               error = -EINVAL;
-               goto end_function;
-       }
-
-       /* copy the message into context */
-       flow_context_ptr->message_size_in_bytes = command_args.message_size_in_bytes;
-       error = copy_from_user(flow_context_ptr->message, (void *) command_args.message_address, command_args.message_size_in_bytes);
-       if (error)
-               error = -EFAULT;
-end_function:
-       dbg("SEP Driver:<-------- sep_add_flow_tables_message_handler end\n");
-       return error;
-}
-
-
-/*
-  this function returns the bus and virtual addresses of the static pool
-*/
-static int sep_get_static_pool_addr_handler(struct sep_device *sep, unsigned long arg)
-{
-       int error;
-       struct sep_driver_static_pool_addr_t command_args;
-
-       dbg("SEP Driver:--------> sep_get_static_pool_addr_handler start\n");
-
-       /*prepare the output parameters in the struct */
-       command_args.physical_static_address = sep->shared_bus + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
-       command_args.virtual_static_address = (unsigned long)sep->shared_addr + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
-
-       edbg("SEP Driver:bus_static_address is %08lx, virtual_static_address %08lx\n", command_args.physical_static_address, command_args.virtual_static_address);
-
-       /* send the parameters to user application */
-       error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_static_pool_addr_t));
-       if (error)
-               error = -EFAULT;
-       dbg("SEP Driver:<-------- sep_get_static_pool_addr_handler end\n");
-       return error;
-}
-
-/*
-  this function gets the offset of the physical address from the start
-  of the mapped area
-*/
-static int sep_get_physical_mapped_offset_handler(struct sep_device *sep, unsigned long arg)
-{
-       int error;
-       struct sep_driver_get_mapped_offset_t command_args;
-
-       dbg("SEP Driver:--------> sep_get_physical_mapped_offset_handler start\n");
-
-       error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_get_mapped_offset_t));
-       if (error) {
-               error = -EFAULT;
-               goto end_function;
-       }
-
-       if (command_args.physical_address < sep->shared_bus) {
-               error = -EINVAL;
-               goto end_function;
-       }
-
-       /*prepare the output parameters in the struct */
-       command_args.offset = command_args.physical_address - sep->shared_bus;
-
-       edbg("SEP Driver:bus_address is %08lx, offset is %lu\n", command_args.physical_address, command_args.offset);
-
-       /* send the parameters to user application */
-       error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_get_mapped_offset_t));
-       if (error)
-               error = -EFAULT;
-end_function:
-       dbg("SEP Driver:<-------- sep_get_physical_mapped_offset_handler end\n");
-       return error;
-}
-
-
-/*
-  this function handles the SEP start command - it polls GPR3 for a
-  message from SEP and, on a fatal error indication, returns the error
-  status read from GPR0
-*/
-static int sep_start_handler(struct sep_device *sep)
-{
-       unsigned long reg_val;
-       unsigned long error = 0;
-
-       dbg("SEP Driver:--------> sep_start_handler start\n");
-
-       /* wait in polling for message from SEP */
-       do
-               reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
-       while (!reg_val);
-
-       /* check the value */
-       if (reg_val == 0x1)
-               /* fatal error - read error status from GPR0 */
-               error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
-       dbg("SEP Driver:<-------- sep_start_handler end\n");
-       return error;
-}
-
-/*
-  this function handles the request for SEP initialization
-*/
-static int sep_init_handler(struct sep_device *sep, unsigned long arg)
-{
-       unsigned long message_word;
-       unsigned long *message_ptr;
-       struct sep_driver_init_t command_args;
-       unsigned long counter;
-       unsigned long error;
-       unsigned long reg_val;
-
-       dbg("SEP Driver:--------> sep_init_handler start\n");
-       error = 0;
-
-       error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_init_t));
-       if (error) {
-               error = -EFAULT;
-               goto end_function;
-       }
-       dbg("SEP Driver:--------> sep_init_handler - finished copy_from_user\n");
-
-       /* PATCH - configure the DMA to single-burst instead of multi-burst */
-       /*sep_configure_dma_burst(); */
-
-       dbg("SEP Driver:--------> sep_init_handler - finished sep_configure_dma_burst \n");
-
-       message_ptr = (unsigned long *) command_args.message_addr;
-
-       /* set the base address of the SRAM  */
-       sep_write_reg(sep, HW_SRAM_ADDR_REG_ADDR, HW_CC_SRAM_BASE_ADDRESS);
-
-       for (counter = 0; counter < command_args.message_size_in_words; counter++, message_ptr++) {
-               get_user(message_word, message_ptr);
-               /* write data to SRAM */
-               sep_write_reg(sep, HW_SRAM_DATA_REG_ADDR, message_word);
-               edbg("SEP Driver:message_word is %lu\n", message_word);
-               /* wait for write complete */
-               sep_wait_sram_write(sep);
-       }
-       dbg("SEP Driver:--------> sep_init_handler - finished getting messages from user space\n");
-       /* signal SEP */
-       sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x1);
-
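-       /* poll until SEP raises any GPR3 bit other than bit 1 */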
-       do
-               reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
-       while (!(reg_val & 0xFFFFFFFD));
-
-       dbg("SEP Driver:--------> sep_init_handler - finished waiting for reg_val & 0xFFFFFFFD \n");
-
-       /* check the value */
-       if (reg_val == 0x1) {
-               edbg("SEP Driver:init failed\n");
-
-               error = sep_read_reg(sep, 0x8060);
-               edbg("SEP Driver:sw monitor is %lu\n", error);
-
-               /* fatal error - read error status from GPR0 */
-               error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
-               edbg("SEP Driver:error is %lu\n", error);
-       }
-end_function:
-       dbg("SEP Driver:<-------- sep_init_handler end\n");
-       return error;
-
-}
-
-/*
-  this function handles the request for cache and resident reallocation
-*/
-static int sep_realloc_cache_resident_handler(struct sep_device *sep,
-                                               unsigned long arg)
-{
-       struct sep_driver_realloc_cache_resident_t command_args;
-       int error;
-
-       /* copy cache and resident to their intended locations */
-       error = sep_load_firmware(sep);
-       if (error)
-               return error;
-
-       command_args.new_base_addr = sep->shared_bus;
-
-       /* find the new base address according to the lowest address between
-          cache, resident and shared area */
-       if (sep->resident_bus < command_args.new_base_addr)
-               command_args.new_base_addr = sep->resident_bus;
-       if (sep->rar_bus < command_args.new_base_addr)
-               command_args.new_base_addr = sep->rar_bus;
-
-       /* set the return parameters */
-       command_args.new_cache_addr = sep->rar_bus;
-       command_args.new_resident_addr = sep->resident_bus;
-
-       /* set the new shared area */
-       command_args.new_shared_area_addr = sep->shared_bus;
-
-       edbg("SEP Driver:command_args.new_shared_addr is %08llx\n", command_args.new_shared_area_addr);
-       edbg("SEP Driver:command_args.new_base_addr is %08llx\n", command_args.new_base_addr);
-       edbg("SEP Driver:command_args.new_resident_addr is %08llx\n", command_args.new_resident_addr);
-       edbg("SEP Driver:command_args.new_rar_addr is %08llx\n", command_args.new_cache_addr);
-
-       /* return to user */
-       if (copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_realloc_cache_resident_t)))
-               return -EFAULT;
-       return 0;
-}
-
-/**
- *     sep_get_time_handler    -       time request from user space
- *     @sep: sep we are to set the time for
- *     @arg: pointer to user space arg buffer
- *
- *     This function reports back the time and the address in the SEP
- *     shared buffer at which it has been placed. (Do we really need this!!!)
- */
-
-static int sep_get_time_handler(struct sep_device *sep, unsigned long arg)
-{
-       struct sep_driver_get_time_t command_args;
-
-       mutex_lock(&sep_mutex);
-       command_args.time_value = sep_set_time(sep);
-       command_args.time_physical_address = (unsigned long)sep_time_address(sep);
-       mutex_unlock(&sep_mutex);
-       if (copy_to_user((void __user *)arg,
-                       &command_args, sizeof(struct sep_driver_get_time_t)))
-                       return -EFAULT;
-       return 0;
-
-}
-
-/*
-  This API handles the end transaction request
-*/
-static int sep_end_transaction_handler(struct sep_device *sep, unsigned long arg)
-{
-       dbg("SEP Driver:--------> sep_end_transaction_handler start\n");
-
-#if 0                          /*!SEP_DRIVER_POLLING_MODE */
-       /* close IMR */
-       sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
-
-       /* release IRQ line */
-       free_irq(SEP_DIRVER_IRQ_NUM, sep);
-
-       /* unlock the sep mutex */
-       mutex_unlock(&sep_mutex);
-#endif
-
-       dbg("SEP Driver:<-------- sep_end_transaction_handler end\n");
-
-       return 0;
-}
-
-
-/**
- *     sep_set_flow_id_handler -       handle flow setting
- *     @sep: the SEP we are configuring
- *     @flow_id: the flow we are setting
- *
- * This function handles the set flow id command
- */
-static int sep_set_flow_id_handler(struct sep_device *sep,
-                                               unsigned long flow_id)
-{
-       int error = 0;
-       struct sep_flow_context_t *flow_data_ptr;
-
-       /* find the flow data structure that was just used for creating new flow
-          - its id should be default */
-
-       mutex_lock(&sep_mutex);
-       flow_data_ptr = sep_find_flow_context(sep, SEP_TEMP_FLOW_ID);
-       if (flow_data_ptr)
-               flow_data_ptr->flow_id = flow_id;       /* set flow id */
-       else
-               error = -EINVAL;
-       mutex_unlock(&sep_mutex);
-       return error;
-}
-
-static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
-       int error = 0;
-       struct sep_device *sep = filp->private_data;
-
-       dbg("------------>SEP Driver: ioctl start\n");
-
-       edbg("SEP Driver: cmd is %x\n", cmd);
-
-       switch (cmd) {
-       case SEP_IOCSENDSEPCOMMAND:
-               /* send command to SEP */
-               sep_send_command_handler(sep);
-               edbg("SEP Driver: after sep_send_command_handler\n");
-               break;
-       case SEP_IOCSENDSEPRPLYCOMMAND:
-               /* send reply command to SEP */
-               sep_send_reply_command_handler(sep);
-               break;
-       case SEP_IOCALLOCDATAPOLL:
-               /* allocate data pool */
-               error = sep_allocate_data_pool_memory_handler(sep, arg);
-               break;
-       case SEP_IOCWRITEDATAPOLL:
-               /* write data into memory pool */
-               error = sep_write_into_data_pool_handler(sep, arg);
-               break;
-       case SEP_IOCREADDATAPOLL:
-               /* read data from data pool into application memory */
-               error = sep_read_from_data_pool_handler(sep, arg);
-               break;
-       case SEP_IOCCREATESYMDMATABLE:
-               /* create dma table for synchronous operation */
-               error = sep_create_sync_dma_tables_handler(sep, arg);
-               break;
-       case SEP_IOCCREATEFLOWDMATABLE:
-               /* create flow dma tables */
-               error = sep_create_flow_dma_tables_handler(sep, arg);
-               break;
-       case SEP_IOCFREEDMATABLEDATA:
-               /* free the pages */
-               error = sep_free_dma_table_data_handler(sep);
-               break;
-       case SEP_IOCSETFLOWID:
-               /* set flow id */
-               error = sep_set_flow_id_handler(sep, (unsigned long)arg);
-               break;
-       case SEP_IOCADDFLOWTABLE:
-               /* add tables to the dynamic flow */
-               error = sep_add_flow_tables_handler(sep, arg);
-               break;
-       case SEP_IOCADDFLOWMESSAGE:
-               /* add message of add tables to flow */
-               error = sep_add_flow_tables_message_handler(sep, arg);
-               break;
-       case SEP_IOCSEPSTART:
-               /* start command to sep */
-               error = sep_start_handler(sep);
-               break;
-       case SEP_IOCSEPINIT:
-               /* init command to sep */
-               error = sep_init_handler(sep, arg);
-               break;
-       case SEP_IOCGETSTATICPOOLADDR:
-               /* get the physical and virtual addresses of the static pool */
-               error = sep_get_static_pool_addr_handler(sep, arg);
-               break;
-       case SEP_IOCENDTRANSACTION:
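-               /* end the current transaction */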
-               error = sep_end_transaction_handler(sep, arg);
-               break;
-       case SEP_IOCREALLOCCACHERES:
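-               /* reallocate the cache and resident areas */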
-               error = sep_realloc_cache_resident_handler(sep, arg);
-               break;
-       case SEP_IOCGETMAPPEDADDROFFSET:
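-               /* get the offset of a bus address from the start of the mapped area */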
-               error = sep_get_physical_mapped_offset_handler(sep, arg);
-               break;
-       case SEP_IOCGETIME:
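-               /* get the time value and its address in the shared area */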
-               error = sep_get_time_handler(sep, arg);
-               break;
-       default:
-               error = -ENOTTY;
-               break;
-       }
-       dbg("SEP Driver:<-------- ioctl end\n");
-       return error;
-}
-
-
-
-#if !SEP_DRIVER_POLLING_MODE
-
-/* handler for flow done interrupt */
-
-static void sep_flow_done_handler(struct work_struct *work)
-{
-       struct sep_flow_context_t *flow_data_ptr;
-
-       /* obtain the mutex */
-       mutex_lock(&sep_mutex);
-
-       /* get the pointer to context */
-       flow_data_ptr = (struct sep_flow_context_t *) work;
-
-       /* free all the current input tables in sep */
-       sep_deallocated_flow_tables(&flow_data_ptr->input_tables_in_process);
-
-       /* free all the current output tables in SEP (if needed) */
-       if (flow_data_ptr->output_tables_in_process.physical_address != 0xffffffff)
-               sep_deallocated_flow_tables(&flow_data_ptr->output_tables_in_process);
-
-       /* check if we have additional tables to be sent to SEP; only the
-          input flag needs to be checked */
-       if (flow_data_ptr->input_tables_flag) {
-               /* copy the message to the shared RAM and signal SEP */
-               memcpy((void *) flow_data_ptr->message, (void *) sep->shared_addr, flow_data_ptr->message_size_in_bytes);
-
-               sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, 0x2);
-       }
-       mutex_unlock(&sep_mutex);
-}
-/*
-  interrupt handler function
-*/
-static irqreturn_t sep_inthandler(int irq, void *dev_id)
-{
-       irqreturn_t int_error;
-       unsigned long reg_val;
-       unsigned long flow_id;
-       struct sep_flow_context_t *flow_context_ptr;
-       struct sep_device *sep = dev_id;
-
-       int_error = IRQ_HANDLED;
-
-       /* read the IRR register to check if this is SEP interrupt */
-       reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
-       edbg("SEP Interrupt - reg is %08lx\n", reg_val);
-
-       /* check if this is the flow interrupt */
-       if (0 /*reg_val & (0x1 << 11) */ ) {
-               /* read GPR0 to find out which flow is done */
-               flow_id = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
-
-               /* find the context of the flow */
-               flow_context_ptr = sep_find_flow_context(sep, flow_id >> 28);
-               if (flow_context_ptr == NULL)
-                       goto end_function_with_error;
-
-               /* queue the work */
-               INIT_WORK(&flow_context_ptr->flow_wq, sep_flow_done_handler);
-               queue_work(sep->flow_wq, &flow_context_ptr->flow_wq);
-
-       } else {
-               /* check if this is reply interrupt from SEP */
-               if (reg_val & (0x1 << 13)) {
-                       /* update the counter of reply messages */
-                       sep->reply_ct++;
-                       /* wake up the waiting process */
-                       wake_up(&sep_event);
-               } else {
-                       int_error = IRQ_NONE;
-                       goto end_function;
-               }
-       }
-end_function_with_error:
-       /* clear the interrupt */
-       sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
-end_function:
-       return int_error;
-}
-
-#endif
-
-
-
-#if 0
-
-static void sep_wait_busy(struct sep_device *sep)
-{
-       u32 reg;
-
-       do {
-               reg = sep_read_reg(sep, HW_HOST_SEP_BUSY_REG_ADDR);
-       } while (reg);
-}
-
-/*
-  PATCH for configuring the DMA to single burst instead of multi-burst
-*/
-static void sep_configure_dma_burst(struct sep_device *sep)
-{
-#define         HW_AHB_RD_WR_BURSTS_REG_ADDR            0x0E10UL
-
-       dbg("SEP Driver:<-------- sep_configure_dma_burst start \n");
-
-       /* request access to registers from SEP */
-       sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
-
-       dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (write reg)  \n");
-
-       sep_wait_busy(sep);
-
-       dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (while(revVal) wait loop)  \n");
-
-       /* set the DMA burst register to single burst */
-       sep_write_reg(sep, HW_AHB_RD_WR_BURSTS_REG_ADDR, 0x0UL);
-
-       /* release the sep busy */
-       sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x0UL);
-       sep_wait_busy(sep);
-
-       dbg("SEP Driver:<-------- sep_configure_dma_burst done  \n");
-
-}
-
-#endif
-
-/*
-  Function that is called when the SEP PCI device is probed
-*/
-static int __devinit sep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
-       int error = 0;
-       struct sep_device *sep;
-       int counter;
-       int size;               /* size of memory for allocation */
-
-       edbg("Sep pci probe starting\n");
-       if (sep_dev != NULL) {
-               dev_warn(&pdev->dev, "only one SEP supported.\n");
-               return -EBUSY;
-       }
-
-       /* enable the device */
-       error = pci_enable_device(pdev);
-       if (error) {
-               edbg("error enabling pci device\n");
-               goto end_function;
-       }
-
-       /* set the pci dev pointer */
-       sep_dev = &sep_instance;
-       sep = &sep_instance;
-
-       edbg("sep->shared_addr = %p\n", sep->shared_addr);
-       /* transaction counter that coordinates the transactions between SEP
-       and HOST */
-       sep->send_ct = 0;
-       /* counter for the messages from sep */
-       sep->reply_ct = 0;
-       /* counter for the number of bytes allocated in the pool
-       for the current transaction */
-       sep->data_pool_bytes_allocated = 0;
-
-       /* calculate the total size for allocation */
-       size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
-           SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
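-       /* the shared area aggregates the message, synchronous DMA tables,
-          data pool, flow DMA tables, static and system data regions */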
-
-       /* allocate the shared area */
-       if (sep_map_and_alloc_shared_area(sep, size)) {
-               error = -ENOMEM;
-               /* allocation failed */
-               goto end_function_error;
-       }
-       /* now set the memory regions */
-#if (SEP_DRIVER_RECONFIG_MESSAGE_AREA == 1)
-       /* Note: this test section will need moving before it could ever
-          work as the registers are not yet mapped ! */
-       /* send the new SHARED MESSAGE AREA to the SEP */
-       sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
-
-       /* poll for SEP response */
-       retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
-       while (retval != 0xffffffff && retval != sep->shared_bus)
-               retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
-
-       /* check the return value (register) */
-       if (retval != sep->shared_bus) {
-               error = -ENOMEM;
-               goto end_function_deallocate_sep_shared_area;
-       }
-#endif
-       /* init the flow contexts */
-       for (counter = 0; counter < SEP_DRIVER_NUM_FLOWS; counter++)
-               sep->flows[counter].flow_id = SEP_FREE_FLOW_ID;
-
-       sep->flow_wq = create_singlethread_workqueue("sepflowwq");
-       if (sep->flow_wq == NULL) {
-               error = -ENOMEM;
-               edbg("sep_driver:flow queue creation failed\n");
-               goto end_function_deallocate_sep_shared_area;
-       }
-       edbg("SEP Driver: create flow workqueue \n");
-       sep->pdev = pci_dev_get(pdev);
-
-       sep->reg_addr = pci_ioremap_bar(pdev, 0);
-       if (!sep->reg_addr) {
-               edbg("sep: ioremap of registers failed.\n");
-               goto end_function_deallocate_sep_shared_area;
-       }
-       edbg("SEP Driver:reg_addr is %p\n", sep->reg_addr);
-
-       /* load the rom code */
-       sep_load_rom_code(sep);
-
-       /* set up system base address and shared memory location */
-       sep->rar_addr = dma_alloc_coherent(&sep->pdev->dev,
-                       2 * SEP_RAR_IO_MEM_REGION_SIZE,
-                       &sep->rar_bus, GFP_KERNEL);
-
-       if (!sep->rar_addr) {
-               edbg("SEP Driver:can't allocate rar\n");
-               goto end_function_uniomap;
-       }
-
-
-       edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus);
-       edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
-
-#if !SEP_DRIVER_POLLING_MODE
-
-       edbg("SEP Driver: about to write IMR and ICR REG_ADDR\n");
-
-       /* clear ICR register */
-       sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
-
-       /* set the IMR register - open only GPR 2 */
-       sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
-
-       edbg("SEP Driver: about to call request_irq\n");
-       /* get the interrupt line */
-       error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED, "sep_driver", sep);
-       if (error)
-               goto end_function_free_res;
-       return 0;
-
-end_function_free_res:
-       dma_free_coherent(&sep->pdev->dev, 2 * SEP_RAR_IO_MEM_REGION_SIZE,
-                       sep->rar_addr, sep->rar_bus);
-#endif                         /* SEP_DRIVER_POLLING_MODE */
-end_function_uniomap:
-       iounmap(sep->reg_addr);
-end_function_deallocate_sep_shared_area:
-       /* de-allocate shared area */
-       sep_unmap_and_free_shared_area(sep, size);
-end_function_error:
-       sep_dev = NULL;
-end_function:
-       return error;
-}
-
-static const struct pci_device_id sep_pci_id_tbl[] = {
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080c)},
-       {0}
-};
-
-MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
-
-/* field for registering driver to PCI device */
-static struct pci_driver sep_pci_driver = {
-       .name = "sep_sec_driver",
-       .id_table = sep_pci_id_tbl,
-       .probe = sep_probe
-       /* FIXME: remove handler */
-};
-
-/* major and minor device numbers */
-static dev_t sep_devno;
-
-/* the files operations structure of the driver */
-static struct file_operations sep_file_operations = {
-       .owner = THIS_MODULE,
-       .unlocked_ioctl = sep_ioctl,
-       .poll = sep_poll,
-       .open = sep_open,
-       .release = sep_release,
-       .mmap = sep_mmap,
-};
-
-
-/* cdev struct of the driver */
-static struct cdev sep_cdev;
-
-/*
-  this function registers the driver to the file system
-*/
-static int sep_register_driver_to_fs(void)
-{
-       int ret_val = alloc_chrdev_region(&sep_devno, 0, 1, "sep_sec_driver");
-       if (ret_val) {
-               edbg("sep: major number allocation failed, retval is %d\n",
-                                                               ret_val);
-               return ret_val;
-       }
-       /* init cdev */
-       cdev_init(&sep_cdev, &sep_file_operations);
-       sep_cdev.owner = THIS_MODULE;
-
-       /* register the driver with the kernel */
-       ret_val = cdev_add(&sep_cdev, sep_devno, 1);
-       if (ret_val) {
-               edbg("sep_driver:cdev_add failed, retval is %d\n", ret_val);
-               /* unregister dev numbers */
-               unregister_chrdev_region(sep_devno, 1);
-       }
-       return ret_val;
-}
-
-
-/*--------------------------------------------------------------
-  init function
-----------------------------------------------------------------*/
-static int __init sep_init(void)
-{
-       int ret_val = 0;
-       dbg("SEP Driver:-------->Init start\n");
-       /* FIXME: Probe can occur before we are ready to survive a probe */
-       ret_val = pci_register_driver(&sep_pci_driver);
-       if (ret_val) {
-               edbg("sep_driver: pci_register_driver failed, ret_val is %d\n", ret_val);
-               goto end_function_unregister_from_fs;
-       }
-       /* register driver to fs */
-       ret_val = sep_register_driver_to_fs();
-       if (ret_val)
-               goto end_function_unregister_pci;
-       goto end_function;
-end_function_unregister_pci:
-       pci_unregister_driver(&sep_pci_driver);
-end_function_unregister_from_fs:
-       /* unregister from fs */
-       cdev_del(&sep_cdev);
-       /* unregister dev numbers */
-       unregister_chrdev_region(sep_devno, 1);
-end_function:
-       dbg("SEP Driver:<-------- Init end\n");
-       return ret_val;
-}
-
-
-/*-------------------------------------------------------------
-  exit function
---------------------------------------------------------------*/
-static void __exit sep_exit(void)
-{
-       int size;
-
-       dbg("SEP Driver:--------> Exit start\n");
-
-       /* unregister from fs */
-       cdev_del(&sep_cdev);
-       /* unregister dev numbers */
-       unregister_chrdev_region(sep_devno, 1);
-       /* calculate the total size for de-allocation */
-       size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
-           SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
-       /* FIXME: We need to do this in the unload for the device */
-       /* free shared area  */
-       if (sep_dev) {
-               sep_unmap_and_free_shared_area(sep_dev, size);
-               edbg("SEP Driver: free pages SEP SHARED AREA \n");
-               iounmap((void *) sep_dev->reg_addr);
-               edbg("SEP Driver: iounmap \n");
-       }
-       edbg("SEP Driver: release_mem_region \n");
-       dbg("SEP Driver:<-------- Exit end\n");
-}
-
-
-module_init(sep_init);
-module_exit(sep_exit);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/sep/sep_driver_api.h b/drivers/staging/sep/sep_driver_api.h
deleted file mode 100644 (file)
index 7ef16da..0000000
+++ /dev/null
@@ -1,425 +0,0 @@
-/*
- *
- *  sep_driver_api.h - Security Processor Driver api definitions
- *
- *  Copyright(c) 2009 Intel Corporation. All rights reserved.
- *  Copyright(c) 2009 Discretix. All rights reserved.
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  This program is distributed in the hope that it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- *  more details.
- *
- *  You should have received a copy of the GNU General Public License along with
- *  this program; if not, write to the Free Software Foundation, Inc., 59
- *  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
- *
- *  CONTACTS:
- *
- *  Mark Allyn         mark.a.allyn@intel.com
- *
- *  CHANGES:
- *
- *  2009.06.26 Initial publish
- *
- */
-
-#ifndef __SEP_DRIVER_API_H__
-#define __SEP_DRIVER_API_H__
-
-
-
-/*----------------------------------------------------------------
-  IOCTL command defines
-  -----------------------------------------------------------------*/
-
-/* magic number 1 of the sep IOCTL command */
-#define SEP_IOC_MAGIC_NUMBER                           's'
-
-/* sends interrupt to sep that message is ready */
-#define SEP_IOCSENDSEPCOMMAND                 _IO(SEP_IOC_MAGIC_NUMBER , 0)
-
-/* sends interrupt to sep that the reply message is ready */
-#define SEP_IOCSENDSEPRPLYCOMMAND             _IO(SEP_IOC_MAGIC_NUMBER , 1)
-
-/* allocate memory in data pool */
-#define SEP_IOCALLOCDATAPOLL                  _IO(SEP_IOC_MAGIC_NUMBER , 2)
-
-/* write to pre-allocated  memory in data pool */
-#define SEP_IOCWRITEDATAPOLL                  _IO(SEP_IOC_MAGIC_NUMBER , 3)
-
-/* read from  pre-allocated  memory in data pool */
-#define SEP_IOCREADDATAPOLL                   _IO(SEP_IOC_MAGIC_NUMBER , 4)
-
-/* create sym dma lli tables */
-#define SEP_IOCCREATESYMDMATABLE              _IO(SEP_IOC_MAGIC_NUMBER , 5)
-
-/* create flow dma lli tables */
-#define SEP_IOCCREATEFLOWDMATABLE             _IO(SEP_IOC_MAGIC_NUMBER , 6)
-
-/* free dynamic data allocated during table creation */
-#define SEP_IOCFREEDMATABLEDATA                _IO(SEP_IOC_MAGIC_NUMBER , 7)
-
-/* get the static pool area addresses (physical and virtual) */
-#define SEP_IOCGETSTATICPOOLADDR               _IO(SEP_IOC_MAGIC_NUMBER , 8)
-
-/* set flow id command */
-#define SEP_IOCSETFLOWID                       _IO(SEP_IOC_MAGIC_NUMBER , 9)
-
-/* add tables to the dynamic flow */
-#define SEP_IOCADDFLOWTABLE                    _IO(SEP_IOC_MAGIC_NUMBER , 10)
-
-/* add flow add tables message */
-#define SEP_IOCADDFLOWMESSAGE                  _IO(SEP_IOC_MAGIC_NUMBER , 11)
-
-/* start sep command */
-#define SEP_IOCSEPSTART                        _IO(SEP_IOC_MAGIC_NUMBER , 12)
-
-/* init sep command */
-#define SEP_IOCSEPINIT                         _IO(SEP_IOC_MAGIC_NUMBER , 13)
-
-/* end transaction command */
-#define SEP_IOCENDTRANSACTION                  _IO(SEP_IOC_MAGIC_NUMBER , 15)
-
-/* reallocate cache and resident */
-#define SEP_IOCREALLOCCACHERES                 _IO(SEP_IOC_MAGIC_NUMBER , 16)
-
-/* get the offset of the address starting from the beginning of the map area */
-#define SEP_IOCGETMAPPEDADDROFFSET             _IO(SEP_IOC_MAGIC_NUMBER , 17)
-
-/* get time address and value */
-#define SEP_IOCGETIME                          _IO(SEP_IOC_MAGIC_NUMBER , 19)
-
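-/*
-  A minimal user-space sketch of driving these ioctls (illustrative only;
-  the /dev node name below is an assumption, since the driver allocates a
-  char device region named "sep_sec_driver" but creates no node itself):
-
-       int fd = open("/dev/sep_sec_driver", O_RDWR);
-       struct sep_driver_get_time_t t;
-
-       if (fd >= 0 && ioctl(fd, SEP_IOCGETIME, &t) == 0)
-               printf("time %lu stored at %08lx\n", t.time_value,
-                      t.time_physical_address);
-*/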
-/*-------------------------------------------
-    TYPEDEFS
-----------------------------------------------*/
-
-/*
-  init command struct
-*/
-struct sep_driver_init_t {
-       /* address of the message */
-       unsigned long message_addr;
-
-       /* message size in words */
-       unsigned long message_size_in_words;
-
-};
-
-
-/*
-  realloc cache resident command
-*/
-struct sep_driver_realloc_cache_resident_t {
-       /* new cache address */
-       u64 new_cache_addr;
-       /* new resident address */
-       u64 new_resident_addr;
-       /* new shared area address */
-       u64  new_shared_area_addr;
-       /* new base address */
-       u64 new_base_addr;
-};
-
-struct sep_driver_alloc_t {
-       /* virtual address of allocated space */
-       unsigned long offset;
-
-       /* physical address of allocated space */
-       unsigned long phys_address;
-
-       /* number of bytes to allocate */
-       unsigned long num_bytes;
-};
-
-/*
- */
-struct sep_driver_write_t {
-       /* application space address */
-       unsigned long app_address;
-
-       /* address of the data pool */
-       unsigned long datapool_address;
-
-       /* number of bytes to write */
-       unsigned long num_bytes;
-};
-
-/*
- */
-struct sep_driver_read_t {
-       /* application space address */
-       unsigned long app_address;
-
-       /* address of the data pool */
-       unsigned long datapool_address;
-
-       /* number of bytes to read */
-       unsigned long num_bytes;
-};
-
-/*
-*/
-struct sep_driver_build_sync_table_t {
-       /* address value of the data in */
-       unsigned long app_in_address;
-
-       /* size of data in */
-       unsigned long data_in_size;
-
-       /* address of the data out */
-       unsigned long app_out_address;
-
-       /* the size of the block of the operation - if needed,
-          every table will be modulo this parameter */
-       unsigned long block_size;
-
-       /* the physical address of the first input DMA table */
-       unsigned long in_table_address;
-
-       /* number of entries in the first input DMA table */
-       unsigned long in_table_num_entries;
-
-       /* the physical address of the first output DMA table */
-       unsigned long out_table_address;
-
-       /* number of entries in the first output DMA table */
-       unsigned long out_table_num_entries;
-
-       /* data in the first input table */
-       unsigned long table_data_size;
-
-       /* distinct user/kernel layout */
-       bool isKernelVirtualAddress;
-
-};
-
-/*
-*/
-struct sep_driver_build_flow_table_t {
-       /* flow type */
-       unsigned long flow_type;
-
-       /* flag for input output */
-       unsigned long input_output_flag;
-
-       /* address value of the data in */
-       unsigned long virt_buff_data_addr;
-
-       /* size of data in */
-       unsigned long num_virtual_buffers;
-
-       /* the physical address of the first input DMA table */
-       unsigned long first_table_addr;
-
-       /* number of entries in the first input DMA table */
-       unsigned long first_table_num_entries;
-
-       /* data in the first input table */
-       unsigned long first_table_data_size;
-
-       /* distinct user/kernel layout */
-       bool isKernelVirtualAddress;
-};
-
-
-struct sep_driver_add_flow_table_t {
-       /* flow id  */
-       unsigned long flow_id;
-
-       /* flag for input output */
-       unsigned long inputOutputFlag;
-
-       /* address value of the data in */
-       unsigned long virt_buff_data_addr;
-
-       /* size of data in */
-       unsigned long num_virtual_buffers;
-
-       /* address of the first table */
-       unsigned long first_table_addr;
-
-       /* number of entries in the first table */
-       unsigned long first_table_num_entries;
-
-       /* data size of the first table */
-       unsigned long first_table_data_size;
-
-       /* distinct user/kernel layout */
-       bool isKernelVirtualAddress;
-
-};
-
-/*
-  command struct for set flow id
-*/
-struct sep_driver_set_flow_id_t {
-       /* flow id to set */
-       unsigned long flow_id;
-};
-
-
-/* command struct for add tables message */
-struct sep_driver_add_message_t {
-       /* flow id to set */
-       unsigned long flow_id;
-
-       /* message size in bytes */
-       unsigned long message_size_in_bytes;
-
-       /* address of the message */
-       unsigned long message_address;
-};
-
-/* command struct for static pool addresses  */
-struct sep_driver_static_pool_addr_t {
-       /* physical address of the static pool */
-       unsigned long physical_static_address;
-
-       /* virtual address of the static pool */
-       unsigned long virtual_static_address;
-};
-
-/* command struct for getting offset of the physical address from
-       the start of the mapped area  */
-struct sep_driver_get_mapped_offset_t {
-       /* physical address of the static pool */
-       unsigned long physical_address;
-
-       /* offset of the physical address from the start of the mapped area */
-       unsigned long offset;
-};
-
-/* command struct for getting time value and address */
-struct sep_driver_get_time_t {
-       /* physical address of stored time */
-       unsigned long time_physical_address;
-
-       /* value of the stored time */
-       unsigned long time_value;
-};
-
-
-/*
-  structure that represents one entry in the DMA LLI table
-*/
-struct sep_lli_entry_t {
-       /* physical address */
-       unsigned long physical_address;
-
-       /* block size */
-       unsigned long block_size;
-};
-
-/*
-  structure that represents data needed for lli table construction
-*/
-struct sep_lli_prepare_table_data_t {
-       /* pointer to the memory where the first lli entry is to be built */
-       struct sep_lli_entry_t *lli_entry_ptr;
-
-       /* pointer to the array of lli entries from which the table is to be built */
-       struct sep_lli_entry_t *lli_array_ptr;
-
-       /* number of elements in lli array */
-       int lli_array_size;
-
-       /* number of entries in the created table */
-       int num_table_entries;
-
-       /* number of array entries processed during table creation */
-       int num_array_entries_processed;
-
-       /* the total data size in the created table */
-       int lli_table_total_data_size;
-};
-
-/*
-  structure that represents one table - it is not used in code, just
-  to show what a table looks like
-*/
-struct sep_lli_table_t {
-       /* number of pages mapped in this table. If 0 - means that the table
-          is not defined (used as a valid flag) */
-       unsigned long num_pages;
-       /*
-          pointer to array of page pointers that represent the mapping of the
-          virtual buffer defined by the table to the physical memory. If this
-          pointer is NULL, it means that the table is not defined
-          (used as a valid flag)
-        */
-       struct page **table_page_array_ptr;
-
-       /* maximum flow entries in table */
-       struct sep_lli_entry_t lli_entries[SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE];
-};
-
-
-/*
-  structure for keeping the mapping of the virtual buffer into physical pages
-*/
-struct sep_flow_buffer_data {
-       /* pointer to the array of page structs pointers to the pages of the
-          virtual buffer */
-       struct page **page_array_ptr;
-
-       /* number of pages taken by the virtual buffer */
-       unsigned long num_pages;
-
-       /* this flag signals if this page_array is the last one among many that were
-          sent in one setting to SEP */
-       unsigned long last_page_array_flag;
-};
-
-/*
-  struct that keeps all the data for one flow
-*/
-struct sep_flow_context_t {
-       /*
-          work struct for handling the flow done interrupt in the workqueue
-          this structure must be in the first place, since it will be used
-          for casting to the containing flow context
-        */
-       struct work_struct flow_wq;
-
-       /* flow id */
-       unsigned long flow_id;
-
-       /* additional input tables exist */
-       unsigned long input_tables_flag;
-
-       /* additional output tables exist */
-       unsigned long output_tables_flag;
-
-       /* data of the first input table */
-       struct sep_lli_entry_t first_input_table;
-
-       /* data of the first output table */
-       struct sep_lli_entry_t first_output_table;
-
-       /* last input table data */
-       struct sep_lli_entry_t last_input_table;
-
-       /* last output table data */
-       struct sep_lli_entry_t last_output_table;
-
-       /* input tables in process (in sep) */
-       struct sep_lli_entry_t input_tables_in_process;
-
-       /* output table in process (in sep) */
-       struct sep_lli_entry_t output_tables_in_process;
-
-       /* size of messages in bytes */
-       unsigned long message_size_in_bytes;
-
-       /* message */
-       unsigned char message[SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES];
-};
-
-
-#endif
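
The structures removed above all describe the same mechanism: a scattered buffer is presented to the SEP hardware as a DMA LLI table, an array of sep_lli_entry_t pairs of (physical address, block size). A minimal stand-alone sketch, not taken from the driver, showing how such a table is summarized (the helper name and the sample values are illustrative only):

/* Stand-alone sketch (not driver code): an LLI table is an array of
 * { physical address, block size } pairs; the helper totals the bytes
 * they describe, the same quantity kept in lli_table_total_data_size. */
#include <stdio.h>

struct sep_lli_entry_t {
        unsigned long physical_address; /* physical address of the block */
        unsigned long block_size;       /* bytes in the block */
};

static unsigned long lli_total_data_size(const struct sep_lli_entry_t *tbl,
                                         int num_entries)
{
        unsigned long total = 0;
        int i;

        for (i = 0; i < num_entries; i++)
                total += tbl[i].block_size;
        return total;
}

int main(void)
{
        struct sep_lli_entry_t tbl[] = {
                { 0x1000UL, 4096UL },
                { 0x3000UL, 2048UL },
        };

        printf("%lu bytes described\n", lli_total_data_size(tbl, 2)); /* 6144 */
        return 0;
}
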
diff --git a/drivers/staging/sep/sep_driver_config.h b/drivers/staging/sep/sep_driver_config.h
deleted file mode 100644 (file)
index 6008fe5..0000000
+++ /dev/null
@@ -1,225 +0,0 @@
-/*
- *
- *  sep_driver_config.h - Security Processor Driver configuration
- *
- *  Copyright(c) 2009 Intel Corporation. All rights reserved.
- *  Copyright(c) 2009 Discretix. All rights reserved.
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  This program is distributed in the hope that it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- *  more details.
- *
- *  You should have received a copy of the GNU General Public License along with
- *  this program; if not, write to the Free Software Foundation, Inc., 59
- *  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
- *
- *  CONTACTS:
- *
- *  Mark Allyn         mark.a.allyn@intel.com
- *
- *  CHANGES:
- *
- *  2009.06.26 Initial publish
- *
- */
-
-#ifndef __SEP_DRIVER_CONFIG_H__
-#define __SEP_DRIVER_CONFIG_H__
-
-
-/*--------------------------------------
-  DRIVER CONFIGURATION FLAGS
-  -------------------------------------*/
-
-/* if flag is on, then the driver is running in polling and
-       not interrupt mode */
-#define SEP_DRIVER_POLLING_MODE                         1
-
-/* flag which defines if the shared area address should be
-       reconfigured (sent to SEP anew) during init of the driver */
-#define SEP_DRIVER_RECONFIG_MESSAGE_AREA                0
-
-/* the mode for running on the ARM1172 Evaluation platform (flag is 1) */
-#define SEP_DRIVER_ARM_DEBUG_MODE                       0
-
-/*-------------------------------------------
-       INTERNAL DATA CONFIGURATION
-       -------------------------------------------*/
-
-/* flag for the input array */
-#define SEP_DRIVER_IN_FLAG                              0
-
-/* flag for output array */
-#define SEP_DRIVER_OUT_FLAG                             1
-
-/* maximum number of entries in one LLI table */
-#define SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP             8
-
-
-/*--------------------------------------------------------
-       SHARED AREA memory total size is 36K
-       it is divided as follows:
-
-       SHARED_MESSAGE_AREA                     8K         }
-                                                          }
-       STATIC_POOL_AREA                        4K         } MAPPED AREA (24K)
-                                                          }
-       DATA_POOL_AREA                          12K        }
-
-       SYNCHRONIC_DMA_TABLES_AREA              5K
-
-       FLOW_DMA_TABLES_AREA                    4K
-
-       SYSTEM_MEMORY_AREA                      3K
-
-       SYSTEM_MEMORY total size is 3K
-       it is divided as follows:
-
-       TIME_MEMORY_AREA                        8B
------------------------------------------------------------*/
-
-
-
-/*
-       the maximum length of the message - the rest of the message shared
-       area will be dedicated to the dma lli tables
-*/
-#define SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES                  (8 * 1024)
-
-/* the size of the message shared area in bytes */
-#define SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES          (8 * 1024)
-
-/* the size of the data pool static area in bytes */
-#define SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES                  (4 * 1024)
-
-/* the size of the data pool shared area in bytes */
-#define SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES        (12 * 1024)
-
-/* the size of the synchronic DMA tables area in bytes */
-#define SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES   (1024 * 5)
-
-
-/* the size of the flow DMA tables area in bytes */
-#define SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES         (1024 * 4)
-
-/* system data (time, caller id, etc.) pool */
-#define SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES           100
-
-
-/* area size that is mapped - we map the MESSAGE AREA, STATIC POOL and
-       DATA POOL areas. area must be a multiple of 4k */
-#define SEP_DRIVER_MMMAP_AREA_SIZE                            (1024 * 24)
-
-
-/*-----------------------------------------------
-       offsets of the areas starting from the shared area start address
-*/
-
-/* message area offset */
-#define SEP_DRIVER_MESSAGE_AREA_OFFSET_IN_BYTES               0
-
-/* static pool area offset */
-#define SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES \
-               (SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES)
-
-/* data pool area offset */
-#define SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES \
-       (SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES + \
-       SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES)
-
-/* synchronic dma tables area offset */
-#define SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES \
-       (SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + \
-       SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES)
-
-/* sep driver flow dma tables area offset */
-#define SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES \
-       (SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES + \
-       SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES)
-
-/* system memory offset in bytes */
-#define SEP_DRIVER_SYSTEM_DATA_MEMORY_OFFSET_IN_BYTES \
-       (SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES + \
-       SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES)
-
-/* offset of the time area */
-#define SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES \
-       (SEP_DRIVER_SYSTEM_DATA_MEMORY_OFFSET_IN_BYTES)
-
-
-
-/* start physical address of the SEP registers memory in HOST */
-#define SEP_IO_MEM_REGION_START_ADDRESS                       0x80000000
-
-/* size of the SEP registers memory region  in HOST (for now 100 registers) */
-#define SEP_IO_MEM_REGION_SIZE                                (2 * 0x100000)
-
-/* number of IRQs used for SEP interrupts */
-#define SEP_DIRVER_IRQ_NUM                                    1
-
-/* maximum number of add buffers */
-#define SEP_MAX_NUM_ADD_BUFFERS                               100
-
-/* number of flows */
-#define SEP_DRIVER_NUM_FLOWS                                  4
-
-/* maximum number of entries in flow table */
-#define SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE              25
-
-/* offset of the num entries in the block length entry of the LLI */
-#define SEP_NUM_ENTRIES_OFFSET_IN_BITS                        24
-
-/* offset of the interrupt flag in the block length entry of the LLI */
-#define SEP_INT_FLAG_OFFSET_IN_BITS                           31
-
-/* mask for extracting data size from LLI */
-#define SEP_TABLE_DATA_SIZE_MASK                              0xFFFFFF
-
-/* mask for entries after being shifted left */
-#define SEP_NUM_ENTRIES_MASK                                  0x7F
-
-/* default flow id */
-#define SEP_FREE_FLOW_ID                                      0xFFFFFFFF
-
-/* temp flow id used during creation of a new flow until receiving
-       real flow id from sep */
-#define SEP_TEMP_FLOW_ID                   (SEP_DRIVER_NUM_FLOWS + 1)
-
-/* maximum add buffers message length in bytes */
-#define SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES                   (7 * 4)
-
-/* maximum number of concurrent virtual buffers */
-#define SEP_MAX_VIRT_BUFFERS_CONCURRENT                       100
-
-/* the token that defines the start of time address */
-#define SEP_TIME_VAL_TOKEN                                    0x12345678
-
-/* DEBUG LEVEL MASKS */
-#define SEP_DEBUG_LEVEL_BASIC       0x1
-
-#define SEP_DEBUG_LEVEL_EXTENDED    0x4
-
-
-/* Debug helpers */
-
-#define dbg(fmt, args...) \
-do {\
-       if (debug & SEP_DEBUG_LEVEL_BASIC) \
-               printk(KERN_DEBUG fmt, ##args); \
-} while(0);
-
-#define edbg(fmt, args...) \
-do { \
-       if (debug & SEP_DEBUG_LEVEL_EXTENDED) \
-               printk(KERN_DEBUG fmt, ##args); \
-} while(0);
-
-
-
-#endif
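
Working the offset macros above through by hand reproduces the 36K layout sketched in the header comment. A small stand-alone check, not part of the driver, with the size constants copied from the header:

/* Stand-alone check of the shared-area offsets defined above; the size
 * constants are copied from the header, the arithmetic mirrors the
 * *_OFFSET_IN_BYTES macros. */
#include <stdio.h>

#define MSG_SIZE    (8 * 1024)  /* SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES */
#define STATIC_SIZE (4 * 1024)  /* SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES */
#define POOL_SIZE   (12 * 1024) /* SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES */
#define SYNC_SIZE   (1024 * 5)  /* SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES */
#define FLOW_SIZE   (1024 * 4)  /* SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES */

int main(void)
{
        unsigned long msg    = 0;                  /* message area            */
        unsigned long stat   = msg  + MSG_SIZE;    /*  8192: static pool      */
        unsigned long pool   = stat + STATIC_SIZE; /* 12288: data pool        */
        unsigned long sync   = pool + POOL_SIZE;   /* 24576: sync DMA tables  */
        unsigned long flow   = sync + SYNC_SIZE;   /* 29696: flow DMA tables  */
        unsigned long sysmem = flow + FLOW_SIZE;   /* 33792: system data      */

        printf("static=%lu pool=%lu sync=%lu flow=%lu system=%lu\n",
               stat, pool, sync, flow, sysmem);
        /* 0..24K (message + static + data pool) is exactly the 24K region
         * covered by SEP_DRIVER_MMMAP_AREA_SIZE. */
        return 0;
}
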
diff --git a/drivers/staging/sep/sep_driver_hw_defs.h b/drivers/staging/sep/sep_driver_hw_defs.h
deleted file mode 100644 (file)
index ea6abd8..0000000
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- *
- *  sep_driver_hw_defs.h - Security Processor Driver hardware definitions
- *
- *  Copyright(c) 2009 Intel Corporation. All rights reserved.
- *  Copyright(c) 2009 Discretix. All rights reserved.
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  This program is distributed in the hope that it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- *  more details.
- *
- *  You should have received a copy of the GNU General Public License along with
- *  this program; if not, write to the Free Software Foundation, Inc., 59
- *  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
- *
- *  CONTACTS:
- *
- *  Mark Allyn         mark.a.allyn@intel.com
- *
- *  CHANGES:
- *
- *  2009.06.26 Initial publish
- *
- */
-
-#ifndef SEP_DRIVER_HW_DEFS__H
-#define SEP_DRIVER_HW_DEFS__H
-
-/*--------------------------------------------------------------------------*/
-/* Abstract: HW Registers Defines.                                          */
-/*                                                                          */
-/* Note: This file was automatically created !!!                            */
-/*       DO NOT EDIT THIS FILE !!!                                          */
-/*--------------------------------------------------------------------------*/
-
-
-/* cf registers */
-#define         HW_R0B_ADDR_0_REG_ADDR                  0x0000UL
-#define         HW_R0B_ADDR_1_REG_ADDR                  0x0004UL
-#define         HW_R0B_ADDR_2_REG_ADDR                  0x0008UL
-#define         HW_R0B_ADDR_3_REG_ADDR                  0x000cUL
-#define         HW_R0B_ADDR_4_REG_ADDR                  0x0010UL
-#define         HW_R0B_ADDR_5_REG_ADDR                  0x0014UL
-#define         HW_R0B_ADDR_6_REG_ADDR                  0x0018UL
-#define         HW_R0B_ADDR_7_REG_ADDR                  0x001cUL
-#define         HW_R0B_ADDR_8_REG_ADDR                  0x0020UL
-#define         HW_R2B_ADDR_0_REG_ADDR                  0x0080UL
-#define         HW_R2B_ADDR_1_REG_ADDR                  0x0084UL
-#define         HW_R2B_ADDR_2_REG_ADDR                  0x0088UL
-#define         HW_R2B_ADDR_3_REG_ADDR                  0x008cUL
-#define         HW_R2B_ADDR_4_REG_ADDR                  0x0090UL
-#define         HW_R2B_ADDR_5_REG_ADDR                  0x0094UL
-#define         HW_R2B_ADDR_6_REG_ADDR                  0x0098UL
-#define         HW_R2B_ADDR_7_REG_ADDR                  0x009cUL
-#define         HW_R2B_ADDR_8_REG_ADDR                  0x00a0UL
-#define         HW_R3B_REG_ADDR                         0x00C0UL
-#define         HW_R4B_REG_ADDR                         0x0100UL
-#define         HW_CSA_ADDR_0_REG_ADDR                  0x0140UL
-#define         HW_CSA_ADDR_1_REG_ADDR                  0x0144UL
-#define         HW_CSA_ADDR_2_REG_ADDR                  0x0148UL
-#define         HW_CSA_ADDR_3_REG_ADDR                  0x014cUL
-#define         HW_CSA_ADDR_4_REG_ADDR                  0x0150UL
-#define         HW_CSA_ADDR_5_REG_ADDR                  0x0154UL
-#define         HW_CSA_ADDR_6_REG_ADDR                  0x0158UL
-#define         HW_CSA_ADDR_7_REG_ADDR                  0x015cUL
-#define         HW_CSA_ADDR_8_REG_ADDR                  0x0160UL
-#define         HW_CSA_REG_ADDR                         0x0140UL
-#define         HW_SINB_REG_ADDR                        0x0180UL
-#define         HW_SOUTB_REG_ADDR                       0x0184UL
-#define         HW_PKI_CONTROL_REG_ADDR                 0x01C0UL
-#define         HW_PKI_STATUS_REG_ADDR                  0x01C4UL
-#define         HW_PKI_BUSY_REG_ADDR                0x01C8UL
-#define         HW_PKI_A_1025_REG_ADDR                  0x01CCUL
-#define         HW_PKI_SDMA_CTL_REG_ADDR                0x01D0UL
-#define         HW_PKI_SDMA_OFFSET_REG_ADDR     0x01D4UL
-#define         HW_PKI_SDMA_POINTERS_REG_ADDR   0x01D8UL
-#define         HW_PKI_SDMA_DLENG_REG_ADDR              0x01DCUL
-#define         HW_PKI_SDMA_EXP_POINTERS_REG_ADDR       0x01E0UL
-#define         HW_PKI_SDMA_RES_POINTERS_REG_ADDR       0x01E4UL
-#define         HW_PKI_CLR_REG_ADDR                     0x01E8UL
-#define         HW_PKI_SDMA_BUSY_REG_ADDR                   0x01E8UL
-#define         HW_PKI_SDMA_FIRST_EXP_N_REG_ADDR        0x01ECUL
-#define         HW_PKI_SDMA_MUL_BY1_REG_ADDR            0x01F0UL
-#define         HW_PKI_SDMA_RMUL_SEL_REG_ADDR           0x01F4UL
-#define         HW_DES_KEY_0_REG_ADDR                   0x0208UL
-#define         HW_DES_KEY_1_REG_ADDR                   0x020CUL
-#define         HW_DES_KEY_2_REG_ADDR                   0x0210UL
-#define         HW_DES_KEY_3_REG_ADDR                   0x0214UL
-#define         HW_DES_KEY_4_REG_ADDR                   0x0218UL
-#define         HW_DES_KEY_5_REG_ADDR                   0x021CUL
-#define         HW_DES_CONTROL_0_REG_ADDR               0x0220UL
-#define         HW_DES_CONTROL_1_REG_ADDR               0x0224UL
-#define         HW_DES_IV_0_REG_ADDR                    0x0228UL
-#define         HW_DES_IV_1_REG_ADDR                    0x022CUL
-#define         HW_AES_KEY_0_ADDR_0_REG_ADDR            0x0400UL
-#define         HW_AES_KEY_0_ADDR_1_REG_ADDR            0x0404UL
-#define         HW_AES_KEY_0_ADDR_2_REG_ADDR            0x0408UL
-#define         HW_AES_KEY_0_ADDR_3_REG_ADDR            0x040cUL
-#define         HW_AES_KEY_0_ADDR_4_REG_ADDR            0x0410UL
-#define         HW_AES_KEY_0_ADDR_5_REG_ADDR            0x0414UL
-#define         HW_AES_KEY_0_ADDR_6_REG_ADDR            0x0418UL
-#define         HW_AES_KEY_0_ADDR_7_REG_ADDR            0x041cUL
-#define         HW_AES_KEY_0_REG_ADDR                   0x0400UL
-#define         HW_AES_IV_0_ADDR_0_REG_ADDR             0x0440UL
-#define         HW_AES_IV_0_ADDR_1_REG_ADDR             0x0444UL
-#define         HW_AES_IV_0_ADDR_2_REG_ADDR             0x0448UL
-#define         HW_AES_IV_0_ADDR_3_REG_ADDR             0x044cUL
-#define         HW_AES_IV_0_REG_ADDR                    0x0440UL
-#define         HW_AES_CTR1_ADDR_0_REG_ADDR             0x0460UL
-#define         HW_AES_CTR1_ADDR_1_REG_ADDR             0x0464UL
-#define         HW_AES_CTR1_ADDR_2_REG_ADDR             0x0468UL
-#define         HW_AES_CTR1_ADDR_3_REG_ADDR             0x046cUL
-#define         HW_AES_CTR1_REG_ADDR                    0x0460UL
-#define         HW_AES_SK_REG_ADDR                      0x0478UL
-#define         HW_AES_MAC_OK_REG_ADDR                      0x0480UL
-#define         HW_AES_PREV_IV_0_ADDR_0_REG_ADDR        0x0490UL
-#define         HW_AES_PREV_IV_0_ADDR_1_REG_ADDR        0x0494UL
-#define         HW_AES_PREV_IV_0_ADDR_2_REG_ADDR        0x0498UL
-#define         HW_AES_PREV_IV_0_ADDR_3_REG_ADDR        0x049cUL
-#define         HW_AES_PREV_IV_0_REG_ADDR                   0x0490UL
-#define         HW_AES_CONTROL_REG_ADDR                     0x04C0UL
-#define         HW_HASH_H0_REG_ADDR                 0x0640UL
-#define         HW_HASH_H1_REG_ADDR                 0x0644UL
-#define         HW_HASH_H2_REG_ADDR                 0x0648UL
-#define         HW_HASH_H3_REG_ADDR                 0x064CUL
-#define         HW_HASH_H4_REG_ADDR                 0x0650UL
-#define         HW_HASH_H5_REG_ADDR                 0x0654UL
-#define         HW_HASH_H6_REG_ADDR                 0x0658UL
-#define         HW_HASH_H7_REG_ADDR                 0x065CUL
-#define         HW_HASH_H8_REG_ADDR                 0x0660UL
-#define         HW_HASH_H9_REG_ADDR                 0x0664UL
-#define         HW_HASH_H10_REG_ADDR                0x0668UL
-#define         HW_HASH_H11_REG_ADDR                0x066CUL
-#define         HW_HASH_H12_REG_ADDR                0x0670UL
-#define         HW_HASH_H13_REG_ADDR                0x0674UL
-#define         HW_HASH_H14_REG_ADDR                0x0678UL
-#define         HW_HASH_H15_REG_ADDR                0x067CUL
-#define         HW_HASH_CONTROL_REG_ADDR                0x07C0UL
-#define         HW_HASH_PAD_EN_REG_ADDR                 0x07C4UL
-#define         HW_HASH_PAD_CFG_REG_ADDR                0x07C8UL
-#define         HW_HASH_CUR_LEN_0_REG_ADDR      0x07CCUL
-#define         HW_HASH_CUR_LEN_1_REG_ADDR      0x07D0UL
-#define         HW_HASH_CUR_LEN_2_REG_ADDR      0x07D4UL
-#define         HW_HASH_CUR_LEN_3_REG_ADDR      0x07D8UL
-#define         HW_HASH_PARAM_REG_ADDR                  0x07DCUL
-#define         HW_HASH_INT_BUSY_REG_ADDR               0x07E0UL
-#define         HW_HASH_SW_RESET_REG_ADDR               0x07E4UL
-#define         HW_HASH_ENDIANESS_REG_ADDR      0x07E8UL
-#define         HW_HASH_DATA_REG_ADDR               0x07ECUL
-#define         HW_DRNG_CONTROL_REG_ADDR                0x0800UL
-#define         HW_DRNG_VALID_REG_ADDR                  0x0804UL
-#define         HW_DRNG_DATA_REG_ADDR               0x0808UL
-#define         HW_RND_SRC_EN_REG_ADDR                  0x080CUL
-#define         HW_AES_CLK_ENABLE_REG_ADDR      0x0810UL
-#define         HW_DES_CLK_ENABLE_REG_ADDR      0x0814UL
-#define         HW_HASH_CLK_ENABLE_REG_ADDR     0x0818UL
-#define         HW_PKI_CLK_ENABLE_REG_ADDR      0x081CUL
-#define         HW_CLK_STATUS_REG_ADDR                  0x0824UL
-#define         HW_CLK_ENABLE_REG_ADDR                  0x0828UL
-#define         HW_DRNG_SAMPLE_REG_ADDR                 0x0850UL
-#define         HW_RND_SRC_CTL_REG_ADDR                 0x0858UL
-#define         HW_CRYPTO_CTL_REG_ADDR                  0x0900UL
-#define         HW_CRYPTO_STATUS_REG_ADDR               0x090CUL
-#define         HW_CRYPTO_BUSY_REG_ADDR                 0x0910UL
-#define         HW_AES_BUSY_REG_ADDR                0x0914UL
-#define         HW_DES_BUSY_REG_ADDR                0x0918UL
-#define         HW_HASH_BUSY_REG_ADDR               0x091CUL
-#define         HW_CONTENT_REG_ADDR                 0x0924UL
-#define         HW_VERSION_REG_ADDR                 0x0928UL
-#define         HW_CONTEXT_ID_REG_ADDR                  0x0930UL
-#define         HW_DIN_BUFFER_REG_ADDR                  0x0C00UL
-#define         HW_DIN_MEM_DMA_BUSY_REG_ADDR    0x0c20UL
-#define         HW_SRC_LLI_MEM_ADDR_REG_ADDR    0x0c24UL
-#define         HW_SRC_LLI_WORD0_REG_ADDR               0x0C28UL
-#define         HW_SRC_LLI_WORD1_REG_ADDR               0x0C2CUL
-#define         HW_SRAM_SRC_ADDR_REG_ADDR               0x0c30UL
-#define         HW_DIN_SRAM_BYTES_LEN_REG_ADDR  0x0c34UL
-#define         HW_DIN_SRAM_DMA_BUSY_REG_ADDR   0x0C38UL
-#define         HW_WRITE_ALIGN_REG_ADDR                 0x0C3CUL
-#define         HW_OLD_DATA_REG_ADDR                0x0C48UL
-#define         HW_WRITE_ALIGN_LAST_REG_ADDR    0x0C4CUL
-#define         HW_DOUT_BUFFER_REG_ADDR                 0x0C00UL
-#define         HW_DST_LLI_WORD0_REG_ADDR               0x0D28UL
-#define         HW_DST_LLI_WORD1_REG_ADDR               0x0D2CUL
-#define         HW_DST_LLI_MEM_ADDR_REG_ADDR    0x0D24UL
-#define         HW_DOUT_MEM_DMA_BUSY_REG_ADDR   0x0D20UL
-#define         HW_SRAM_DEST_ADDR_REG_ADDR      0x0D30UL
-#define         HW_DOUT_SRAM_BYTES_LEN_REG_ADDR 0x0D34UL
-#define         HW_DOUT_SRAM_DMA_BUSY_REG_ADDR  0x0D38UL
-#define         HW_READ_ALIGN_REG_ADDR                  0x0D3CUL
-#define         HW_READ_LAST_DATA_REG_ADDR      0x0D44UL
-#define         HW_RC4_THRU_CPU_REG_ADDR                0x0D4CUL
-#define         HW_AHB_SINGLE_REG_ADDR                  0x0E00UL
-#define         HW_SRAM_DATA_REG_ADDR               0x0F00UL
-#define         HW_SRAM_ADDR_REG_ADDR               0x0F04UL
-#define         HW_SRAM_DATA_READY_REG_ADDR     0x0F08UL
-#define         HW_HOST_IRR_REG_ADDR                     0x0A00UL
-#define         HW_HOST_IMR_REG_ADDR                     0x0A04UL
-#define         HW_HOST_ICR_REG_ADDR                     0x0A08UL
-#define         HW_HOST_SEP_SRAM_THRESHOLD_REG_ADDR  0x0A10UL
-#define         HW_HOST_SEP_BUSY_REG_ADDR                    0x0A14UL
-#define         HW_HOST_SEP_LCS_REG_ADDR                     0x0A18UL
-#define         HW_HOST_CC_SW_RST_REG_ADDR               0x0A40UL
-#define         HW_HOST_SEP_SW_RST_REG_ADDR              0x0A44UL
-#define         HW_HOST_FLOW_DMA_SW_INT0_REG_ADDR        0x0A80UL
-#define         HW_HOST_FLOW_DMA_SW_INT1_REG_ADDR        0x0A84UL
-#define         HW_HOST_FLOW_DMA_SW_INT2_REG_ADDR        0x0A88UL
-#define         HW_HOST_FLOW_DMA_SW_INT3_REG_ADDR        0x0A8cUL
-#define         HW_HOST_FLOW_DMA_SW_INT4_REG_ADDR        0x0A90UL
-#define         HW_HOST_FLOW_DMA_SW_INT5_REG_ADDR        0x0A94UL
-#define         HW_HOST_FLOW_DMA_SW_INT6_REG_ADDR    0x0A98UL
-#define         HW_HOST_FLOW_DMA_SW_INT7_REG_ADDR        0x0A9cUL
-#define         HW_HOST_SEP_HOST_GPR0_REG_ADDR           0x0B00UL
-#define         HW_HOST_SEP_HOST_GPR1_REG_ADDR           0x0B04UL
-#define         HW_HOST_SEP_HOST_GPR2_REG_ADDR           0x0B08UL
-#define         HW_HOST_SEP_HOST_GPR3_REG_ADDR       0x0B0CUL
-#define         HW_HOST_HOST_SEP_GPR0_REG_ADDR       0x0B80UL
-#define         HW_HOST_HOST_SEP_GPR1_REG_ADDR       0x0B84UL
-#define         HW_HOST_HOST_SEP_GPR2_REG_ADDR       0x0B88UL
-#define         HW_HOST_HOST_SEP_GPR3_REG_ADDR       0x0B8CUL
-#define         HW_HOST_HOST_ENDIAN_REG_ADDR         0x0B90UL
-#define         HW_HOST_HOST_COMM_CLK_EN_REG_ADDR        0x0B94UL
-#define         HW_CLR_SRAM_BUSY_REG_REG_ADDR        0x0F0CUL
-#define    HW_CC_SRAM_BASE_ADDRESS              0x5800UL
-
-#endif                         /* ifndef HW_DEFS */
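
The register file above is only a table of byte offsets into the SEP register window; a driver would ioremap the region declared in sep_driver_config.h and then use readl()/writel() at base + offset. A hedged sketch (the mapping helper and the choice of the GPR0 mailbox registers are illustrative, not taken from the driver):

/* Sketch only: map the SEP register window and access one mailbox
 * register pair.  sep_io_base and sep_map_registers() are illustrative
 * names; the constants come from the two headers above. */
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/types.h>

#define SEP_IO_MEM_REGION_START_ADDRESS 0x80000000
#define SEP_IO_MEM_REGION_SIZE          (2 * 0x100000)
#define HW_HOST_HOST_SEP_GPR0_REG_ADDR  0x0B80UL
#define HW_HOST_SEP_HOST_GPR0_REG_ADDR  0x0B00UL

static void __iomem *sep_io_base;

static int sep_map_registers(void)
{
        sep_io_base = ioremap_nocache(SEP_IO_MEM_REGION_START_ADDRESS,
                                      SEP_IO_MEM_REGION_SIZE);
        return sep_io_base ? 0 : -ENOMEM;
}

static void sep_write_host_to_sep_gpr0(u32 val)
{
        writel(val, sep_io_base + HW_HOST_HOST_SEP_GPR0_REG_ADDR);
}

static u32 sep_read_sep_to_host_gpr0(void)
{
        return readl(sep_io_base + HW_HOST_SEP_HOST_GPR0_REG_ADDR);
}
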
index 5e2ffefb60af9c86c384eff40baaaacca3a279f1..d231ae27299d42b796d7c9f1dec2a83ee2e1694c 100644 (file)
@@ -2,6 +2,7 @@
 menuconfig SPECTRA
        tristate "Denali Spectra Flash Translation Layer"
        depends on BLOCK
+       depends on X86_MRST
        default n
        ---help---
          Enable the FTL pseudo-filesystem used with the NAND Flash
index d0c5c97eda3e6a3791bf91454f63c3a86bf79944..fa21a0fd8e84643549a5de0088874a05cc3f940b 100644 (file)
@@ -27,6 +27,8 @@
 #include <linux/kthread.h>
 #include <linux/log2.h>
 #include <linux/init.h>
+#include <linux/smp_lock.h>
+#include <linux/slab.h>
 
 /**** Helper functions used for Div, Remainder operation on u64 ****/
 
@@ -113,7 +115,6 @@ u64 GLOB_u64_Remainder(u64 addr, u32 divisor_type)
 
 #define GLOB_SBD_NAME          "nd"
 #define GLOB_SBD_IRQ_NUM       (29)
-#define GLOB_VERSION           "driver version 20091110"
 
 #define GLOB_SBD_IOCTL_GC                        (0x7701)
 #define GLOB_SBD_IOCTL_WL                        (0x7702)
@@ -272,13 +273,6 @@ static int get_res_blk_num_os(void)
        return res_blks;
 }
 
-static void SBD_prepare_flush(struct request_queue *q, struct request *rq)
-{
-       rq->cmd_type = REQ_TYPE_LINUX_BLOCK;
-       /* rq->timeout = 5 * HZ; */
-       rq->cmd[0] = REQ_LB_OP_FLUSH;
-}
-
 /* Transfer a full request. */
 static int do_transfer(struct spectra_nand_dev *tr, struct request *req)
 {
@@ -296,8 +290,7 @@ static int do_transfer(struct spectra_nand_dev *tr, struct request *req)
                        IdentifyDeviceData.PagesPerBlock *
                        res_blks_os;
 
-       if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
-                       req->cmd[0] == REQ_LB_OP_FLUSH) {
+       if (req->cmd_type & REQ_FLUSH) {
                if (force_flush_cache()) /* Fail to flush cache */
                        return -EIO;
                else
@@ -597,11 +590,23 @@ int GLOB_SBD_ioctl(struct block_device *bdev, fmode_t mode,
        return -ENOTTY;
 }
 
+int GLOB_SBD_unlocked_ioctl(struct block_device *bdev, fmode_t mode,
+               unsigned int cmd, unsigned long arg)
+{
+       int ret;
+
+       lock_kernel();
+       ret = GLOB_SBD_ioctl(bdev, mode, cmd, arg);
+       unlock_kernel();
+
+       return ret;
+}
+
 static struct block_device_operations GLOB_SBD_ops = {
        .owner = THIS_MODULE,
        .open = GLOB_SBD_open,
        .release = GLOB_SBD_release,
-       .locked_ioctl = GLOB_SBD_ioctl,
+       .ioctl = GLOB_SBD_unlocked_ioctl,
        .getgeo = GLOB_SBD_getgeo,
 };
 
@@ -650,8 +655,7 @@ static int SBD_setup_device(struct spectra_nand_dev *dev, int which)
        /* Here we force report 512 byte hardware sector size to Kernel */
        blk_queue_logical_block_size(dev->queue, 512);
 
-       blk_queue_ordered(dev->queue, QUEUE_ORDERED_DRAIN_FLUSH,
-                                       SBD_prepare_flush);
+       blk_queue_ordered(dev->queue, QUEUE_ORDERED_DRAIN_FLUSH);
 
        dev->thread = kthread_run(spectra_trans_thread, dev, "nand_thd");
        if (IS_ERR(dev->thread)) {
index 134aa5166a8d5321cb377c82f0603284e9aaeb8a..9b5218b6ada80095a7a525749c58c0ad619a46f8 100644 (file)
@@ -61,7 +61,6 @@ static void FTL_Cache_Read_Page(u8 *pData, u64 dwPageAddr,
 static void FTL_Cache_Write_Page(u8 *pData, u64 dwPageAddr,
                                 u8 cache_blk, u16 flag);
 static int FTL_Cache_Write(void);
-static int FTL_Cache_Write_Back(u8 *pData, u64 blk_addr);
 static void FTL_Calculate_LRU(void);
 static u32 FTL_Get_Block_Index(u32 wBlockNum);
 
@@ -86,8 +85,6 @@ static u32 FTL_Replace_MWBlock(void);
 static int FTL_Replace_Block(u64 blk_addr);
 static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX);
 
-static int FTL_Flash_Error_Handle(u8 *pData, u64 old_page_addr, u64 blk_addr);
-
 struct device_info_tag DeviceInfo;
 struct flash_cache_tag Cache;
 static struct spectra_l2_cache_info cache_l2;
@@ -775,7 +772,7 @@ static void dump_cache_l2_table(void)
 {
        struct list_head *p;
        struct spectra_l2_cache_list *pnd;
-       int n, i;
+       int n;
 
        n = 0;
        list_for_each(p, &cache_l2.table.list) {
@@ -1537,79 +1534,6 @@ static int FTL_Cache_Write_All(u8 *pData, u64 blk_addr)
        return wResult;
 }
 
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function:     FTL_Cache_Update_Block
-* Inputs:       pointer to buffer,page address,block address
-* Outputs:      PASS=0 / FAIL=1
-* Description:  It updates the cache
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Cache_Update_Block(u8 *pData,
-                       u64 old_page_addr, u64 blk_addr)
-{
-       int i, j;
-       u8 *buf = pData;
-       int wResult = PASS;
-       int wFoundInCache;
-       u64 page_addr;
-       u64 addr;
-       u64 old_blk_addr;
-       u16 page_offset;
-
-       nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-                               __FILE__, __LINE__, __func__);
-
-       old_blk_addr = (u64)(old_page_addr >>
-               DeviceInfo.nBitsInBlockDataSize) * DeviceInfo.wBlockDataSize;
-       page_offset = (u16)(GLOB_u64_Remainder(old_page_addr, 2) >>
-               DeviceInfo.nBitsInPageDataSize);
-
-       for (i = 0; i < DeviceInfo.wPagesPerBlock; i += Cache.pages_per_item) {
-               page_addr = old_blk_addr + i * DeviceInfo.wPageDataSize;
-               if (i != page_offset) {
-                       wFoundInCache = FAIL;
-                       for (j = 0; j < CACHE_ITEM_NUM; j++) {
-                               addr = Cache.array[j].address;
-                               addr = FTL_Get_Physical_Block_Addr(addr) +
-                                       GLOB_u64_Remainder(addr, 2);
-                               if ((addr >= page_addr) && addr <
-                                       (page_addr + Cache.cache_item_size)) {
-                                       wFoundInCache = PASS;
-                                       buf = Cache.array[j].buf;
-                                       Cache.array[j].changed = SET;
-#if CMD_DMA
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
-                                       int_cache[ftl_cmd_cnt].item = j;
-                                       int_cache[ftl_cmd_cnt].cache.address =
-                                               Cache.array[j].address;
-                                       int_cache[ftl_cmd_cnt].cache.changed =
-                                               Cache.array[j].changed;
-#endif
-#endif
-                                       break;
-                               }
-                       }
-                       if (FAIL == wFoundInCache) {
-                               if (ERR == FTL_Cache_Read_All(g_pTempBuf,
-                                       page_addr)) {
-                                       wResult = FAIL;
-                                       break;
-                               }
-                               buf = g_pTempBuf;
-                       }
-               } else {
-                       buf = pData;
-               }
-
-               if (FAIL == FTL_Cache_Write_All(buf,
-                       blk_addr + (page_addr - old_blk_addr))) {
-                       wResult = FAIL;
-                       break;
-               }
-       }
-
-       return wResult;
-}
-
 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
 * Function:     FTL_Copy_Block
 * Inputs:       source block address
@@ -1698,7 +1622,7 @@ static int get_l2_cache_blks(void)
 static int erase_l2_cache_blocks(void)
 {
        int i, ret = PASS;
-       u32 pblk, lblk;
+       u32 pblk, lblk = BAD_BLOCK;
        u64 addr;
        u32 *pbt = (u32 *)g_pBlockTable;
 
@@ -2004,87 +1928,6 @@ static int search_l2_cache(u8 *buf, u64 logical_addr)
        return ret;
 }
 
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function:     FTL_Cache_Write_Back
-* Inputs:       pointer to data cached in sys memory
-*               address of free block in flash
-* Outputs:      PASS=0 / FAIL=1
-* Description:  writes all the pages of Cache Block to flash
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Cache_Write_Back(u8 *pData, u64 blk_addr)
-{
-       int i, j, iErase;
-       u64 old_page_addr, addr, phy_addr;
-       u32 *pbt = (u32 *)g_pBlockTable;
-       u32 lba;
-       
-       nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-                              __FILE__, __LINE__, __func__);
-
-       old_page_addr = FTL_Get_Physical_Block_Addr(blk_addr) +
-               GLOB_u64_Remainder(blk_addr, 2);
-
-       iErase = (FAIL == FTL_Replace_Block(blk_addr)) ? PASS : FAIL;
-
-       pbt[BLK_FROM_ADDR(blk_addr)] &= (~SPARE_BLOCK);
-
-#if CMD_DMA
-       p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free;
-       g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-
-       p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
-       p_BTableChangesDelta->BT_Index = (u32)(blk_addr >>
-               DeviceInfo.nBitsInBlockDataSize);
-       p_BTableChangesDelta->BT_Entry_Value =
-               pbt[(u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize)];
-       p_BTableChangesDelta->ValidFields = 0x0C;
-#endif
-
-       if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
-               g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
-               FTL_Write_IN_Progress_Block_Table_Page();
-       }
-
-       for (i = 0; i < RETRY_TIMES; i++) {
-               if (PASS == iErase) {
-                       phy_addr = FTL_Get_Physical_Block_Addr(blk_addr);
-                       if (FAIL == GLOB_FTL_Block_Erase(phy_addr)) {
-                               lba = BLK_FROM_ADDR(blk_addr);
-                               MARK_BLOCK_AS_BAD(pbt[lba]);
-                               i = RETRY_TIMES;
-                               break;
-                       }
-               }
-
-               for (j = 0; j < CACHE_ITEM_NUM; j++) {
-                       addr = Cache.array[j].address;
-                       if ((addr <= blk_addr) &&
-                               ((addr + Cache.cache_item_size) > blk_addr))
-                               cache_block_to_write = j;
-               }
-
-               phy_addr = FTL_Get_Physical_Block_Addr(blk_addr);
-               if (PASS == FTL_Cache_Update_Block(pData,
-                                       old_page_addr, phy_addr)) {
-                       cache_block_to_write = UNHIT_CACHE_ITEM;
-                       break;
-               } else {
-                       iErase = PASS;
-               }
-       }
-
-       if (i >= RETRY_TIMES) {
-               if (ERR == FTL_Flash_Error_Handle(pData,
-                                       old_page_addr, blk_addr))
-                       return ERR;
-               else
-                       return FAIL;
-       }
-
-       return PASS;
-}
-
 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
 * Function:     FTL_Cache_Write_Page
 * Inputs:       Pointer to buffer, page address, cache block number
@@ -2370,159 +2213,6 @@ static int FTL_Write_Block_Table(int wForce)
        return 1;
 }
 
-/******************************************************************
-* Function:     GLOB_FTL_Flash_Format
-* Inputs:       none
-* Outputs:      PASS
-* Description:  The block table stores bad block info, including MDF+
-*               blocks gone bad over the ages. Therefore, if we have a
-*               block table in place, then use it to scan for bad blocks
-*               If not, then scan for MDF.
-*               Now, a block table will only be found if spectra was already
-*               being used. For a fresh flash, we'll go thru scanning for
-*               MDF. If spectra was being used, then there is a chance that
-*               the MDF has been corrupted. Spectra avoids writing to the
-*               first 2 bytes of the spare area to all pages in a block. This
-*               covers all known flash devices. However, since flash
-*               manufacturers have no standard of where the MDF is stored,
-*               this cannot guarantee that the MDF is protected for future
-*               devices too. The initial scanning for the block table assures
-*               this. It is ok even if the block table is outdated, as all
-*               we're looking for are bad block markers.
-*               Use this when mounting a file system or starting a
-*               new flash.
-*
-*********************************************************************/
-static int  FTL_Format_Flash(u8 valid_block_table)
-{
-       u32 i, j;
-       u32 *pbt = (u32 *)g_pBlockTable;
-       u32 tempNode;
-       int ret;
-
-#if CMD_DMA
-       u32 *pbtStartingCopy = (u32 *)g_pBTStartingCopy;
-       if (ftl_cmd_cnt)
-               return FAIL;
-#endif
-
-       if (FAIL == FTL_Check_Block_Table(FAIL))
-               valid_block_table = 0;
-
-       if (valid_block_table) {
-               u8 switched = 1;
-               u32 block, k;
-
-               k = DeviceInfo.wSpectraStartBlock;
-               while (switched && (k < DeviceInfo.wSpectraEndBlock)) {
-                       switched = 0;
-                       k++;
-                       for (j = DeviceInfo.wSpectraStartBlock, i = 0;
-                       j <= DeviceInfo.wSpectraEndBlock;
-                       j++, i++) {
-                               block = (pbt[i] & ~BAD_BLOCK) -
-                                       DeviceInfo.wSpectraStartBlock;
-                               if (block != i) {
-                                       switched = 1;
-                                       tempNode = pbt[i];
-                                       pbt[i] = pbt[block];
-                                       pbt[block] = tempNode;
-                               }
-                       }
-               }
-               if ((k == DeviceInfo.wSpectraEndBlock) && switched)
-                       valid_block_table = 0;
-       }
-
-       if (!valid_block_table) {
-               memset(g_pBlockTable, 0,
-                       DeviceInfo.wDataBlockNum * sizeof(u32));
-               memset(g_pWearCounter, 0,
-                       DeviceInfo.wDataBlockNum * sizeof(u8));
-               if (DeviceInfo.MLCDevice)
-                       memset(g_pReadCounter, 0,
-                               DeviceInfo.wDataBlockNum * sizeof(u16));
-#if CMD_DMA
-               memset(g_pBTStartingCopy, 0,
-                       DeviceInfo.wDataBlockNum * sizeof(u32));
-               memset(g_pWearCounterCopy, 0,
-                               DeviceInfo.wDataBlockNum * sizeof(u8));
-               if (DeviceInfo.MLCDevice)
-                       memset(g_pReadCounterCopy, 0,
-                               DeviceInfo.wDataBlockNum * sizeof(u16));
-#endif
-               for (j = DeviceInfo.wSpectraStartBlock, i = 0;
-                       j <= DeviceInfo.wSpectraEndBlock;
-                       j++, i++) {
-                       if (GLOB_LLD_Get_Bad_Block((u32)j))
-                               pbt[i] = (u32)(BAD_BLOCK | j);
-               }
-       }
-
-       nand_dbg_print(NAND_DBG_WARN, "Erasing all blocks in the NAND\n");
-
-       for (j = DeviceInfo.wSpectraStartBlock, i = 0;
-               j <= DeviceInfo.wSpectraEndBlock;
-               j++, i++) {
-               if ((pbt[i] & BAD_BLOCK) != BAD_BLOCK) {
-                       ret = GLOB_LLD_Erase_Block(j);
-                       if (FAIL == ret) {
-                               pbt[i] = (u32)(j);
-                               MARK_BLOCK_AS_BAD(pbt[i]);
-                               nand_dbg_print(NAND_DBG_WARN,
-                              "NAND Program fail in %s, Line %d, "
-                              "Function: %s, new Bad Block %d generated!\n",
-                              __FILE__, __LINE__, __func__, (int)j);
-                       } else {
-                               pbt[i] = (u32)(SPARE_BLOCK | j);
-                       }
-               }
-#if CMD_DMA
-               pbtStartingCopy[i] = pbt[i];
-#endif
-       }
-
-       g_wBlockTableOffset = 0;
-       for (i = 0; (i <= (DeviceInfo.wSpectraEndBlock -
-                       DeviceInfo.wSpectraStartBlock))
-                       && ((pbt[i] & BAD_BLOCK) == BAD_BLOCK); i++)
-               ;
-       if (i > (DeviceInfo.wSpectraEndBlock - DeviceInfo.wSpectraStartBlock)) {
-               printk(KERN_ERR "All blocks bad!\n");
-               return FAIL;
-       } else {
-               g_wBlockTableIndex = pbt[i] & ~BAD_BLOCK;
-               if (i != BLOCK_TABLE_INDEX) {
-                       tempNode = pbt[i];
-                       pbt[i] = pbt[BLOCK_TABLE_INDEX];
-                       pbt[BLOCK_TABLE_INDEX] = tempNode;
-               }
-       }
-       pbt[BLOCK_TABLE_INDEX] &= (~SPARE_BLOCK);
-
-#if CMD_DMA
-       pbtStartingCopy[BLOCK_TABLE_INDEX] &= (~SPARE_BLOCK);
-#endif
-
-       g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
-       memset(g_pBTBlocks, 0xFF,
-                       (1 + LAST_BT_ID - FIRST_BT_ID) * sizeof(u32));
-       g_pBTBlocks[FIRST_BT_ID-FIRST_BT_ID] = g_wBlockTableIndex;
-       FTL_Write_Block_Table(FAIL);
-
-       for (i = 0; i < CACHE_ITEM_NUM; i++) {
-               Cache.array[i].address = NAND_CACHE_INIT_ADDR;
-               Cache.array[i].use_cnt = 0;
-               Cache.array[i].changed  = CLEAR;
-       }
-
-#if (RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE && CMD_DMA)
-       memcpy((void *)&cache_start_copy, (void *)&Cache,
-                       sizeof(struct flash_cache_tag));
-#endif
-       return PASS;
-}
-
 static int  force_format_nand(void)
 {
        u32 i;
@@ -3031,112 +2721,6 @@ static int FTL_Read_Block_Table(void)
        return wResult;
 }
 
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function:     FTL_Flash_Error_Handle
-* Inputs:       Pointer to data
-*               Page address
-*               Block address
-* Outputs:      PASS=0 / FAIL=1
-* Description:  It handles any error that occurs during a Spectra operation
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Flash_Error_Handle(u8 *pData, u64 old_page_addr,
-                               u64 blk_addr)
-{
-       u32 i;
-       int j;
-       u32 tmp_node, blk_node = BLK_FROM_ADDR(blk_addr);
-       u64 phy_addr;
-       int wErase = FAIL;
-       int wResult = FAIL;
-       u32 *pbt = (u32 *)g_pBlockTable;
-
-       nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-                      __FILE__, __LINE__, __func__);
-
-       if (ERR == GLOB_FTL_Garbage_Collection())
-               return ERR;
-
-       do {
-               for (i = DeviceInfo.wSpectraEndBlock -
-                       DeviceInfo.wSpectraStartBlock;
-                                       i > 0; i--) {
-                       if (IS_SPARE_BLOCK(i)) {
-                               tmp_node = (u32)(BAD_BLOCK |
-                                       pbt[blk_node]);
-                               pbt[blk_node] = (u32)(pbt[i] &
-                                       (~SPARE_BLOCK));
-                               pbt[i] = tmp_node;
-#if CMD_DMA
-                               p_BTableChangesDelta =
-                                   (struct BTableChangesDelta *)
-                                   g_pBTDelta_Free;
-                               g_pBTDelta_Free +=
-                                   sizeof(struct BTableChangesDelta);
-
-                               p_BTableChangesDelta->ftl_cmd_cnt =
-                                   ftl_cmd_cnt;
-                               p_BTableChangesDelta->BT_Index =
-                                   blk_node;
-                               p_BTableChangesDelta->BT_Entry_Value =
-                                   pbt[blk_node];
-                               p_BTableChangesDelta->ValidFields = 0x0C;
-
-                               p_BTableChangesDelta =
-                                   (struct BTableChangesDelta *)
-                                   g_pBTDelta_Free;
-                               g_pBTDelta_Free +=
-                                   sizeof(struct BTableChangesDelta);
-
-                               p_BTableChangesDelta->ftl_cmd_cnt =
-                                   ftl_cmd_cnt;
-                               p_BTableChangesDelta->BT_Index = i;
-                               p_BTableChangesDelta->BT_Entry_Value = pbt[i];
-                               p_BTableChangesDelta->ValidFields = 0x0C;
-#endif
-                               wResult = PASS;
-                               break;
-                       }
-               }
-
-               if (FAIL == wResult) {
-                       if (FAIL == GLOB_FTL_Garbage_Collection())
-                               break;
-                       else
-                               continue;
-               }
-
-               if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
-                       g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
-                       FTL_Write_IN_Progress_Block_Table_Page();
-               }
-
-               phy_addr = FTL_Get_Physical_Block_Addr(blk_addr);
-
-               for (j = 0; j < RETRY_TIMES; j++) {
-                       if (PASS == wErase) {
-                               if (FAIL == GLOB_FTL_Block_Erase(phy_addr)) {
-                                       MARK_BLOCK_AS_BAD(pbt[blk_node]);
-                                       break;
-                               }
-                       }
-                       if (PASS == FTL_Cache_Update_Block(pData,
-                                                          old_page_addr,
-                                                          phy_addr)) {
-                               wResult = PASS;
-                               break;
-                       } else {
-                               wResult = FAIL;
-                               wErase = PASS;
-                       }
-               }
-       } while (FAIL == wResult);
-
-       FTL_Write_Block_Table(FAIL);
-
-       return wResult;
-}
-
 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
 * Function:     FTL_Get_Page_Num
 * Inputs:       Size in bytes
index 368c30a9d5ffa6da6e7a2065a279fc9825357239..4af83d5318f2705d551cd65fc898645e217e339d 100644 (file)
@@ -219,6 +219,7 @@ int prism2_get_key(struct wiphy *wiphy, struct net_device *dev,
                return -ENOENT;
        params.key_len = len;
        params.key = wlandev->wep_keys[key_index];
+       params.seq_len = 0;
 
        callback(cookie, &params);
 
@@ -735,6 +736,8 @@ struct wiphy *wlan_create_wiphy(struct device *dev, wlandevice_t *wlandev)
        priv->band.n_channels = ARRAY_SIZE(prism2_channels);
        priv->band.bitrates = priv->rates;
        priv->band.n_bitrates = ARRAY_SIZE(prism2_rates);
+       priv->band.band = IEEE80211_BAND_2GHZ;
+       priv->band.ht_cap.ht_supported = false;
        wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
 
        set_wiphy_dev(wiphy, dev);
index 77d4d715a78936b01795667215875adf17781fc2..722c840ac6387456b5825feaf8375195c8bce5ac 100644 (file)
@@ -769,6 +769,7 @@ static int __init zram_init(void)
 free_devices:
        while (dev_id)
                destroy_device(&devices[--dev_id]);
+       kfree(devices);
 unregister:
        unregister_blkdev(zram_major, "zram");
 out:
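
The one-line zram fix above plugs a leak on the init error path: the per-device array was never freed once a later step failed. The usual shape of such an init routine unwinds in reverse order of setup; a generic sketch with illustrative names, not zram's actual code:

/* Generic init/error-unwind sketch; example_dev, create_device() and
 * friends are illustrative, not zram's real symbols. */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>

struct example_dev { int id; };

static struct example_dev *devices;
static int num_devices = 4;
static int example_major;

static int create_device(struct example_dev *dev, int id)
{
        dev->id = id;
        return 0;
}

static void destroy_device(struct example_dev *dev)
{
}

static int __init example_init(void)
{
        int ret, dev_id;

        example_major = register_blkdev(0, "example");
        if (example_major <= 0)
                return -EBUSY;

        devices = kzalloc(num_devices * sizeof(struct example_dev), GFP_KERNEL);
        if (!devices) {
                ret = -ENOMEM;
                goto unregister;
        }

        for (dev_id = 0; dev_id < num_devices; dev_id++) {
                ret = create_device(&devices[dev_id], dev_id);
                if (ret)
                        goto free_devices;
        }
        return 0;

free_devices:
        while (dev_id)
                destroy_device(&devices[--dev_id]);
        kfree(devices);                 /* the step the hunk above adds */
unregister:
        unregister_blkdev(example_major, "example");
        return ret;
}
module_init(example_init);
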
index 593fc5e2d2e6074da4b9238b4136cc791d9dee0a..5af23cc5ea9fe6cf8c9ded25ccea29f172dc5a8e 100644 (file)
@@ -1127,6 +1127,7 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
 {
        struct cxacru_data *instance;
        struct usb_device *usb_dev = interface_to_usbdev(intf);
+       struct usb_host_endpoint *cmd_ep = usb_dev->ep_in[CXACRU_EP_CMD];
        int ret;
 
        /* instance init */
@@ -1171,15 +1172,34 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
                goto fail;
        }
 
-       usb_fill_int_urb(instance->rcv_urb,
+       if (!cmd_ep) {
+               dbg("cxacru_bind: no command endpoint");
+               ret = -ENODEV;
+               goto fail;
+       }
+
+       if ((cmd_ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+                       == USB_ENDPOINT_XFER_INT) {
+               usb_fill_int_urb(instance->rcv_urb,
                        usb_dev, usb_rcvintpipe(usb_dev, CXACRU_EP_CMD),
                        instance->rcv_buf, PAGE_SIZE,
                        cxacru_blocking_completion, &instance->rcv_done, 1);
 
-       usb_fill_int_urb(instance->snd_urb,
+               usb_fill_int_urb(instance->snd_urb,
                        usb_dev, usb_sndintpipe(usb_dev, CXACRU_EP_CMD),
                        instance->snd_buf, PAGE_SIZE,
                        cxacru_blocking_completion, &instance->snd_done, 4);
+       } else {
+               usb_fill_bulk_urb(instance->rcv_urb,
+                       usb_dev, usb_rcvbulkpipe(usb_dev, CXACRU_EP_CMD),
+                       instance->rcv_buf, PAGE_SIZE,
+                       cxacru_blocking_completion, &instance->rcv_done);
+
+               usb_fill_bulk_urb(instance->snd_urb,
+                       usb_dev, usb_sndbulkpipe(usb_dev, CXACRU_EP_CMD),
+                       instance->snd_buf, PAGE_SIZE,
+                       cxacru_blocking_completion, &instance->snd_done);
+       }
 
        mutex_init(&instance->cm_serialize);
 
index 1833b3a71515bb65f9be08bafb2bf329996dc71d..bc62fae0680fe4bd1ae67e5ef6af8e10374a6b49 100644 (file)
@@ -965,7 +965,8 @@ static int acm_probe(struct usb_interface *intf,
        }
 
        if (!buflen) {
-               if (intf->cur_altsetting->endpoint->extralen &&
+               if (intf->cur_altsetting->endpoint &&
+                               intf->cur_altsetting->endpoint->extralen &&
                                intf->cur_altsetting->endpoint->extra) {
                        dev_dbg(&intf->dev,
                                "Seeking extra descriptors on endpoint\n");
@@ -1481,6 +1482,11 @@ static int acm_reset_resume(struct usb_interface *intf)
                USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \
                USB_CDC_ACM_PROTO_VENDOR)
 
+#define SAMSUNG_PCSUITE_ACM_INFO(x) \
+               USB_DEVICE_AND_INTERFACE_INFO(0x04e7, x, \
+               USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \
+               USB_CDC_ACM_PROTO_VENDOR)
+
 /*
  * USB driver structure.
  */
@@ -1591,6 +1597,17 @@ static const struct usb_device_id acm_ids[] = {
        { NOKIA_PCSUITE_ACM_INFO(0x0108), }, /* Nokia 5320 XpressMusic 2G */
        { NOKIA_PCSUITE_ACM_INFO(0x01f5), }, /* Nokia N97, RM-505 */
        { NOKIA_PCSUITE_ACM_INFO(0x02e3), }, /* Nokia 5230, RM-588 */
+       { NOKIA_PCSUITE_ACM_INFO(0x0178), }, /* Nokia E63 */
+       { NOKIA_PCSUITE_ACM_INFO(0x010e), }, /* Nokia E75 */
+       { NOKIA_PCSUITE_ACM_INFO(0x02d9), }, /* Nokia 6760 Slide */
+       { NOKIA_PCSUITE_ACM_INFO(0x01d0), }, /* Nokia E52 */
+       { NOKIA_PCSUITE_ACM_INFO(0x0223), }, /* Nokia E72 */
+       { NOKIA_PCSUITE_ACM_INFO(0x0275), }, /* Nokia X6 */
+       { NOKIA_PCSUITE_ACM_INFO(0x026c), }, /* Nokia N97 Mini */
+       { NOKIA_PCSUITE_ACM_INFO(0x0154), }, /* Nokia 5800 XpressMusic */
+       { NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */
+       { NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */
+       { SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */
 
        /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
 
@@ -1599,6 +1616,10 @@ static const struct usb_device_id acm_ids[] = {
        .driver_info = NOT_A_MODEM,
                },
 
+       /* control interfaces without any protocol set */
+       { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
+               USB_CDC_PROTO_NONE) },
+
        /* control interfaces with various AT-command sets */
        { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
                USB_CDC_ACM_PROTO_AT_V25TER) },
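
SAMSUNG_PCSUITE_ACM_INFO() (like the NOKIA macro it mirrors) is a thin wrapper around USB_DEVICE_AND_INTERFACE_INFO(), so each added table entry matches on the vendor/product pair plus the comm/ACM/vendor-protocol interface triple. Roughly what one of the new entries expands to, shown for illustration only (field values follow from the macro arguments above):

/* Approximate expansion of { SAMSUNG_PCSUITE_ACM_INFO(0x6651), }: match on
 * device IDs plus the comm/ACM/vendor-specific interface info. */
#include <linux/usb.h>
#include <linux/usb/cdc.h>

static const struct usb_device_id samsung_pcsuite_entry = {
        .match_flags        = USB_DEVICE_ID_MATCH_DEVICE |
                              USB_DEVICE_ID_MATCH_INT_INFO,
        .idVendor           = 0x04e7,
        .idProduct          = 0x6651,
        .bInterfaceClass    = USB_CLASS_COMM,
        .bInterfaceSubClass = USB_CDC_SUBCLASS_ACM,
        .bInterfaceProtocol = USB_CDC_ACM_PROTO_VENDOR,
};
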
index fd4c36ea5e4688971afc9c2f32e3668429d0ae63..844683e503830485147910ff16ca035a90194d27 100644 (file)
@@ -1724,6 +1724,15 @@ free_interfaces:
        if (ret)
                goto free_interfaces;
 
+       /* if it's already configured, clear out old state first.
+        * getting rid of old interfaces means unbinding their drivers.
+        */
+       if (dev->state != USB_STATE_ADDRESS)
+               usb_disable_device(dev, 1);     /* Skip ep0 */
+
+       /* Get rid of pending async Set-Config requests for this device */
+       cancel_async_set_config(dev);
+
        /* Make sure we have bandwidth (and available HCD resources) for this
         * configuration.  Remove endpoints from the schedule if we're dropping
         * this configuration to set configuration 0.  After this point, the
@@ -1733,20 +1742,11 @@ free_interfaces:
        mutex_lock(&hcd->bandwidth_mutex);
        ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL);
        if (ret < 0) {
-               usb_autosuspend_device(dev);
                mutex_unlock(&hcd->bandwidth_mutex);
+               usb_autosuspend_device(dev);
                goto free_interfaces;
        }
 
-       /* if it's already configured, clear out old state first.
-        * getting rid of old interfaces means unbinding their drivers.
-        */
-       if (dev->state != USB_STATE_ADDRESS)
-               usb_disable_device(dev, 1);     /* Skip ep0 */
-
-       /* Get rid of pending async Set-Config requests for this device */
-       cancel_async_set_config(dev);
-
        ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
                              USB_REQ_SET_CONFIGURATION, 0, configuration, 0,
                              NULL, 0, USB_CTRL_SET_TIMEOUT);
@@ -1761,8 +1761,8 @@ free_interfaces:
        if (!cp) {
                usb_set_device_state(dev, USB_STATE_ADDRESS);
                usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
-               usb_autosuspend_device(dev);
                mutex_unlock(&hcd->bandwidth_mutex);
+               usb_autosuspend_device(dev);
                goto free_interfaces;
        }
        mutex_unlock(&hcd->bandwidth_mutex);
index e483f80822d27003c7da89f0b472a91cde93b110..1160c55de7f280122038aa21ee063ac1cfa06cb3 100644 (file)
@@ -723,12 +723,12 @@ int usb_string_ids_tab(struct usb_composite_dev *cdev, struct usb_string *str)
 
 /**
  * usb_string_ids_n() - allocate unused string IDs in batch
- * @cdev: the device whose string descriptor IDs are being allocated
+ * @c: the device whose string descriptor IDs are being allocated
  * @n: number of string IDs to allocate
  * Context: single threaded during gadget setup
  *
  * Returns the first requested ID.  This ID and next @n-1 IDs are now
- * valid IDs.  At least providind that @n is non zore because if it
+ * valid IDs.  At least provided that @n is non-zero because if it
  * is, returns last requested ID which is now very useful information.
  *
  * @usb_string_ids_n() is called from bind() callbacks to allocate
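
In practice usb_string_ids_n() is used from a composite function's bind() path roughly as below; this is a hedged sketch, and the strings_dev table and STR_* indices are purely illustrative:

        /* sketch: reserve three consecutive string descriptor IDs in bind() */
        id = usb_string_ids_n(cdev, 3);
        if (id < 0)
                return id;                      /* e.g. -ENODEV when exhausted */
        strings_dev[STR_MANUFACTURER].id = id;
        strings_dev[STR_PRODUCT].id      = id + 1;
        strings_dev[STR_SERIAL].id       = id + 2;
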
index 166bf71fd3482252e00fde5455fc529e8f22337b..e03058fe23cbc91394474e1130aed8fcaa6c5f84 100644 (file)
@@ -1609,6 +1609,7 @@ static int __init m66592_probe(struct platform_device *pdev)
        /* initialize ucd */
        m66592 = kzalloc(sizeof(struct m66592), GFP_KERNEL);
        if (m66592 == NULL) {
+               ret = -ENOMEM;
                pr_err("kzalloc error\n");
                goto clean_up;
        }
index 70a817842755484f5ee0b19e51db505b32532bf5..2456ccd9965e3432ab4fd32a33052c104e725698 100644 (file)
@@ -1557,6 +1557,7 @@ static int __init r8a66597_probe(struct platform_device *pdev)
        /* initialize ucd */
        r8a66597 = kzalloc(sizeof(struct r8a66597), GFP_KERNEL);
        if (r8a66597 == NULL) {
+               ret = -ENOMEM;
                printk(KERN_ERR "kzalloc error\n");
                goto clean_up;
        }
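
Both probe fixes above plug the same hole: with a goto-based cleanup path that returns a shared ret variable, every failure site must set the error code first, otherwise a kzalloc() failure jumps to clean_up with whatever value ret happened to hold and can be reported as success. A compact sketch of the idiom, with hypothetical names:

        static int example_probe(struct platform_device *pdev)
        {
                struct example_ucd *ucd;
                int ret = 0;

                ucd = kzalloc(sizeof(*ucd), GFP_KERNEL);
                if (ucd == NULL) {
                        ret = -ENOMEM;          /* set the error before jumping */
                        goto clean_up;
                }
                /* ... later steps may also fail and 'goto clean_up' ... */
                return 0;

        clean_up:
                kfree(ucd);                     /* kfree(NULL) is a no-op */
                return ret;
        }
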
index 020fa5a25fda8fe7bb6c1ca23f28f90d120bd537..972d5ddd1e18043f23281b3282681c6c9eb0ea62 100644 (file)
@@ -293,9 +293,13 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
        /* mandatory */
        case OID_GEN_VENDOR_DESCRIPTION:
                pr_debug("%s: OID_GEN_VENDOR_DESCRIPTION\n", __func__);
-               length = strlen (rndis_per_dev_params [configNr].vendorDescr);
-               memcpy (outbuf,
-                       rndis_per_dev_params [configNr].vendorDescr, length);
+               if ( rndis_per_dev_params [configNr].vendorDescr ) {
+                       length = strlen (rndis_per_dev_params [configNr].vendorDescr);
+                       memcpy (outbuf,
+                               rndis_per_dev_params [configNr].vendorDescr, length);
+               } else {
+                       outbuf[0] = 0;
+               }
                retval = 0;
                break;
 
@@ -1148,7 +1152,7 @@ static struct proc_dir_entry *rndis_connect_state [RNDIS_MAX_CONFIGS];
 #endif /* CONFIG_USB_GADGET_DEBUG_FILES */
 
 
-int __init rndis_init (void)
+int rndis_init(void)
 {
        u8 i;
 
index c236aaa9dcd174db90b7991f719bdda860658b9d..907c33008118b1e880b87fc52166237a2e1152e6 100644 (file)
@@ -262,7 +262,7 @@ int  rndis_signal_disconnect (int configNr);
 int  rndis_state (int configNr);
 extern void rndis_set_host_mac (int configNr, const u8 *addr);
 
-int __devinit rndis_init (void);
+int rndis_init(void);
 void rndis_exit (void);
 
 #endif  /* _LINUX_RNDIS_H */
index 521ebed0118d0fbdc8228ec6d9506abc17fe99e1..a229744a8c7dfa4d5f284165800a455d8f687b26 100644 (file)
@@ -12,8 +12,6 @@
  * published by the Free Software Foundation.
 */
 
-#define DEBUG
-
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/spinlock.h>
index 2dcffdac86d29d061a150706cf5bbfc4d95f8064..5e807f083bc820e1251fbb7a08e8c023ed462edd 100644 (file)
@@ -94,7 +94,7 @@ uvc_v4l2_set_format(struct uvc_video *video, struct v4l2_format *fmt)
                        break;
        }
 
-       if (format == NULL || format->fcc != fmt->fmt.pix.pixelformat) {
+       if (i == ARRAY_SIZE(uvc_formats)) {
                printk(KERN_INFO "Unsupported format 0x%08x.\n",
                        fmt->fmt.pix.pixelformat);
                return -EINVAL;
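
The gadget UVC change above detects "format not found" by comparing the loop counter against ARRAY_SIZE() instead of inspecting the pointer and fcc field left over from the loop; checking the counter is the unambiguous post-loop test for a linear search. A small self-contained illustration in plain C:

        #include <stdio.h>

        #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

        static const unsigned int supported[] = { 10, 20, 30 };

        static int find_index(unsigned int want)
        {
                unsigned int i;

                for (i = 0; i < ARRAY_SIZE(supported); i++)
                        if (supported[i] == want)
                                break;
                if (i == ARRAY_SIZE(supported))  /* loop fell off the end */
                        return -1;
                return (int)i;
        }

        int main(void)
        {
                printf("%d %d\n", find_index(20), find_index(99)); /* 1 -1 */
                return 0;
        }
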
index 335ee699fd85bde5637fc6991809ddfdab8f506d..ba52be473027a82dd20bcb20aa724f4c4b472f78 100644 (file)
@@ -192,17 +192,19 @@ ehci_hcd_ppc_of_probe(struct platform_device *op, const struct of_device_id *mat
        }
 
        rv = usb_add_hcd(hcd, irq, 0);
-       if (rv == 0)
-               return 0;
+       if (rv)
+               goto err_ehci;
+
+       return 0;
 
+err_ehci:
+       if (ehci->has_amcc_usb23)
+               iounmap(ehci->ohci_hcctrl_reg);
        iounmap(hcd->regs);
 err_ioremap:
        irq_dispose_mapping(irq);
 err_irq:
        release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
-
-       if (ehci->has_amcc_usb23)
-               iounmap(ehci->ohci_hcctrl_reg);
 err_rmr:
        usb_put_hcd(hcd);
 
index d1a3dfc9a40873ea46df2a81668b065d105fb9cc..bdba8c5d844aa4ba099e128e1ed5f765d2ae349b 100644 (file)
@@ -829,6 +829,7 @@ static void enqueue_an_ATL_packet(struct usb_hcd *hcd, struct isp1760_qh *qh,
         * almost immediately. With ISP1761, this register requires a delay of
         * 195ns between a write and subsequent read (see section 15.1.1.3).
         */
+       mmiowb();
        ndelay(195);
        skip_map = isp1760_readl(hcd->regs + HC_ATL_PTD_SKIPMAP_REG);
 
@@ -870,6 +871,7 @@ static void enqueue_an_INT_packet(struct usb_hcd *hcd, struct isp1760_qh *qh,
         * almost immediately. With ISP1761, this register requires a delay of
         * 195ns between a write and subsequent read (see section 15.1.1.3).
         */
+       mmiowb();
        ndelay(195);
        skip_map = isp1760_readl(hcd->regs + HC_INT_PTD_SKIPMAP_REG);
 
index bc3f4f427065901059737ca960bd03d0fa75a8fc..48e60d166ff04c3b749607ebab77f1b797321875 100644 (file)
@@ -131,7 +131,7 @@ static void next_trb(struct xhci_hcd *xhci,
                *seg = (*seg)->next;
                *trb = ((*seg)->trbs);
        } else {
-               *trb = (*trb)++;
+               (*trb)++;
        }
 }
 
@@ -1551,6 +1551,10 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
        /* calc actual length */
        if (ep->skip) {
                td->urb->iso_frame_desc[idx].actual_length = 0;
+               /* Update ring dequeue pointer */
+               while (ep_ring->dequeue != td->last_trb)
+                       inc_deq(xhci, ep_ring, false);
+               inc_deq(xhci, ep_ring, false);
                return finish_td(xhci, td, event_trb, event, ep, status, true);
        }
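
Earlier in this file's diff, next_trb() drops the form *trb = (*trb)++; in favour of plain (*trb)++;. The old expression modifies *trb twice in one expression without a sequence point (once through the post-increment, once through the assignment), which is undefined behaviour in C, while all that is wanted is to advance the caller's ring pointer by one TRB. A tiny standalone example of the corrected pattern:

        #include <stdio.h>

        static void advance(int **cursor)
        {
                (*cursor)++;            /* step the caller's pointer to the next element */
        }

        int main(void)
        {
                int ring[4] = { 1, 2, 3, 4 };
                int *trb = ring;

                advance(&trb);
                printf("%d\n", *trb);   /* prints 2 */
                return 0;
        }
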
 
index d240de097c6211afd6ff2ef1399c1b2add9ca751..801324af9470a08d5b42011bfa397a03d97cb99d 100644 (file)
@@ -439,7 +439,7 @@ static ssize_t adu_read(struct file *file, __user char *buffer, size_t count,
                        /* drain secondary buffer */
                        int amount = bytes_to_read < data_in_secondary ? bytes_to_read : data_in_secondary;
                        i = copy_to_user(buffer, dev->read_buffer_secondary+dev->secondary_head, amount);
-                       if (i < 0) {
+                       if (i) {
                                retval = -EFAULT;
                                goto exit;
                        }
index 2de49c8887c5f772b0f166cc7b98354f4ad902ea..bc88c79875a146712cae5fcb7461325501f20ad7 100644 (file)
@@ -542,7 +542,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
                        retval = io_res;
                else {
                        io_res = copy_to_user(user_buffer, buffer, dev->report_size);
-                       if (io_res < 0)
+                       if (io_res)
                                retval = -EFAULT;
                }
                break;
@@ -574,7 +574,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
                        }
                        io_res = copy_to_user((struct iowarrior_info __user *)arg, &info,
                                         sizeof(struct iowarrior_info));
-                       if (io_res < 0)
+                       if (io_res)
                                retval = -EFAULT;
                        break;
                }
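
Both misc-driver fixes above correct the same misreading of copy_to_user(): it returns the number of bytes that could not be copied (an unsigned count, zero on success), never a negative errno, so a test for a value below zero can never fire and the -EFAULT path was dead code. The usual idiom looks like the following sketch, with hypothetical buffer names:

        /* sketch: copy_to_user() returns the number of bytes NOT copied */
        if (copy_to_user(user_buffer, kernel_buffer, len))
                return -EFAULT;         /* something was left uncopied */
        return len;                     /* success */
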
index 0e8888588d4e01db602762276d0561d15739ed27..05aaac1c3861e5be2f8d30c4311e7faa5a2c40a8 100644 (file)
@@ -550,6 +550,7 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev)
        struct twl4030_usb_data *pdata = pdev->dev.platform_data;
        struct twl4030_usb      *twl;
        int                     status, err;
+       u8                      pwr;
 
        if (!pdata) {
                dev_dbg(&pdev->dev, "platform_data not available\n");
@@ -568,7 +569,10 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev)
        twl->otg.set_peripheral = twl4030_set_peripheral;
        twl->otg.set_suspend    = twl4030_set_suspend;
        twl->usb_mode           = pdata->usb_mode;
-       twl->asleep             = 1;
+
+       pwr = twl4030_usb_read(twl, PHY_PWR_CTRL);
+
+       twl->asleep             = (pwr & PHY_PWR_PHYPWD);
 
        /* init spinlock for workqueue */
        spin_lock_init(&twl->lock);
index 2bef4415c19c6627ef2555ab9a7f462bb8ed71f2..4f1744c5871fa74443bdae1800e8376b083e561d 100644 (file)
@@ -56,6 +56,7 @@ static int debug;
 static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */
        { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
+       { USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
        { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
        { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
        { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
@@ -88,6 +89,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x8149) }, /* West Mountain Radio Computerized Battery Analyzer */
        { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
        { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
+       { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
        { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
        { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
        { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
@@ -109,6 +111,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
        { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
        { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
+       { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
        { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
        { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
        { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
@@ -122,14 +125,14 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
        { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
        { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
-       { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
-       { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
-       { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
-       { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
        { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */
        { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */
        { USB_DEVICE(0x16DC, 0x0012) }, /* W-IE-NE-R Plein & Baus GmbH MPOD Multi Channel Power Supply */
        { USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */
+       { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
+       { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
+       { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
+       { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
        { } /* Terminating Entry */
 };
 
@@ -222,8 +225,8 @@ static struct usb_serial_driver cp210x_device = {
 #define BITS_STOP_2            0x0002
 
 /* CP210X_SET_BREAK */
-#define BREAK_ON               0x0000
-#define BREAK_OFF              0x0001
+#define BREAK_ON               0x0001
+#define BREAK_OFF              0x0000
 
 /* CP210X_(SET_MHS|GET_MDMSTS) */
 #define CONTROL_DTR            0x0001
index eb12d9b096b4f54f803b9ee34e9302e694a799b8..97cc87d654ce3258a931a8bfbd0bcd36a2d5622b 100644 (file)
@@ -180,6 +180,7 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
        { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
+       { USB_DEVICE(FTDI_VID, FTDI_LENZ_LIUSB_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_XF_547_PID) },
@@ -750,6 +751,16 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH4_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(FTDI_VID, SEGWAY_RMP200_PID) },
+       { USB_DEVICE(IONICS_VID, IONICS_PLUGCOMPUTER_PID),
+               .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+       { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_24_MASTER_WING_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_PC_WING_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_USB_DMX_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MIDI_TIMECODE_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MINI_WING_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MAXI_WING_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MEDIA_WING_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_WING_PID) },
        { },                                    /* Optional parameter entry */
        { }                                     /* Terminating entry */
 };
@@ -1376,7 +1387,7 @@ static void ftdi_set_max_packet_size(struct usb_serial_port *port)
        }
 
        /* set max packet size based on descriptor */
-       priv->max_packet_size = ep_desc->wMaxPacketSize;
+       priv->max_packet_size = le16_to_cpu(ep_desc->wMaxPacketSize);
 
        dev_info(&udev->dev, "Setting MaxPacketSize %d\n", priv->max_packet_size);
 }
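
The max-packet-size fix above reflects that multi-byte USB descriptor fields such as wMaxPacketSize are little-endian on the wire, so they need le16_to_cpu() before use; without it the value comes out byte-swapped on big-endian hosts. A short sketch of the conversion, assuming the usual struct usb_endpoint_descriptor types:

        /* sketch: wire-format descriptor fields are __le16 */
        u16 max_packet = le16_to_cpu(ep_desc->wMaxPacketSize);

        priv->max_packet_size = max_packet;     /* same value on LE and BE hosts */
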
@@ -1831,7 +1842,7 @@ static int ftdi_process_packet(struct tty_struct *tty,
 
        if (port->port.console && port->sysrq) {
                for (i = 0; i < len; i++, ch++) {
-                       if (!usb_serial_handle_sysrq_char(tty, port, *ch))
+                       if (!usb_serial_handle_sysrq_char(port, *ch))
                                tty_insert_flip_char(tty, *ch, flag);
                }
        } else {
index 6e612c52e763a0095bcb4e0f377cf02eaca8b1b8..15a4583775ad236a359c3dec0c02c0aa7f5f7f4e 100644 (file)
 /* Propox devices */
 #define FTDI_PROPOX_JTAGCABLEII_PID    0xD738
 
+/* Lenz LI-USB Computer Interface. */
+#define FTDI_LENZ_LIUSB_PID    0xD780
+
 /*
  * Xsens Technologies BV products (http://www.xsens.com).
  */
 #define FTDI_NDI_FUTURE_3_PID          0xDA73  /* NDI future device #3 */
 #define FTDI_NDI_AURORA_SCU_PID                0xDA74  /* NDI Aurora SCU */
 
+/*
+ * ChamSys Limited (www.chamsys.co.uk) USB wing/interface product IDs
+ */
+#define FTDI_CHAMSYS_24_MASTER_WING_PID        0xDAF8
+#define FTDI_CHAMSYS_PC_WING_PID       0xDAF9
+#define FTDI_CHAMSYS_USB_DMX_PID       0xDAFA
+#define FTDI_CHAMSYS_MIDI_TIMECODE_PID 0xDAFB
+#define FTDI_CHAMSYS_MINI_WING_PID     0xDAFC
+#define FTDI_CHAMSYS_MAXI_WING_PID     0xDAFD
+#define FTDI_CHAMSYS_MEDIA_WING_PID    0xDAFE
+#define FTDI_CHAMSYS_WING_PID  0xDAFF
+
 /*
  * Westrex International devices submitted by Cory Lee
  */
 #define ALTI2_VID      0x1BC9
 #define ALTI2_N3_PID   0x6001  /* Neptune 3 */
 
+/*
+ * Ionics PlugComputer
+ */
+#define IONICS_VID                     0x1c0c
+#define IONICS_PLUGCOMPUTER_PID                0x0102
+
 /*
  * Dresden Elektronik Sensor Terminal Board
  */
index ca92f67747cc2f2c4b1f5a411bc3763be9a0b009..e6833e216fc9fa4e3628813d57f14891835caf6a 100644 (file)
@@ -343,7 +343,7 @@ void usb_serial_generic_process_read_urb(struct urb *urb)
                tty_insert_flip_string(tty, ch, urb->actual_length);
        else {
                for (i = 0; i < urb->actual_length; i++, ch++) {
-                       if (!usb_serial_handle_sysrq_char(tty, port, *ch))
+                       if (!usb_serial_handle_sysrq_char(port, *ch))
                                tty_insert_flip_char(tty, *ch, TTY_NORMAL);
                }
        }
@@ -448,12 +448,11 @@ void usb_serial_generic_unthrottle(struct tty_struct *tty)
 EXPORT_SYMBOL_GPL(usb_serial_generic_unthrottle);
 
 #ifdef CONFIG_MAGIC_SYSRQ
-int usb_serial_handle_sysrq_char(struct tty_struct *tty,
-                       struct usb_serial_port *port, unsigned int ch)
+int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch)
 {
        if (port->sysrq && port->port.console) {
                if (ch && time_before(jiffies, port->sysrq)) {
-                       handle_sysrq(ch, tty);
+                       handle_sysrq(ch);
                        port->sysrq = 0;
                        return 1;
                }
@@ -462,8 +461,7 @@ int usb_serial_handle_sysrq_char(struct tty_struct *tty,
        return 0;
 }
 #else
-int usb_serial_handle_sysrq_char(struct tty_struct *tty,
-                       struct usb_serial_port *port, unsigned int ch)
+int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch)
 {
        return 0;
 }
@@ -518,6 +516,7 @@ void usb_serial_generic_disconnect(struct usb_serial *serial)
        for (i = 0; i < serial->num_ports; ++i)
                generic_cleanup(serial->port[i]);
 }
+EXPORT_SYMBOL_GPL(usb_serial_generic_disconnect);
 
 void usb_serial_generic_release(struct usb_serial *serial)
 {
index dc47f986df57f739b18ceb121dc8f19b0acd8c23..a7cfc59529377b16f084e6bd6e2bf118d4cc9cb5 100644 (file)
@@ -1151,7 +1151,7 @@ static int download_fw(struct edgeport_serial *serial)
 
                        /* Check if we have an old version in the I2C and
                           update if necessary */
-                       if (download_cur_ver != download_new_ver) {
+                       if (download_cur_ver < download_new_ver) {
                                dbg("%s - Update I2C dld from %d.%d to %d.%d",
                                    __func__,
                                    firmware_version->Ver_Major,
@@ -1284,7 +1284,7 @@ static int download_fw(struct edgeport_serial *serial)
                                kfree(header);
                                kfree(rom_desc);
                                kfree(ti_manuf_desc);
-                               return status;
+                               return -EINVAL;
                        }
 
                        /* Update I2C with type 0xf2 record with correct
index 585b7e6637405981e5019790bbdb0700109f87d0..1c9b6e9b2386e5032da1e3b8a095525222dc8cb5 100644 (file)
  * by making a change here, in moschip_port_id_table, and in
  * moschip_id_table_combined
  */
-#define USB_VENDOR_ID_BANDB             0x0856
-#define BANDB_DEVICE_ID_USO9ML2_2      0xAC22
-#define BANDB_DEVICE_ID_USO9ML2_4      0xAC24
-#define BANDB_DEVICE_ID_US9ML2_2       0xAC29
-#define BANDB_DEVICE_ID_US9ML2_4       0xAC30
-#define BANDB_DEVICE_ID_USPTL4_2       0xAC31
-#define BANDB_DEVICE_ID_USPTL4_4       0xAC32
-#define BANDB_DEVICE_ID_USOPTL4_2      0xAC42
-#define BANDB_DEVICE_ID_USOPTL4_4      0xAC44
-#define BANDB_DEVICE_ID_USOPTL2_4      0xAC24
+#define USB_VENDOR_ID_BANDB              0x0856
+#define BANDB_DEVICE_ID_USO9ML2_2        0xAC22
+#define BANDB_DEVICE_ID_USO9ML2_2P       0xBC00
+#define BANDB_DEVICE_ID_USO9ML2_4        0xAC24
+#define BANDB_DEVICE_ID_USO9ML2_4P       0xBC01
+#define BANDB_DEVICE_ID_US9ML2_2         0xAC29
+#define BANDB_DEVICE_ID_US9ML2_4         0xAC30
+#define BANDB_DEVICE_ID_USPTL4_2         0xAC31
+#define BANDB_DEVICE_ID_USPTL4_4         0xAC32
+#define BANDB_DEVICE_ID_USOPTL4_2        0xAC42
+#define BANDB_DEVICE_ID_USOPTL4_2P       0xBC02
+#define BANDB_DEVICE_ID_USOPTL4_4        0xAC44
+#define BANDB_DEVICE_ID_USOPTL4_4P       0xBC03
+#define BANDB_DEVICE_ID_USOPTL2_4        0xAC24
 
 /* This driver also supports
  * ATEN UC2324 device using Moschip MCS7840
@@ -184,13 +188,17 @@ static const struct usb_device_id moschip_port_id_table[] = {
        {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
        {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
+       {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)},
+       {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
+       {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
+       {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
        {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
        {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
@@ -201,13 +209,17 @@ static const struct usb_device_id moschip_id_table_combined[] __devinitconst = {
        {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
        {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
+       {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)},
+       {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
+       {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
+       {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
        {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
        {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
index a6b207c84917425db5f4c9ea1a1da075a6798002..1f00f243c26cf5e1825632e40fc9c2a60287f9e3 100644 (file)
@@ -25,6 +25,7 @@ static int debug;
 
 static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x0a99, 0x0001) }, /* Talon Technology device */
+       { USB_DEVICE(0x0df7, 0x0900) }, /* Mobile Action i-gotU */
        { },
 };
 MODULE_DEVICE_TABLE(usb, id_table);
index 9fc6ea2c681fae8ce6a43404109ca23c3177a310..c46911af282f01c82eb8cc34e312db994173d38c 100644 (file)
@@ -164,6 +164,14 @@ static void option_instat_callback(struct urb *urb);
 #define YISO_VENDOR_ID                         0x0EAB
 #define YISO_PRODUCT_U893                      0xC893
 
+/*
+ * NOVATEL WIRELESS PRODUCTS
+ *
+ * Note from Novatel Wireless:
+ * If your Novatel modem does not work on linux, don't
+ * change the option module, but check our website. If
+ * that does not help, contact ddeschepper@nvtl.com
+*/
 /* MERLIN EVDO PRODUCTS */
 #define NOVATELWIRELESS_PRODUCT_V640           0x1100
 #define NOVATELWIRELESS_PRODUCT_V620           0x1110
@@ -185,24 +193,39 @@ static void option_instat_callback(struct urb *urb);
 #define NOVATELWIRELESS_PRODUCT_EU730          0x2400
 #define NOVATELWIRELESS_PRODUCT_EU740          0x2410
 #define NOVATELWIRELESS_PRODUCT_EU870D         0x2420
-
 /* OVATION PRODUCTS */
 #define NOVATELWIRELESS_PRODUCT_MC727          0x4100
 #define NOVATELWIRELESS_PRODUCT_MC950D         0x4400
-#define NOVATELWIRELESS_PRODUCT_U727           0x5010
-#define NOVATELWIRELESS_PRODUCT_MC727_NEW      0x5100
-#define NOVATELWIRELESS_PRODUCT_MC760          0x6000
+/*
+ * Note from Novatel Wireless:
+ * All PID in the 5xxx range are currently reserved for
+ * auto-install CDROMs, and should not be added to this
+ * module.
+ *
+ * #define NOVATELWIRELESS_PRODUCT_U727                0x5010
+ * #define NOVATELWIRELESS_PRODUCT_MC727_NEW   0x5100
+*/
 #define NOVATELWIRELESS_PRODUCT_OVMC760                0x6002
-
-/* FUTURE NOVATEL PRODUCTS */
-#define NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED 0X6001
-#define NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED 0X7000
-#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED 0X7001
-#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED        0X8000
-#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED        0X8001
-#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED        0X9000
-#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED        0X9001
-#define NOVATELWIRELESS_PRODUCT_GLOBAL         0XA001
+#define NOVATELWIRELESS_PRODUCT_MC780          0x6010
+#define NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED 0x6000
+#define NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED 0x6001
+#define NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED 0x7000
+#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED 0x7001
+#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED3        0x7003
+#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED4        0x7004
+#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED5        0x7005
+#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED6        0x7006
+#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED7        0x7007
+#define NOVATELWIRELESS_PRODUCT_MC996D         0x7030
+#define NOVATELWIRELESS_PRODUCT_MF3470         0x7041
+#define NOVATELWIRELESS_PRODUCT_MC547          0x7042
+#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED        0x8000
+#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED        0x8001
+#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED        0x9000
+#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED        0x9001
+#define NOVATELWIRELESS_PRODUCT_G1             0xA001
+#define NOVATELWIRELESS_PRODUCT_G1_M           0xA002
+#define NOVATELWIRELESS_PRODUCT_G2             0xA010
 
 /* AMOI PRODUCTS */
 #define AMOI_VENDOR_ID                         0x1614
@@ -365,6 +388,10 @@ static void option_instat_callback(struct urb *urb);
 #define OLIVETTI_VENDOR_ID                     0x0b3c
 #define OLIVETTI_PRODUCT_OLICARD100            0xc000
 
+/* Celot products */
+#define CELOT_VENDOR_ID                                0x211f
+#define CELOT_PRODUCT_CT680M                   0x6801
+
 /* some devices interfaces need special handling due to a number of reasons */
 enum option_blacklist_reason {
                OPTION_BLACKLIST_NONE = 0,
@@ -486,36 +513,44 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) },
        { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC) },
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, /* Novatel Merlin V640/XV620 */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, /* Novatel Merlin V620/S620 */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) }, /* Novatel Merlin EX720/V740/X720 */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V720) }, /* Novatel Merlin V720/S720/PC720 */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U730) }, /* Novatel U730/U740 (VF version) */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U740) }, /* Novatel U740 */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U870) }, /* Novatel U870 */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_XU870) }, /* Novatel Merlin XU870 HSDPA/3G */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_X950D) }, /* Novatel X950D */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EV620) }, /* Novatel EV620/ES620 CDMA/EV-DO */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES720) }, /* Novatel ES620/ES720/U720/USB720 */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E725) }, /* Novatel E725/E726 */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES620) }, /* Novatel Merlin ES620 SM Bus */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU730) }, /* Novatel EU730 and Vodafone EU740 */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU740) }, /* Novatel non-Vodafone EU740 */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727_NEW) }, /* Novatel MC727/U727/USB727 refresh */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U727) }, /* Novatel MC727/U727/USB727 */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC760) }, /* Novatel MC760/U760/USB760 */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) }, /* Novatel Ovation MC760 */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) }, /* Novatel HSPA product */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) }, /* Novatel EVDO Embedded product */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) }, /* Novatel HSPA Embedded product */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED) }, /* Novatel EVDO product */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED) }, /* Novatel HSPA product */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED) }, /* Novatel EVDO Embedded product */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED) }, /* Novatel HSPA Embedded product */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_GLOBAL) }, /* Novatel Global product */
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V720) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U730) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U740) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U870) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_XU870) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_X950D) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EV620) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES720) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E725) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES620) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU730) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU740) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC780) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED3) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED4) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED5) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED6) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED7) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC996D) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MF3470) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC547) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1_M) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) },
 
        { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
        { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
@@ -887,10 +922,9 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) },
        { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)},
        { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)},
-
        { USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) },
-
        { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
+       { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
        { } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
index 6b60018222790bed2ec8f77b637a19bfd08caa2a..8ae4c6cbc38a04ea250d43aad68aa43081098245 100644 (file)
@@ -86,6 +86,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) },
        { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
        { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
+       { USB_DEVICE(ZEAGLE_VENDOR_ID, ZEAGLE_N2ITION3_PRODUCT_ID) },
        { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
        { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
        { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
@@ -788,7 +789,7 @@ static void pl2303_process_read_urb(struct urb *urb)
 
        if (port->port.console && port->sysrq) {
                for (i = 0; i < urb->actual_length; ++i)
-                       if (!usb_serial_handle_sysrq_char(tty, port, data[i]))
+                       if (!usb_serial_handle_sysrq_char(port, data[i]))
                                tty_insert_flip_char(tty, data[i], tty_flag);
        } else {
                tty_insert_flip_string_fixed_flag(tty, data, tty_flag,
index a871645389dd365764485481bc8ba773c6c68f37..43eb9bdad422c91e4290f2204ee2f178d393b002 100644 (file)
 #define CRESSI_VENDOR_ID       0x04b8
 #define CRESSI_EDY_PRODUCT_ID  0x0521
 
+/* Zeagle dive computer interface */
+#define ZEAGLE_VENDOR_ID       0x04b8
+#define ZEAGLE_N2ITION3_PRODUCT_ID     0x0522
+
 /* Sony, USB data cable for CMD-Jxx mobile phones */
 #define SONY_VENDOR_ID         0x054c
 #define SONY_QN3USB_PRODUCT_ID 0x0437
index 6e82d4f54bc87c167e6052119f052ca35bbfcf14..e986002b3844c07d24b292344258286434820d8b 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/serial.h>
 #include <linux/usb.h>
 #include <linux/usb/serial.h>
+#include <linux/serial_reg.h>
 #include <linux/uaccess.h>
 
 #define QT_OPEN_CLOSE_CHANNEL       0xca
 #define QT_HW_FLOW_CONTROL_MASK     0xc5
 #define QT_SW_FLOW_CONTROL_MASK     0xc6
 
-#define MODEM_CTL_REGISTER         0x04
-#define MODEM_STATUS_REGISTER      0x06
-
-
-#define SERIAL_LSR_OE       0x02
-#define SERIAL_LSR_PE       0x04
-#define SERIAL_LSR_FE       0x08
-#define SERIAL_LSR_BI       0x10
-
-#define SERIAL_LSR_TEMT     0x40
-
-#define  SERIAL_MCR_DTR             0x01
-#define  SERIAL_MCR_RTS             0x02
-#define  SERIAL_MCR_LOOP            0x10
-
-#define  SERIAL_MSR_CTS             0x10
-#define  SERIAL_MSR_CD              0x80
-#define  SERIAL_MSR_RI              0x40
-#define  SERIAL_MSR_DSR             0x20
 #define  SERIAL_MSR_MASK            0xf0
 
-#define  SERIAL_CRTSCTS ((SERIAL_MCR_RTS << 8) | SERIAL_MSR_CTS)
+#define  SERIAL_CRTSCTS ((UART_MCR_RTS << 8) | UART_MSR_CTS)
 
-#define  SERIAL_8_DATA              0x03
-#define  SERIAL_7_DATA              0x02
-#define  SERIAL_6_DATA              0x01
-#define  SERIAL_5_DATA              0x00
-
-#define  SERIAL_ODD_PARITY          0X08
-#define  SERIAL_EVEN_PARITY         0X18
+#define  SERIAL_EVEN_PARITY         (UART_LCR_PARITY | UART_LCR_EPAR)
 
 #define  MAX_BAUD_RATE              460800
 
@@ -70,7 +46,7 @@
 #define FULLPWRBIT          0x00000080
 #define NEXT_BOARD_POWER_BIT        0x00000004
 
-static int debug = 1;
+static int debug;
 
 /* Version Information */
 #define DRIVER_VERSION "v0.1"
@@ -99,10 +75,12 @@ static struct usb_driver ssu100_driver = {
 };
 
 struct ssu100_port_private {
+       spinlock_t status_lock;
        u8 shadowLSR;
        u8 shadowMSR;
        wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */
        unsigned short max_packet_size;
+       struct async_icount icount;
 };
 
 static void ssu100_release(struct usb_serial *serial)
@@ -150,9 +128,10 @@ static inline int ssu100_getregister(struct usb_device *dev,
 
 static inline int ssu100_setregister(struct usb_device *dev,
                                     unsigned short uart,
+                                    unsigned short reg,
                                     u16 data)
 {
-       u16 value = (data << 8) | MODEM_CTL_REGISTER;
+       u16 value = (data << 8) | reg;
 
        return usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
                               QT_SET_GET_REGISTER, 0x40, value, uart,
@@ -178,11 +157,11 @@ static inline int update_mctrl(struct usb_device *dev, unsigned int set,
        clear &= ~set;  /* 'set' takes precedence over 'clear' */
        urb_value = 0;
        if (set & TIOCM_DTR)
-               urb_value |= SERIAL_MCR_DTR;
+               urb_value |= UART_MCR_DTR;
        if (set & TIOCM_RTS)
-               urb_value |= SERIAL_MCR_RTS;
+               urb_value |= UART_MCR_RTS;
 
-       result = ssu100_setregister(dev, 0, urb_value);
+       result = ssu100_setregister(dev, 0, UART_MCR, urb_value);
        if (result < 0)
                dbg("%s Error from MODEM_CTRL urb", __func__);
 
@@ -264,24 +243,24 @@ static void ssu100_set_termios(struct tty_struct *tty,
 
        if (cflag & PARENB) {
                if (cflag & PARODD)
-                       urb_value |= SERIAL_ODD_PARITY;
+                       urb_value |= UART_LCR_PARITY;
                else
                        urb_value |= SERIAL_EVEN_PARITY;
        }
 
        switch (cflag & CSIZE) {
        case CS5:
-               urb_value |= SERIAL_5_DATA;
+               urb_value |= UART_LCR_WLEN5;
                break;
        case CS6:
-               urb_value |= SERIAL_6_DATA;
+               urb_value |= UART_LCR_WLEN6;
                break;
        case CS7:
-               urb_value |= SERIAL_7_DATA;
+               urb_value |= UART_LCR_WLEN7;
                break;
        default:
        case CS8:
-               urb_value |= SERIAL_8_DATA;
+               urb_value |= UART_LCR_WLEN8;
                break;
        }
 
@@ -333,6 +312,7 @@ static int ssu100_open(struct tty_struct *tty, struct usb_serial_port *port)
        struct ssu100_port_private *priv = usb_get_serial_port_data(port);
        u8 *data;
        int result;
+       unsigned long flags;
 
        dbg("%s - port %d", __func__, port->number);
 
@@ -350,11 +330,10 @@ static int ssu100_open(struct tty_struct *tty, struct usb_serial_port *port)
                return result;
        }
 
-       priv->shadowLSR = data[0]  & (SERIAL_LSR_OE | SERIAL_LSR_PE |
-                                     SERIAL_LSR_FE | SERIAL_LSR_BI);
-
-       priv->shadowMSR = data[1]  & (SERIAL_MSR_CTS | SERIAL_MSR_DSR |
-                                     SERIAL_MSR_RI | SERIAL_MSR_CD);
+       spin_lock_irqsave(&priv->status_lock, flags);
+       priv->shadowLSR = data[0];
+       priv->shadowMSR = data[1];
+       spin_unlock_irqrestore(&priv->status_lock, flags);
 
        kfree(data);
 
@@ -398,11 +377,51 @@ static int get_serial_info(struct usb_serial_port *port,
        return 0;
 }
 
+static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
+{
+       struct ssu100_port_private *priv = usb_get_serial_port_data(port);
+       struct async_icount prev, cur;
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->status_lock, flags);
+       prev = priv->icount;
+       spin_unlock_irqrestore(&priv->status_lock, flags);
+
+       while (1) {
+               wait_event_interruptible(priv->delta_msr_wait,
+                                        ((priv->icount.rng != prev.rng) ||
+                                         (priv->icount.dsr != prev.dsr) ||
+                                         (priv->icount.dcd != prev.dcd) ||
+                                         (priv->icount.cts != prev.cts)));
+
+               if (signal_pending(current))
+                       return -ERESTARTSYS;
+
+               spin_lock_irqsave(&priv->status_lock, flags);
+               cur = priv->icount;
+               spin_unlock_irqrestore(&priv->status_lock, flags);
+
+               if ((prev.rng == cur.rng) &&
+                   (prev.dsr == cur.dsr) &&
+                   (prev.dcd == cur.dcd) &&
+                   (prev.cts == cur.cts))
+                       return -EIO;
+
+               if ((arg & TIOCM_RNG && (prev.rng != cur.rng)) ||
+                   (arg & TIOCM_DSR && (prev.dsr != cur.dsr)) ||
+                   (arg & TIOCM_CD  && (prev.dcd != cur.dcd)) ||
+                   (arg & TIOCM_CTS && (prev.cts != cur.cts)))
+                       return 0;
+       }
+       return 0;
+}
+
 static int ssu100_ioctl(struct tty_struct *tty, struct file *file,
                    unsigned int cmd, unsigned long arg)
 {
        struct usb_serial_port *port = tty->driver_data;
        struct ssu100_port_private *priv = usb_get_serial_port_data(port);
+       void __user *user_arg = (void __user *)arg;
 
        dbg("%s cmd 0x%04x", __func__, cmd);
 
@@ -412,28 +431,28 @@ static int ssu100_ioctl(struct tty_struct *tty, struct file *file,
                                       (struct serial_struct __user *) arg);
 
        case TIOCMIWAIT:
-               while (priv != NULL) {
-                       u8 prevMSR = priv->shadowMSR & SERIAL_MSR_MASK;
-                       interruptible_sleep_on(&priv->delta_msr_wait);
-                       /* see if a signal did it */
-                       if (signal_pending(current))
-                               return -ERESTARTSYS;
-                       else {
-                               u8 diff = (priv->shadowMSR & SERIAL_MSR_MASK) ^ prevMSR;
-                               if (!diff)
-                                       return -EIO; /* no change => error */
-
-                               /* Return 0 if caller wanted to know about
-                                  these bits */
-
-                               if (((arg & TIOCM_RNG) && (diff & SERIAL_MSR_RI)) ||
-                                   ((arg & TIOCM_DSR) && (diff & SERIAL_MSR_DSR)) ||
-                                   ((arg & TIOCM_CD) && (diff & SERIAL_MSR_CD)) ||
-                                   ((arg & TIOCM_CTS) && (diff & SERIAL_MSR_CTS)))
-                                       return 0;
-                       }
-               }
+               return wait_modem_info(port, arg);
+
+       case TIOCGICOUNT:
+       {
+               struct serial_icounter_struct icount;
+               struct async_icount cnow = priv->icount;
+               memset(&icount, 0, sizeof(icount));
+               icount.cts = cnow.cts;
+               icount.dsr = cnow.dsr;
+               icount.rng = cnow.rng;
+               icount.dcd = cnow.dcd;
+               icount.rx = cnow.rx;
+               icount.tx = cnow.tx;
+               icount.frame = cnow.frame;
+               icount.overrun = cnow.overrun;
+               icount.parity = cnow.parity;
+               icount.brk = cnow.brk;
+               icount.buf_overrun = cnow.buf_overrun;
+               if (copy_to_user(user_arg, &icount, sizeof(icount)))
+                       return -EFAULT;
                return 0;
+       }
 
        default:
                break;
@@ -455,6 +474,7 @@ static void ssu100_set_max_packet_size(struct usb_serial_port *port)
 
        unsigned num_endpoints;
        int i;
+       unsigned long flags;
 
        num_endpoints = interface->cur_altsetting->desc.bNumEndpoints;
        dev_info(&udev->dev, "Number of endpoints %d\n", num_endpoints);
@@ -466,7 +486,9 @@ static void ssu100_set_max_packet_size(struct usb_serial_port *port)
        }
 
        /* set max packet size based on descriptor */
+       spin_lock_irqsave(&priv->status_lock, flags);
        priv->max_packet_size = ep_desc->wMaxPacketSize;
+       spin_unlock_irqrestore(&priv->status_lock, flags);
 
        dev_info(&udev->dev, "Setting MaxPacketSize %d\n", priv->max_packet_size);
 }
@@ -485,9 +507,9 @@ static int ssu100_attach(struct usb_serial *serial)
                return -ENOMEM;
        }
 
+       spin_lock_init(&priv->status_lock);
        init_waitqueue_head(&priv->delta_msr_wait);
        usb_set_serial_port_data(port, priv);
-
        ssu100_set_max_packet_size(port);
 
        return ssu100_initdevice(serial->dev);
@@ -506,20 +528,20 @@ static int ssu100_tiocmget(struct tty_struct *tty, struct file *file)
        if (!d)
                return -ENOMEM;
 
-       r = ssu100_getregister(dev, 0, MODEM_CTL_REGISTER, d);
+       r = ssu100_getregister(dev, 0, UART_MCR, d);
        if (r < 0)
                goto mget_out;
 
-       r = ssu100_getregister(dev, 0, MODEM_STATUS_REGISTER, d+1);
+       r = ssu100_getregister(dev, 0, UART_MSR, d+1);
        if (r < 0)
                goto mget_out;
 
-       r = (d[0] & SERIAL_MCR_DTR ? TIOCM_DTR : 0) |
-               (d[0] & SERIAL_MCR_RTS ? TIOCM_RTS : 0) |
-               (d[1] & SERIAL_MSR_CTS ? TIOCM_CTS : 0) |
-               (d[1] & SERIAL_MSR_CD ? TIOCM_CAR : 0) |
-               (d[1] & SERIAL_MSR_RI ? TIOCM_RI : 0) |
-               (d[1] & SERIAL_MSR_DSR ? TIOCM_DSR : 0);
+       r = (d[0] & UART_MCR_DTR ? TIOCM_DTR : 0) |
+               (d[0] & UART_MCR_RTS ? TIOCM_RTS : 0) |
+               (d[1] & UART_MSR_CTS ? TIOCM_CTS : 0) |
+               (d[1] & UART_MSR_DCD ? TIOCM_CAR : 0) |
+               (d[1] & UART_MSR_RI ? TIOCM_RI : 0) |
+               (d[1] & UART_MSR_DSR ? TIOCM_DSR : 0);
 
 mget_out:
        kfree(d);
@@ -546,7 +568,7 @@ static void ssu100_dtr_rts(struct usb_serial_port *port, int on)
        if (!port->serial->disconnected) {
                /* Disable flow control */
                if (!on &&
-                   ssu100_setregister(dev, 0, 0) < 0)
+                   ssu100_setregister(dev, 0, UART_MCR, 0) < 0)
                        dev_err(&port->dev, "error from flowcontrol urb\n");
                /* drop RTS and DTR */
                if (on)
@@ -557,34 +579,88 @@ static void ssu100_dtr_rts(struct usb_serial_port *port, int on)
        mutex_unlock(&port->serial->disc_mutex);
 }
 
+static void ssu100_update_msr(struct usb_serial_port *port, u8 msr)
+{
+       struct ssu100_port_private *priv = usb_get_serial_port_data(port);
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->status_lock, flags);
+       priv->shadowMSR = msr;
+       spin_unlock_irqrestore(&priv->status_lock, flags);
+
+       if (msr & UART_MSR_ANY_DELTA) {
+               /* update input line counters */
+               if (msr & UART_MSR_DCTS)
+                       priv->icount.cts++;
+               if (msr & UART_MSR_DDSR)
+                       priv->icount.dsr++;
+               if (msr & UART_MSR_DDCD)
+                       priv->icount.dcd++;
+               if (msr & UART_MSR_TERI)
+                       priv->icount.rng++;
+               wake_up_interruptible(&priv->delta_msr_wait);
+       }
+}
+
+static void ssu100_update_lsr(struct usb_serial_port *port, u8 lsr,
+                             char *tty_flag)
+{
+       struct ssu100_port_private *priv = usb_get_serial_port_data(port);
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->status_lock, flags);
+       priv->shadowLSR = lsr;
+       spin_unlock_irqrestore(&priv->status_lock, flags);
+
+       *tty_flag = TTY_NORMAL;
+       if (lsr & UART_LSR_BRK_ERROR_BITS) {
+               /* we always want to update icount, but we only want to
+                * update tty_flag for one case */
+               if (lsr & UART_LSR_BI) {
+                       priv->icount.brk++;
+                       *tty_flag = TTY_BREAK;
+                       usb_serial_handle_break(port);
+               }
+               if (lsr & UART_LSR_PE) {
+                       priv->icount.parity++;
+                       if (*tty_flag == TTY_NORMAL)
+                               *tty_flag = TTY_PARITY;
+               }
+               if (lsr & UART_LSR_FE) {
+                       priv->icount.frame++;
+                       if (*tty_flag == TTY_NORMAL)
+                               *tty_flag = TTY_FRAME;
+               }
+               if (lsr & UART_LSR_OE){
+                       priv->icount.overrun++;
+                       if (*tty_flag == TTY_NORMAL)
+                               *tty_flag = TTY_OVERRUN;
+               }
+       }
+
+}
+
 static int ssu100_process_packet(struct tty_struct *tty,
                                 struct usb_serial_port *port,
                                 struct ssu100_port_private *priv,
                                 char *packet, int len)
 {
        int i;
-       char flag;
+       char flag = TTY_NORMAL;
        char *ch;
 
        dbg("%s - port %d", __func__, port->number);
 
-       if (len < 4) {
-               dbg("%s - malformed packet", __func__);
-               return 0;
-       }
-
-       if ((packet[0] == 0x1b) && (packet[1] == 0x1b) &&
+       if ((len >= 4) &&
+           (packet[0] == 0x1b) && (packet[1] == 0x1b) &&
            ((packet[2] == 0x00) || (packet[2] == 0x01))) {
-               if (packet[2] == 0x00)
-                       priv->shadowLSR = packet[3] & (SERIAL_LSR_OE |
-                                                      SERIAL_LSR_PE |
-                                                      SERIAL_LSR_FE |
-                                                      SERIAL_LSR_BI);
-
-               if (packet[2] == 0x01) {
-                       priv->shadowMSR = packet[3];
-                       wake_up_interruptible(&priv->delta_msr_wait);
+               if (packet[2] == 0x00) {
+                       ssu100_update_lsr(port, packet[3], &flag);
+                       if (flag == TTY_OVERRUN)
+                               tty_insert_flip_char(tty, 0, TTY_OVERRUN);
                }
+               if (packet[2] == 0x01)
+                       ssu100_update_msr(port, packet[3]);
 
                len -= 4;
                ch = packet + 4;
@@ -596,7 +672,7 @@ static int ssu100_process_packet(struct tty_struct *tty,
 
        if (port->port.console && port->sysrq) {
                for (i = 0; i < len; i++, ch++) {
-                       if (!usb_serial_handle_sysrq_char(tty, port, *ch))
+                       if (!usb_serial_handle_sysrq_char(port, *ch))
                                tty_insert_flip_char(tty, *ch, flag);
                }
        } else
@@ -631,7 +707,6 @@ static void ssu100_process_read_urb(struct urb *urb)
        tty_kref_put(tty);
 }
 
-
 static struct usb_serial_driver ssu100_device = {
        .driver = {
                .owner = THIS_MODULE,
@@ -653,6 +728,7 @@ static struct usb_serial_driver ssu100_device = {
        .tiocmset            = ssu100_tiocmset,
        .ioctl               = ssu100_ioctl,
        .set_termios         = ssu100_set_termios,
+       .disconnect          = usb_serial_generic_disconnect,
 };
 
 static int __init ssu100_init(void)
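
For context, the two ioctls the ssu100 rework above implements, TIOCMIWAIT (block until a modem status line changes) and TIOCGICOUNT (read the accumulated line-change counters), are driven from userspace roughly as follows; a hedged sketch, with the device path purely illustrative:

        #include <stdio.h>
        #include <fcntl.h>
        #include <unistd.h>
        #include <sys/ioctl.h>
        #include <linux/serial.h>

        int main(void)
        {
                struct serial_icounter_struct ic;
                int fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY);

                if (fd < 0)
                        return 1;
                /* blocks until CTS or DCD changes state */
                ioctl(fd, TIOCMIWAIT, TIOCM_CTS | TIOCM_CD);
                /* then read the accumulated line-change counters */
                if (ioctl(fd, TIOCGICOUNT, &ic) == 0)
                        printf("cts=%d dcd=%d rng=%d dsr=%d\n",
                               ic.cts, ic.dcd, ic.rng, ic.dsr);
                close(fd);
                return 0;
        }
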
index 2a982e62963b5b76a7bd894c412690b1ddb9381d..7a2177c79bdefb05fe88d6c67ee5b19cd7fae0f5 100644 (file)
@@ -736,6 +736,7 @@ int usb_serial_probe(struct usb_interface *interface,
 
        serial = create_serial(dev, interface, type);
        if (!serial) {
+               module_put(type->driver.owner);
                dev_err(&interface->dev, "%s - out of memory\n", __func__);
                return -ENOMEM;
        }
@@ -746,11 +747,11 @@ int usb_serial_probe(struct usb_interface *interface,
 
                id = get_iface_id(type, interface);
                retval = type->probe(serial, id);
-               module_put(type->driver.owner);
 
                if (retval) {
                        dbg("sub driver rejected device");
                        kfree(serial);
+                       module_put(type->driver.owner);
                        return retval;
                }
        }
@@ -822,6 +823,7 @@ int usb_serial_probe(struct usb_interface *interface,
                if (num_bulk_in == 0 || num_bulk_out == 0) {
                        dev_info(&interface->dev, "PL-2303 hack: descriptors matched but endpoints did not\n");
                        kfree(serial);
+                       module_put(type->driver.owner);
                        return -ENODEV;
                }
        }
@@ -835,22 +837,15 @@ int usb_serial_probe(struct usb_interface *interface,
                        dev_err(&interface->dev,
                            "Generic device with no bulk out, not allowed.\n");
                        kfree(serial);
+                       module_put(type->driver.owner);
                        return -EIO;
                }
        }
 #endif
        if (!num_ports) {
                /* if this device type has a calc_num_ports function, call it */
-               if (type->calc_num_ports) {
-                       if (!try_module_get(type->driver.owner)) {
-                               dev_err(&interface->dev,
-                                       "module get failed, exiting\n");
-                               kfree(serial);
-                               return -EIO;
-                       }
+               if (type->calc_num_ports)
                        num_ports = type->calc_num_ports(serial);
-                       module_put(type->driver.owner);
-               }
                if (!num_ports)
                        num_ports = type->num_ports;
        }
@@ -1039,13 +1034,7 @@ int usb_serial_probe(struct usb_interface *interface,
 
        /* if this device type has an attach function, call it */
        if (type->attach) {
-               if (!try_module_get(type->driver.owner)) {
-                       dev_err(&interface->dev,
-                                       "module get failed, exiting\n");
-                       goto probe_error;
-               }
                retval = type->attach(serial);
-               module_put(type->driver.owner);
                if (retval < 0)
                        goto probe_error;
                serial->attached = 1;
@@ -1088,10 +1077,12 @@ int usb_serial_probe(struct usb_interface *interface,
 exit:
        /* success */
        usb_set_intfdata(interface, serial);
+       module_put(type->driver.owner);
        return 0;
 
 probe_error:
        usb_serial_put(serial);
+       module_put(type->driver.owner);
        return -EIO;
 }
 EXPORT_SYMBOL_GPL(usb_serial_probe);
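
The usb-serial hunks above rebalance the module reference count: the reference on type->driver.owner taken earlier in usb_serial_probe() (outside these hunks) is now held for the whole probe and dropped exactly once on every exit path, success included, instead of being taken and released around individual sub-driver callbacks. A minimal userspace sketch of that acquire-once, release-on-every-exit discipline follows; the counter and helper names are illustrative, not kernel APIs.

#include <stdio.h>

static int refcount;

/* Stand-ins for try_module_get()/module_put(). */
static int fake_module_get(void)  { refcount++; return 1; }
static void fake_module_put(void) { refcount--; }

static int probe(int fail)
{
        if (!fake_module_get())
                return -1;

        if (fail)
                goto err;       /* every failure path drops the ref */

        fake_module_put();      /* ...and so does the success path */
        return 0;
err:
        fake_module_put();
        return -1;
}

int main(void)
{
        probe(0);
        probe(1);
        printf("leaked references: %d\n", refcount);    /* expect 0 */
        return 0;
}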
index e05557d529992ec4deb1827a32574043bcd00925..4b99117f3ecd209c63571c610f813bd41d50abba 100644 (file)
@@ -323,7 +323,10 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
        dev->mm = NULL;
 
        WARN_ON(!list_empty(&dev->work_list));
-       kthread_stop(dev->worker);
+       if (dev->worker) {
+               kthread_stop(dev->worker);
+               dev->worker = NULL;
+       }
 }
 
 static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
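
The vhost change above only calls kthread_stop() when a worker was actually created, and then clears dev->worker, so the cleanup path is safe after a failed setup and idempotent if it runs twice. A small userspace sketch of the same guard-and-clear pattern, with purely illustrative types and names:

#include <stdio.h>
#include <stdlib.h>

struct worker { int id; };

struct dev {
        struct worker *worker;          /* NULL if never started */
};

/* Stand-in for kthread_stop(). */
static void stop_worker(struct worker *w) { free(w); }

static void dev_cleanup(struct dev *d)
{
        if (d->worker) {
                stop_worker(d->worker);
                d->worker = NULL;       /* makes a second cleanup a no-op */
        }
}

int main(void)
{
        struct dev d = { .worker = NULL };

        dev_cleanup(&d);                /* safe: no worker was ever started */
        d.worker = malloc(sizeof(*d.worker));
        dev_cleanup(&d);
        dev_cleanup(&d);                /* idempotent */
        puts("cleanup ok");
        return 0;
}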
index c91a7f70f7b086f56882ba07f97ba38e314adfd6..5d786bd3e304f1a174e682461d7d94a265ae2119 100644 (file)
@@ -559,7 +559,7 @@ static struct fb_ops pxa168fb_ops = {
        .fb_imageblit   = cfb_imageblit,
 };
 
-static int __init pxa168fb_init_mode(struct fb_info *info,
+static int __devinit pxa168fb_init_mode(struct fb_info *info,
                              struct pxa168fb_mach_info *mi)
 {
        struct pxa168fb_info *fbi = info->par;
@@ -599,7 +599,7 @@ static int __init pxa168fb_init_mode(struct fb_info *info,
        return ret;
 }
 
-static int __init pxa168fb_probe(struct platform_device *pdev)
+static int __devinit pxa168fb_probe(struct platform_device *pdev)
 {
        struct pxa168fb_mach_info *mi;
        struct fb_info *info = 0;
@@ -792,7 +792,7 @@ static struct platform_driver pxa168fb_driver = {
        .probe          = pxa168fb_probe,
 };
 
-static int __devinit pxa168fb_init(void)
+static int __init pxa168fb_init(void)
 {
        return platform_driver_register(&pxa168fb_driver);
 }
index 72f91bff29c7d836d86844c224225e1353c76648..13365ba3521853eb738f6fa9b8eb68a010c3b04b 100644 (file)
@@ -112,6 +112,7 @@ static inline unsigned long *cpu_evtchn_mask(int cpu)
 #define VALID_EVTCHN(chn)      ((chn) != 0)
 
 static struct irq_chip xen_dynamic_chip;
+static struct irq_chip xen_percpu_chip;
 
 /* Constructor for packed IRQ information. */
 static struct irq_info mk_unbound_info(void)
@@ -377,7 +378,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
                irq = find_unbound_irq();
 
                set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
-                                             handle_level_irq, "event");
+                                             handle_edge_irq, "event");
 
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_evtchn_info(evtchn);
@@ -403,8 +404,8 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
                if (irq < 0)
                        goto out;
 
-               set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
-                                             handle_level_irq, "ipi");
+               set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
+                                             handle_percpu_irq, "ipi");
 
                bind_ipi.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
@@ -444,8 +445,8 @@ static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
 
                irq = find_unbound_irq();
 
-               set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
-                                             handle_level_irq, "virq");
+               set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
+                                             handle_percpu_irq, "virq");
 
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_virq_info(evtchn, virq);
@@ -964,6 +965,16 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
        .retrigger      = retrigger_dynirq,
 };
 
+static struct irq_chip xen_percpu_chip __read_mostly = {
+       .name           = "xen-percpu",
+
+       .disable        = disable_dynirq,
+       .mask           = disable_dynirq,
+       .unmask         = enable_dynirq,
+
+       .ack            = ack_dynirq,
+};
+
 int xen_set_callback_via(uint64_t via)
 {
        struct xen_hvm_param a;
index 1799bd8903151db422bd28cf1173564f0d6872d4..ef9c7db52077c6d58c0d5f036be0a03bd771dffa 100644 (file)
@@ -237,7 +237,7 @@ static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
                goto again;
 
        if (sysrq_key != '\0')
-               handle_sysrq(sysrq_key, NULL);
+               handle_sysrq(sysrq_key);
 }
 
 static struct xenbus_watch sysrq_watch = {
index b27f09f05d177fb45cd0dc25f674853656ad3789..9c2d19452d0bf08a4c3642c7596d6056a8456797 100644 (file)
@@ -142,7 +142,7 @@ fw-shipped-$(CONFIG_YAM) += yam/1200.bin yam/9600.bin
 fw-shipped-all := $(fw-shipped-y) $(fw-shipped-m) $(fw-shipped-)
 
 # Directories which we _might_ need to create, so we have a rule for them.
-firmware-dirs := $(sort $(patsubst %,$(objtree)/$(obj)/%/,$(dir $(fw-external-y) $(fw-shipped-all))))
+firmware-dirs := $(sort $(addprefix $(objtree)/$(obj)/,$(dir $(fw-external-y) $(fw-shipped-all))))
 
 quiet_cmd_mkdir = MKDIR   $(patsubst $(objtree)/%,%,$@)
       cmd_mkdir = mkdir -p $@
index 358563689064df61c9b81713ad8bb8911c6518bd..6406f896bf95fe56d404f40cdae7e083d88e3e9c 100644 (file)
@@ -242,7 +242,8 @@ struct p9_fid *v9fs_fid_lookup(struct dentry *dentry)
        }
        kfree(wnames);
 fid_out:
-       v9fs_fid_add(dentry, fid);
+       if (!IS_ERR(fid))
+               v9fs_fid_add(dentry, fid);
 err_out:
        up_read(&v9ses->rename_sem);
        return fid;
index a7528b91393676bb1f1affa72f6b78f38206d4c5..fd0cc0bf9a40396a150ad77b69bf5284531d6125 100644 (file)
@@ -724,7 +724,7 @@ static int __init init_misc_binfmt(void)
 {
        int err = register_filesystem(&bm_fs_type);
        if (!err) {
-               err = register_binfmt(&misc_format);
+               err = insert_binfmt(&misc_format);
                if (err)
                        unregister_filesystem(&bm_fs_type);
        }
index 5598a0d02295d11eaa3646e137497a083910251e..4cfce1ee31faaf4f2f6aab966acd5d6001753940 100644 (file)
@@ -87,7 +87,7 @@ static int ceph_set_page_dirty(struct page *page)
 
        /* dirty the head */
        spin_lock(&inode->i_lock);
-       if (ci->i_wrbuffer_ref_head == 0)
+       if (ci->i_head_snapc == NULL)
                ci->i_head_snapc = ceph_get_snap_context(snapc);
        ++ci->i_wrbuffer_ref_head;
        if (ci->i_wrbuffer_ref == 0)
@@ -105,13 +105,7 @@ static int ceph_set_page_dirty(struct page *page)
        spin_lock_irq(&mapping->tree_lock);
        if (page->mapping) {    /* Race with truncate? */
                WARN_ON_ONCE(!PageUptodate(page));
-
-               if (mapping_cap_account_dirty(mapping)) {
-                       __inc_zone_page_state(page, NR_FILE_DIRTY);
-                       __inc_bdi_stat(mapping->backing_dev_info,
-                                       BDI_RECLAIMABLE);
-                       task_io_account_write(PAGE_CACHE_SIZE);
-               }
+               account_page_dirtied(page, page->mapping);
                radix_tree_tag_set(&mapping->page_tree,
                                page_index(page), PAGECACHE_TAG_DIRTY);
 
@@ -352,7 +346,7 @@ static struct ceph_snap_context *get_oldest_context(struct inode *inode,
                        break;
                }
        }
-       if (!snapc && ci->i_head_snapc) {
+       if (!snapc && ci->i_wrbuffer_ref_head) {
                snapc = ceph_get_snap_context(ci->i_head_snapc);
                dout(" head snapc %p has %d dirty pages\n",
                     snapc, ci->i_wrbuffer_ref_head);
index 582e0b2caf8a3ce6c2b943c0980c119be7950a63..a2d002cbdec23d15d30624a345ae6090ea452a53 100644 (file)
@@ -376,7 +376,7 @@ static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed)
 
                th = get_ticket_handler(ac, service);
 
-               if (!th) {
+               if (IS_ERR(th)) {
                        *pneed |= service;
                        continue;
                }
@@ -399,6 +399,9 @@ static int ceph_x_build_request(struct ceph_auth_client *ac,
        struct ceph_x_ticket_handler *th =
                get_ticket_handler(ac, CEPH_ENTITY_TYPE_AUTH);
 
+       if (IS_ERR(th))
+               return PTR_ERR(th);
+
        ceph_x_validate_tickets(ac, &need);
 
        dout("build_request want %x have %x need %x\n",
@@ -450,7 +453,6 @@ static int ceph_x_build_request(struct ceph_auth_client *ac,
                        return -ERANGE;
                head->op = cpu_to_le16(CEPHX_GET_PRINCIPAL_SESSION_KEY);
 
-               BUG_ON(!th);
                ret = ceph_x_build_authorizer(ac, th, &xi->auth_authorizer);
                if (ret)
                        return ret;
@@ -505,7 +507,8 @@ static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result,
 
        case CEPHX_GET_PRINCIPAL_SESSION_KEY:
                th = get_ticket_handler(ac, CEPH_ENTITY_TYPE_AUTH);
-               BUG_ON(!th);
+               if (IS_ERR(th))
+                       return PTR_ERR(th);
                ret = ceph_x_proc_ticket_reply(ac, &th->session_key,
                                               buf + sizeof(*head), end);
                break;
@@ -563,8 +566,8 @@ static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac,
        void *end = p + sizeof(au->reply_buf);
 
        th = get_ticket_handler(ac, au->service);
-       if (!th)
-               return -EIO;  /* hrm! */
+       if (IS_ERR(th))
+               return PTR_ERR(th);
        ret = ceph_x_decrypt(&th->session_key, &p, end, &reply, sizeof(reply));
        if (ret < 0)
                return ret;
@@ -626,7 +629,7 @@ static void ceph_x_invalidate_authorizer(struct ceph_auth_client *ac,
        struct ceph_x_ticket_handler *th;
 
        th = get_ticket_handler(ac, peer_type);
-       if (th && !IS_ERR(th))
+       if (!IS_ERR(th))
                remove_ticket_handler(ac, th);
 }
 
index 7bf182b0397396f961151a1c1b59218bc1cb2749..a2069b6680aed83eb0be0af7c584b4a619ae239a 100644 (file)
@@ -1082,6 +1082,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
        gid_t gid;
        struct ceph_mds_session *session;
        u64 xattr_version = 0;
+       struct ceph_buffer *xattr_blob = NULL;
        int delayed = 0;
        u64 flush_tid = 0;
        int i;
@@ -1142,6 +1143,10 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
                for (i = 0; i < CEPH_CAP_BITS; i++)
                        if (flushing & (1 << i))
                                ci->i_cap_flush_tid[i] = flush_tid;
+
+               follows = ci->i_head_snapc->seq;
+       } else {
+               follows = 0;
        }
 
        keep = cap->implemented;
@@ -1155,14 +1160,14 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
        mtime = inode->i_mtime;
        atime = inode->i_atime;
        time_warp_seq = ci->i_time_warp_seq;
-       follows = ci->i_snap_realm->cached_context->seq;
        uid = inode->i_uid;
        gid = inode->i_gid;
        mode = inode->i_mode;
 
-       if (dropping & CEPH_CAP_XATTR_EXCL) {
+       if (flushing & CEPH_CAP_XATTR_EXCL) {
                __ceph_build_xattrs_blob(ci);
-               xattr_version = ci->i_xattrs.version + 1;
+               xattr_blob = ci->i_xattrs.blob;
+               xattr_version = ci->i_xattrs.version;
        }
 
        spin_unlock(&inode->i_lock);
@@ -1170,9 +1175,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
        ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
                op, keep, want, flushing, seq, flush_tid, issue_seq, mseq,
                size, max_size, &mtime, &atime, time_warp_seq,
-               uid, gid, mode,
-               xattr_version,
-               (flushing & CEPH_CAP_XATTR_EXCL) ? ci->i_xattrs.blob : NULL,
+               uid, gid, mode, xattr_version, xattr_blob,
                follows);
        if (ret < 0) {
                dout("error sending cap msg, must requeue %p\n", inode);
@@ -1282,7 +1285,7 @@ retry:
                             &capsnap->mtime, &capsnap->atime,
                             capsnap->time_warp_seq,
                             capsnap->uid, capsnap->gid, capsnap->mode,
-                            0, NULL,
+                            capsnap->xattr_version, capsnap->xattr_blob,
                             capsnap->follows);
 
                next_follows = capsnap->follows + 1;
@@ -1332,7 +1335,11 @@ void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
             ceph_cap_string(was | mask));
        ci->i_dirty_caps |= mask;
        if (was == 0) {
-               dout(" inode %p now dirty\n", &ci->vfs_inode);
+               if (!ci->i_head_snapc)
+                       ci->i_head_snapc = ceph_get_snap_context(
+                               ci->i_snap_realm->cached_context);
+               dout(" inode %p now dirty snapc %p\n", &ci->vfs_inode,
+                       ci->i_head_snapc);
                BUG_ON(!list_empty(&ci->i_dirty_item));
                spin_lock(&mdsc->cap_dirty_lock);
                list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
@@ -2190,7 +2197,9 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
 
        if (ci->i_head_snapc == snapc) {
                ci->i_wrbuffer_ref_head -= nr;
-               if (!ci->i_wrbuffer_ref_head) {
+               if (ci->i_wrbuffer_ref_head == 0 &&
+                   ci->i_dirty_caps == 0 && ci->i_flushing_caps == 0) {
+                       BUG_ON(!ci->i_head_snapc);
                        ceph_put_snap_context(ci->i_head_snapc);
                        ci->i_head_snapc = NULL;
                }
@@ -2483,6 +2492,11 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
                        dout(" inode %p now clean\n", inode);
                        BUG_ON(!list_empty(&ci->i_dirty_item));
                        drop = 1;
+                       if (ci->i_wrbuffer_ref_head == 0) {
+                               BUG_ON(!ci->i_head_snapc);
+                               ceph_put_snap_context(ci->i_head_snapc);
+                               ci->i_head_snapc = NULL;
+                       }
                } else {
                        BUG_ON(list_empty(&ci->i_dirty_item));
                }
index 360c4f22718d5e079c2ce99c2484af9376751a76..6fd8b20a86112c367c788a20c2f134108acc40e8 100644 (file)
@@ -171,6 +171,8 @@ static int mdsc_show(struct seq_file *s, void *p)
                } else if (req->r_dentry) {
                        path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
                                                    &pathbase, 0);
+                       if (IS_ERR(path))
+                               path = NULL;
                        spin_lock(&req->r_dentry->d_lock);
                        seq_printf(s, " #%llx/%.*s (%s)",
                                   ceph_ino(req->r_dentry->d_parent->d_inode),
@@ -187,6 +189,8 @@ static int mdsc_show(struct seq_file *s, void *p)
                if (req->r_old_dentry) {
                        path = ceph_mdsc_build_path(req->r_old_dentry, &pathlen,
                                                    &pathbase, 0);
+                       if (IS_ERR(path))
+                               path = NULL;
                        spin_lock(&req->r_old_dentry->d_lock);
                        seq_printf(s, " #%llx/%.*s (%s)",
                           ceph_ino(req->r_old_dentry->d_parent->d_inode),
index 67bbb41d5526fcbdfb74f48f0e261b59b957ef9c..6e4f43ff23ec587050eab1b0e735e8d519827c85 100644 (file)
@@ -46,7 +46,7 @@ int ceph_init_dentry(struct dentry *dentry)
        else
                dentry->d_op = &ceph_snap_dentry_ops;
 
-       di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS);
+       di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
        if (!di)
                return -ENOMEM;          /* oh well */
 
index 5d893d31e399b81f57543ede560dc2258f7400da..e7cca414da03bcbd7549889a5ecb00d05ee11901 100644 (file)
@@ -677,6 +677,7 @@ static int fill_inode(struct inode *inode,
                if (ci->i_files == 0 && ci->i_subdirs == 0 &&
                    ceph_snap(inode) == CEPH_NOSNAP &&
                    (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) &&
+                   (issued & CEPH_CAP_FILE_EXCL) == 0 &&
                    (ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
                        dout(" marking %p complete (empty)\n", inode);
                        ci->i_ceph_flags |= CEPH_I_COMPLETE;
@@ -1229,11 +1230,11 @@ retry_lookup:
                        in = dn->d_inode;
                } else {
                        in = ceph_get_inode(parent->d_sb, vino);
-                       if (in == NULL) {
+                       if (IS_ERR(in)) {
                                dout("new_inode badness\n");
                                d_delete(dn);
                                dput(dn);
-                               err = -ENOMEM;
+                               err = PTR_ERR(in);
                                goto out;
                        }
                        dn = splice_dentry(dn, in, NULL);
index ae85af06454fd3544840329be8639a0910d2cb3d..ff4e753aae929d37d414567d22fd6afef7316c7e 100644 (file)
@@ -82,7 +82,8 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
                length = fl->fl_end - fl->fl_start + 1;
 
        err = ceph_lock_message(CEPH_LOCK_FCNTL, op, file,
-                               (u64)fl->fl_pid, (u64)fl->fl_nspid,
+                               (u64)fl->fl_pid,
+                               (u64)(unsigned long)fl->fl_nspid,
                                lock_cmd, fl->fl_start,
                                length, wait);
        if (!err) {
@@ -92,7 +93,8 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
                        /* undo! This should only happen if the kernel detects
                         * local deadlock. */
                        ceph_lock_message(CEPH_LOCK_FCNTL, op, file,
-                                         (u64)fl->fl_pid, (u64)fl->fl_nspid,
+                                         (u64)fl->fl_pid,
+                                         (u64)(unsigned long)fl->fl_nspid,
                                          CEPH_LOCK_UNLOCK, fl->fl_start,
                                          length, 0);
                        dout("got %d on posix_lock_file, undid lock", err);
@@ -132,7 +134,8 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
                length = fl->fl_end - fl->fl_start + 1;
 
        err = ceph_lock_message(CEPH_LOCK_FLOCK, CEPH_MDS_OP_SETFILELOCK,
-                               file, (u64)fl->fl_pid, (u64)fl->fl_nspid,
+                               file, (u64)fl->fl_pid,
+                               (u64)(unsigned long)fl->fl_nspid,
                                lock_cmd, fl->fl_start,
                                length, wait);
        if (!err) {
@@ -141,7 +144,7 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
                        ceph_lock_message(CEPH_LOCK_FLOCK,
                                          CEPH_MDS_OP_SETFILELOCK,
                                          file, (u64)fl->fl_pid,
-                                         (u64)fl->fl_nspid,
+                                         (u64)(unsigned long)fl->fl_nspid,
                                          CEPH_LOCK_UNLOCK, fl->fl_start,
                                          length, 0);
                        dout("got %d on flock_lock_file_wait, undid lock", err);
@@ -235,7 +238,8 @@ int lock_to_ceph_filelock(struct file_lock *lock,
        cephlock->length = cpu_to_le64(lock->fl_end - lock->fl_start + 1);
        cephlock->client = cpu_to_le64(0);
        cephlock->pid = cpu_to_le64(lock->fl_pid);
-       cephlock->pid_namespace = cpu_to_le64((u64)lock->fl_nspid);
+       cephlock->pid_namespace =
+               cpu_to_le64((u64)(unsigned long)lock->fl_nspid);
 
        switch (lock->fl_type) {
        case F_RDLCK:
index a75ddbf9fe3743973c6fd02724f005b8504c3738..f091b1351786368de18757d8cb262a19d1006bf1 100644 (file)
@@ -560,6 +560,13 @@ static void __unregister_request(struct ceph_mds_client *mdsc,
  *
  * Called under mdsc->mutex.
  */
+struct dentry *get_nonsnap_parent(struct dentry *dentry)
+{
+       while (!IS_ROOT(dentry) && ceph_snap(dentry->d_inode) != CEPH_NOSNAP)
+               dentry = dentry->d_parent;
+       return dentry;
+}
+
 static int __choose_mds(struct ceph_mds_client *mdsc,
                        struct ceph_mds_request *req)
 {
@@ -590,14 +597,29 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
        if (req->r_inode) {
                inode = req->r_inode;
        } else if (req->r_dentry) {
-               if (req->r_dentry->d_inode) {
+               struct inode *dir = req->r_dentry->d_parent->d_inode;
+
+               if (dir->i_sb != mdsc->client->sb) {
+                       /* not this fs! */
+                       inode = req->r_dentry->d_inode;
+               } else if (ceph_snap(dir) != CEPH_NOSNAP) {
+                       /* direct snapped/virtual snapdir requests
+                        * based on parent dir inode */
+                       struct dentry *dn =
+                               get_nonsnap_parent(req->r_dentry->d_parent);
+                       inode = dn->d_inode;
+                       dout("__choose_mds using nonsnap parent %p\n", inode);
+               } else if (req->r_dentry->d_inode) {
+                       /* dentry target */
                        inode = req->r_dentry->d_inode;
                } else {
-                       inode = req->r_dentry->d_parent->d_inode;
+                       /* dir + name */
+                       inode = dir;
                        hash = req->r_dentry->d_name.hash;
                        is_hash = true;
                }
        }
+
        dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash,
             (int)hash, mode);
        if (!inode)
@@ -2208,7 +2230,7 @@ static void handle_session(struct ceph_mds_session *session,
                        pr_info("mds%d reconnect denied\n", session->s_mds);
                remove_session_caps(session);
                wake = 1; /* for good measure */
-               complete_all(&mdsc->session_close_waiters);
+               wake_up_all(&mdsc->session_close_wq);
                kick_requests(mdsc, mds);
                break;
 
@@ -2302,7 +2324,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
                path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0);
                if (IS_ERR(path)) {
                        err = PTR_ERR(path);
-                       BUG_ON(err);
+                       goto out_dput;
                }
        } else {
                path = NULL;
@@ -2310,7 +2332,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
        }
        err = ceph_pagelist_encode_string(pagelist, path, pathlen);
        if (err)
-               goto out;
+               goto out_free;
 
        spin_lock(&inode->i_lock);
        cap->seq = 0;        /* reset cap seq */
@@ -2354,8 +2376,9 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
                unlock_kernel();
        }
 
-out:
+out_free:
        kfree(path);
+out_dput:
        dput(dentry);
        return err;
 }
@@ -2876,7 +2899,7 @@ int ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client)
                return -ENOMEM;
 
        init_completion(&mdsc->safe_umount_waiters);
-       init_completion(&mdsc->session_close_waiters);
+       init_waitqueue_head(&mdsc->session_close_wq);
        INIT_LIST_HEAD(&mdsc->waiting_for_map);
        mdsc->sessions = NULL;
        mdsc->max_sessions = 0;
@@ -3021,6 +3044,23 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
        wait_event(mdsc->cap_flushing_wq, check_cap_flush(mdsc, want_flush));
 }
 
+/*
+ * true if all sessions are closed, or we force unmount
+ */
+bool done_closing_sessions(struct ceph_mds_client *mdsc)
+{
+       int i, n = 0;
+
+       if (mdsc->client->mount_state == CEPH_MOUNT_SHUTDOWN)
+               return true;
+
+       mutex_lock(&mdsc->mutex);
+       for (i = 0; i < mdsc->max_sessions; i++)
+               if (mdsc->sessions[i])
+                       n++;
+       mutex_unlock(&mdsc->mutex);
+       return n == 0;
+}
 
 /*
  * called after sb is ro.
@@ -3029,45 +3069,32 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
 {
        struct ceph_mds_session *session;
        int i;
-       int n;
        struct ceph_client *client = mdsc->client;
-       unsigned long started, timeout = client->mount_args->mount_timeout * HZ;
+       unsigned long timeout = client->mount_args->mount_timeout * HZ;
 
        dout("close_sessions\n");
 
-       mutex_lock(&mdsc->mutex);
-
        /* close sessions */
-       started = jiffies;
-       while (time_before(jiffies, started + timeout)) {
-               dout("closing sessions\n");
-               n = 0;
-               for (i = 0; i < mdsc->max_sessions; i++) {
-                       session = __ceph_lookup_mds_session(mdsc, i);
-                       if (!session)
-                               continue;
-                       mutex_unlock(&mdsc->mutex);
-                       mutex_lock(&session->s_mutex);
-                       __close_session(mdsc, session);
-                       mutex_unlock(&session->s_mutex);
-                       ceph_put_mds_session(session);
-                       mutex_lock(&mdsc->mutex);
-                       n++;
-               }
-               if (n == 0)
-                       break;
-
-               if (client->mount_state == CEPH_MOUNT_SHUTDOWN)
-                       break;
-
-               dout("waiting for sessions to close\n");
+       mutex_lock(&mdsc->mutex);
+       for (i = 0; i < mdsc->max_sessions; i++) {
+               session = __ceph_lookup_mds_session(mdsc, i);
+               if (!session)
+                       continue;
                mutex_unlock(&mdsc->mutex);
-               wait_for_completion_timeout(&mdsc->session_close_waiters,
-                                           timeout);
+               mutex_lock(&session->s_mutex);
+               __close_session(mdsc, session);
+               mutex_unlock(&session->s_mutex);
+               ceph_put_mds_session(session);
                mutex_lock(&mdsc->mutex);
        }
+       mutex_unlock(&mdsc->mutex);
+
+       dout("waiting for sessions to close\n");
+       wait_event_timeout(mdsc->session_close_wq, done_closing_sessions(mdsc),
+                          timeout);
 
        /* tear down remaining sessions */
+       mutex_lock(&mdsc->mutex);
        for (i = 0; i < mdsc->max_sessions; i++) {
                if (mdsc->sessions[i]) {
                        session = get_session(mdsc->sessions[i]);
@@ -3080,9 +3107,7 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
                        mutex_lock(&mdsc->mutex);
                }
        }
-
        WARN_ON(!list_empty(&mdsc->cap_delay_list));
-
        mutex_unlock(&mdsc->mutex);
 
        ceph_cleanup_empty_realms(mdsc);
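
The session-shutdown rework above drops the completion in favour of a wait queue plus a done_closing_sessions() predicate, so wait_event_timeout() re-tests the condition itself and a wakeup that fires before the sleeper arrives is never lost. A userspace analogue using a condition variable with the predicate re-checked under the lock; none of the names below are kernel APIs:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int open_sessions = 1;

static void *closer(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        open_sessions = 0;              /* change the state... */
        pthread_cond_broadcast(&cond);  /* ...then wake waiters */
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, closer, NULL);

        pthread_mutex_lock(&lock);
        while (open_sessions != 0)      /* predicate re-checked on every wakeup */
                pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);

        pthread_join(t, NULL);
        puts("all sessions closed");
        return 0;
}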
index ab7e89f5e344b14d44500866625f378a2c849d57..c98267ce6d2ad97e1d9c86bc0660e2d82d39366c 100644 (file)
@@ -234,7 +234,8 @@ struct ceph_mds_client {
        struct mutex            mutex;         /* all nested structures */
 
        struct ceph_mdsmap      *mdsmap;
-       struct completion       safe_umount_waiters, session_close_waiters;
+       struct completion       safe_umount_waiters;
+       wait_queue_head_t       session_close_wq;
        struct list_head        waiting_for_map;
 
        struct ceph_mds_session **sessions;    /* NULL for mds if no session */
index bed6391e52c7109c966d5e642e472c665bd7d1f5..dfced1dacbcdcb47178f2c9676275869c2105296 100644 (file)
@@ -661,7 +661,7 @@ static int __send_request(struct ceph_osd_client *osdc,
        reqhead->reassert_version = req->r_reassert_version;
 
        req->r_stamp = jiffies;
-       list_move_tail(&osdc->req_lru, &req->r_req_lru_item);
+       list_move_tail(&req->r_req_lru_item, &osdc->req_lru);
 
        ceph_msg_get(req->r_request); /* send consumes a ref */
        ceph_con_send(&req->r_osd->o_con, req->r_request);
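
The one-line osd_client fix restores the list_move_tail(entry, head) argument order; with the arguments reversed, the request LRU head itself was being spliced onto the request rather than the request onto the LRU. A minimal list implementation, written here in the style of <linux/list.h> (details are illustrative), showing the correct call:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void __list_del(struct list_head *e)
{
        e->prev->next = e->next;
        e->next->prev = e->prev;
}

static void list_add_tail(struct list_head *e, struct list_head *head)
{
        e->prev = head->prev;
        e->next = head;
        head->prev->next = e;
        head->prev = e;
}

static void list_move_tail(struct list_head *entry, struct list_head *head)
{
        __list_del(entry);
        list_add_tail(entry, head);     /* entry first, head second */
}

int main(void)
{
        struct list_head lru, item;

        INIT_LIST_HEAD(&lru);
        INIT_LIST_HEAD(&item);
        list_move_tail(&item, &lru);    /* correct argument order */
        printf("item on lru: %d\n", lru.next == &item);
        return 0;
}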
index c0b26b6badba7f53d0d8b32be33c165fb762922e..4868b9dcac5a6cc7a4d00610780572f335ef68f2 100644 (file)
@@ -435,7 +435,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
 {
        struct inode *inode = &ci->vfs_inode;
        struct ceph_cap_snap *capsnap;
-       int used;
+       int used, dirty;
 
        capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS);
        if (!capsnap) {
@@ -445,6 +445,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
 
        spin_lock(&inode->i_lock);
        used = __ceph_caps_used(ci);
+       dirty = __ceph_caps_dirty(ci);
        if (__ceph_have_pending_cap_snap(ci)) {
                /* there is no point in queuing multiple "pending" cap_snaps,
                   as no new writes are allowed to start when pending, so any
@@ -452,11 +453,15 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
                   cap_snap.  lucky us. */
                dout("queue_cap_snap %p already pending\n", inode);
                kfree(capsnap);
-       } else if (ci->i_wrbuffer_ref_head || (used & CEPH_CAP_FILE_WR)) {
+       } else if (ci->i_wrbuffer_ref_head || (used & CEPH_CAP_FILE_WR) ||
+                  (dirty & (CEPH_CAP_AUTH_EXCL|CEPH_CAP_XATTR_EXCL|
+                            CEPH_CAP_FILE_EXCL|CEPH_CAP_FILE_WR))) {
                struct ceph_snap_context *snapc = ci->i_head_snapc;
 
+               dout("queue_cap_snap %p cap_snap %p queuing under %p\n", inode,
+                    capsnap, snapc);
                igrab(inode);
-
+               
                atomic_set(&capsnap->nref, 1);
                capsnap->ci = ci;
                INIT_LIST_HEAD(&capsnap->ci_item);
@@ -464,15 +469,21 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
 
                capsnap->follows = snapc->seq - 1;
                capsnap->issued = __ceph_caps_issued(ci, NULL);
-               capsnap->dirty = __ceph_caps_dirty(ci);
+               capsnap->dirty = dirty;
 
                capsnap->mode = inode->i_mode;
                capsnap->uid = inode->i_uid;
                capsnap->gid = inode->i_gid;
 
-               /* fixme? */
-               capsnap->xattr_blob = NULL;
-               capsnap->xattr_len = 0;
+               if (dirty & CEPH_CAP_XATTR_EXCL) {
+                       __ceph_build_xattrs_blob(ci);
+                       capsnap->xattr_blob =
+                               ceph_buffer_get(ci->i_xattrs.blob);
+                       capsnap->xattr_version = ci->i_xattrs.version;
+               } else {
+                       capsnap->xattr_blob = NULL;
+                       capsnap->xattr_version = 0;
+               }
 
                /* dirty page count moved from _head to this cap_snap;
                   all subsequent writes page dirties occur _after_ this
@@ -480,7 +491,9 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
                capsnap->dirty_pages = ci->i_wrbuffer_ref_head;
                ci->i_wrbuffer_ref_head = 0;
                capsnap->context = snapc;
-               ci->i_head_snapc = NULL;
+               ci->i_head_snapc =
+                       ceph_get_snap_context(ci->i_snap_realm->cached_context);
+               dout(" new snapc is %p\n", ci->i_head_snapc);
                list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps);
 
                if (used & CEPH_CAP_FILE_WR) {
@@ -539,6 +552,41 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
        return 1;  /* caller may want to ceph_flush_snaps */
 }
 
+/*
+ * Queue cap_snaps for snap writeback for this realm and its children.
+ * Called under snap_rwsem, so realm topology won't change.
+ */
+static void queue_realm_cap_snaps(struct ceph_snap_realm *realm)
+{
+       struct ceph_inode_info *ci;
+       struct inode *lastinode = NULL;
+       struct ceph_snap_realm *child;
+
+       dout("queue_realm_cap_snaps %p %llx inodes\n", realm, realm->ino);
+
+       spin_lock(&realm->inodes_with_caps_lock);
+       list_for_each_entry(ci, &realm->inodes_with_caps,
+                           i_snap_realm_item) {
+               struct inode *inode = igrab(&ci->vfs_inode);
+               if (!inode)
+                       continue;
+               spin_unlock(&realm->inodes_with_caps_lock);
+               if (lastinode)
+                       iput(lastinode);
+               lastinode = inode;
+               ceph_queue_cap_snap(ci);
+               spin_lock(&realm->inodes_with_caps_lock);
+       }
+       spin_unlock(&realm->inodes_with_caps_lock);
+       if (lastinode)
+               iput(lastinode);
+
+       dout("queue_realm_cap_snaps %p %llx children\n", realm, realm->ino);
+       list_for_each_entry(child, &realm->children, child_item)
+               queue_realm_cap_snaps(child);
+
+       dout("queue_realm_cap_snaps %p %llx done\n", realm, realm->ino);
+}
 
 /*
  * Parse and apply a snapblob "snap trace" from the MDS.  This specifies
@@ -589,29 +637,8 @@ more:
                 *
                 * ...unless it's a snap deletion!
                 */
-               if (!deletion) {
-                       struct ceph_inode_info *ci;
-                       struct inode *lastinode = NULL;
-
-                       spin_lock(&realm->inodes_with_caps_lock);
-                       list_for_each_entry(ci, &realm->inodes_with_caps,
-                                           i_snap_realm_item) {
-                               struct inode *inode = igrab(&ci->vfs_inode);
-                               if (!inode)
-                                       continue;
-                               spin_unlock(&realm->inodes_with_caps_lock);
-                               if (lastinode)
-                                       iput(lastinode);
-                               lastinode = inode;
-                               ceph_queue_cap_snap(ci);
-                               spin_lock(&realm->inodes_with_caps_lock);
-                       }
-                       spin_unlock(&realm->inodes_with_caps_lock);
-                       if (lastinode)
-                               iput(lastinode);
-                       dout("update_snap_trace cap_snaps queued\n");
-               }
-
+               if (!deletion)
+                       queue_realm_cap_snaps(realm);
        } else {
                dout("update_snap_trace %llx %p seq %lld unchanged\n",
                     realm->ino, realm, realm->seq);
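
The snap.c refactor moves the per-realm loop into queue_realm_cap_snaps(), which then recurses over realm->children so cap_snaps are queued for the entire subtree rather than only the realm whose snap context changed. A toy sketch of that recursive walk; the struct layout and names are invented for illustration:

#include <stdio.h>

struct realm {
        unsigned long ino;
        struct realm *children[4];      /* NULL-terminated for brevity */
};

static void queue_one(struct realm *r)
{
        printf("queue cap_snaps for realm %lx\n", r->ino);
}

static void queue_recursive(struct realm *r)
{
        queue_one(r);                   /* this realm first... */
        for (int i = 0; i < 4 && r->children[i]; i++)
                queue_recursive(r->children[i]);        /* ...then children */
}

int main(void)
{
        struct realm child = { .ino = 0x11 };
        struct realm root = { .ino = 0x1, .children = { &child } };

        queue_recursive(&root);
        return 0;
}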
index 2482d696f0de17f2fcccdedfc79d2ad4a14c6664..c33897ae5725e82ca269606b78d54214d8abf7af 100644 (file)
@@ -216,8 +216,7 @@ struct ceph_cap_snap {
        uid_t uid;
        gid_t gid;
 
-       void *xattr_blob;
-       int xattr_len;
+       struct ceph_buffer *xattr_blob;
        u64 xattr_version;
 
        u64 size;
@@ -229,8 +228,11 @@ struct ceph_cap_snap {
 
 static inline void ceph_put_cap_snap(struct ceph_cap_snap *capsnap)
 {
-       if (atomic_dec_and_test(&capsnap->nref))
+       if (atomic_dec_and_test(&capsnap->nref)) {
+               if (capsnap->xattr_blob)
+                       ceph_buffer_put(capsnap->xattr_blob);
                kfree(capsnap);
+       }
 }
 
 /*
@@ -342,7 +344,8 @@ struct ceph_inode_info {
        unsigned i_cap_exporting_issued;
        struct ceph_cap_reservation i_cap_migration_resv;
        struct list_head i_cap_snaps;   /* snapped state pending flush to mds */
-       struct ceph_snap_context *i_head_snapc;  /* set if wr_buffer_head > 0 */
+       struct ceph_snap_context *i_head_snapc;  /* set if wr_buffer_head > 0 or
+                                                   dirty|flushing caps */
        unsigned i_snap_caps;           /* cap bits for snapped files */
 
        int i_nr_by_mode[CEPH_FILE_MODE_NUM];  /* open file counts */
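
With the cap_snap xattr blob now a refcounted struct ceph_buffer, the final ceph_put_cap_snap() has to drop that child reference before freeing the snap itself. A compact C11 sketch of the last-put-releases-children pattern; the types and counters below are illustrative, not the ceph ones:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct blob {
        atomic_int nref;
};

struct snap {
        atomic_int nref;
        struct blob *xattr_blob;        /* optional, may be NULL */
};

static void blob_put(struct blob *b)
{
        if (b && atomic_fetch_sub(&b->nref, 1) == 1)
                free(b);
}

static void snap_put(struct snap *s)
{
        if (atomic_fetch_sub(&s->nref, 1) == 1) {
                blob_put(s->xattr_blob);        /* drop the child ref first */
                free(s);
        }
}

int main(void)
{
        struct snap *s = calloc(1, sizeof(*s));

        atomic_init(&s->nref, 1);
        s->xattr_blob = NULL;
        snap_put(s);
        puts("released");
        return 0;
}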
index 097a2654c00f517c004ef429b3c0046664f205c3..9578af610b73fb48b69872ddeed222e58c8340f0 100644 (file)
@@ -485,6 +485,7 @@ void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
                ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
                ci->i_xattrs.prealloc_blob = NULL;
                ci->i_xattrs.dirty = false;
+               ci->i_xattrs.version++;
        }
 }
 
index 917b7d449bb2a6248c28c23284cf82d9c032f285..0da1debd499d1845420753f50ef7b7da6765c237 100644 (file)
@@ -2,6 +2,8 @@ config CIFS
        tristate "CIFS support (advanced network filesystem, SMBFS successor)"
        depends on INET
        select NLS
+       select CRYPTO_MD5
+       select CRYPTO_ARC4
        help
          This is the client VFS module for the Common Internet File System
          (CIFS) protocol which is the successor to the Server Message Block
index cfd1ce34e0bc7b8c4794c81e1aa937e7f009ca75..21f0fbd86989a6811dd6323668f0836079f7e621 100644 (file)
@@ -597,13 +597,13 @@ decode_negTokenInit(unsigned char *security_blob, int length,
                                if (compare_oid(oid, oidlen, MSKRB5_OID,
                                                MSKRB5_OID_LEN))
                                        server->sec_mskerberos = true;
-                               else if (compare_oid(oid, oidlen, KRB5U2U_OID,
+                               if (compare_oid(oid, oidlen, KRB5U2U_OID,
                                                     KRB5U2U_OID_LEN))
                                        server->sec_kerberosu2u = true;
-                               else if (compare_oid(oid, oidlen, KRB5_OID,
+                               if (compare_oid(oid, oidlen, KRB5_OID,
                                                     KRB5_OID_LEN))
                                        server->sec_kerberos = true;
-                               else if (compare_oid(oid, oidlen, NTLMSSP_OID,
+                               if (compare_oid(oid, oidlen, NTLMSSP_OID,
                                                     NTLMSSP_OID_LEN))
                                        server->sec_ntlmssp = true;
 
index 650638275a6feed35d31fdbc183e57092b872f33..7fe6b52df5076a2eb14a7ec563403795e2e4c041 100644 (file)
@@ -30,6 +30,8 @@
  *     This is a compressed table of upper and lower case conversion.
  *
  */
+#ifndef _CIFS_UNICODE_H
+#define _CIFS_UNICODE_H
 
 #include <asm/byteorder.h>
 #include <linux/types.h>
@@ -67,8 +69,8 @@ extern const struct UniCaseRange CifsUniUpperRange[];
 #endif                         /* UNIUPR_NOUPPER */
 
 #ifndef UNIUPR_NOLOWER
-extern signed char UniLowerTable[512];
-extern struct UniCaseRange UniLowerRange[];
+extern signed char CifsUniLowerTable[512];
+extern const struct UniCaseRange CifsUniLowerRange[];
 #endif                         /* UNIUPR_NOLOWER */
 
 #ifdef __KERNEL__
@@ -337,15 +339,15 @@ UniStrupr(register wchar_t *upin)
  * UniTolower:  Convert a unicode character to lower case
  */
 static inline wchar_t
-UniTolower(wchar_t uc)
+UniTolower(register wchar_t uc)
 {
-       register struct UniCaseRange *rp;
+       register const struct UniCaseRange *rp;
 
-       if (uc < sizeof(UniLowerTable)) {
+       if (uc < sizeof(CifsUniLowerTable)) {
                /* Latin characters */
-               return uc + UniLowerTable[uc];  /* Use base tables */
+               return uc + CifsUniLowerTable[uc];      /* Use base tables */
        } else {
-               rp = UniLowerRange;     /* Use range tables */
+               rp = CifsUniLowerRange; /* Use range tables */
                while (rp->start) {
                        if (uc < rp->start)     /* Before start of range */
                                return uc;      /* Uppercase = input */
@@ -374,3 +376,5 @@ UniStrlwr(register wchar_t *upin)
 }
 
 #endif
+
+#endif /* _CIFS_UNICODE_H */
index 18a9d978e5190dfc9e3974fdf46f193d0c29d8b6..0ac7c5a8633a777dd1d804138ea82aff9f957521 100644 (file)
@@ -140,7 +140,7 @@ const struct UniCaseRange CifsUniUpperRange[] = {
 /*
  * Latin lower case
  */
-static signed char CifsUniLowerTable[512] = {
+signed char CifsUniLowerTable[512] = {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 000-00f */
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 010-01f */
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 020-02f */
@@ -242,12 +242,12 @@ static signed char UniCaseRangeLff20[27] = {
 /*
  * Lower Case Range
  */
-static const struct UniCaseRange CifsUniLowerRange[] = {
-       0x0380, 0x03ab, UniCaseRangeL0380,
-       0x0400, 0x042f, UniCaseRangeL0400,
-       0x0490, 0x04cb, UniCaseRangeL0490,
-       0x1e00, 0x1ff7, UniCaseRangeL1e00,
-       0xff20, 0xff3a, UniCaseRangeLff20,
-       0, 0, 0
+const struct UniCaseRange CifsUniLowerRange[] = {
+       {0x0380, 0x03ab, UniCaseRangeL0380},
+       {0x0400, 0x042f, UniCaseRangeL0400},
+       {0x0490, 0x04cb, UniCaseRangeL0490},
+       {0x1e00, 0x1ff7, UniCaseRangeL1e00},
+       {0xff20, 0xff3a, UniCaseRangeLff20},
+       {0}
 };
 #endif
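
The CifsUniLowerRange hunk also braces each array element explicitly instead of relying on brace elision, which keeps -Wmissing-braces quiet and makes the {0} terminator obvious. A small standalone example of the same initializer style, using dummy data rather than the CIFS tables:

#include <stdio.h>

struct range {
        unsigned start, end;
        const signed char *table;
};

static const signed char dummy[1] = { 0 };

/* Each element gets its own braces; the all-zero entry is the sentinel. */
static const struct range ranges[] = {
        { 0x0380, 0x03ab, dummy },
        { 0 }
};

int main(void)
{
        printf("%u ranges before sentinel\n",
               (unsigned)(sizeof(ranges) / sizeof(ranges[0]) - 1));
        return 0;
}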
index 847628dfdc44fa95c5c0ea232b8b28ccb03347e3..709f2296bdb4930b2eee0dcb7dd341f94ca42e9b 100644 (file)
@@ -27,6 +27,7 @@
 #include "md5.h"
 #include "cifs_unicode.h"
 #include "cifsproto.h"
+#include "ntlmssp.h"
 #include <linux/ctype.h>
 #include <linux/random.h>
 
@@ -42,21 +43,43 @@ extern void SMBencrypt(unsigned char *passwd, const unsigned char *c8,
                       unsigned char *p24);
 
 static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu,
-                                   const struct mac_key *key, char *signature)
+                       struct TCP_Server_Info *server, char *signature)
 {
-       struct  MD5Context context;
+       int rc;
 
-       if ((cifs_pdu == NULL) || (signature == NULL) || (key == NULL))
+       if (cifs_pdu == NULL || server == NULL || signature == NULL)
                return -EINVAL;
 
-       cifs_MD5_init(&context);
-       cifs_MD5_update(&context, (char *)&key->data, key->len);
-       cifs_MD5_update(&context, cifs_pdu->Protocol, cifs_pdu->smb_buf_length);
+       if (!server->ntlmssp.sdescmd5) {
+               cERROR(1,
+                       "cifs_calculate_signature: can't generate signature\n");
+               return -1;
+       }
 
-       cifs_MD5_final(signature, &context);
-       return 0;
+       rc = crypto_shash_init(&server->ntlmssp.sdescmd5->shash);
+       if (rc) {
+               cERROR(1, "cifs_calculate_signature: could not init md5\n");
+               return rc;
+       }
+
+       if (server->secType == RawNTLMSSP)
+               crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
+                       server->session_key.data.ntlmv2.key,
+                       CIFS_NTLMV2_SESSKEY_SIZE);
+       else
+               crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
+                       (char *)&server->session_key.data,
+                       server->session_key.len);
+
+       crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
+                       cifs_pdu->Protocol, cifs_pdu->smb_buf_length);
+
+       rc = crypto_shash_final(&server->ntlmssp.sdescmd5->shash, signature);
+
+       return rc;
 }
 
+
 int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
                  __u32 *pexpected_response_sequence_number)
 {
@@ -78,8 +101,7 @@ int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
        server->sequence_number++;
        spin_unlock(&GlobalMid_Lock);
 
-       rc = cifs_calculate_signature(cifs_pdu, &server->mac_signing_key,
-                                     smb_signature);
+       rc = cifs_calculate_signature(cifs_pdu, server, smb_signature);
        if (rc)
                memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
        else
@@ -89,21 +111,39 @@ int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
 }
 
 static int cifs_calc_signature2(const struct kvec *iov, int n_vec,
-                               const struct mac_key *key, char *signature)
+                       struct TCP_Server_Info *server, char *signature)
 {
-       struct  MD5Context context;
        int i;
+       int rc;
 
-       if ((iov == NULL) || (signature == NULL) || (key == NULL))
+       if (iov == NULL || server == NULL || signature == NULL)
                return -EINVAL;
 
-       cifs_MD5_init(&context);
-       cifs_MD5_update(&context, (char *)&key->data, key->len);
+       if (!server->ntlmssp.sdescmd5) {
+               cERROR(1, "cifs_calc_signature2: can't generate signature\n");
+               return -1;
+       }
+
+       rc = crypto_shash_init(&server->ntlmssp.sdescmd5->shash);
+       if (rc) {
+               cERROR(1, "cifs_calc_signature2: could not init md5\n");
+               return rc;
+       }
+
+       if (server->secType == RawNTLMSSP)
+               crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
+                       server->session_key.data.ntlmv2.key,
+                       CIFS_NTLMV2_SESSKEY_SIZE);
+       else
+               crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
+                       (char *)&server->session_key.data,
+                       server->session_key.len);
+
        for (i = 0; i < n_vec; i++) {
                if (iov[i].iov_len == 0)
                        continue;
                if (iov[i].iov_base == NULL) {
-                       cERROR(1, "null iovec entry");
+                       cERROR(1, "cifs_calc_signature2: null iovec entry");
                        return -EIO;
                }
                /* The first entry includes a length field (which does not get
@@ -111,18 +151,18 @@ static int cifs_calc_signature2(const struct kvec *iov, int n_vec,
                if (i == 0) {
                        if (iov[0].iov_len <= 8) /* cmd field at offset 9 */
                                break; /* nothing to sign or corrupt header */
-                       cifs_MD5_update(&context, iov[0].iov_base+4,
-                                 iov[0].iov_len-4);
+                       crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
+                               iov[i].iov_base + 4, iov[i].iov_len - 4);
                } else
-                       cifs_MD5_update(&context, iov[i].iov_base, iov[i].iov_len);
+                       crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
+                               iov[i].iov_base, iov[i].iov_len);
        }
 
-       cifs_MD5_final(signature, &context);
+       rc = crypto_shash_final(&server->ntlmssp.sdescmd5->shash, signature);
 
-       return 0;
+       return rc;
 }
 
-
 int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
                   __u32 *pexpected_response_sequence_number)
 {
@@ -145,8 +185,7 @@ int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
        server->sequence_number++;
        spin_unlock(&GlobalMid_Lock);
 
-       rc = cifs_calc_signature2(iov, n_vec, &server->mac_signing_key,
-                                     smb_signature);
+       rc = cifs_calc_signature2(iov, n_vec, server, smb_signature);
        if (rc)
                memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
        else
@@ -156,14 +195,14 @@ int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
 }
 
 int cifs_verify_signature(struct smb_hdr *cifs_pdu,
-                         const struct mac_key *mac_key,
+                         struct TCP_Server_Info *server,
                          __u32 expected_sequence_number)
 {
-       unsigned int rc;
+       int rc;
        char server_response_sig[8];
        char what_we_think_sig_should_be[20];
 
-       if ((cifs_pdu == NULL) || (mac_key == NULL))
+       if (cifs_pdu == NULL || server == NULL)
                return -EINVAL;
 
        if (cifs_pdu->Command == SMB_COM_NEGOTIATE)
@@ -192,7 +231,7 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu,
                                        cpu_to_le32(expected_sequence_number);
        cifs_pdu->Signature.Sequence.Reserved = 0;
 
-       rc = cifs_calculate_signature(cifs_pdu, mac_key,
+       rc = cifs_calculate_signature(cifs_pdu, server,
                what_we_think_sig_should_be);
 
        if (rc)
@@ -209,7 +248,7 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu,
 }
 
 /* We fill in key by putting in 40 byte array which was allocated by caller */
-int cifs_calculate_mac_key(struct mac_key *key, const char *rn,
+int cifs_calculate_session_key(struct session_key *key, const char *rn,
                           const char *password)
 {
        char temp_key[16];
@@ -223,63 +262,6 @@ int cifs_calculate_mac_key(struct mac_key *key, const char *rn,
        return 0;
 }
 
-int CalcNTLMv2_partial_mac_key(struct cifsSesInfo *ses,
-                              const struct nls_table *nls_info)
-{
-       char temp_hash[16];
-       struct HMACMD5Context ctx;
-       char *ucase_buf;
-       __le16 *unicode_buf;
-       unsigned int i, user_name_len, dom_name_len;
-
-       if (ses == NULL)
-               return -EINVAL;
-
-       E_md4hash(ses->password, temp_hash);
-
-       hmac_md5_init_limK_to_64(temp_hash, 16, &ctx);
-       user_name_len = strlen(ses->userName);
-       if (user_name_len > MAX_USERNAME_SIZE)
-               return -EINVAL;
-       if (ses->domainName == NULL)
-               return -EINVAL; /* BB should we use CIFS_LINUX_DOM */
-       dom_name_len = strlen(ses->domainName);
-       if (dom_name_len > MAX_USERNAME_SIZE)
-               return -EINVAL;
-
-       ucase_buf = kmalloc((MAX_USERNAME_SIZE+1), GFP_KERNEL);
-       if (ucase_buf == NULL)
-               return -ENOMEM;
-       unicode_buf = kmalloc((MAX_USERNAME_SIZE+1)*4, GFP_KERNEL);
-       if (unicode_buf == NULL) {
-               kfree(ucase_buf);
-               return -ENOMEM;
-       }
-
-       for (i = 0; i < user_name_len; i++)
-               ucase_buf[i] = nls_info->charset2upper[(int)ses->userName[i]];
-       ucase_buf[i] = 0;
-       user_name_len = cifs_strtoUCS(unicode_buf, ucase_buf,
-                                     MAX_USERNAME_SIZE*2, nls_info);
-       unicode_buf[user_name_len] = 0;
-       user_name_len++;
-
-       for (i = 0; i < dom_name_len; i++)
-               ucase_buf[i] = nls_info->charset2upper[(int)ses->domainName[i]];
-       ucase_buf[i] = 0;
-       dom_name_len = cifs_strtoUCS(unicode_buf+user_name_len, ucase_buf,
-                                    MAX_USERNAME_SIZE*2, nls_info);
-
-       unicode_buf[user_name_len + dom_name_len] = 0;
-       hmac_md5_update((const unsigned char *) unicode_buf,
-               (user_name_len+dom_name_len)*2, &ctx);
-
-       hmac_md5_final(ses->server->ntlmv2_hash, &ctx);
-       kfree(ucase_buf);
-       kfree(unicode_buf);
-       return 0;
-}
-
 #ifdef CONFIG_CIFS_WEAK_PW_HASH
 void calc_lanman_hash(const char *password, const char *cryptkey, bool encrypt,
                        char *lnm_session_key)
@@ -324,38 +306,52 @@ static int calc_ntlmv2_hash(struct cifsSesInfo *ses,
 {
        int rc = 0;
        int len;
-       char nt_hash[16];
-       struct HMACMD5Context *pctxt;
+       char nt_hash[CIFS_NTHASH_SIZE];
        wchar_t *user;
        wchar_t *domain;
+       wchar_t *server;
 
-       pctxt = kmalloc(sizeof(struct HMACMD5Context), GFP_KERNEL);
-
-       if (pctxt == NULL)
-               return -ENOMEM;
+       if (!ses->server->ntlmssp.sdeschmacmd5) {
+               cERROR(1, "calc_ntlmv2_hash: can't generate ntlmv2 hash\n");
+               return -1;
+       }
 
        /* calculate md4 hash of password */
        E_md4hash(ses->password, nt_hash);
 
-       /* convert Domainname to unicode and uppercase */
-       hmac_md5_init_limK_to_64(nt_hash, 16, pctxt);
+       crypto_shash_setkey(ses->server->ntlmssp.hmacmd5, nt_hash,
+                               CIFS_NTHASH_SIZE);
+
+       rc = crypto_shash_init(&ses->server->ntlmssp.sdeschmacmd5->shash);
+       if (rc) {
+               cERROR(1, "calc_ntlmv2_hash: could not init hmacmd5\n");
+               return rc;
+       }
 
        /* convert ses->userName to unicode and uppercase */
        len = strlen(ses->userName);
        user = kmalloc(2 + (len * 2), GFP_KERNEL);
-       if (user == NULL)
+       if (user == NULL) {
+               cERROR(1, "calc_ntlmv2_hash: user mem alloc failure\n");
+               rc = -ENOMEM;
                goto calc_exit_2;
+       }
        len = cifs_strtoUCS((__le16 *)user, ses->userName, len, nls_cp);
        UniStrupr(user);
-       hmac_md5_update((char *)user, 2*len, pctxt);
+
+       crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash,
+                               (char *)user, 2 * len);
 
        /* convert ses->domainName to unicode and uppercase */
        if (ses->domainName) {
                len = strlen(ses->domainName);
 
                domain = kmalloc(2 + (len * 2), GFP_KERNEL);
-               if (domain == NULL)
+               if (domain == NULL) {
+                       cERROR(1, "calc_ntlmv2_hash: domain mem alloc failure");
+                       rc = -ENOMEM;
                        goto calc_exit_1;
+               }
                len = cifs_strtoUCS((__le16 *)domain, ses->domainName, len,
                                        nls_cp);
                /* the following line was removed since it didn't work well
@@ -363,65 +359,292 @@ static int calc_ntlmv2_hash(struct cifsSesInfo *ses,
                   Maybe converting the domain name earlier makes sense */
                /* UniStrupr(domain); */
 
-               hmac_md5_update((char *)domain, 2*len, pctxt);
+               crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash,
+                                       (char *)domain, 2 * len);
 
                kfree(domain);
+       } else if (ses->serverName) {
+               len = strlen(ses->serverName);
+
+               server = kmalloc(2 + (len * 2), GFP_KERNEL);
+               if (server == NULL) {
+                       cERROR(1, "calc_ntlmv2_hash: server mem alloc failure");
+                       rc = -ENOMEM;
+                       goto calc_exit_1;
+               }
+               len = cifs_strtoUCS((__le16 *)server, ses->serverName, len,
+                                       nls_cp);
+               /* the following line was removed since it didn't work well
+                  with lower cased domain name that passed as an option.
+                  Maybe converting the domain name earlier makes sense */
+               /* UniStrupr(domain); */
+
+               crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash,
+                                       (char *)server, 2 * len);
+
+               kfree(server);
        }
+
+       rc = crypto_shash_final(&ses->server->ntlmssp.sdeschmacmd5->shash,
+                                       ses->server->ntlmv2_hash);
+
 calc_exit_1:
        kfree(user);
 calc_exit_2:
        /* BB FIXME what about bytes 24 through 40 of the signing key?
           compare with the NTLM example */
-       hmac_md5_final(ses->server->ntlmv2_hash, pctxt);
 
-       kfree(pctxt);
        return rc;
 }
 
-void setup_ntlmv2_rsp(struct cifsSesInfo *ses, char *resp_buf,
-                     const struct nls_table *nls_cp)
+static int
+find_domain_name(struct cifsSesInfo *ses)
+{
+       int rc = 0;
+       unsigned int attrsize;
+       unsigned int type;
+       unsigned char *blobptr;
+       struct ntlmssp2_name *attrptr;
+
+       if (ses->server->tiblob) {
+               blobptr = ses->server->tiblob;
+               attrptr = (struct ntlmssp2_name *) blobptr;
+
+               while ((type = attrptr->type) != 0) {
+                       blobptr += 2; /* advance attr type */
+                       attrsize = attrptr->length;
+                       blobptr += 2; /* advance attr size */
+                       if (type == NTLMSSP_AV_NB_DOMAIN_NAME) {
+                               if (!ses->domainName) {
+                                       ses->domainName =
+                                               kmalloc(attrptr->length + 1,
+                                                               GFP_KERNEL);
+                                       if (!ses->domainName)
+                                                       return -ENOMEM;
+                                       cifs_from_ucs2(ses->domainName,
+                                               (__le16 *)blobptr,
+                                               attrptr->length,
+                                               attrptr->length,
+                                               load_nls_default(), false);
+                               }
+                       }
+                       blobptr += attrsize; /* advance attr  value */
+                       attrptr = (struct ntlmssp2_name *) blobptr;
+               }
+       } else {
+               ses->server->tilen = 2 * sizeof(struct ntlmssp2_name);
+               ses->server->tiblob = kmalloc(ses->server->tilen, GFP_KERNEL);
+               if (!ses->server->tiblob) {
+                       ses->server->tilen = 0;
+                       cERROR(1, "Challenge target info allocation failure");
+                       return -ENOMEM;
+               }
+               memset(ses->server->tiblob, 0x0, ses->server->tilen);
+               attrptr = (struct ntlmssp2_name *) ses->server->tiblob;
+               attrptr->type = cpu_to_le16(NTLMSSP_DOMAIN_TYPE);
+       }
+
+       return rc;
+}
+
+static int
+CalcNTLMv2_response(const struct TCP_Server_Info *server,
+                        char *v2_session_response)
 {
        int rc;
+
+       if (!server->ntlmssp.sdeschmacmd5) {
+               cERROR(1, "CalcNTLMv2_response: can't generate ntlmv2 hash");
+               return -1;
+       }
+
+       crypto_shash_setkey(server->ntlmssp.hmacmd5, server->ntlmv2_hash,
+               CIFS_HMAC_MD5_HASH_SIZE);
+
+       rc = crypto_shash_init(&server->ntlmssp.sdeschmacmd5->shash);
+       if (rc) {
+               cERROR(1, "CalcNTLMv2_response: could not init hmacmd5");
+               return rc;
+       }
+
+       memcpy(v2_session_response + CIFS_SERVER_CHALLENGE_SIZE,
+               server->cryptKey, CIFS_SERVER_CHALLENGE_SIZE);
+       crypto_shash_update(&server->ntlmssp.sdeschmacmd5->shash,
+               v2_session_response + CIFS_SERVER_CHALLENGE_SIZE,
+               sizeof(struct ntlmv2_resp) - CIFS_SERVER_CHALLENGE_SIZE);
+
+       if (server->tilen)
+               crypto_shash_update(&server->ntlmssp.sdeschmacmd5->shash,
+                                       server->tiblob, server->tilen);
+
+       rc = crypto_shash_final(&server->ntlmssp.sdeschmacmd5->shash,
+                                       v2_session_response);
+
+       return rc;
+}
+
+int
+setup_ntlmv2_rsp(struct cifsSesInfo *ses, char *resp_buf,
+                     const struct nls_table *nls_cp)
+{
+       int rc = 0;
        struct ntlmv2_resp *buf = (struct ntlmv2_resp *)resp_buf;
-       struct HMACMD5Context context;
 
        buf->blob_signature = cpu_to_le32(0x00000101);
        buf->reserved = 0;
        buf->time = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
        get_random_bytes(&buf->client_chal, sizeof(buf->client_chal));
        buf->reserved2 = 0;
-       buf->names[0].type = cpu_to_le16(NTLMSSP_DOMAIN_TYPE);
-       buf->names[0].length = 0;
-       buf->names[1].type = 0;
-       buf->names[1].length = 0;
+
+       if (!ses->domainName) {
+               rc = find_domain_name(ses);
+               if (rc) {
+                       cERROR(1, "could not get domain/server name rc %d", rc);
+                       return rc;
+               }
+       }
 
        /* calculate buf->ntlmv2_hash */
        rc = calc_ntlmv2_hash(ses, nls_cp);
-       if (rc)
+       if (rc) {
                cERROR(1, "could not get v2 hash rc %d", rc);
-       CalcNTLMv2_response(ses, resp_buf);
+               return rc;
+       }
+       rc = CalcNTLMv2_response(ses->server, resp_buf);
+       if (rc) {
+               cERROR(1, "could not get v2 response rc %d", rc);
+               return rc;
+       }
 
-       /* now calculate the MAC key for NTLMv2 */
-       hmac_md5_init_limK_to_64(ses->server->ntlmv2_hash, 16, &context);
-       hmac_md5_update(resp_buf, 16, &context);
-       hmac_md5_final(ses->server->mac_signing_key.data.ntlmv2.key, &context);
+       if (!ses->server->ntlmssp.sdeschmacmd5) {
+               cERROR(1, "setup_ntlmv2_rsp: can't generate ntlmv2 hash");
+               return -1;
+       }
 
-       memcpy(&ses->server->mac_signing_key.data.ntlmv2.resp, resp_buf,
-              sizeof(struct ntlmv2_resp));
-       ses->server->mac_signing_key.len = 16 + sizeof(struct ntlmv2_resp);
+       crypto_shash_setkey(ses->server->ntlmssp.hmacmd5,
+                       ses->server->ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE);
+
+       rc = crypto_shash_init(&ses->server->ntlmssp.sdeschmacmd5->shash);
+       if (rc) {
+               cERROR(1, "setup_ntlmv2_rsp: could not init hmacmd5\n");
+               return rc;
+       }
+
+       crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash,
+                               resp_buf, CIFS_HMAC_MD5_HASH_SIZE);
+
+       rc = crypto_shash_final(&ses->server->ntlmssp.sdeschmacmd5->shash,
+               ses->server->session_key.data.ntlmv2.key);
+
+       memcpy(&ses->server->session_key.data.ntlmv2.resp, resp_buf,
+                       sizeof(struct ntlmv2_resp));
+       ses->server->session_key.len = 16 + sizeof(struct ntlmv2_resp);
+
+       return rc;
 }
 
-void CalcNTLMv2_response(const struct cifsSesInfo *ses,
-                        char *v2_session_response)
+int
+calc_seckey(struct TCP_Server_Info *server)
 {
-       struct HMACMD5Context context;
-       /* rest of v2 struct already generated */
-       memcpy(v2_session_response + 8, ses->server->cryptKey, 8);
-       hmac_md5_init_limK_to_64(ses->server->ntlmv2_hash, 16, &context);
+       int rc;
+       unsigned char sec_key[CIFS_NTLMV2_SESSKEY_SIZE];
+       struct crypto_blkcipher *tfm_arc4;
+       struct scatterlist sgin, sgout;
+       struct blkcipher_desc desc;
+
+       get_random_bytes(sec_key, CIFS_NTLMV2_SESSKEY_SIZE);
+
+       tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)",
+                                               0, CRYPTO_ALG_ASYNC);
+       if (!tfm_arc4 || IS_ERR(tfm_arc4)) {
+               cERROR(1, "could not allocate master crypto API arc4\n");
+               return 1;
+       }
+
+       desc.tfm = tfm_arc4;
+
+       crypto_blkcipher_setkey(tfm_arc4,
+               server->session_key.data.ntlmv2.key, CIFS_CPHTXT_SIZE);
+       sg_init_one(&sgin, sec_key, CIFS_CPHTXT_SIZE);
+       sg_init_one(&sgout, server->ntlmssp.ciphertext, CIFS_CPHTXT_SIZE);
+       rc = crypto_blkcipher_encrypt(&desc, &sgout, &sgin, CIFS_CPHTXT_SIZE);
 
-       hmac_md5_update(v2_session_response+8,
-                       sizeof(struct ntlmv2_resp) - 8, &context);
+       if (!rc)
+               memcpy(server->session_key.data.ntlmv2.key,
+                               sec_key, CIFS_NTLMV2_SESSKEY_SIZE);
+
+       crypto_free_blkcipher(tfm_arc4);
+
+       return 0;
+}
 
-       hmac_md5_final(v2_session_response, &context);
-/*     cifs_dump_mem("v2_sess_rsp: ", v2_session_response, 32); */
+void
+cifs_crypto_shash_release(struct TCP_Server_Info *server)
+{
+       if (server->ntlmssp.md5)
+               crypto_free_shash(server->ntlmssp.md5);
+
+       if (server->ntlmssp.hmacmd5)
+               crypto_free_shash(server->ntlmssp.hmacmd5);
+
+       kfree(server->ntlmssp.sdeschmacmd5);
+
+       kfree(server->ntlmssp.sdescmd5);
+}
+
+int
+cifs_crypto_shash_allocate(struct TCP_Server_Info *server)
+{
+       int rc;
+       unsigned int size;
+
+       server->ntlmssp.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0);
+       if (!server->ntlmssp.hmacmd5 ||
+                       IS_ERR(server->ntlmssp.hmacmd5)) {
+               cERROR(1, "could not allocate crypto hmacmd5\n");
+               return 1;
+       }
+
+       server->ntlmssp.md5 = crypto_alloc_shash("md5", 0, 0);
+       if (!server->ntlmssp.md5 || IS_ERR(server->ntlmssp.md5)) {
+               cERROR(1, "could not allocate crypto md5\n");
+               rc = 1;
+               goto cifs_crypto_shash_allocate_ret1;
+       }
+
+       size = sizeof(struct shash_desc) +
+                       crypto_shash_descsize(server->ntlmssp.hmacmd5);
+       server->ntlmssp.sdeschmacmd5 = kmalloc(size, GFP_KERNEL);
+       if (!server->ntlmssp.sdeschmacmd5) {
+               cERROR(1, "cifs_crypto_shash_allocate: can't alloc hmacmd5\n");
+               rc = -ENOMEM;
+               goto cifs_crypto_shash_allocate_ret2;
+       }
+       server->ntlmssp.sdeschmacmd5->shash.tfm = server->ntlmssp.hmacmd5;
+       server->ntlmssp.sdeschmacmd5->shash.flags = 0x0;
+
+
+       size = sizeof(struct shash_desc) +
+                       crypto_shash_descsize(server->ntlmssp.md5);
+       server->ntlmssp.sdescmd5 = kmalloc(size, GFP_KERNEL);
+       if (!server->ntlmssp.sdescmd5) {
+               cERROR(1, "cifs_crypto_shash_allocate: can't alloc md5\n");
+               rc = -ENOMEM;
+               goto cifs_crypto_shash_allocate_ret3;
+       }
+       server->ntlmssp.sdescmd5->shash.tfm = server->ntlmssp.md5;
+       server->ntlmssp.sdescmd5->shash.flags = 0x0;
+
+       return 0;
+
+cifs_crypto_shash_allocate_ret3:
+       kfree(server->ntlmssp.sdeschmacmd5);
+
+cifs_crypto_shash_allocate_ret2:
+       crypto_free_shash(server->ntlmssp.md5);
+
+cifs_crypto_shash_allocate_ret1:
+       crypto_free_shash(server->ntlmssp.hmacmd5);
+
+       return rc;
 }
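The cifsencrypt.c changes above drop the private hmac_md5_*() helpers in favour of the kernel crypto shash API: allocate an "hmac(md5)" transform once, keep a struct sdesc wrapping its shash_desc, and drive it with setkey/init/update/final. Below is a minimal, self-contained sketch of that calling sequence against the 2.6.36-era API; the function and variable names are illustrative and not part of the patch.

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

/* Sketch only: one-shot HMAC-MD5 over a contiguous buffer, producing a
 * 16-byte digest, using the same shash calls the patch switches to. */
static int example_hmac_md5(const u8 *key, unsigned int keylen,
			    const u8 *data, unsigned int datalen, u8 *digest)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int rc;

	tfm = crypto_alloc_shash("hmac(md5)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = crypto_shash_setkey(tfm, key, keylen);
	if (rc)
		goto out_free_tfm;

	/* kzalloc also zeroes the flags field this kernel's shash_desc has */
	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		rc = -ENOMEM;
		goto out_free_tfm;
	}
	desc->tfm = tfm;

	rc = crypto_shash_init(desc);
	if (!rc)
		rc = crypto_shash_update(desc, data, datalen);
	if (!rc)
		rc = crypto_shash_final(desc, digest);

	kfree(desc);
out_free_tfm:
	crypto_free_shash(tfm);
	return rc;
}

For contiguous data, crypto_shash_digest() collapses the init/update/final triple into one call; the patch keeps the three-step form because it hashes several discontiguous pieces (password hash, user name, then domain or server name).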
index 0cdfb8c32ac68c34a98f5cdf412250c0eacdec2b..c9d0cfc086ebcb609b504afdec6431e89ac75db9 100644 (file)
@@ -25,6 +25,9 @@
 #include <linux/workqueue.h>
 #include "cifs_fs_sb.h"
 #include "cifsacl.h"
+#include <crypto/internal/hash.h>
+#include <linux/scatterlist.h>
+
 /*
  * The sizes of various internal tables and strings
  */
@@ -97,7 +100,7 @@ enum protocolEnum {
        /* Netbios frames protocol not supported at this time */
 };
 
-struct mac_key {
+struct session_key {
        unsigned int len;
        union {
                char ntlm[CIFS_SESS_KEY_SIZE + 16];
@@ -120,6 +123,21 @@ struct cifs_cred {
        struct cifs_ace *aces;
 };
 
+struct sdesc {
+       struct shash_desc shash;
+       char ctx[];
+};
+
+struct ntlmssp_auth {
+       __u32 client_flags;
+       __u32 server_flags;
+       unsigned char ciphertext[CIFS_CPHTXT_SIZE];
+       struct crypto_shash *hmacmd5;
+       struct crypto_shash *md5;
+       struct sdesc *sdeschmacmd5;
+       struct sdesc *sdescmd5;
+};
+
 /*
  *****************************************************************
  * Except the CIFS PDUs themselves all the
@@ -182,11 +200,14 @@ struct TCP_Server_Info {
        /* 16th byte of RFC1001 workstation name is always null */
        char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
        __u32 sequence_number; /* needed for CIFS PDU signature */
-       struct mac_key mac_signing_key;
+       struct session_key session_key;
        char ntlmv2_hash[16];
        unsigned long lstrp; /* when we got last response from this server */
        u16 dialect; /* dialect index that server chose */
        /* extended security flavors that server supports */
+       unsigned int tilen; /* length of the target info blob */
+       unsigned char *tiblob; /* target info blob in challenge response */
+       struct ntlmssp_auth ntlmssp; /* various keys, ciphers, flags */
        bool    sec_kerberos;           /* supports plain Kerberos */
        bool    sec_mskerberos;         /* supports legacy MS Kerberos */
        bool    sec_kerberosu2u;        /* supports U2U Kerberos */
index 14d036d8db111f2719f9e50576e94024a105adfc..320e0fd0ba7b5f988b559e060173dc61c2aa064c 100644 (file)
  * Size of the session key (crypto key encrypted with the password
  */
 #define CIFS_SESS_KEY_SIZE (24)
+#define CIFS_CLIENT_CHALLENGE_SIZE (8)
+#define CIFS_SERVER_CHALLENGE_SIZE (8)
+#define CIFS_HMAC_MD5_HASH_SIZE (16)
+#define CIFS_CPHTXT_SIZE (16)
+#define CIFS_NTLMV2_SESSKEY_SIZE (16)
+#define CIFS_NTHASH_SIZE (16)
 
 /*
  * Maximum user name length
@@ -663,7 +669,6 @@ struct ntlmv2_resp {
        __le64  time;
        __u64  client_chal; /* random */
        __u32  reserved2;
-       struct ntlmssp2_name names[2];
        /* array of name entries could follow ending in minimum 4 byte struct */
 } __attribute__((packed));
 
index 1f5450814087bed34651f0c2ac4834d8251d63e6..1378d9133844f08a369057608ed9d312bebc732f 100644 (file)
@@ -361,15 +361,15 @@ extern int cifs_sign_smb(struct smb_hdr *, struct TCP_Server_Info *, __u32 *);
 extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *,
                          __u32 *);
 extern int cifs_verify_signature(struct smb_hdr *,
-                                const struct mac_key *mac_key,
+                                struct TCP_Server_Info *server,
                                __u32 expected_sequence_number);
-extern int cifs_calculate_mac_key(struct mac_key *key, const char *rn,
+extern int cifs_calculate_session_key(struct session_key *key, const char *rn,
                                 const char *pass);
-extern int CalcNTLMv2_partial_mac_key(struct cifsSesInfo *,
-                       const struct nls_table *);
-extern void CalcNTLMv2_response(const struct cifsSesInfo *, char *);
-extern void setup_ntlmv2_rsp(struct cifsSesInfo *, char *,
+extern int setup_ntlmv2_rsp(struct cifsSesInfo *, char *,
                             const struct nls_table *);
+extern int cifs_crypto_shash_allocate(struct TCP_Server_Info *);
+extern void cifs_crypto_shash_release(struct TCP_Server_Info *);
+extern int calc_seckey(struct TCP_Server_Info *);
 #ifdef CONFIG_CIFS_WEAK_PW_HASH
 extern void calc_lanman_hash(const char *password, const char *cryptkey,
                                bool encrypt, char *lnm_session_key);
index c65c3419dd3703f12bb4994e9333c085c907ecfa..4bda920d1f754548ea705b94b13be9a1db321699 100644 (file)
@@ -604,11 +604,14 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
                        else
                                rc = -EINVAL;
 
-                       if (server->sec_kerberos || server->sec_mskerberos)
-                               server->secType = Kerberos;
-                       else if (server->sec_ntlmssp)
-                               server->secType = RawNTLMSSP;
-                       else
+                       if (server->secType == Kerberos) {
+                               if (!server->sec_kerberos &&
+                                               !server->sec_mskerberos)
+                                       rc = -EOPNOTSUPP;
+                       } else if (server->secType == RawNTLMSSP) {
+                               if (!server->sec_ntlmssp)
+                                       rc = -EOPNOTSUPP;
+                       } else
                                rc = -EOPNOTSUPP;
                }
        } else
index 95c2ea67edfb8f240d286885caddd51031aa82fc..ec0ea4a43bdb4efc0f3734f439b6af78af97a2f4 100644 (file)
@@ -1673,7 +1673,9 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol)
                                    MAX_USERNAME_SIZE))
                                continue;
                        if (strlen(vol->username) != 0 &&
-                           strncmp(ses->password, vol->password,
+                           ses->password != NULL &&
+                           strncmp(ses->password,
+                                   vol->password ? vol->password : "",
                                    MAX_PASSWORD_SIZE))
                                continue;
                }
@@ -1706,6 +1708,7 @@ cifs_put_smb_ses(struct cifsSesInfo *ses)
                CIFSSMBLogoff(xid, ses);
                _FreeXid(xid);
        }
+       cifs_crypto_shash_release(server);
        sesInfoFree(ses);
        cifs_put_tcp_session(server);
 }
@@ -1785,13 +1788,23 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
        ses->linux_uid = volume_info->linux_uid;
        ses->overrideSecFlg = volume_info->secFlg;
 
+       rc = cifs_crypto_shash_allocate(server);
+       if (rc) {
+               cERROR(1, "could not setup hash structures rc %d", rc);
+               goto get_ses_fail;
+       }
+       server->tilen = 0;
+       server->tiblob = NULL;
+
        mutex_lock(&ses->session_mutex);
        rc = cifs_negotiate_protocol(xid, ses);
        if (!rc)
                rc = cifs_setup_session(xid, ses, volume_info->local_nls);
        mutex_unlock(&ses->session_mutex);
-       if (rc)
+       if (rc) {
+               cifs_crypto_shash_release(ses->server);
                goto get_ses_fail;
+       }
 
        /* success, put it on the list */
        write_lock(&cifs_tcp_ses_lock);
index 578d88c5b46e7eae539fb04aede8f59b7c9dac53..f9ed0751cc12cb6f93e498b526c07e215bb91138 100644 (file)
@@ -305,8 +305,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
        full_path = build_path_from_dentry(direntry);
        if (full_path == NULL) {
                rc = -ENOMEM;
-               FreeXid(xid);
-               return rc;
+               goto cifs_create_out;
        }
 
        if (oplockEnabled)
@@ -365,9 +364,8 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
 
        buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
        if (buf == NULL) {
-               kfree(full_path);
-               FreeXid(xid);
-               return -ENOMEM;
+               rc = -ENOMEM;
+               goto cifs_create_out;
        }
 
        /*
@@ -496,6 +494,11 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
        struct cifsTconInfo *pTcon;
        char *full_path = NULL;
        struct inode *newinode = NULL;
+       int oplock = 0;
+       u16 fileHandle;
+       FILE_ALL_INFO *buf = NULL;
+       unsigned int bytes_written;
+       struct win_dev *pdev;
 
        if (!old_valid_dev(device_number))
                return -EINVAL;
@@ -506,9 +509,12 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
        pTcon = cifs_sb->tcon;
 
        full_path = build_path_from_dentry(direntry);
-       if (full_path == NULL)
+       if (full_path == NULL) {
                rc = -ENOMEM;
-       else if (pTcon->unix_ext) {
+               goto mknod_out;
+       }
+
+       if (pTcon->unix_ext) {
                struct cifs_unix_set_info_args args = {
                        .mode   = mode & ~current_umask(),
                        .ctime  = NO_CHANGE_64,
@@ -527,87 +533,78 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
                                            cifs_sb->local_nls,
                                            cifs_sb->mnt_cifs_flags &
                                                CIFS_MOUNT_MAP_SPECIAL_CHR);
+               if (rc)
+                       goto mknod_out;
 
-               if (!rc) {
-                       rc = cifs_get_inode_info_unix(&newinode, full_path,
+               rc = cifs_get_inode_info_unix(&newinode, full_path,
                                                inode->i_sb, xid);
-                       if (pTcon->nocase)
-                               direntry->d_op = &cifs_ci_dentry_ops;
-                       else
-                               direntry->d_op = &cifs_dentry_ops;
-                       if (rc == 0)
-                               d_instantiate(direntry, newinode);
-               }
-       } else {
-               if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
-                       int oplock = 0;
-                       u16 fileHandle;
-                       FILE_ALL_INFO *buf;
+               if (pTcon->nocase)
+                       direntry->d_op = &cifs_ci_dentry_ops;
+               else
+                       direntry->d_op = &cifs_dentry_ops;
 
-                       cFYI(1, "sfu compat create special file");
+               if (rc == 0)
+                       d_instantiate(direntry, newinode);
+               goto mknod_out;
+       }
 
-                       buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
-                       if (buf == NULL) {
-                               kfree(full_path);
-                               rc = -ENOMEM;
-                               FreeXid(xid);
-                               return rc;
-                       }
+       if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
+               goto mknod_out;
 
-                       rc = CIFSSMBOpen(xid, pTcon, full_path,
-                                        FILE_CREATE, /* fail if exists */
-                                        GENERIC_WRITE /* BB would
-                                         WRITE_OWNER | WRITE_DAC be better? */,
-                                        /* Create a file and set the
-                                           file attribute to SYSTEM */
-                                        CREATE_NOT_DIR | CREATE_OPTION_SPECIAL,
-                                        &fileHandle, &oplock, buf,
-                                        cifs_sb->local_nls,
-                                        cifs_sb->mnt_cifs_flags &
-                                           CIFS_MOUNT_MAP_SPECIAL_CHR);
-
-                       /* BB FIXME - add handling for backlevel servers
-                          which need legacy open and check for all
-                          calls to SMBOpen for fallback to SMBLeagcyOpen */
-                       if (!rc) {
-                               /* BB Do not bother to decode buf since no
-                                  local inode yet to put timestamps in,
-                                  but we can reuse it safely */
-                               unsigned int bytes_written;
-                               struct win_dev *pdev;
-                               pdev = (struct win_dev *)buf;
-                               if (S_ISCHR(mode)) {
-                                       memcpy(pdev->type, "IntxCHR", 8);
-                                       pdev->major =
-                                             cpu_to_le64(MAJOR(device_number));
-                                       pdev->minor =
-                                             cpu_to_le64(MINOR(device_number));
-                                       rc = CIFSSMBWrite(xid, pTcon,
-                                               fileHandle,
-                                               sizeof(struct win_dev),
-                                               0, &bytes_written, (char *)pdev,
-                                               NULL, 0);
-                               } else if (S_ISBLK(mode)) {
-                                       memcpy(pdev->type, "IntxBLK", 8);
-                                       pdev->major =
-                                             cpu_to_le64(MAJOR(device_number));
-                                       pdev->minor =
-                                             cpu_to_le64(MINOR(device_number));
-                                       rc = CIFSSMBWrite(xid, pTcon,
-                                               fileHandle,
-                                               sizeof(struct win_dev),
-                                               0, &bytes_written, (char *)pdev,
-                                               NULL, 0);
-                               } /* else if(S_ISFIFO */
-                               CIFSSMBClose(xid, pTcon, fileHandle);
-                               d_drop(direntry);
-                       }
-                       kfree(buf);
-                       /* add code here to set EAs */
-               }
+
+       cFYI(1, "sfu compat create special file");
+
+       buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
+       if (buf == NULL) {
+               kfree(full_path);
+               rc = -ENOMEM;
+               FreeXid(xid);
+               return rc;
        }
 
+       /* FIXME: would WRITE_OWNER | WRITE_DAC be better? */
+       rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_CREATE,
+                        GENERIC_WRITE, CREATE_NOT_DIR | CREATE_OPTION_SPECIAL,
+                        &fileHandle, &oplock, buf, cifs_sb->local_nls,
+                        cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
+       if (rc)
+               goto mknod_out;
+
+       /* BB Do not bother to decode buf since no local inode yet to put
+        * timestamps in, but we can reuse it safely */
+
+       pdev = (struct win_dev *)buf;
+       if (S_ISCHR(mode)) {
+               memcpy(pdev->type, "IntxCHR", 8);
+               pdev->major =
+                     cpu_to_le64(MAJOR(device_number));
+               pdev->minor =
+                     cpu_to_le64(MINOR(device_number));
+               rc = CIFSSMBWrite(xid, pTcon,
+                       fileHandle,
+                       sizeof(struct win_dev),
+                       0, &bytes_written, (char *)pdev,
+                       NULL, 0);
+       } else if (S_ISBLK(mode)) {
+               memcpy(pdev->type, "IntxBLK", 8);
+               pdev->major =
+                     cpu_to_le64(MAJOR(device_number));
+               pdev->minor =
+                     cpu_to_le64(MINOR(device_number));
+               rc = CIFSSMBWrite(xid, pTcon,
+                       fileHandle,
+                       sizeof(struct win_dev),
+                       0, &bytes_written, (char *)pdev,
+                       NULL, 0);
+       } /* else if (S_ISFIFO) */
+       CIFSSMBClose(xid, pTcon, fileHandle);
+       d_drop(direntry);
+
+       /* FIXME: add code here to set EAs */
+
+mknod_out:
        kfree(full_path);
+       kfree(buf);
        FreeXid(xid);
        return rc;
 }
index db11fdef0e92b1ca52a75fb033f03dda24e8f6a5..de748c652d11f4fca214b2aed62c8ad1639ae2dc 100644 (file)
@@ -242,8 +242,7 @@ int cifs_open(struct inode *inode, struct file *file)
        full_path = build_path_from_dentry(file->f_path.dentry);
        if (full_path == NULL) {
                rc = -ENOMEM;
-               FreeXid(xid);
-               return rc;
+               goto out;
        }
 
        cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
index 4bc47e5b5f29af38d601f699face0e1f16b31a9f..86a164f08a74a51399c2152799ec178c93902fa6 100644 (file)
@@ -834,7 +834,7 @@ struct inode *cifs_root_iget(struct super_block *sb, unsigned long ino)
                                                xid, NULL);
 
        if (!inode)
-               return ERR_PTR(-ENOMEM);
+               return ERR_PTR(rc);
 
 #ifdef CONFIG_CIFS_FSCACHE
        /* populate tcon->resource_id */
index 49c9a4e7531979c3e65615dd277ad4b0815ed0ae..1db0f0746a5b4242f927e9203d8749711180918c 100644 (file)
 #define NTLMSSP_NEGOTIATE_KEY_XCH   0x40000000
 #define NTLMSSP_NEGOTIATE_56        0x80000000
 
+/* Define AV Pair Field IDs */
+#define NTLMSSP_AV_EOL                 0
+#define NTLMSSP_AV_NB_COMPUTER_NAME    1
+#define NTLMSSP_AV_NB_DOMAIN_NAME      2
+#define NTLMSSP_AV_DNS_COMPUTER_NAME   3
+#define NTLMSSP_AV_DNS_DOMAIN_NAME     4
+#define NTLMSSP_AV_DNS_TREE_NAME       5
+#define NTLMSSP_AV_FLAGS               6
+#define NTLMSSP_AV_TIMESTAMP           7
+#define NTLMSSP_AV_RESTRICTION         8
+#define NTLMSSP_AV_TARGET_NAME         9
+#define NTLMSSP_AV_CHANNEL_BINDINGS    10
+
 /* Although typedefs are not commonly used for structure definitions */
 /* in the Linux kernel, in this particular case they are useful      */
 /* to more closely match the standards document for NTLMSSP from     */
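The NTLMSSP_AV_* identifiers added above name the type/length/value ("AV pair") entries carried in the challenge's target info blob; find_domain_name() in the cifsencrypt.c hunk walks that list looking for NTLMSSP_AV_NB_DOMAIN_NAME. A rough sketch of such a walk follows, assuming the little-endian 16-bit type and length fields of the NTLMSSP target info format; the struct and function names are illustrative, and the NTLMSSP_AV_* defines are assumed to be in scope.

#include <linux/kernel.h>

struct av_pair_hdr {
	__le16 type;
	__le16 length;
} __attribute__((packed));

/* Sketch only: report each AV pair in a target info blob of blen bytes. */
static void walk_target_info(const unsigned char *blob, unsigned int blen)
{
	while (blen >= sizeof(struct av_pair_hdr)) {
		const struct av_pair_hdr *av = (const void *)blob;
		u16 type = le16_to_cpu(av->type);
		u16 len = le16_to_cpu(av->length);

		if (type == NTLMSSP_AV_EOL)
			break;
		if (sizeof(*av) + len > blen)
			break;		/* malformed blob, stop */

		if (type == NTLMSSP_AV_NB_DOMAIN_NAME)
			pr_debug("NetBIOS domain name AV pair, %u bytes\n", len);

		blob += sizeof(*av) + len;
		blen -= sizeof(*av) + len;
	}
}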
index 0a57cb7db5dd7554030e599cd111379e343083df..795095f4eac69ba204257a597522e465dafac371 100644 (file)
@@ -383,6 +383,9 @@ static int decode_ascii_ssetup(char **pbcc_area, int bleft,
 static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
                                    struct cifsSesInfo *ses)
 {
+       unsigned int tioffset; /* challenge message target info area */
+       unsigned int tilen; /* challenge message target info area length */
+
        CHALLENGE_MESSAGE *pblob = (CHALLENGE_MESSAGE *)bcc_ptr;
 
        if (blob_len < sizeof(CHALLENGE_MESSAGE)) {
@@ -405,6 +408,20 @@ static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
        /* BB spec says that if AvId field of MsvAvTimestamp is populated then
                we must set the MIC field of the AUTHENTICATE_MESSAGE */
 
+       ses->server->ntlmssp.server_flags = le32_to_cpu(pblob->NegotiateFlags);
+
+       tioffset = cpu_to_le16(pblob->TargetInfoArray.BufferOffset);
+       tilen = cpu_to_le16(pblob->TargetInfoArray.Length);
+       ses->server->tilen = tilen;
+       if (tilen) {
+               ses->server->tiblob = kmalloc(tilen, GFP_KERNEL);
+               if (!ses->server->tiblob) {
+                       cERROR(1, "Challenge target info allocation failure");
+                       return -ENOMEM;
+               }
+               memcpy(ses->server->tiblob,  bcc_ptr + tioffset, tilen);
+       }
+
        return 0;
 }
 
@@ -425,12 +442,13 @@ static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
        /* BB is NTLMV2 session security format easier to use here? */
        flags = NTLMSSP_NEGOTIATE_56 |  NTLMSSP_REQUEST_TARGET |
                NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
-               NTLMSSP_NEGOTIATE_NT_ONLY | NTLMSSP_NEGOTIATE_NTLM;
+               NTLMSSP_NEGOTIATE_NTLM;
        if (ses->server->secMode &
-          (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
-               flags |= NTLMSSP_NEGOTIATE_SIGN;
-       if (ses->server->secMode & SECMODE_SIGN_REQUIRED)
-               flags |= NTLMSSP_NEGOTIATE_ALWAYS_SIGN;
+          (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
+               flags |= NTLMSSP_NEGOTIATE_SIGN |
+                       NTLMSSP_NEGOTIATE_KEY_XCH |
+                       NTLMSSP_NEGOTIATE_EXTENDED_SEC;
+       }
 
        sec_blob->NegotiateFlags |= cpu_to_le32(flags);
 
@@ -451,10 +469,12 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
                                   struct cifsSesInfo *ses,
                                   const struct nls_table *nls_cp, bool first)
 {
+       int rc;
+       unsigned int size;
        AUTHENTICATE_MESSAGE *sec_blob = (AUTHENTICATE_MESSAGE *)pbuffer;
        __u32 flags;
        unsigned char *tmp;
-       char ntlm_session_key[CIFS_SESS_KEY_SIZE];
+       struct ntlmv2_resp ntlmv2_response = {};
 
        memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
        sec_blob->MessageType = NtLmAuthenticate;
@@ -477,19 +497,25 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
        sec_blob->LmChallengeResponse.Length = 0;
        sec_blob->LmChallengeResponse.MaximumLength = 0;
 
-       /* calculate session key,  BB what about adding similar ntlmv2 path? */
-       SMBNTencrypt(ses->password, ses->server->cryptKey, ntlm_session_key);
-       if (first)
-               cifs_calculate_mac_key(&ses->server->mac_signing_key,
-                                      ntlm_session_key, ses->password);
-
-       memcpy(tmp, ntlm_session_key, CIFS_SESS_KEY_SIZE);
        sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer);
-       sec_blob->NtChallengeResponse.Length = cpu_to_le16(CIFS_SESS_KEY_SIZE);
-       sec_blob->NtChallengeResponse.MaximumLength =
-                               cpu_to_le16(CIFS_SESS_KEY_SIZE);
+       rc = setup_ntlmv2_rsp(ses, (char *)&ntlmv2_response, nls_cp);
+       if (rc) {
+               cERROR(1, "error rc: %d during ntlmssp ntlmv2 setup", rc);
+               goto setup_ntlmv2_ret;
+       }
+       size =  sizeof(struct ntlmv2_resp);
+       memcpy(tmp, (char *)&ntlmv2_response, size);
+       tmp += size;
+       if (ses->server->tilen > 0) {
+               memcpy(tmp, ses->server->tiblob, ses->server->tilen);
+               tmp += ses->server->tilen;
+       } else
+               ses->server->tilen = 0;
 
-       tmp += CIFS_SESS_KEY_SIZE;
+       sec_blob->NtChallengeResponse.Length = cpu_to_le16(size +
+                               ses->server->tilen);
+       sec_blob->NtChallengeResponse.MaximumLength =
+               cpu_to_le16(size + ses->server->tilen);
 
        if (ses->domainName == NULL) {
                sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
@@ -501,7 +527,6 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
                len = cifs_strtoUCS((__le16 *)tmp, ses->domainName,
                                    MAX_USERNAME_SIZE, nls_cp);
                len *= 2; /* unicode is 2 bytes each */
-               len += 2; /* trailing null */
                sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
                sec_blob->DomainName.Length = cpu_to_le16(len);
                sec_blob->DomainName.MaximumLength = cpu_to_le16(len);
@@ -518,7 +543,6 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
                len = cifs_strtoUCS((__le16 *)tmp, ses->userName,
                                    MAX_USERNAME_SIZE, nls_cp);
                len *= 2; /* unicode is 2 bytes each */
-               len += 2; /* trailing null */
                sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
                sec_blob->UserName.Length = cpu_to_le16(len);
                sec_blob->UserName.MaximumLength = cpu_to_le16(len);
@@ -530,9 +554,26 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
        sec_blob->WorkstationName.MaximumLength = 0;
        tmp += 2;
 
-       sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
-       sec_blob->SessionKey.Length = 0;
-       sec_blob->SessionKey.MaximumLength = 0;
+       if ((ses->server->ntlmssp.server_flags & NTLMSSP_NEGOTIATE_KEY_XCH) &&
+                       !calc_seckey(ses->server)) {
+               memcpy(tmp, ses->server->ntlmssp.ciphertext, CIFS_CPHTXT_SIZE);
+               sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
+               sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE);
+               sec_blob->SessionKey.MaximumLength =
+                       cpu_to_le16(CIFS_CPHTXT_SIZE);
+               tmp += CIFS_CPHTXT_SIZE;
+       } else {
+               sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
+               sec_blob->SessionKey.Length = 0;
+               sec_blob->SessionKey.MaximumLength = 0;
+       }
+
+       ses->server->sequence_number = 0;
+
+setup_ntlmv2_ret:
+       if (ses->server->tilen > 0)
+               kfree(ses->server->tiblob);
+
        return tmp - pbuffer;
 }
 
@@ -546,15 +587,14 @@ static void setup_ntlmssp_neg_req(SESSION_SETUP_ANDX *pSMB,
        return;
 }
 
-static int setup_ntlmssp_auth_req(SESSION_SETUP_ANDX *pSMB,
+static int setup_ntlmssp_auth_req(char *ntlmsspblob,
                                  struct cifsSesInfo *ses,
                                  const struct nls_table *nls, bool first_time)
 {
        int bloblen;
 
-       bloblen = build_ntlmssp_auth_blob(&pSMB->req.SecurityBlob[0], ses, nls,
+       bloblen = build_ntlmssp_auth_blob(ntlmsspblob, ses, nls,
                                          first_time);
-       pSMB->req.SecurityBlobLength = cpu_to_le16(bloblen);
 
        return bloblen;
 }
@@ -690,7 +730,7 @@ ssetup_ntlmssp_authenticate:
 
                if (first_time) /* should this be moved into common code
                                  with similar ntlmv2 path? */
-                       cifs_calculate_mac_key(&ses->server->mac_signing_key,
+                       cifs_calculate_session_key(&ses->server->session_key,
                                ntlm_session_key, ses->password);
                /* copy session key */
 
@@ -729,12 +769,21 @@ ssetup_ntlmssp_authenticate:
                        cpu_to_le16(sizeof(struct ntlmv2_resp));
 
                /* calculate session key */
-               setup_ntlmv2_rsp(ses, v2_sess_key, nls_cp);
+               rc = setup_ntlmv2_rsp(ses, v2_sess_key, nls_cp);
+               if (rc) {
+                       kfree(v2_sess_key);
+                       goto ssetup_exit;
+               }
                /* FIXME: calculate MAC key */
                memcpy(bcc_ptr, (char *)v2_sess_key,
                       sizeof(struct ntlmv2_resp));
                bcc_ptr += sizeof(struct ntlmv2_resp);
                kfree(v2_sess_key);
+               if (ses->server->tilen > 0) {
+                       memcpy(bcc_ptr, ses->server->tiblob,
+                               ses->server->tilen);
+                       bcc_ptr += ses->server->tilen;
+               }
                if (ses->capabilities & CAP_UNICODE) {
                        if (iov[0].iov_len % 2) {
                                *bcc_ptr = 0;
@@ -765,15 +814,15 @@ ssetup_ntlmssp_authenticate:
                }
                /* bail out if key is too long */
                if (msg->sesskey_len >
-                   sizeof(ses->server->mac_signing_key.data.krb5)) {
+                   sizeof(ses->server->session_key.data.krb5)) {
                        cERROR(1, "Kerberos signing key too long (%u bytes)",
                                msg->sesskey_len);
                        rc = -EOVERFLOW;
                        goto ssetup_exit;
                }
                if (first_time) {
-                       ses->server->mac_signing_key.len = msg->sesskey_len;
-                       memcpy(ses->server->mac_signing_key.data.krb5,
+                       ses->server->session_key.len = msg->sesskey_len;
+                       memcpy(ses->server->session_key.data.krb5,
                                msg->data, msg->sesskey_len);
                }
                pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
@@ -815,12 +864,28 @@ ssetup_ntlmssp_authenticate:
                        if (phase == NtLmNegotiate) {
                                setup_ntlmssp_neg_req(pSMB, ses);
                                iov[1].iov_len = sizeof(NEGOTIATE_MESSAGE);
+                               iov[1].iov_base = &pSMB->req.SecurityBlob[0];
                        } else if (phase == NtLmAuthenticate) {
                                int blob_len;
-                               blob_len = setup_ntlmssp_auth_req(pSMB, ses,
-                                                                 nls_cp,
-                                                                 first_time);
+                               char *ntlmsspblob;
+
+                               ntlmsspblob = kmalloc(5 *
+                                       sizeof(struct _AUTHENTICATE_MESSAGE),
+                                       GFP_KERNEL);
+                               if (!ntlmsspblob) {
+                                       cERROR(1, "Can't allocate NTLMSSP");
+                                       rc = -ENOMEM;
+                                       goto ssetup_exit;
+                               }
+
+                               blob_len = setup_ntlmssp_auth_req(ntlmsspblob,
+                                                               ses,
+                                                               nls_cp,
+                                                               first_time);
                                iov[1].iov_len = blob_len;
+                               iov[1].iov_base = ntlmsspblob;
+                               pSMB->req.SecurityBlobLength =
+                                       cpu_to_le16(blob_len);
                                /* Make sure that we tell the server that we
                                   are using the uid that it just gave us back
                                   on the response (challenge) */
@@ -830,7 +895,6 @@ ssetup_ntlmssp_authenticate:
                                rc = -ENOSYS;
                                goto ssetup_exit;
                        }
-                       iov[1].iov_base = &pSMB->req.SecurityBlob[0];
                        /* unicode strings must be word aligned */
                        if ((iov[0].iov_len + iov[1].iov_len) % 2) {
                                *bcc_ptr = 0;
index 82f78c4d6978ceafdab5789182193b899a435202..e0588cdf4cc5d5a1e8a73c1190c21f2d6cbe4986 100644 (file)
@@ -543,7 +543,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
                    (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
                                             SECMODE_SIGN_ENABLED))) {
                        rc = cifs_verify_signature(midQ->resp_buf,
-                                               &ses->server->mac_signing_key,
+                                               ses->server,
                                                midQ->sequence_number+1);
                        if (rc) {
                                cERROR(1, "Unexpected SMB signature");
@@ -731,7 +731,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
                    (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
                                             SECMODE_SIGN_ENABLED))) {
                        rc = cifs_verify_signature(out_buf,
-                                               &ses->server->mac_signing_key,
+                                               ses->server,
                                                midQ->sequence_number+1);
                        if (rc) {
                                cERROR(1, "Unexpected SMB signature");
@@ -981,7 +981,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
            (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
                                     SECMODE_SIGN_ENABLED))) {
                rc = cifs_verify_signature(out_buf,
-                                          &ses->server->mac_signing_key,
+                                          ses->server,
                                           midQ->sequence_number+1);
                if (rc) {
                        cERROR(1, "Unexpected SMB signature");
index 51f270b479b6938a4a730ea56f9011c30f99563f..48d74c7391d13f4f07393c45d19825e937ecbcd1 100644 (file)
@@ -634,7 +634,7 @@ static int dio_send_cur_page(struct dio *dio)
        int ret = 0;
 
        if (dio->bio) {
-               loff_t cur_offset = dio->block_in_file << dio->blkbits;
+               loff_t cur_offset = dio->cur_page_fs_offset;
                loff_t bio_next_offset = dio->logical_offset_in_bio +
                        dio->bio->bi_size;
 
@@ -659,7 +659,7 @@ static int dio_send_cur_page(struct dio *dio)
                 * Submit now if the underlying fs is about to perform a
                 * metadata read
                 */
-               if (dio->boundary)
+               else if (dio->boundary)
                        dio_bio_submit(dio);
        }
 
index a2e3b562e65d996f62d1ef15eaa9c6e5a7a2293f..cbadc1bee6e7ecceeefa5131143e08783d006297 100644 (file)
@@ -1793,7 +1793,7 @@ struct kmem_cache *ecryptfs_key_tfm_cache;
 static struct list_head key_tfm_list;
 struct mutex key_tfm_list_mutex;
 
-int ecryptfs_init_crypto(void)
+int __init ecryptfs_init_crypto(void)
 {
        mutex_init(&key_tfm_list_mutex);
        INIT_LIST_HEAD(&key_tfm_list);
@@ -2169,7 +2169,6 @@ int ecryptfs_encrypt_and_encode_filename(
                                (ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE
                                 + encoded_name_no_prefix_size);
                        (*encoded_name)[(*encoded_name_size)] = '\0';
-                       (*encoded_name_size)++;
                } else {
                        rc = -EOPNOTSUPP;
                }
index 6c55113e72222cf473f92b16277056561ebbf55d..3fbc94203380acf8e6095627ee610ff058df5f38 100644 (file)
@@ -349,7 +349,7 @@ out:
 
 /**
  * ecryptfs_new_lower_dentry
- * @ename: The name of the new dentry.
+ * @name: The name of the new dentry.
  * @lower_dir_dentry: Parent directory of the new dentry.
  * @nd: nameidata from last lookup.
  *
@@ -386,20 +386,19 @@ ecryptfs_new_lower_dentry(struct qstr *name, struct dentry *lower_dir_dentry,
  * ecryptfs_lookup_one_lower
  * @ecryptfs_dentry: The eCryptfs dentry that we are looking up
  * @lower_dir_dentry: lower parent directory
+ * @name: lower file name
  *
  * Get the lower dentry from vfs. If lower dentry does not exist yet,
  * create it.
  */
 static struct dentry *
 ecryptfs_lookup_one_lower(struct dentry *ecryptfs_dentry,
-                         struct dentry *lower_dir_dentry)
+                         struct dentry *lower_dir_dentry, struct qstr *name)
 {
        struct nameidata nd;
        struct vfsmount *lower_mnt;
-       struct qstr *name;
        int err;
 
-       name = &ecryptfs_dentry->d_name;
        lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(
                                    ecryptfs_dentry->d_parent));
        err = vfs_path_lookup(lower_dir_dentry, lower_mnt, name->name , 0, &nd);
@@ -434,6 +433,7 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
        size_t encrypted_and_encoded_name_size;
        struct ecryptfs_mount_crypt_stat *mount_crypt_stat = NULL;
        struct dentry *lower_dir_dentry, *lower_dentry;
+       struct qstr lower_name;
        int rc = 0;
 
        ecryptfs_dentry->d_op = &ecryptfs_dops;
@@ -444,9 +444,17 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
                goto out_d_drop;
        }
        lower_dir_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry->d_parent);
-
+       lower_name.name = ecryptfs_dentry->d_name.name;
+       lower_name.len = ecryptfs_dentry->d_name.len;
+       lower_name.hash = ecryptfs_dentry->d_name.hash;
+       if (lower_dir_dentry->d_op && lower_dir_dentry->d_op->d_hash) {
+               rc = lower_dir_dentry->d_op->d_hash(lower_dir_dentry,
+                                                   &lower_name);
+               if (rc < 0)
+                       goto out_d_drop;
+       }
        lower_dentry = ecryptfs_lookup_one_lower(ecryptfs_dentry,
-                                                lower_dir_dentry);
+                                                lower_dir_dentry, &lower_name);
        if (IS_ERR(lower_dentry)) {
                rc = PTR_ERR(lower_dentry);
                ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_lower() returned "
@@ -471,8 +479,17 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
                       "filename; rc = [%d]\n", __func__, rc);
                goto out_d_drop;
        }
+       lower_name.name = encrypted_and_encoded_name;
+       lower_name.len = encrypted_and_encoded_name_size;
+       lower_name.hash = full_name_hash(lower_name.name, lower_name.len);
+       if (lower_dir_dentry->d_op && lower_dir_dentry->d_op->d_hash) {
+               rc = lower_dir_dentry->d_op->d_hash(lower_dir_dentry,
+                                                   &lower_name);
+               if (rc < 0)
+                       goto out_d_drop;
+       }
        lower_dentry = ecryptfs_lookup_one_lower(ecryptfs_dentry,
-                                                lower_dir_dentry);
+                                                lower_dir_dentry, &lower_name);
        if (IS_ERR(lower_dentry)) {
                rc = PTR_ERR(lower_dentry);
                ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_lower() returned "
index 89c5476506ef36c8c3de7520565b8360d70eb83b..73811cfa2ea4369766a99a251c614ddb091bc8f4 100644 (file)
@@ -515,6 +515,7 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
        if (!s) {
                printk(KERN_ERR "%s: Out of memory whilst trying to kmalloc "
                       "[%zd] bytes of kernel memory\n", __func__, sizeof(*s));
+               rc = -ENOMEM;
                goto out;
        }
        s->desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -806,6 +807,7 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
        if (!s) {
                printk(KERN_ERR "%s: Out of memory whilst trying to kmalloc "
                       "[%zd] bytes of kernel memory\n", __func__, sizeof(*s));
+               rc = -ENOMEM;
                goto out;
        }
        s->desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
index d8c3a373aafa40547090427c0ae1e96b73f49dbe..0851ab6980f54038b8f0ed9239712fea573622c5 100644 (file)
@@ -86,7 +86,7 @@ out:
        return 0;
 }
 
-int ecryptfs_init_kthread(void)
+int __init ecryptfs_init_kthread(void)
 {
        int rc = 0;
 
index bcb68c0cb1f0fed8ac5300c804b304879b08ae22..ab2248090515534dfd47b940b83aef63c696b545 100644 (file)
@@ -473,7 +473,7 @@ sleep:
        return rc;
 }
 
-int ecryptfs_init_messaging(void)
+int __init ecryptfs_init_messaging(void)
 {
        int i;
        int rc = 0;
index 3745f612bcd438cb477f8bb245afeb5f47495f89..00208c3d7e926cf799974ef23d5bdaebd72151c8 100644 (file)
@@ -500,7 +500,7 @@ static struct miscdevice ecryptfs_miscdev = {
  *
  * Returns zero on success; non-zero otherwise
  */
-int ecryptfs_init_ecryptfs_miscdev(void)
+int __init ecryptfs_init_ecryptfs_miscdev(void)
 {
        int rc;
 
index 6769fd0f35b88373fdb8d0b265668a976a7ab251..f8cc34f542c3a1cc8b4f53309ffae317ecdddf15 100644 (file)
@@ -769,11 +769,15 @@ EXPORT_SYMBOL(kill_fasync);
 
 static int __init fcntl_init(void)
 {
-       /* please add new bits here to ensure allocation uniqueness */
-       BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
+       /*
+        * Please add new bits here to ensure allocation uniqueness.
+        * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
+        * is defined as O_NONBLOCK on some platforms and not on others.
+        */
+       BUILD_BUG_ON(18 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
                O_RDONLY        | O_WRONLY      | O_RDWR        |
                O_CREAT         | O_EXCL        | O_NOCTTY      |
-               O_TRUNC         | O_APPEND      | O_NONBLOCK    |
+               O_TRUNC         | O_APPEND      | /* O_NONBLOCK | */
                __O_SYNC        | O_DSYNC       | FASYNC        |
                O_DIRECT        | O_LARGEFILE   | O_DIRECTORY   |
                O_NOFOLLOW      | O_NOATIME     | O_CLOEXEC     |
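The fcntl.c hunk above keeps a compile-time uniqueness check on the O_* flags: OR the flags together and compare the population count (HWEIGHT32) of the result against the number of distinct bits expected, letting BUILD_BUG_ON break the build if two flags ever share a bit. The same idiom, sketched for a made-up flag set:

#include <linux/kernel.h>
#include <linux/bitops.h>

#define MYF_READ	0x01
#define MYF_WRITE	0x02
#define MYF_APPEND	0x04

static inline void check_myf_flag_bits(void)
{
	/* Three single-bit flags must OR to a word with exactly three
	 * bits set; any overlap makes this a compile-time error. */
	BUILD_BUG_ON(3 != HWEIGHT32(MYF_READ | MYF_WRITE | MYF_APPEND));
}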
index 69ad053ffd78cb0f2669516b5327571f37d65254..d367af1514efe696b50a73374fffe77784c65b6a 100644 (file)
@@ -276,7 +276,7 @@ static void flush_bg_queue(struct fuse_conn *fc)
  * Called with fc->lock, unlocks it
  */
 static void request_end(struct fuse_conn *fc, struct fuse_req *req)
-__releases(&fc->lock)
+__releases(fc->lock)
 {
        void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
        req->end = NULL;
@@ -306,8 +306,8 @@ __releases(&fc->lock)
 
 static void wait_answer_interruptible(struct fuse_conn *fc,
                                      struct fuse_req *req)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
        if (signal_pending(current))
                return;
@@ -325,8 +325,8 @@ static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
 }
 
 static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
        if (!fc->no_interrupt) {
                /* Any signal may interrupt this */
@@ -905,8 +905,8 @@ static int request_pending(struct fuse_conn *fc)
 
 /* Wait until a request is available on the pending list */
 static void request_wait(struct fuse_conn *fc)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
        DECLARE_WAITQUEUE(wait, current);
 
@@ -934,7 +934,7 @@ __acquires(&fc->lock)
  */
 static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs,
                               size_t nbytes, struct fuse_req *req)
-__releases(&fc->lock)
+__releases(fc->lock)
 {
        struct fuse_in_header ih;
        struct fuse_interrupt_in arg;
@@ -1720,8 +1720,8 @@ static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
  * This function releases and reacquires fc->lock
  */
 static void end_requests(struct fuse_conn *fc, struct list_head *head)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
        while (!list_empty(head)) {
                struct fuse_req *req;
@@ -1744,8 +1744,8 @@ __acquires(&fc->lock)
  * locked).
  */
 static void end_io_requests(struct fuse_conn *fc)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
        while (!list_empty(&fc->io)) {
                struct fuse_req *req =
@@ -1769,6 +1769,16 @@ __acquires(&fc->lock)
        }
 }
 
+static void end_queued_requests(struct fuse_conn *fc)
+__releases(fc->lock)
+__acquires(fc->lock)
+{
+       fc->max_background = UINT_MAX;
+       flush_bg_queue(fc);
+       end_requests(fc, &fc->pending);
+       end_requests(fc, &fc->processing);
+}
+
 /*
  * Abort all requests.
  *
@@ -1795,8 +1805,7 @@ void fuse_abort_conn(struct fuse_conn *fc)
                fc->connected = 0;
                fc->blocked = 0;
                end_io_requests(fc);
-               end_requests(fc, &fc->pending);
-               end_requests(fc, &fc->processing);
+               end_queued_requests(fc);
                wake_up_all(&fc->waitq);
                wake_up_all(&fc->blocked_waitq);
                kill_fasync(&fc->fasync, SIGIO, POLL_IN);
@@ -1811,8 +1820,9 @@ int fuse_dev_release(struct inode *inode, struct file *file)
        if (fc) {
                spin_lock(&fc->lock);
                fc->connected = 0;
-               end_requests(fc, &fc->pending);
-               end_requests(fc, &fc->processing);
+               fc->blocked = 0;
+               end_queued_requests(fc);
+               wake_up_all(&fc->blocked_waitq);
                spin_unlock(&fc->lock);
                fuse_conn_put(fc);
        }
index 147c1f71bdb9f0213307fd3e63f6e3b30fc3f403..c8224587123f6e2ff84c8933f8d56a50ffd06c80 100644 (file)
@@ -1144,8 +1144,8 @@ static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
 
 /* Called under fc->lock, may release and reacquire it */
 static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
        struct fuse_inode *fi = get_fuse_inode(req->inode);
        loff_t size = i_size_read(req->inode);
@@ -1183,8 +1183,8 @@ __acquires(&fc->lock)
  * Called with fc->lock
  */
 void fuse_flush_writepages(struct inode *inode)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_inode *fi = get_fuse_inode(inode);
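The fuse hunks above adjust the sparse lock-context annotations on functions that are entered with fc->lock held and drop or retake it internally. A small illustrative example of the __releases()/__acquires() pattern, with the type and names invented for the sketch:

#include <linux/spinlock.h>

struct conn {
	spinlock_t lock;
};

/* Called with c->lock held; temporarily drops it, which the annotations
 * on the definition tell sparse to expect. */
static void drop_and_retake(struct conn *c)
__releases(c->lock)
__acquires(c->lock)
{
	spin_unlock(&c->lock);
	/* ... work that must run without the lock ... */
	spin_lock(&c->lock);
}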
index e20ee85955d1c77c3a410da2c82893cd38acce8e..f3f3578393a417085812ba1ad7e0b7c1e4e5c981 100644 (file)
@@ -115,7 +115,7 @@ static int minix_mkdir(struct inode * dir, struct dentry *dentry, int mode)
 
        inode_inc_link_count(dir);
 
-       inode = minix_new_inode(dir, mode, &err);
+       inode = minix_new_inode(dir, S_IFDIR | mode, &err);
        if (!inode)
                goto out_dir;
 
index de402eb6eafbad3df3957ca7b74ee92256c1e8c4..a72eaabfe8f2a58868e96397b320ba819e159884 100644 (file)
@@ -1483,6 +1483,23 @@ out_unlock:
        return err;
 }
 
+/*
+ * Sanity check the flags to change_mnt_propagation.
+ */
+
+static int flags_to_propagation_type(int flags)
+{
+       int type = flags & ~MS_REC;
+
+       /* Fail if any non-propagation flags are set */
+       if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
+               return 0;
+       /* Only one propagation flag should be set */
+       if (!is_power_of_2(type))
+               return 0;
+       return type;
+}
+
 /*
  * recursively change the type of the mountpoint.
  */
@@ -1490,7 +1507,7 @@ static int do_change_type(struct path *path, int flag)
 {
        struct vfsmount *m, *mnt = path->mnt;
        int recurse = flag & MS_REC;
-       int type = flag & ~MS_REC;
+       int type;
        int err = 0;
 
        if (!capable(CAP_SYS_ADMIN))
@@ -1499,6 +1516,10 @@ static int do_change_type(struct path *path, int flag)
        if (path->dentry != path->mnt->mnt_root)
                return -EINVAL;
 
+       type = flags_to_propagation_type(flag);
+       if (!type)
+               return -EINVAL;
+
        down_write(&namespace_sem);
        if (type == MS_SHARED) {
                err = invent_group_ids(mnt, recurse);
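flags_to_propagation_type() above rejects do_change_type() requests that either mix the MS_SHARED/MS_PRIVATE/MS_SLAVE/MS_UNBINDABLE propagation flags or carry unrelated bits, returning 0 so the caller fails with -EINVAL. From user space such a request is a mount(2) call with exactly one propagation flag, optionally combined with MS_REC; a minimal illustrative caller (the mount point path is hypothetical):

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* Mark /mnt and everything below it as shared; source, fstype and
	 * data are ignored for a propagation-type change. */
	if (mount(NULL, "/mnt", NULL, MS_SHARED | MS_REC, NULL) != 0)
		perror("mount");
	return 0;
}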
index 2e7357104cfdf3208a6abe357c91e92536d8e815..cf0d2ffb3c84a149bc904323cd53599620c8c917 100644 (file)
@@ -440,7 +440,7 @@ test_share(struct nfs4_stateid *stp, struct nfsd4_open *open) {
 
 static int nfs4_access_to_omode(u32 access)
 {
-       switch (access) {
+       switch (access & NFS4_SHARE_ACCESS_BOTH) {
        case NFS4_SHARE_ACCESS_READ:
                return O_RDONLY;
        case NFS4_SHARE_ACCESS_WRITE:
@@ -2450,14 +2450,13 @@ nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
 static __be32
 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_stateid *stp, struct nfsd4_open *open)
 {
-       u32 op_share_access, new_access;
+       u32 op_share_access = open->op_share_access & ~NFS4_SHARE_WANT_MASK;
+       bool new_access;
        __be32 status;
 
-       set_access(&new_access, stp->st_access_bmap);
-       new_access = (~new_access) & open->op_share_access & ~NFS4_SHARE_WANT_MASK;
-
+       new_access = !test_bit(op_share_access, &stp->st_access_bmap);
        if (new_access) {
-               status = nfs4_get_vfs_file(rqstp, fp, cur_fh, new_access);
+               status = nfs4_get_vfs_file(rqstp, fp, cur_fh, op_share_access);
                if (status)
                        return status;
        }
@@ -2470,7 +2469,6 @@ nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *c
                return status;
        }
        /* remember the open */
-       op_share_access = open->op_share_access & ~NFS4_SHARE_WANT_MASK;
        __set_bit(op_share_access, &stp->st_access_bmap);
        __set_bit(open->op_share_deny, &stp->st_deny_bmap);
 
@@ -2983,7 +2981,6 @@ nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
                                *filpp = find_readable_file(stp->st_file);
                        else
                                *filpp = find_writeable_file(stp->st_file);
-                       BUG_ON(!*filpp); /* assured by check_openmode */
                }
        }
        status = nfs_ok;
@@ -3561,7 +3558,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        struct nfs4_stateowner *open_sop = NULL;
        struct nfs4_stateowner *lock_sop = NULL;
        struct nfs4_stateid *lock_stp;
-       struct file *filp;
+       struct nfs4_file *fp;
+       struct file *filp = NULL;
        struct file_lock file_lock;
        struct file_lock conflock;
        __be32 status = 0;
@@ -3591,7 +3589,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                 * lock stateid.
                 */
                struct nfs4_stateid *open_stp = NULL;
-               struct nfs4_file *fp;
                
                status = nfserr_stale_clientid;
                if (!nfsd4_has_session(cstate) &&
@@ -3634,6 +3631,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                if (status)
                        goto out;
                lock_sop = lock->lk_replay_owner;
+               fp = lock_stp->st_file;
        }
        /* lock->lk_replay_owner and lock_stp have been created or found */
 
@@ -3648,13 +3646,19 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        switch (lock->lk_type) {
                case NFS4_READ_LT:
                case NFS4_READW_LT:
-                       filp = find_readable_file(lock_stp->st_file);
+                       if (find_readable_file(lock_stp->st_file)) {
+                               nfs4_get_vfs_file(rqstp, fp, &cstate->current_fh, NFS4_SHARE_ACCESS_READ);
+                               filp = find_readable_file(lock_stp->st_file);
+                       }
                        file_lock.fl_type = F_RDLCK;
                        cmd = F_SETLK;
                break;
                case NFS4_WRITE_LT:
                case NFS4_WRITEW_LT:
-                       filp = find_writeable_file(lock_stp->st_file);
+                       if (find_writeable_file(lock_stp->st_file)) {
+                               nfs4_get_vfs_file(rqstp, fp, &cstate->current_fh, NFS4_SHARE_ACCESS_WRITE);
+                               filp = find_writeable_file(lock_stp->st_file);
+                       }
                        file_lock.fl_type = F_WRLCK;
                        cmd = F_SETLK;
                break;
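
The first nfsd4 hunk masks the share-access word with NFS4_SHARE_ACCESS_BOTH before switching on it, so the "want" bits carried in the same field (which the surrounding code strips with ~NFS4_SHARE_WANT_MASK) can no longer make every case label miss. A hedged sketch of that mask-then-switch pattern, using the RFC share_access values and a made-up extra bit standing in for the want bits:

/* Sketch only: mask the share-access field down to its defined bits
 * before mapping it to an open mode, so unrelated bits in the same
 * word cannot derail the switch. */
#include <fcntl.h>
#include <stdio.h>

#define SHARE_ACCESS_READ  1		/* RFC 3530 share_access values */
#define SHARE_ACCESS_WRITE 2
#define SHARE_ACCESS_BOTH  3
#define SHARE_EXTRA_BIT    (1 << 8)	/* hypothetical stand-in for "want" bits */

static int access_to_omode(unsigned int access)
{
	switch (access & SHARE_ACCESS_BOTH) {	/* the masking added by the patch */
	case SHARE_ACCESS_READ:
		return O_RDONLY;
	case SHARE_ACCESS_WRITE:
		return O_WRONLY;
	case SHARE_ACCESS_BOTH:
		return O_RDWR;
	}
	return -1;				/* no access bits at all */
}

int main(void)
{
	/* Without the mask, the extra bit would miss every case label. */
	printf("%d\n", access_to_omode(SHARE_ACCESS_READ | SHARE_EXTRA_BIT) == O_RDONLY);
	printf("%d\n", access_to_omode(SHARE_ACCESS_BOTH) == O_RDWR);
	return 0;
}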
index 7731a75971ddf88347b7ed634604e72fcc0b027c..322518c88e4b09eee40473ed8efaece99e02410b 100644 (file)
@@ -363,23 +363,23 @@ struct nfs4_file {
  * at all? */
 static inline struct file *find_writeable_file(struct nfs4_file *f)
 {
-       if (f->fi_fds[O_RDWR])
-               return f->fi_fds[O_RDWR];
-       return f->fi_fds[O_WRONLY];
+       if (f->fi_fds[O_WRONLY])
+               return f->fi_fds[O_WRONLY];
+       return f->fi_fds[O_RDWR];
 }
 
 static inline struct file *find_readable_file(struct nfs4_file *f)
 {
-       if (f->fi_fds[O_RDWR])
-               return f->fi_fds[O_RDWR];
-       return f->fi_fds[O_RDONLY];
+       if (f->fi_fds[O_RDONLY])
+               return f->fi_fds[O_RDONLY];
+       return f->fi_fds[O_RDWR];
 }
 
 static inline struct file *find_any_file(struct nfs4_file *f)
 {
        if (f->fi_fds[O_RDWR])
                return f->fi_fds[O_RDWR];
-       else if (f->fi_fds[O_RDWR])
+       else if (f->fi_fds[O_WRONLY])
                return f->fi_fds[O_WRONLY];
        else
                return f->fi_fds[O_RDONLY];
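
These helpers index fi_fds[] by open flag (O_RDONLY, O_WRONLY and O_RDWR are 0, 1 and 2), and the fix flips the preference so a dedicated read-only or write-only struct file is returned when one exists, with the read-write file only as a fallback; find_any_file() also had a copy-paste bug that tested O_RDWR twice. A small sketch of the corrected selection (not nfsd code):

/* O_RDONLY == 0, O_WRONLY == 1, O_RDWR == 2, so they double as array indices. */
#include <fcntl.h>
#include <stdio.h>

struct demo_file { const char *name; };

struct demo_nfs4_file {
	struct demo_file *fds[3];	/* indexed by O_RDONLY / O_WRONLY / O_RDWR */
};

static struct demo_file *find_writeable_file(struct demo_nfs4_file *f)
{
	if (f->fds[O_WRONLY])		/* prefer the dedicated write-only file */
		return f->fds[O_WRONLY];
	return f->fds[O_RDWR];		/* fall back to the read-write file */
}

static struct demo_file *find_readable_file(struct demo_nfs4_file *f)
{
	if (f->fds[O_RDONLY])
		return f->fds[O_RDONLY];
	return f->fds[O_RDWR];
}

static struct demo_file *find_any_file(struct demo_nfs4_file *f)
{
	if (f->fds[O_RDWR])
		return f->fds[O_RDWR];
	else if (f->fds[O_WRONLY])	/* the original tested O_RDWR twice here */
		return f->fds[O_WRONLY];
	else
		return f->fds[O_RDONLY];
}

int main(void)
{
	struct demo_file ro = { "ro" }, wo = { "wo" }, rw = { "rw" };
	struct demo_nfs4_file f = { .fds = { &ro, &wo, &rw } };

	printf("writeable -> %s\n", find_writeable_file(&f)->name);	/* wo */
	printf("readable  -> %s\n", find_readable_file(&f)->name);	/* ro */
	printf("any       -> %s\n", find_any_file(&f)->name);		/* rw */
	return 0;
}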
index 96360a83cb91f670d7a0d5df012ebf75324f4d7e..661a6cf8e8265eaeca988e2aabc0ba1ede767717 100644 (file)
@@ -2033,15 +2033,17 @@ out:
 __be32
 nfsd_statfs(struct svc_rqst *rqstp, struct svc_fh *fhp, struct kstatfs *stat, int access)
 {
-       struct path path = {
-               .mnt    = fhp->fh_export->ex_path.mnt,
-               .dentry = fhp->fh_dentry,
-       };
        __be32 err;
 
        err = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP | access);
-       if (!err && vfs_statfs(&path, stat))
-               err = nfserr_io;
+       if (!err) {
+               struct path path = {
+                       .mnt    = fhp->fh_export->ex_path.mnt,
+                       .dentry = fhp->fh_dentry,
+               };
+               if (vfs_statfs(&path, stat))
+                       err = nfserr_io;
+       }
        return err;
 }
 
index 4317f177ea7cb27c46cb315f5055205f3a0addc7..ba7c10c917fcd1545a668c53f6abb2819b04f3bc 100644 (file)
@@ -446,6 +446,7 @@ int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi)
        nilfs_mdt_destroy(nilfs->ns_cpfile);
        nilfs_mdt_destroy(nilfs->ns_sufile);
        nilfs_mdt_destroy(nilfs->ns_dat);
+       nilfs_mdt_destroy(nilfs->ns_gc_dat);
 
  failed:
        nilfs_clear_recovery_info(&ri);
index 756566fe844909c2499f3e5a53903fd2f62f2419..85366c78cc3759b1a0027e1a3d5d0598e2f2fdec 100644 (file)
@@ -165,9 +165,6 @@ static bool fanotify_should_send_event(struct fsnotify_group *group,
                 "mask=%x data=%p data_type=%d\n", __func__, group, to_tell,
                 inode_mark, vfsmnt_mark, event_mask, data, data_type);
 
-       pr_debug("%s: group=%p vfsmount_mark=%p inode_mark=%p mask=%x\n",
-                __func__, group, vfsmnt_mark, inode_mark, event_mask);
-
        /* sorry, fanotify only gives a damn about files and dirs */
        if (!S_ISREG(to_tell->i_mode) &&
            !S_ISDIR(to_tell->i_mode))
index 032b837fcd11912e54a01920ddf40b9506f1c8f9..5ed8e58d7bfc316f44c056e6208c3787dbc5f445 100644 (file)
@@ -195,6 +195,14 @@ static int prepare_for_access_response(struct fsnotify_group *group,
        re->fd = fd;
 
        mutex_lock(&group->fanotify_data.access_mutex);
+
+       if (group->fanotify_data.bypass_perm) {
+               mutex_unlock(&group->fanotify_data.access_mutex);
+               kmem_cache_free(fanotify_response_event_cache, re);
+               event->response = FAN_ALLOW;
+               return 0;
+       }
+               
        list_add_tail(&re->list, &group->fanotify_data.access_list);
        mutex_unlock(&group->fanotify_data.access_mutex);
 
@@ -364,9 +372,28 @@ static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t
 static int fanotify_release(struct inode *ignored, struct file *file)
 {
        struct fsnotify_group *group = file->private_data;
+       struct fanotify_response_event *re, *lre;
 
        pr_debug("%s: file=%p group=%p\n", __func__, file, group);
 
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+       mutex_lock(&group->fanotify_data.access_mutex);
+
+       group->fanotify_data.bypass_perm = true;
+
+       list_for_each_entry_safe(re, lre, &group->fanotify_data.access_list, list) {
+               pr_debug("%s: found group=%p re=%p event=%p\n", __func__, group,
+                        re, re->event);
+
+               list_del_init(&re->list);
+               re->event->response = FAN_ALLOW;
+
+               kmem_cache_free(fanotify_response_event_cache, re);
+       }
+       mutex_unlock(&group->fanotify_data.access_mutex);
+
+       wake_up(&group->fanotify_data.access_waitq);
+#endif
        /* matches the fanotify_init->fsnotify_alloc_group */
        fsnotify_put_group(group);
 
@@ -614,7 +641,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
                __func__, flags, event_f_flags);
 
        if (!capable(CAP_SYS_ADMIN))
-               return -EACCES;
+               return -EPERM;
 
        if (flags & ~FAN_ALL_INIT_FLAGS)
                return -EINVAL;
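
The fanotify_release() change keeps tasks from blocking forever on permission events that will never be answered: once the listener fd goes away, bypass_perm makes new events auto-allowed, and everything already queued on access_list is popped, answered with FAN_ALLOW and freed, after which the waiters are woken. A simplified single-threaded sketch of that drain-and-allow pattern over a plain linked list (no fsnotify types; in the kernel access_mutex is held around the walk and the event object outlives the bookkeeping struct freed here):

/* Drain-and-allow sketch, not fanotify code. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

enum { RESP_NONE = 0, RESP_ALLOW = 1 };

struct pending_event {
	int id;
	int response;
	struct pending_event *next;
};

struct listener {
	bool bypass_perm;		/* once set, new events are auto-allowed */
	struct pending_event *access_list;
};

static void release_listener(struct listener *l)
{
	l->bypass_perm = true;

	while (l->access_list) {
		struct pending_event *re = l->access_list;

		l->access_list = re->next;
		re->response = RESP_ALLOW;	/* nobody is left to answer it */
		printf("event %d force-allowed\n", re->id);
		free(re);
	}
	/* kernel: wake_up(&group->fanotify_data.access_waitq) happens here */
}

int main(void)
{
	struct listener l = { .bypass_perm = false, .access_list = NULL };

	for (int i = 0; i < 3; i++) {
		struct pending_event *re = calloc(1, sizeof(*re));

		re->id = i;
		re->next = l.access_list;
		l.access_list = re;
	}
	release_listener(&l);
	return 0;
}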
index 3970392b272264810574ca6536eed74bd3fc013d..36802420d69a94024eefde05f1eab9f6a9932241 100644 (file)
@@ -148,13 +148,14 @@ static int send_to_group(struct inode *to_tell, struct vfsmount *mnt,
                         const unsigned char *file_name,
                         struct fsnotify_event **event)
 {
-       struct fsnotify_group *group = inode_mark->group;
-       __u32 inode_test_mask = (mask & ~FS_EVENT_ON_CHILD);
-       __u32 vfsmount_test_mask = (mask & ~FS_EVENT_ON_CHILD);
+       struct fsnotify_group *group = NULL;
+       __u32 inode_test_mask = 0;
+       __u32 vfsmount_test_mask = 0;
 
-       pr_debug("%s: group=%p to_tell=%p mnt=%p mark=%p mask=%x data=%p"
-                " data_is=%d cookie=%d event=%p\n", __func__, group, to_tell,
-                mnt, inode_mark, mask, data, data_is, cookie, *event);
+       if (unlikely(!inode_mark && !vfsmount_mark)) {
+               BUG();
+               return 0;
+       }
 
        /* clear ignored on inode modification */
        if (mask & FS_MODIFY) {
@@ -168,18 +169,29 @@ static int send_to_group(struct inode *to_tell, struct vfsmount *mnt,
 
        /* does the inode mark tell us to do something? */
        if (inode_mark) {
+               group = inode_mark->group;
+               inode_test_mask = (mask & ~FS_EVENT_ON_CHILD);
                inode_test_mask &= inode_mark->mask;
                inode_test_mask &= ~inode_mark->ignored_mask;
        }
 
        /* does the vfsmount_mark tell us to do something? */
        if (vfsmount_mark) {
+               vfsmount_test_mask = (mask & ~FS_EVENT_ON_CHILD);
+               group = vfsmount_mark->group;
                vfsmount_test_mask &= vfsmount_mark->mask;
                vfsmount_test_mask &= ~vfsmount_mark->ignored_mask;
                if (inode_mark)
                        vfsmount_test_mask &= ~inode_mark->ignored_mask;
        }
 
+       pr_debug("%s: group=%p to_tell=%p mnt=%p mask=%x inode_mark=%p"
+                " inode_test_mask=%x vfsmount_mark=%p vfsmount_test_mask=%x"
+                " data=%p data_is=%d cookie=%d event=%p\n",
+                __func__, group, to_tell, mnt, mask, inode_mark,
+                inode_test_mask, vfsmount_mark, vfsmount_test_mask, data,
+                data_is, cookie, *event);
+
        if (!inode_test_mask && !vfsmount_test_mask)
                return 0;
 
@@ -207,13 +219,12 @@ static int send_to_group(struct inode *to_tell, struct vfsmount *mnt,
 int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
             const unsigned char *file_name, u32 cookie)
 {
-       struct hlist_node *inode_node, *vfsmount_node;
+       struct hlist_node *inode_node = NULL, *vfsmount_node = NULL;
        struct fsnotify_mark *inode_mark = NULL, *vfsmount_mark = NULL;
        struct fsnotify_group *inode_group, *vfsmount_group;
        struct fsnotify_event *event = NULL;
        struct vfsmount *mnt;
        int idx, ret = 0;
-       bool used_inode = false, used_vfsmount = false;
        /* global tests shouldn't care about events on child only the specific event */
        __u32 test_mask = (mask & ~FS_EVENT_ON_CHILD);
 
@@ -238,57 +249,50 @@ int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
            (test_mask & to_tell->i_fsnotify_mask))
                inode_node = srcu_dereference(to_tell->i_fsnotify_marks.first,
                                              &fsnotify_mark_srcu);
-       else
-               inode_node = NULL;
 
-       if (mnt) {
-               if ((mask & FS_MODIFY) ||
-                   (test_mask & mnt->mnt_fsnotify_mask))
-                       vfsmount_node = srcu_dereference(mnt->mnt_fsnotify_marks.first,
-                                                        &fsnotify_mark_srcu);
-               else
-                       vfsmount_node = NULL;
-       } else {
-               mnt = NULL;
-               vfsmount_node = NULL;
+       if (mnt && ((mask & FS_MODIFY) ||
+                   (test_mask & mnt->mnt_fsnotify_mask))) {
+               vfsmount_node = srcu_dereference(mnt->mnt_fsnotify_marks.first,
+                                                &fsnotify_mark_srcu);
+               inode_node = srcu_dereference(to_tell->i_fsnotify_marks.first,
+                                             &fsnotify_mark_srcu);
        }
 
        while (inode_node || vfsmount_node) {
+               inode_group = vfsmount_group = NULL;
+
                if (inode_node) {
                        inode_mark = hlist_entry(srcu_dereference(inode_node, &fsnotify_mark_srcu),
                                                 struct fsnotify_mark, i.i_list);
                        inode_group = inode_mark->group;
-               } else
-                       inode_group = (void *)-1;
+               }
 
                if (vfsmount_node) {
                        vfsmount_mark = hlist_entry(srcu_dereference(vfsmount_node, &fsnotify_mark_srcu),
                                                        struct fsnotify_mark, m.m_list);
                        vfsmount_group = vfsmount_mark->group;
-               } else
-                       vfsmount_group = (void *)-1;
+               }
 
-               if (inode_group < vfsmount_group) {
+               if (inode_group > vfsmount_group) {
                        /* handle inode */
                        send_to_group(to_tell, NULL, inode_mark, NULL, mask, data,
                                      data_is, cookie, file_name, &event);
-                       used_inode = true;
-               } else if (vfsmount_group < inode_group) {
+                       /* we didn't use the vfsmount_mark */
+                       vfsmount_group = NULL;
+               } else if (vfsmount_group > inode_group) {
                        send_to_group(to_tell, mnt, NULL, vfsmount_mark, mask, data,
                                      data_is, cookie, file_name, &event);
-                       used_vfsmount = true;
+                       inode_group = NULL;
                } else {
                        send_to_group(to_tell, mnt, inode_mark, vfsmount_mark,
                                      mask, data, data_is, cookie, file_name,
                                      &event);
-                       used_vfsmount = true;
-                       used_inode = true;
                }
 
-               if (used_inode)
+               if (inode_group)
                        inode_node = srcu_dereference(inode_node->next,
                                                      &fsnotify_mark_srcu);
-               if (used_vfsmount)
+               if (vfsmount_group)
                        vfsmount_node = srcu_dereference(vfsmount_node->next,
                                                         &fsnotify_mark_srcu);
        }
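
After this patch fsnotify() walks the inode mark list and the vfsmount mark list in lockstep: both lists are kept sorted by group, the loop compares the two current groups, delivers to whichever sorts first (or to both marks in one call when the groups match) and advances only the list(s) it consumed, which is what lets the old (void *)-1 sentinels and used_* flags go away. A sketch of that merge walk using integer ids in place of group pointers, sorted highest-first for illustration:

/* Merge-walk sketch over two group-sorted mark lists, not fsnotify code. */
#include <stdio.h>

static void send_to_group(int group, int have_inode_mark, int have_mount_mark)
{
	printf("group %d: inode_mark=%d vfsmount_mark=%d\n",
	       group, have_inode_mark, have_mount_mark);
}

int main(void)
{
	/* Both lists sorted by group id, highest first (stand-in for pointers). */
	int inode_marks[]    = { 9, 7, 3 };
	int vfsmount_marks[] = { 9, 5, 3 };
	size_t i = 0, m = 0;
	size_t ni = sizeof(inode_marks) / sizeof(inode_marks[0]);
	size_t nm = sizeof(vfsmount_marks) / sizeof(vfsmount_marks[0]);

	while (i < ni || m < nm) {
		/* 0 plays the role NULL plays in the kernel once a list is done. */
		int inode_group    = (i < ni) ? inode_marks[i]    : 0;
		int vfsmount_group = (m < nm) ? vfsmount_marks[m] : 0;

		if (inode_group > vfsmount_group) {
			send_to_group(inode_group, 1, 0);
			i++;			/* only the inode list advances */
		} else if (vfsmount_group > inode_group) {
			send_to_group(vfsmount_group, 0, 1);
			m++;
		} else {
			send_to_group(inode_group, 1, 1);	/* same group: both marks */
			i++;
			m++;
		}
	}
	return 0;
}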
index 215e12ce1d85e2079359838cf287f1c3c670ff31..592fae5007d1245baade87453ce731121aa6efe5 100644 (file)
@@ -6672,7 +6672,7 @@ int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end,
        last_page_bytes = PAGE_ALIGN(end);
        index = start >> PAGE_CACHE_SHIFT;
        do {
-               pages[numpages] = grab_cache_page(mapping, index);
+               pages[numpages] = find_or_create_page(mapping, index, GFP_NOFS);
                if (!pages[numpages]) {
                        ret = -ENOMEM;
                        mlog_errno(ret);
index ec6d123395932b69b6a67ba0b5256be02b811c6f..c7ee03c22226253d970cce94beb11f6353b3e1d0 100644 (file)
@@ -439,7 +439,7 @@ int ocfs2_block_check_validate(void *data, size_t blocksize,
 
        ocfs2_blockcheck_inc_failure(stats);
        mlog(ML_ERROR,
-            "CRC32 failed: stored: %u, computed %u.  Applying ECC.\n",
+            "CRC32 failed: stored: 0x%x, computed 0x%x. Applying ECC.\n",
             (unsigned int)check.bc_crc32e, (unsigned int)crc);
 
        /* Ok, try ECC fixups */
@@ -453,7 +453,7 @@ int ocfs2_block_check_validate(void *data, size_t blocksize,
                goto out;
        }
 
-       mlog(ML_ERROR, "Fixed CRC32 failed: stored: %u, computed %u\n",
+       mlog(ML_ERROR, "Fixed CRC32 failed: stored: 0x%x, computed 0x%x\n",
             (unsigned int)check.bc_crc32e, (unsigned int)crc);
 
        rc = -EIO;
index 81296b4e364632dd5936f59d8adeab9832f2d2fd..9a03c151b5ceabc169215d99777abb92e2d2ad36 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/writeback.h>
 #include <linux/falloc.h>
 #include <linux/quotaops.h>
+#include <linux/blkdev.h>
 
 #define MLOG_MASK_PREFIX ML_INODE
 #include <cluster/masklog.h>
@@ -190,8 +191,16 @@ static int ocfs2_sync_file(struct file *file, int datasync)
        if (err)
                goto bail;
 
-       if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
+       if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) {
+               /*
+                * We still have to flush drive's caches to get data to the
+                * platter
+                */
+               if (osb->s_mount_opt & OCFS2_MOUNT_BARRIER)
+                       blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL,
+                                          NULL, BLKDEV_IFL_WAIT);
                goto bail;
+       }
 
        journal = osb->journal->j_journal;
        err = jbd2_journal_force_commit(journal);
@@ -774,7 +783,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
        BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT));
        BUG_ON(abs_from & (inode->i_blkbits - 1));
 
-       page = grab_cache_page(mapping, index);
+       page = find_or_create_page(mapping, index, GFP_NOFS);
        if (!page) {
                ret = -ENOMEM;
                mlog_errno(ret);
@@ -2329,7 +2338,7 @@ out_dio:
        BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
 
        if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
-           ((file->f_flags & O_DIRECT) && has_refcount)) {
+           ((file->f_flags & O_DIRECT) && !direct_io)) {
                ret = filemap_fdatawrite_range(file->f_mapping, pos,
                                               pos + count - 1);
                if (ret < 0)
index 0492464916b19324e73425e29c473956b0b4bd33..eece3e05d9d0124d04b81c940c1289f876e700bb 100644 (file)
@@ -488,7 +488,11 @@ static int ocfs2_read_locked_inode(struct inode *inode,
                                                     OCFS2_BH_IGNORE_CACHE);
        } else {
                status = ocfs2_read_blocks_sync(osb, args->fi_blkno, 1, &bh);
-               if (!status)
+               /*
+                * If buffer is in jbd, then its checksum may not have been
+                * computed as yet.
+                */
+               if (!status && !buffer_jbd(bh))
                        status = ocfs2_validate_inode_block(osb->sb, bh);
        }
        if (status < 0) {
index af2b8fe1f13999e26f6e2543a047847bcf517c4e..4c18f4ad93b43cae6e5ddcd5a2781fbc79292484 100644 (file)
@@ -74,9 +74,11 @@ static int __ocfs2_page_mkwrite(struct inode *inode, struct buffer_head *di_bh,
        /*
         * Another node might have truncated while we were waiting on
         * cluster locks.
+        * We don't check size == 0 before the shift. This is borrowed
+        * from do_generic_file_read.
         */
-       last_index = size >> PAGE_CACHE_SHIFT;
-       if (page->index > last_index) {
+       last_index = (size - 1) >> PAGE_CACHE_SHIFT;
+       if (unlikely(!size || page->index > last_index)) {
                ret = -EINVAL;
                goto out;
        }
@@ -107,7 +109,7 @@ static int __ocfs2_page_mkwrite(struct inode *inode, struct buffer_head *di_bh,
         * because the "write" would invalidate their data.
         */
        if (page->index == last_index)
-               len = size & ~PAGE_CACHE_MASK;
+               len = ((size - 1) & ~PAGE_CACHE_MASK) + 1;
 
        ret = ocfs2_write_begin_nolock(mapping, pos, len, 0, &locked_page,
                                       &fsdata, di_bh, page);
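
The page_mkwrite fix computes the last valid page as (size - 1) >> PAGE_CACHE_SHIFT and the bytes used in that page as ((size - 1) & ~PAGE_CACHE_MASK) + 1, which stays correct when the file size is an exact multiple of the page size; size == 0 is rejected separately. A quick sketch of the arithmetic, assuming 4 KiB pages (~PAGE_CACHE_MASK is the same quantity as PAGE_SIZE - 1 here):

/* Last-page arithmetic sketch; 4 KiB pages assumed for the demo. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE  4096ULL
#define PAGE_SHIFT 12

static void last_page_info(uint64_t size)
{
	if (size == 0) {			/* must be handled before trusting the math */
		printf("size 0: no valid page\n");
		return;
	}
	uint64_t last_index = (size - 1) >> PAGE_SHIFT;
	uint64_t bytes_in_last = ((size - 1) & (PAGE_SIZE - 1)) + 1;

	printf("size %6llu -> last_index %llu, bytes in last page %llu\n",
	       (unsigned long long)size,
	       (unsigned long long)last_index,
	       (unsigned long long)bytes_in_last);
}

int main(void)
{
	last_page_info(1);	/* last_index 0, 1 byte */
	last_page_info(4096);	/* last_index 0, 4096 bytes (exact multiple) */
	last_page_info(4097);	/* last_index 1, 1 byte */
	last_page_info(0);
	return 0;
}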
index f171b51a74f78d6e268b5d743a24df4e702f9643..a00dda2e4f16698e5c7d8e0946ba1f09506651c6 100644 (file)
@@ -472,32 +472,23 @@ leave:
        return status;
 }
 
-static int ocfs2_mknod_locked(struct ocfs2_super *osb,
-                             struct inode *dir,
-                             struct inode *inode,
-                             dev_t dev,
-                             struct buffer_head **new_fe_bh,
-                             struct buffer_head *parent_fe_bh,
-                             handle_t *handle,
-                             struct ocfs2_alloc_context *inode_ac)
+static int __ocfs2_mknod_locked(struct inode *dir,
+                               struct inode *inode,
+                               dev_t dev,
+                               struct buffer_head **new_fe_bh,
+                               struct buffer_head *parent_fe_bh,
+                               handle_t *handle,
+                               struct ocfs2_alloc_context *inode_ac,
+                               u64 fe_blkno, u64 suballoc_loc, u16 suballoc_bit)
 {
        int status = 0;
+       struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
        struct ocfs2_dinode *fe = NULL;
        struct ocfs2_extent_list *fel;
-       u64 suballoc_loc, fe_blkno = 0;
-       u16 suballoc_bit;
        u16 feat;
 
        *new_fe_bh = NULL;
 
-       status = ocfs2_claim_new_inode(handle, dir, parent_fe_bh,
-                                      inode_ac, &suballoc_loc,
-                                      &suballoc_bit, &fe_blkno);
-       if (status < 0) {
-               mlog_errno(status);
-               goto leave;
-       }
-
        /* populate as many fields early on as possible - many of
         * these are used by the support functions here and in
         * callers. */
@@ -591,6 +582,34 @@ leave:
        return status;
 }
 
+static int ocfs2_mknod_locked(struct ocfs2_super *osb,
+                             struct inode *dir,
+                             struct inode *inode,
+                             dev_t dev,
+                             struct buffer_head **new_fe_bh,
+                             struct buffer_head *parent_fe_bh,
+                             handle_t *handle,
+                             struct ocfs2_alloc_context *inode_ac)
+{
+       int status = 0;
+       u64 suballoc_loc, fe_blkno = 0;
+       u16 suballoc_bit;
+
+       *new_fe_bh = NULL;
+
+       status = ocfs2_claim_new_inode(handle, dir, parent_fe_bh,
+                                      inode_ac, &suballoc_loc,
+                                      &suballoc_bit, &fe_blkno);
+       if (status < 0) {
+               mlog_errno(status);
+               return status;
+       }
+
+       return __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh,
+                                   parent_fe_bh, handle, inode_ac,
+                                   fe_blkno, suballoc_loc, suballoc_bit);
+}
+
 static int ocfs2_mkdir(struct inode *dir,
                       struct dentry *dentry,
                       int mode)
@@ -1852,61 +1871,117 @@ bail:
        return status;
 }
 
-static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb,
-                                   struct inode **ret_orphan_dir,
-                                   u64 blkno,
-                                   char *name,
-                                   struct ocfs2_dir_lookup_result *lookup)
+static int ocfs2_lookup_lock_orphan_dir(struct ocfs2_super *osb,
+                                       struct inode **ret_orphan_dir,
+                                       struct buffer_head **ret_orphan_dir_bh)
 {
        struct inode *orphan_dir_inode;
        struct buffer_head *orphan_dir_bh = NULL;
-       int status = 0;
-
-       status = ocfs2_blkno_stringify(blkno, name);
-       if (status < 0) {
-               mlog_errno(status);
-               return status;
-       }
+       int ret = 0;
 
        orphan_dir_inode = ocfs2_get_system_file_inode(osb,
                                                       ORPHAN_DIR_SYSTEM_INODE,
                                                       osb->slot_num);
        if (!orphan_dir_inode) {
-               status = -ENOENT;
-               mlog_errno(status);
-               return status;
+               ret = -ENOENT;
+               mlog_errno(ret);
+               return ret;
        }
 
        mutex_lock(&orphan_dir_inode->i_mutex);
 
-       status = ocfs2_inode_lock(orphan_dir_inode, &orphan_dir_bh, 1);
-       if (status < 0) {
-               mlog_errno(status);
-               goto leave;
+       ret = ocfs2_inode_lock(orphan_dir_inode, &orphan_dir_bh, 1);
+       if (ret < 0) {
+               mutex_unlock(&orphan_dir_inode->i_mutex);
+               iput(orphan_dir_inode);
+
+               mlog_errno(ret);
+               return ret;
        }
 
-       status = ocfs2_prepare_dir_for_insert(osb, orphan_dir_inode,
-                                             orphan_dir_bh, name,
-                                             OCFS2_ORPHAN_NAMELEN, lookup);
-       if (status < 0) {
-               ocfs2_inode_unlock(orphan_dir_inode, 1);
+       *ret_orphan_dir = orphan_dir_inode;
+       *ret_orphan_dir_bh = orphan_dir_bh;
 
-               mlog_errno(status);
-               goto leave;
+       return 0;
+}
+
+static int __ocfs2_prepare_orphan_dir(struct inode *orphan_dir_inode,
+                                     struct buffer_head *orphan_dir_bh,
+                                     u64 blkno,
+                                     char *name,
+                                     struct ocfs2_dir_lookup_result *lookup)
+{
+       int ret;
+       struct ocfs2_super *osb = OCFS2_SB(orphan_dir_inode->i_sb);
+
+       ret = ocfs2_blkno_stringify(blkno, name);
+       if (ret < 0) {
+               mlog_errno(ret);
+               return ret;
+       }
+
+       ret = ocfs2_prepare_dir_for_insert(osb, orphan_dir_inode,
+                                          orphan_dir_bh, name,
+                                          OCFS2_ORPHAN_NAMELEN, lookup);
+       if (ret < 0) {
+               mlog_errno(ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+/**
+ * ocfs2_prepare_orphan_dir() - Prepare an orphan directory for
+ * insertion of an orphan.
+ * @osb: ocfs2 file system
+ * @ret_orphan_dir: Orphan dir inode - returned locked!
+ * @blkno: Actual block number of the inode to be inserted into orphan dir.
+ * @lookup: dir lookup result, to be passed back into functions like
+ *          ocfs2_orphan_add
+ *
+ * Returns zero on success and the ret_orphan_dir, name and lookup
+ * fields will be populated.
+ *
+ * Returns non-zero on failure. 
+ */
+static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb,
+                                   struct inode **ret_orphan_dir,
+                                   u64 blkno,
+                                   char *name,
+                                   struct ocfs2_dir_lookup_result *lookup)
+{
+       struct inode *orphan_dir_inode = NULL;
+       struct buffer_head *orphan_dir_bh = NULL;
+       int ret = 0;
+
+       ret = ocfs2_lookup_lock_orphan_dir(osb, &orphan_dir_inode,
+                                          &orphan_dir_bh);
+       if (ret < 0) {
+               mlog_errno(ret);
+               return ret;
+       }
+
+       ret = __ocfs2_prepare_orphan_dir(orphan_dir_inode, orphan_dir_bh,
+                                        blkno, name, lookup);
+       if (ret < 0) {
+               mlog_errno(ret);
+               goto out;
        }
 
        *ret_orphan_dir = orphan_dir_inode;
 
-leave:
-       if (status) {
+out:
+       brelse(orphan_dir_bh);
+
+       if (ret) {
+               ocfs2_inode_unlock(orphan_dir_inode, 1);
                mutex_unlock(&orphan_dir_inode->i_mutex);
                iput(orphan_dir_inode);
        }
 
-       brelse(orphan_dir_bh);
-
-       mlog_exit(status);
-       return status;
+       mlog_exit(ret);
+       return ret;
 }
 
 static int ocfs2_orphan_add(struct ocfs2_super *osb,
@@ -2053,6 +2128,99 @@ leave:
        return status;
 }
 
+/**
+ * ocfs2_prep_new_orphaned_file() - Prepare the orphan dir to receive a newly
+ * allocated file. This is different from the typical 'add to orphan dir'
+ * operation in that the inode does not yet exist. This is a problem because
+ * the orphan dir stringifies the inode block number to come up with its
+ * dirent. Obviously if the inode does not yet exist we have a chicken and egg
+ * problem. This function works around it by calling deeper into the orphan
+ * and suballoc code than other callers. Use this only by necessity.
+ * @dir: The directory which this inode will ultimately wind up under - not the
+ * orphan dir!
+ * @dir_bh: buffer_head the @dir inode block
+ * @orphan_name: string of length (OCFS2_ORPHAN_NAMELEN + 1). Will be filled
+ * with the string to be used for orphan dirent. Pass back to the orphan dir
+ * code.
+ * @ret_orphan_dir: orphan dir inode returned to be passed back into orphan
+ * dir code.
+ * @ret_di_blkno: block number where the new inode will be allocated.
+ * @orphan_insert: Dir insert context to be passed back into orphan dir code.
+ * @ret_inode_ac: Inode alloc context to be passed back to the allocator.
+ *
+ * Returns zero on success and the ret_orphan_dir, name and lookup
+ * fields will be populated.
+ *
+ * Returns non-zero on failure. 
+ */
+static int ocfs2_prep_new_orphaned_file(struct inode *dir,
+                                       struct buffer_head *dir_bh,
+                                       char *orphan_name,
+                                       struct inode **ret_orphan_dir,
+                                       u64 *ret_di_blkno,
+                                       struct ocfs2_dir_lookup_result *orphan_insert,
+                                       struct ocfs2_alloc_context **ret_inode_ac)
+{
+       int ret;
+       u64 di_blkno;
+       struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
+       struct inode *orphan_dir = NULL;
+       struct buffer_head *orphan_dir_bh = NULL;
+       struct ocfs2_alloc_context *inode_ac = NULL;
+
+       ret = ocfs2_lookup_lock_orphan_dir(osb, &orphan_dir, &orphan_dir_bh);
+       if (ret < 0) {
+               mlog_errno(ret);
+               return ret;
+       }
+
+       /* reserve an inode spot */
+       ret = ocfs2_reserve_new_inode(osb, &inode_ac);
+       if (ret < 0) {
+               if (ret != -ENOSPC)
+                       mlog_errno(ret);
+               goto out;
+       }
+
+       ret = ocfs2_find_new_inode_loc(dir, dir_bh, inode_ac,
+                                      &di_blkno);
+       if (ret) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+       ret = __ocfs2_prepare_orphan_dir(orphan_dir, orphan_dir_bh,
+                                        di_blkno, orphan_name, orphan_insert);
+       if (ret < 0) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+out:
+       if (ret == 0) {
+               *ret_orphan_dir = orphan_dir;
+               *ret_di_blkno = di_blkno;
+               *ret_inode_ac = inode_ac;
+               /*
+                * orphan_name and orphan_insert are already up to
+                * date via prepare_orphan_dir
+                */
+       } else {
+               /* Unroll reserve_new_inode* */
+               if (inode_ac)
+                       ocfs2_free_alloc_context(inode_ac);
+
+               /* Unroll orphan dir locking */
+               mutex_unlock(&orphan_dir->i_mutex);
+               ocfs2_inode_unlock(orphan_dir, 1);
+               iput(orphan_dir);
+       }
+
+       brelse(orphan_dir_bh);
+
+       return ret;
+}
+
 int ocfs2_create_inode_in_orphan(struct inode *dir,
                                 int mode,
                                 struct inode **new_inode)
@@ -2068,6 +2236,8 @@ int ocfs2_create_inode_in_orphan(struct inode *dir,
        struct buffer_head *new_di_bh = NULL;
        struct ocfs2_alloc_context *inode_ac = NULL;
        struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
+       u64 uninitialized_var(di_blkno), suballoc_loc;
+       u16 suballoc_bit;
 
        status = ocfs2_inode_lock(dir, &parent_di_bh, 1);
        if (status < 0) {
@@ -2076,20 +2246,9 @@ int ocfs2_create_inode_in_orphan(struct inode *dir,
                return status;
        }
 
-       /*
-        * We give the orphan dir the root blkno to fake an orphan name,
-        * and allocate enough space for our insertion.
-        */
-       status = ocfs2_prepare_orphan_dir(osb, &orphan_dir,
-                                         osb->root_blkno,
-                                         orphan_name, &orphan_insert);
-       if (status < 0) {
-               mlog_errno(status);
-               goto leave;
-       }
-
-       /* reserve an inode spot */
-       status = ocfs2_reserve_new_inode(osb, &inode_ac);
+       status = ocfs2_prep_new_orphaned_file(dir, parent_di_bh,
+                                             orphan_name, &orphan_dir,
+                                             &di_blkno, &orphan_insert, &inode_ac);
        if (status < 0) {
                if (status != -ENOSPC)
                        mlog_errno(status);
@@ -2116,17 +2275,20 @@ int ocfs2_create_inode_in_orphan(struct inode *dir,
                goto leave;
        did_quota_inode = 1;
 
-       inode->i_nlink = 0;
-       /* do the real work now. */
-       status = ocfs2_mknod_locked(osb, dir, inode,
-                                   0, &new_di_bh, parent_di_bh, handle,
-                                   inode_ac);
+       status = ocfs2_claim_new_inode_at_loc(handle, dir, inode_ac,
+                                             &suballoc_loc,
+                                             &suballoc_bit, di_blkno);
        if (status < 0) {
                mlog_errno(status);
                goto leave;
        }
 
-       status = ocfs2_blkno_stringify(OCFS2_I(inode)->ip_blkno, orphan_name);
+       inode->i_nlink = 0;
+       /* do the real work now. */
+       status = __ocfs2_mknod_locked(dir, inode,
+                                     0, &new_di_bh, parent_di_bh, handle,
+                                     inode_ac, di_blkno, suballoc_loc,
+                                     suballoc_bit);
        if (status < 0) {
                mlog_errno(status);
                goto leave;
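
The reworked ocfs2_create_inode_in_orphan() exists because the orphan dirent's name is just the inode's block number stringified, yet the dirent has to be prepared before the inode is allocated; the new flow therefore finds (but does not claim) the block first, builds the orphan entry from it, then claims the inode at exactly that block. A hedged sketch of the naming step, assuming the 16-character zero-padded hex form implied by OCFS2_ORPHAN_NAMELEN:

/* Orphan-name sketch: format a block number the way the orphan dir names
 * its entries (fixed-width hex assumed, not the ocfs2 implementation). */
#include <inttypes.h>
#include <stdio.h>

#define ORPHAN_NAMELEN 16

static int blkno_stringify(uint64_t blkno, char *name, size_t namelen)
{
	int len = snprintf(name, namelen, "%016" PRIx64, blkno);

	if (len != ORPHAN_NAMELEN)
		return -1;		/* name would not fit the fixed-width dirent */
	return 0;
}

int main(void)
{
	char name[ORPHAN_NAMELEN + 1];

	if (blkno_stringify(0x1234abcdULL, name, sizeof(name)) == 0)
		printf("orphan dirent name: %s\n", name);	/* 000000001234abcd */
	return 0;
}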
index 73a11ccfd4c280681abe672c5e9cd81e3b229a93..0afeda83120fa0e54bd2c9cc7e1f4f08c6e836bf 100644 (file)
@@ -2960,7 +2960,7 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
                if (map_end & (PAGE_CACHE_SIZE - 1))
                        to = map_end & (PAGE_CACHE_SIZE - 1);
 
-               page = grab_cache_page(mapping, page_index);
+               page = find_or_create_page(mapping, page_index, GFP_NOFS);
 
                /*
                 * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, This page
@@ -3179,7 +3179,8 @@ static int ocfs2_cow_sync_writeback(struct super_block *sb,
                if (map_end > end)
                        map_end = end;
 
-               page = grab_cache_page(context->inode->i_mapping, page_index);
+               page = find_or_create_page(context->inode->i_mapping,
+                                          page_index, GFP_NOFS);
                BUG_ON(!page);
 
                wait_on_page_writeback(page);
index a8e6a95a353f03dcb8a34cf928ded84ff7e6d127..8a286f54dca1f30a1d65b0d6cbb5baa0fa8c063c 100644 (file)
@@ -57,11 +57,28 @@ struct ocfs2_suballoc_result {
        u64             sr_bg_blkno;    /* The bg we allocated from.  Set
                                           to 0 when a block group is
                                           contiguous. */
+       u64             sr_bg_stable_blkno; /*
+                                            * Doesn't change, always
+                                            * set to target block
+                                            * group descriptor
+                                            * block.
+                                            */
        u64             sr_blkno;       /* The first allocated block */
        unsigned int    sr_bit_offset;  /* The bit in the bg */
        unsigned int    sr_bits;        /* How many bits we claimed */
 };
 
+static u64 ocfs2_group_from_res(struct ocfs2_suballoc_result *res)
+{
+       if (res->sr_blkno == 0)
+               return 0;
+
+       if (res->sr_bg_blkno)
+               return res->sr_bg_blkno;
+
+       return ocfs2_which_suballoc_group(res->sr_blkno, res->sr_bit_offset);
+}
+
 static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg);
 static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe);
 static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl);
@@ -138,6 +155,10 @@ void ocfs2_free_ac_resource(struct ocfs2_alloc_context *ac)
        brelse(ac->ac_bh);
        ac->ac_bh = NULL;
        ac->ac_resv = NULL;
+       if (ac->ac_find_loc_priv) {
+               kfree(ac->ac_find_loc_priv);
+               ac->ac_find_loc_priv = NULL;
+       }
 }
 
 void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac)
@@ -1678,6 +1699,15 @@ static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac,
        if (!ret)
                ocfs2_bg_discontig_fix_result(ac, gd, res);
 
+       /*
+        * sr_bg_blkno might have been changed by
+        * ocfs2_bg_discontig_fix_result
+        */
+       res->sr_bg_stable_blkno = group_bh->b_blocknr;
+
+       if (ac->ac_find_loc_only)
+               goto out_loc_only;
+
        ret = ocfs2_alloc_dinode_update_counts(alloc_inode, handle, ac->ac_bh,
                                               res->sr_bits,
                                               le16_to_cpu(gd->bg_chain));
@@ -1691,6 +1721,7 @@ static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac,
        if (ret < 0)
                mlog_errno(ret);
 
+out_loc_only:
        *bits_left = le16_to_cpu(gd->bg_free_bits_count);
 
 out:
@@ -1708,7 +1739,6 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
 {
        int status;
        u16 chain;
-       u32 tmp_used;
        u64 next_group;
        struct inode *alloc_inode = ac->ac_inode;
        struct buffer_head *group_bh = NULL;
@@ -1770,6 +1800,11 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
        if (!status)
                ocfs2_bg_discontig_fix_result(ac, bg, res);
 
+       /*
+        * sr_bg_blkno might have been changed by
+        * ocfs2_bg_discontig_fix_result
+        */
+       res->sr_bg_stable_blkno = group_bh->b_blocknr;
 
        /*
         * Keep track of previous block descriptor read. When
@@ -1796,22 +1831,17 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
                }
        }
 
-       /* Ok, claim our bits now: set the info on dinode, chainlist
-        * and then the group */
-       status = ocfs2_journal_access_di(handle,
-                                        INODE_CACHE(alloc_inode),
-                                        ac->ac_bh,
-                                        OCFS2_JOURNAL_ACCESS_WRITE);
-       if (status < 0) {
+       if (ac->ac_find_loc_only)
+               goto out_loc_only;
+
+       status = ocfs2_alloc_dinode_update_counts(alloc_inode, handle,
+                                                 ac->ac_bh, res->sr_bits,
+                                                 chain);
+       if (status) {
                mlog_errno(status);
                goto bail;
        }
 
-       tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used);
-       fe->id1.bitmap1.i_used = cpu_to_le32(res->sr_bits + tmp_used);
-       le32_add_cpu(&cl->cl_recs[chain].c_free, -res->sr_bits);
-       ocfs2_journal_dirty(handle, ac->ac_bh);
-
        status = ocfs2_block_group_set_bits(handle,
                                            alloc_inode,
                                            bg,
@@ -1826,6 +1856,7 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
        mlog(0, "Allocated %u bits from suballocator %llu\n", res->sr_bits,
             (unsigned long long)le64_to_cpu(fe->i_blkno));
 
+out_loc_only:
        *bits_left = le16_to_cpu(bg->bg_free_bits_count);
 bail:
        brelse(group_bh);
@@ -1845,6 +1876,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
        int status;
        u16 victim, i;
        u16 bits_left = 0;
+       u64 hint = ac->ac_last_group;
        struct ocfs2_chain_list *cl;
        struct ocfs2_dinode *fe;
 
@@ -1872,7 +1904,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
                goto bail;
        }
 
-       res->sr_bg_blkno = ac->ac_last_group;
+       res->sr_bg_blkno = hint;
        if (res->sr_bg_blkno) {
                /* Attempt to short-circuit the usual search mechanism
                 * by jumping straight to the most recently used
@@ -1896,8 +1928,10 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
 
        status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
                                    res, &bits_left);
-       if (!status)
+       if (!status) {
+               hint = ocfs2_group_from_res(res);
                goto set_hint;
+       }
        if (status < 0 && status != -ENOSPC) {
                mlog_errno(status);
                goto bail;
@@ -1920,8 +1954,10 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
                ac->ac_chain = i;
                status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
                                            res, &bits_left);
-               if (!status)
+               if (!status) {
+                       hint = ocfs2_group_from_res(res);
                        break;
+               }
                if (status < 0 && status != -ENOSPC) {
                        mlog_errno(status);
                        goto bail;
@@ -1936,7 +1972,7 @@ set_hint:
                if (bits_left < min_bits)
                        ac->ac_last_group = 0;
                else
-                       ac->ac_last_group = res->sr_bg_blkno;
+                       ac->ac_last_group = hint;
        }
 
 bail:
@@ -2016,6 +2052,136 @@ static inline void ocfs2_save_inode_ac_group(struct inode *dir,
        OCFS2_I(dir)->ip_last_used_slot = ac->ac_alloc_slot;
 }
 
+int ocfs2_find_new_inode_loc(struct inode *dir,
+                            struct buffer_head *parent_fe_bh,
+                            struct ocfs2_alloc_context *ac,
+                            u64 *fe_blkno)
+{
+       int ret;
+       handle_t *handle = NULL;
+       struct ocfs2_suballoc_result *res;
+
+       BUG_ON(!ac);
+       BUG_ON(ac->ac_bits_given != 0);
+       BUG_ON(ac->ac_bits_wanted != 1);
+       BUG_ON(ac->ac_which != OCFS2_AC_USE_INODE);
+
+       res = kzalloc(sizeof(*res), GFP_NOFS);
+       if (res == NULL) {
+               ret = -ENOMEM;
+               mlog_errno(ret);
+               goto out;
+       }
+
+       ocfs2_init_inode_ac_group(dir, parent_fe_bh, ac);
+
+       /*
+        * The handle started here is for chain relink. Alternatively,
+        * we could just disable relink for these calls.
+        */
+       handle = ocfs2_start_trans(OCFS2_SB(dir->i_sb), OCFS2_SUBALLOC_ALLOC);
+       if (IS_ERR(handle)) {
+               ret = PTR_ERR(handle);
+               handle = NULL;
+               mlog_errno(ret);
+               goto out;
+       }
+
+       /*
+        * This will instruct ocfs2_claim_suballoc_bits and
+        * ocfs2_search_one_group to search but save actual allocation
+        * for later.
+        */
+       ac->ac_find_loc_only = 1;
+
+       ret = ocfs2_claim_suballoc_bits(ac, handle, 1, 1, res);
+       if (ret < 0) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+       ac->ac_find_loc_priv = res;
+       *fe_blkno = res->sr_blkno;
+
+out:
+       if (handle)
+               ocfs2_commit_trans(OCFS2_SB(dir->i_sb), handle);
+
+       if (ret)
+               kfree(res);
+
+       return ret;
+}
+
+int ocfs2_claim_new_inode_at_loc(handle_t *handle,
+                                struct inode *dir,
+                                struct ocfs2_alloc_context *ac,
+                                u64 *suballoc_loc,
+                                u16 *suballoc_bit,
+                                u64 di_blkno)
+{
+       int ret;
+       u16 chain;
+       struct ocfs2_suballoc_result *res = ac->ac_find_loc_priv;
+       struct buffer_head *bg_bh = NULL;
+       struct ocfs2_group_desc *bg;
+       struct ocfs2_dinode *di = (struct ocfs2_dinode *) ac->ac_bh->b_data;
+
+       /*
+        * Since di_blkno is being passed back in, we check for any
+        * inconsistencies which may have happened between
+        * calls. These are code bugs as di_blkno is not expected to
+        * change once returned from ocfs2_find_new_inode_loc()
+        */
+       BUG_ON(res->sr_blkno != di_blkno);
+
+       ret = ocfs2_read_group_descriptor(ac->ac_inode, di,
+                                         res->sr_bg_stable_blkno, &bg_bh);
+       if (ret) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+       bg = (struct ocfs2_group_desc *) bg_bh->b_data;
+       chain = le16_to_cpu(bg->bg_chain);
+
+       ret = ocfs2_alloc_dinode_update_counts(ac->ac_inode, handle,
+                                              ac->ac_bh, res->sr_bits,
+                                              chain);
+       if (ret) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+       ret = ocfs2_block_group_set_bits(handle,
+                                        ac->ac_inode,
+                                        bg,
+                                        bg_bh,
+                                        res->sr_bit_offset,
+                                        res->sr_bits);
+       if (ret < 0) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+       mlog(0, "Allocated %u bits from suballocator %llu\n", res->sr_bits,
+            (unsigned long long)di_blkno);
+
+       atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
+
+       BUG_ON(res->sr_bits != 1);
+
+       *suballoc_loc = res->sr_bg_blkno;
+       *suballoc_bit = res->sr_bit_offset;
+       ac->ac_bits_given++;
+       ocfs2_save_inode_ac_group(dir, ac);
+
+out:
+       brelse(bg_bh);
+
+       return ret;
+}
+
 int ocfs2_claim_new_inode(handle_t *handle,
                          struct inode *dir,
                          struct buffer_head *parent_fe_bh,
@@ -2567,7 +2733,8 @@ out:
  * suballoc_bit.
  */
 static int ocfs2_get_suballoc_slot_bit(struct ocfs2_super *osb, u64 blkno,
-                                      u16 *suballoc_slot, u16 *suballoc_bit)
+                                      u16 *suballoc_slot, u64 *group_blkno,
+                                      u16 *suballoc_bit)
 {
        int status;
        struct buffer_head *inode_bh = NULL;
@@ -2604,6 +2771,8 @@ static int ocfs2_get_suballoc_slot_bit(struct ocfs2_super *osb, u64 blkno,
                *suballoc_slot = le16_to_cpu(inode_fe->i_suballoc_slot);
        if (suballoc_bit)
                *suballoc_bit = le16_to_cpu(inode_fe->i_suballoc_bit);
+       if (group_blkno)
+               *group_blkno = le64_to_cpu(inode_fe->i_suballoc_loc);
 
 bail:
        brelse(inode_bh);
@@ -2621,7 +2790,8 @@ bail:
  */
 static int ocfs2_test_suballoc_bit(struct ocfs2_super *osb,
                                   struct inode *suballoc,
-                                  struct buffer_head *alloc_bh, u64 blkno,
+                                  struct buffer_head *alloc_bh,
+                                  u64 group_blkno, u64 blkno,
                                   u16 bit, int *res)
 {
        struct ocfs2_dinode *alloc_di;
@@ -2642,10 +2812,8 @@ static int ocfs2_test_suballoc_bit(struct ocfs2_super *osb,
                goto bail;
        }
 
-       if (alloc_di->i_suballoc_loc)
-               bg_blkno = le64_to_cpu(alloc_di->i_suballoc_loc);
-       else
-               bg_blkno = ocfs2_which_suballoc_group(blkno, bit);
+       bg_blkno = group_blkno ? group_blkno :
+                  ocfs2_which_suballoc_group(blkno, bit);
        status = ocfs2_read_group_descriptor(suballoc, alloc_di, bg_blkno,
                                             &group_bh);
        if (status < 0) {
@@ -2680,6 +2848,7 @@ bail:
 int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res)
 {
        int status;
+       u64 group_blkno = 0;
        u16 suballoc_bit = 0, suballoc_slot = 0;
        struct inode *inode_alloc_inode;
        struct buffer_head *alloc_bh = NULL;
@@ -2687,7 +2856,7 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res)
        mlog_entry("blkno: %llu", (unsigned long long)blkno);
 
        status = ocfs2_get_suballoc_slot_bit(osb, blkno, &suballoc_slot,
-                                            &suballoc_bit);
+                                            &group_blkno, &suballoc_bit);
        if (status < 0) {
                mlog(ML_ERROR, "get alloc slot and bit failed %d\n", status);
                goto bail;
@@ -2715,7 +2884,7 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res)
        }
 
        status = ocfs2_test_suballoc_bit(osb, inode_alloc_inode, alloc_bh,
-                                        blkno, suballoc_bit, res);
+                                        group_blkno, blkno, suballoc_bit, res);
        if (status < 0)
                mlog(ML_ERROR, "test suballoc bit failed %d\n", status);
 
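
ocfs2_claim_suballoc_bits() used to cache res->sr_bg_blkno as the allocation hint, but that field is zero for contiguous groups and can be rewritten by the discontig fix-up, so ocfs2_group_from_res() derives a stable hint instead: nothing allocated means no hint, otherwise use the recorded group block if present, else recompute the group from the allocated block and bit offset. A sketch of that selection; the walk-back-by-bit-offset helper below is an illustrative stand-in for ocfs2_which_suballoc_group(), not its actual definition:

/* Hint-selection sketch, not ocfs2 code. */
#include <stdint.h>
#include <stdio.h>

struct suballoc_result {
	uint64_t sr_bg_blkno;		/* group block, 0 when the group is contiguous */
	uint64_t sr_blkno;		/* first allocated block, 0 if nothing claimed */
	unsigned int sr_bit_offset;
};

/* Stand-in derivation: assume the descriptor sits bit_offset blocks back. */
static uint64_t which_suballoc_group(uint64_t blkno, unsigned int bit)
{
	return blkno - bit;
}

static uint64_t group_from_res(const struct suballoc_result *res)
{
	if (res->sr_blkno == 0)
		return 0;			/* nothing allocated: no hint */
	if (res->sr_bg_blkno)
		return res->sr_bg_blkno;	/* discontiguous group recorded explicitly */
	return which_suballoc_group(res->sr_blkno, res->sr_bit_offset);
}

int main(void)
{
	struct suballoc_result contig = { .sr_bg_blkno = 0, .sr_blkno = 5000, .sr_bit_offset = 8 };
	struct suballoc_result sparse = { .sr_bg_blkno = 4096, .sr_blkno = 7200, .sr_bit_offset = 3 };
	struct suballoc_result none   = { 0 };

	printf("%llu %llu %llu\n",
	       (unsigned long long)group_from_res(&contig),	/* 4992 */
	       (unsigned long long)group_from_res(&sparse),	/* 4096 */
	       (unsigned long long)group_from_res(&none));	/* 0 */
	return 0;
}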
index a017dd3ee7d9ce2d6c0429d090b57ac077ed585a..b8afabfeede4c43694bdb8bf0a9664b0befd24a6 100644 (file)
@@ -56,6 +56,9 @@ struct ocfs2_alloc_context {
        u64    ac_max_block;  /* Highest block number to allocate. 0 is
                                 is the same as ~0 - unlimited */
 
+       int    ac_find_loc_only;  /* hack for reflink operation ordering */
+       struct ocfs2_suballoc_result *ac_find_loc_priv; /* */
+
        struct ocfs2_alloc_reservation  *ac_resv;
 };
 
@@ -197,4 +200,22 @@ int ocfs2_lock_allocators(struct inode *inode, struct ocfs2_extent_tree *et,
                          struct ocfs2_alloc_context **meta_ac);
 
 int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res);
+
+
+
+/*
+ * The following two interfaces are for ocfs2_create_inode_in_orphan().
+ */
+int ocfs2_find_new_inode_loc(struct inode *dir,
+                            struct buffer_head *parent_fe_bh,
+                            struct ocfs2_alloc_context *ac,
+                            u64 *fe_blkno);
+
+int ocfs2_claim_new_inode_at_loc(handle_t *handle,
+                                struct inode *dir,
+                                struct ocfs2_alloc_context *ac,
+                                u64 *suballoc_loc,
+                                u16 *suballoc_bit,
+                                u64 di_blkno);
+
 #endif /* _CHAINALLOC_H_ */
index 180cf5a0bd67119218c170b265cf944c103b889b..3b8b456603318f1ff017056cdeda40129d559ab4 100644 (file)
@@ -146,7 +146,7 @@ u64 stable_page_flags(struct page *page)
        u |= kpf_copy_bit(k, KPF_HWPOISON,      PG_hwpoison);
 #endif
 
-#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
+#ifdef CONFIG_ARCH_USES_PG_UNCACHED
        u |= kpf_copy_bit(k, KPF_UNCACHED,      PG_uncached);
 #endif
 
index 439fc1f1c1c41487ad76d23523d995a9f416b926..271afc48b9a5d58dd2874d41f8a08dfcae644481 100644 (file)
@@ -224,7 +224,8 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
        /* We don't show the stack guard page in /proc/maps */
        start = vma->vm_start;
        if (vma->vm_flags & VM_GROWSDOWN)
-               start += PAGE_SIZE;
+               if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
+                       start += PAGE_SIZE;
 
        seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
                        start,
index 1b27b5688f62fdfd8abd40386c6e1ece7aa1bf49..da3fefe91a8f855f59e62c84f0ef70a2a9bd0524 100644 (file)
@@ -340,7 +340,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
        char *p;
 
        p = d_path(&file->f_path, last_sysfs_file, sizeof(last_sysfs_file));
-       if (p)
+       if (!IS_ERR(p))
                memmove(last_sysfs_file, p, strlen(p) + 1);
 
        /* need attr_sd for attr and ops, its parent for kobj */
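
d_path() returns an ERR_PTR() on failure rather than NULL, so the old `if (p)` test was always true and a failed lookup could hand a bogus pointer to memmove(); the fix checks IS_ERR(). A userspace sketch of the kernel's error-pointer convention and why the NULL test is the wrong one:

/* ERR_PTR/IS_ERR sketch in userspace, mirroring the kernel convention
 * that errors are encoded as pointers in the top MAX_ERRNO values. */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* A d_path()-style function: returns a string or an error pointer, never NULL. */
static char *fake_d_path(int fail)
{
	static char path[] = "/sys/devices/demo";

	if (fail)
		return ERR_PTR(-ENAMETOOLONG);
	return path;
}

int main(void)
{
	char *p = fake_d_path(1);

	if (!IS_ERR(p))			/* `if (p)` would wrongly take this branch */
		printf("path: %s\n", p);
	else
		printf("d_path failed: %ld\n", PTR_ERR(p));
	return 0;
}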
index 15412fe15c3a47f3cc744af36cecfe2b01e5a5a3..b552f816de15942095f82ad0302b8656f7db09ae 100644 (file)
@@ -852,8 +852,8 @@ xfs_convert_page(
                SetPageUptodate(page);
 
        if (count) {
-               wbc->nr_to_write--;
-               if (wbc->nr_to_write <= 0)
+               if (--wbc->nr_to_write <= 0 &&
+                   wbc->sync_mode == WB_SYNC_NONE)
                        done = 1;
        }
        xfs_start_page_writeback(page, !page_dirty, count);
@@ -1068,7 +1068,7 @@ xfs_vm_writepage(
         * by themselves.
         */
        if ((current->flags & (PF_MEMALLOC|PF_KSWAPD)) == PF_MEMALLOC)
-               goto out_fail;
+               goto redirty;
 
        /*
         * We need a transaction if there are delalloc or unwritten buffers
@@ -1080,7 +1080,7 @@ xfs_vm_writepage(
         */
        xfs_count_page_state(page, &delalloc, &unwritten);
        if ((current->flags & PF_FSTRANS) && (delalloc || unwritten))
-               goto out_fail;
+               goto redirty;
 
        /* Is this page beyond the end of the file? */
        offset = i_size_read(inode);
@@ -1245,12 +1245,15 @@ error:
        if (iohead)
                xfs_cancel_ioend(iohead);
 
+       if (err == -EAGAIN)
+               goto redirty;
+
        xfs_aops_discard_page(page);
        ClearPageUptodate(page);
        unlock_page(page);
        return err;
 
-out_fail:
+redirty:
        redirty_page_for_writepage(wbc, page);
        unlock_page(page);
        return 0;
index ea79072f521012549c4c2673a6210340834202a4..d72cf2bb054a54bd8fcfbd7dc75f582ad2d29591 100644 (file)
@@ -440,12 +440,7 @@ _xfs_buf_find(
                ASSERT(btp == bp->b_target);
                if (bp->b_file_offset == range_base &&
                    bp->b_buffer_length == range_length) {
-                       /*
-                        * If we look at something, bring it to the
-                        * front of the list for next time.
-                        */
                        atomic_inc(&bp->b_hold);
-                       list_move(&bp->b_hash_list, &hash->bh_list);
                        goto found;
                }
        }
@@ -1443,8 +1438,7 @@ xfs_alloc_bufhash(
 {
        unsigned int            i;
 
-       btp->bt_hashshift = external ? 3 : 8;   /* 8 or 256 buckets */
-       btp->bt_hashmask = (1 << btp->bt_hashshift) - 1;
+       btp->bt_hashshift = external ? 3 : 12;  /* 8 or 4096 buckets */
        btp->bt_hash = kmem_zalloc_large((1 << btp->bt_hashshift) *
                                         sizeof(xfs_bufhash_t));
        for (i = 0; i < (1 << btp->bt_hashshift); i++) {
index d072e5ff923b3e71eac4000bd432cd84d72e0d9b..2a05614f0b920c672a3d5950aef3701f91aab465 100644 (file)
@@ -137,7 +137,6 @@ typedef struct xfs_buftarg {
        size_t                  bt_smask;
 
        /* per device buffer hash table */
-       uint                    bt_hashmask;
        uint                    bt_hashshift;
        xfs_bufhash_t           *bt_hash;
 
index 237f5ffb2ee8ca067f6dc81ebc38a6de1f7b36ce..4fec427b83efc9dbf42ae9cda3aa64c5c71cdbec 100644 (file)
@@ -906,6 +906,13 @@ xfs_ioctl_setattr(
        if (XFS_FORCED_SHUTDOWN(mp))
                return XFS_ERROR(EIO);
 
+       /*
+        * Disallow 32bit project ids because on-disk structure
+        * is 16bit only.
+        */
+       if ((mask & FSX_PROJID) && (fa->fsx_projid > (__uint16_t)-1))
+               return XFS_ERROR(EINVAL);
+
        /*
         * If disk quotas is on, we make sure that the dquots do exist on disk,
         * before we start any other transactions. Trying to do this later
index 68be25dcd301801506889f0f2ac23cd5e5ff420d..b1fc2a6bfe834667cefe95e9a842527ba886944d 100644 (file)
@@ -664,7 +664,7 @@ xfs_vn_fiemap(
                                        fieinfo->fi_extents_max + 1;
        bm.bmv_count = min_t(__s32, bm.bmv_count,
                             (PAGE_SIZE * 16 / sizeof(struct getbmapx)));
-       bm.bmv_iflags = BMV_IF_PREALLOC;
+       bm.bmv_iflags = BMV_IF_PREALLOC | BMV_IF_NO_HOLES;
        if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR)
                bm.bmv_iflags |= BMV_IF_ATTRFORK;
        if (!(fieinfo->fi_flags & FIEMAP_FLAG_SYNC))
index 15c35b62ff14ba46e0dd31d09cb5b0dfb54cf195..a4e07974955be3025ebbbe3839264b9bb1acba74 100644 (file)
@@ -1226,6 +1226,7 @@ xfs_fs_statfs(
        struct xfs_inode        *ip = XFS_I(dentry->d_inode);
        __uint64_t              fakeinos, id;
        xfs_extlen_t            lsize;
+       __int64_t               ffree;
 
        statp->f_type = XFS_SB_MAGIC;
        statp->f_namelen = MAXNAMELEN - 1;
@@ -1249,7 +1250,11 @@ xfs_fs_statfs(
                statp->f_files = min_t(typeof(statp->f_files),
                                        statp->f_files,
                                        mp->m_maxicount);
-       statp->f_ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
+
+       /* make sure statp->f_ffree does not underflow */
+       ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
+       statp->f_ffree = max_t(__int64_t, ffree, 0);
+
        spin_unlock(&mp->m_sb_lock);
 
        if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) ||
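
The statfs change computes the free-inode count in a signed 64-bit temporary and clamps it at zero; with plain unsigned arithmetic, a deficit (sb_icount - sb_ifree larger than f_files) would wrap around to an enormous bogus value. A compact sketch of the same clamp:

#include <inttypes.h>
#include <stdio.h>

static uint64_t safe_ffree(uint64_t f_files, uint64_t icount, uint64_t ifree)
{
        /* do the subtraction signed so a deficit shows up as a negative number... */
        int64_t ffree = (int64_t)f_files - (int64_t)(icount - ifree);

        /* ...then clamp, mirroring max_t(__int64_t, ffree, 0) */
        return ffree > 0 ? (uint64_t)ffree : 0;
}

int main(void)
{
        printf("%" PRIu64 "\n", safe_ffree(100, 150, 60));      /* 10 */
        printf("%" PRIu64 "\n", safe_ffree(100, 300, 60));      /* clamped to 0, not ~2^64 */
        return 0;
}
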
@@ -1402,7 +1407,7 @@ xfs_fs_freeze(
 
        xfs_save_resvblks(mp);
        xfs_quiesce_attr(mp);
-       return -xfs_fs_log_dummy(mp);
+       return -xfs_fs_log_dummy(mp, SYNC_WAIT);
 }
 
 STATIC int
index dfcbd98d15997e62e7d8a433fa71e5b8a9912609..d59c4a65d492c9b6b0713accaec1ab1c2ba7ea5f 100644 (file)
@@ -34,6 +34,7 @@
 #include "xfs_inode_item.h"
 #include "xfs_quota.h"
 #include "xfs_trace.h"
+#include "xfs_fsops.h"
 
 #include <linux/kthread.h>
 #include <linux/freezer.h>
@@ -340,38 +341,6 @@ xfs_sync_attr(
                                     XFS_ICI_NO_TAG, 0, NULL);
 }
 
-STATIC int
-xfs_commit_dummy_trans(
-       struct xfs_mount        *mp,
-       uint                    flags)
-{
-       struct xfs_inode        *ip = mp->m_rootip;
-       struct xfs_trans        *tp;
-       int                     error;
-
-       /*
-        * Put a dummy transaction in the log to tell recovery
-        * that all others are OK.
-        */
-       tp = xfs_trans_alloc(mp, XFS_TRANS_DUMMY1);
-       error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
-       if (error) {
-               xfs_trans_cancel(tp, 0);
-               return error;
-       }
-
-       xfs_ilock(ip, XFS_ILOCK_EXCL);
-
-       xfs_trans_ijoin(tp, ip);
-       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-       error = xfs_trans_commit(tp, 0);
-       xfs_iunlock(ip, XFS_ILOCK_EXCL);
-
-       /* the log force ensures this transaction is pushed to disk */
-       xfs_log_force(mp, (flags & SYNC_WAIT) ? XFS_LOG_SYNC : 0);
-       return error;
-}
-
 STATIC int
 xfs_sync_fsdata(
        struct xfs_mount        *mp)
@@ -432,7 +401,7 @@ xfs_quiesce_data(
 
        /* mark the log as covered if needed */
        if (xfs_log_need_covered(mp))
-               error2 = xfs_commit_dummy_trans(mp, SYNC_WAIT);
+               error2 = xfs_fs_log_dummy(mp, SYNC_WAIT);
 
        /* flush data-only devices */
        if (mp->m_rtdev_targp)
@@ -563,7 +532,7 @@ xfs_flush_inodes(
 /*
  * Every sync period we need to unpin all items, reclaim inodes and sync
  * disk quotas.  We might need to cover the log to indicate that the
- * filesystem is idle.
+ * filesystem is idle and not frozen.
  */
 STATIC void
 xfs_sync_worker(
@@ -577,8 +546,9 @@ xfs_sync_worker(
                xfs_reclaim_inodes(mp, 0);
                /* dgc: errors ignored here */
                error = xfs_qm_sync(mp, SYNC_TRYLOCK);
-               if (xfs_log_need_covered(mp))
-                       error = xfs_commit_dummy_trans(mp, 0);
+               if (mp->m_super->s_frozen == SB_UNFROZEN &&
+                   xfs_log_need_covered(mp))
+                       error = xfs_fs_log_dummy(mp, 0);
        }
        mp->m_sync_seq++;
        wake_up(&mp->m_wait_single_sync_task);
index 23f14e595c18cd3a8068796f5b73396fa9db98f9..f90dadd5a96877ff9975ecaaf2fb260863f7d089 100644 (file)
@@ -5533,12 +5533,24 @@ xfs_getbmap(
                                        map[i].br_startblock))
                                goto out_free_map;
 
-                       nexleft--;
                        bmv->bmv_offset =
                                out[cur_ext].bmv_offset +
                                out[cur_ext].bmv_length;
                        bmv->bmv_length =
                                max_t(__int64_t, 0, bmvend - bmv->bmv_offset);
+
+                       /*
+                        * In case we don't want to return the hole,
+                        * don't increase cur_ext so that we can reuse
+                        * it in the next loop.
+                        */
+                       if ((iflags & BMV_IF_NO_HOLES) &&
+                           map[i].br_startblock == HOLESTARTBLOCK) {
+                               memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
+                               continue;
+                       }
+
+                       nexleft--;
                        bmv->bmv_entries++;
                        cur_ext++;
                }
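
With BMV_IF_NO_HOLES set, the loop above zeroes the output slot it just filled and skips the cur_ext increment, so the next real mapping overwrites the hole entry. The same reuse-the-slot pattern in self-contained form (the record layout and HOLE marker are illustrative, not the getbmapx ABI):

#include <stdio.h>
#include <string.h>

#define HOLE (-1L)                      /* stand-in for HOLESTARTBLOCK */

struct mapping { long startblock; long length; };

/* copy mappings into out[], optionally dropping holes; returns entries used */
static int fill_extents(const struct mapping *in, int nin,
                        struct mapping *out, int skip_holes)
{
        int cur_ext = 0;

        for (int i = 0; i < nin; i++) {
                out[cur_ext] = in[i];
                /* don't advance cur_ext for a hole, so the slot gets reused */
                if (skip_holes && in[i].startblock == HOLE) {
                        memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
                        continue;
                }
                cur_ext++;
        }
        return cur_ext;
}

int main(void)
{
        struct mapping in[] = { { 10, 4 }, { HOLE, 8 }, { 30, 2 } };
        struct mapping out[3];

        printf("%d extents returned\n", fill_extents(in, 3, out, 1));   /* 2 */
        return 0;
}
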
index 7cf7220e7d5fb30dfece1fe9aa4f2798f3e017fc..87c2e9d02288d6ff0eba2949d379e1d02546eb11 100644 (file)
@@ -114,8 +114,10 @@ struct getbmapx {
 #define BMV_IF_NO_DMAPI_READ   0x2     /* Do not generate DMAPI read event  */
 #define BMV_IF_PREALLOC                0x4     /* rtn status BMV_OF_PREALLOC if req */
 #define BMV_IF_DELALLOC                0x8     /* rtn status BMV_OF_DELALLOC if req */
+#define BMV_IF_NO_HOLES                0x10    /* Do not return holes */
 #define BMV_IF_VALID   \
-       (BMV_IF_ATTRFORK|BMV_IF_NO_DMAPI_READ|BMV_IF_PREALLOC|BMV_IF_DELALLOC)
+       (BMV_IF_ATTRFORK|BMV_IF_NO_DMAPI_READ|BMV_IF_PREALLOC|  \
+        BMV_IF_DELALLOC|BMV_IF_NO_HOLES)
 
 /*     bmv_oflags values - returned for each non-header segment */
 #define BMV_OF_PREALLOC                0x1     /* segment = unwritten pre-allocation */
index dbca5f5c37bad18fc220ce21f551d17b58f0c13a..43b1d56993350ba3af58c53757be99dfa936c017 100644 (file)
@@ -604,31 +604,36 @@ out:
        return 0;
 }
 
+/*
+ * Dump a transaction into the log that contains no real change. This is needed
+ * to be able to make the log dirty or stamp the current tail LSN into the log
+ * during the covering operation.
+ *
+ * We cannot use an inode here for this - that will push dirty state back up
+ * into the VFS and then periodic inode flushing will prevent log covering from
+ * making progress. Hence we log a field in the superblock instead.
+ */
 int
 xfs_fs_log_dummy(
-       xfs_mount_t     *mp)
+       xfs_mount_t     *mp,
+       int             flags)
 {
        xfs_trans_t     *tp;
-       xfs_inode_t     *ip;
        int             error;
 
        tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1, KM_SLEEP);
-       error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
+       error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
+                                       XFS_DEFAULT_LOG_COUNT);
        if (error) {
                xfs_trans_cancel(tp, 0);
                return error;
        }
 
-       ip = mp->m_rootip;
-       xfs_ilock(ip, XFS_ILOCK_EXCL);
-
-       xfs_trans_ijoin(tp, ip);
-       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-       xfs_trans_set_sync(tp);
-       error = xfs_trans_commit(tp, 0);
-
-       xfs_iunlock(ip, XFS_ILOCK_EXCL);
-       return error;
+       /* log the UUID because it is an unchanging field */
+       xfs_mod_sb(tp, XFS_SB_UUID);
+       if (flags & SYNC_WAIT)
+               xfs_trans_set_sync(tp);
+       return xfs_trans_commit(tp, 0);
 }
 
 int
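
The rewritten xfs_fs_log_dummy() above logs an unchanging superblock field (the UUID) purely to dirty the log, and only makes the transaction synchronous when SYNC_WAIT is passed. As a loose user-space analogy only - not the XFS transaction machinery - the "append a no-op marker, sync only if asked" shape looks like this:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define SYNC_WAIT 0x1

/* append a tiny no-op marker whose only purpose is to dirty the journal,
 * pushing it to stable storage when the caller asked for SYNC_WAIT */
static int log_dummy(int logfd, int flags)
{
        static const char marker[] = "dummy-record\n";

        if (write(logfd, marker, sizeof(marker) - 1) < 0)
                return -1;
        if (flags & SYNC_WAIT)
                return fsync(logfd);
        return 0;
}

int main(void)
{
        int fd = open("journal.log", O_WRONLY | O_CREAT | O_APPEND, 0644);

        if (fd < 0)
                return 1;
        log_dummy(fd, SYNC_WAIT);
        close(fd);
        return 0;
}
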
index 88435e0a77c9b2f67035344e8f3afc58532d4d5c..a786c5212c1e478677e46f2725105a010ebd1262 100644 (file)
@@ -25,6 +25,6 @@ extern int xfs_fs_counts(xfs_mount_t *mp, xfs_fsop_counts_t *cnt);
 extern int xfs_reserve_blocks(xfs_mount_t *mp, __uint64_t *inval,
                                xfs_fsop_resblks_t *outval);
 extern int xfs_fs_goingdown(xfs_mount_t *mp, __uint32_t inflags);
-extern int xfs_fs_log_dummy(xfs_mount_t *mp);
+extern int xfs_fs_log_dummy(xfs_mount_t *mp, int flags);
 
 #endif /* __XFS_FSOPS_H__ */
index abf80ae1e95bed43db56f17dd8a7ffc06039a60a..5371d2dc360ebde33776d7f44d87e4c79afe210e 100644 (file)
@@ -1213,7 +1213,6 @@ xfs_imap_lookup(
        struct xfs_inobt_rec_incore rec;
        struct xfs_btree_cur    *cur;
        struct xfs_buf          *agbp;
-       xfs_agino_t             startino;
        int                     error;
        int                     i;
 
@@ -1227,13 +1226,13 @@ xfs_imap_lookup(
        }
 
        /*
-        * derive and lookup the exact inode record for the given agino. If the
-        * record cannot be found, then it's an invalid inode number and we
-        * should abort.
+        * Lookup the inode record for the given agino. If the record cannot be
+        * found, then it's an invalid inode number and we should abort. Once
+        * we have a record, we need to ensure it contains the inode number
+        * we are looking up.
         */
        cur = xfs_inobt_init_cursor(mp, tp, agbp, agno);
-       startino = agino & ~(XFS_IALLOC_INODES(mp) - 1);
-       error = xfs_inobt_lookup(cur, startino, XFS_LOOKUP_EQ, &i);
+       error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i);
        if (!error) {
                if (i)
                        error = xfs_inobt_get_rec(cur, &rec, &i);
@@ -1246,6 +1245,11 @@ xfs_imap_lookup(
        if (error)
                return error;
 
+       /* check that the returned record contains the required inode */
+       if (rec.ir_startino > agino ||
+           rec.ir_startino + XFS_IALLOC_INODES(mp) <= agino)
+               return EINVAL;
+
        /* for untrusted inodes check it is allocated first */
        if ((flags & XFS_IGET_UNTRUSTED) &&
            (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino)))
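
Rather than rounding the inode number down to a chunk boundary and demanding an exact match, the lookup above now finds the record at or below the target (XFS_LOOKUP_LE) and then verifies that the chunk really contains it. That lookup-then-verify shape, reduced to a sketch with an illustrative record layout and chunk size:

#include <stdio.h>

#define CHUNK_INODES 64                 /* stand-in for XFS_IALLOC_INODES() */

struct inobt_rec { unsigned long startino; };

/* find the record with the largest startino <= agino, or NULL */
static const struct inobt_rec *lookup_le(const struct inobt_rec *recs,
                                         int nrecs, unsigned long agino)
{
        const struct inobt_rec *best = NULL;

        for (int i = 0; i < nrecs; i++)
                if (recs[i].startino <= agino)
                        best = &recs[i];
        return best;
}

/* mirrors the added check: startino <= agino < startino + chunk size */
static int rec_contains(const struct inobt_rec *rec, unsigned long agino)
{
        return rec && rec->startino <= agino &&
               agino < rec->startino + CHUNK_INODES;
}

int main(void)
{
        struct inobt_rec recs[] = { { 0 }, { 128 }, { 256 } };
        const struct inobt_rec *rec = lookup_le(recs, 3, 200);

        /* record starts at 128 but only covers 128..191, so 200 is invalid */
        printf("start %lu, contains 200: %d\n", rec->startino, rec_contains(rec, 200));
        return 0;
}
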
index 68415cb4f23cab39861119c68838b1b099029df2..34798f391c49349018f04a47d625c6aafa035bea 100644 (file)
@@ -1914,6 +1914,11 @@ xfs_iunlink_remove(
        return 0;
 }
 
+/*
+ * A big issue when freeing the inode cluster is that we _cannot_ skip any
+ * inodes that are in memory - they all must be marked stale and attached to
+ * the cluster buffer.
+ */
 STATIC void
 xfs_ifree_cluster(
        xfs_inode_t     *free_ip,
@@ -1945,8 +1950,6 @@ xfs_ifree_cluster(
        }
 
        for (j = 0; j < nbufs; j++, inum += ninodes) {
-               int     found = 0;
-
                blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
                                         XFS_INO_TO_AGBNO(mp, inum));
 
@@ -1965,7 +1968,9 @@ xfs_ifree_cluster(
                /*
                 * Walk the inodes already attached to the buffer and mark them
                 * stale. These will all have the flush locks held, so an
-                * in-memory inode walk can't lock them.
+                * in-memory inode walk can't lock them. By marking them all
+                * stale first, we will not attempt to lock them in the loop
+                * below as the XFS_ISTALE flag will be set.
                 */
                lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
                while (lip) {
@@ -1977,11 +1982,11 @@ xfs_ifree_cluster(
                                                        &iip->ili_flush_lsn,
                                                        &iip->ili_item.li_lsn);
                                xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
-                               found++;
                        }
                        lip = lip->li_bio_list;
                }
 
+
                /*
                 * For each inode in memory attempt to add it to the inode
                 * buffer and set it up for being staled on buffer IO
@@ -1993,6 +1998,7 @@ xfs_ifree_cluster(
                 * even trying to lock them.
                 */
                for (i = 0; i < ninodes; i++) {
+retry:
                        read_lock(&pag->pag_ici_lock);
                        ip = radix_tree_lookup(&pag->pag_ici_root,
                                        XFS_INO_TO_AGINO(mp, (inum + i)));
@@ -2003,38 +2009,36 @@ xfs_ifree_cluster(
                                continue;
                        }
 
-                       /* don't try to lock/unlock the current inode */
+                       /*
+                        * Don't try to lock/unlock the current inode, but we
+                        * _cannot_ skip the other inodes that we did not find
+                        * in the list attached to the buffer and are not
+                        * already marked stale. If we can't lock it, back off
+                        * and retry.
+                        */
                        if (ip != free_ip &&
                            !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
                                read_unlock(&pag->pag_ici_lock);
-                               continue;
+                               delay(1);
+                               goto retry;
                        }
                        read_unlock(&pag->pag_ici_lock);
 
-                       if (!xfs_iflock_nowait(ip)) {
-                               if (ip != free_ip)
-                                       xfs_iunlock(ip, XFS_ILOCK_EXCL);
-                               continue;
-                       }
-
+                       xfs_iflock(ip);
                        xfs_iflags_set(ip, XFS_ISTALE);
-                       if (xfs_inode_clean(ip)) {
-                               ASSERT(ip != free_ip);
-                               xfs_ifunlock(ip);
-                               xfs_iunlock(ip, XFS_ILOCK_EXCL);
-                               continue;
-                       }
 
+                       /*
+                        * we don't need to attach clean inodes or those only
+                        * with unlogged changes (which we throw away, anyway).
+                        */
                        iip = ip->i_itemp;
-                       if (!iip) {
-                               /* inode with unlogged changes only */
+                       if (!iip || xfs_inode_clean(ip)) {
                                ASSERT(ip != free_ip);
                                ip->i_update_core = 0;
                                xfs_ifunlock(ip);
                                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                                continue;
                        }
-                       found++;
 
                        iip->ili_last_fields = iip->ili_format.ilf_fields;
                        iip->ili_format.ilf_fields = 0;
@@ -2049,8 +2053,7 @@ xfs_ifree_cluster(
                                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                }
 
-               if (found)
-                       xfs_trans_stale_inode_buf(tp, bp);
+               xfs_trans_stale_inode_buf(tp, bp);
                xfs_trans_binval(tp, bp);
        }
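
The behavioural core of this hunk is that an inode which cannot be trylocked is no longer skipped - every in-memory inode in the cluster has to end up marked stale - so contention now means back off briefly and retry. The same lock-or-retry pattern, sketched with a pthread mutex standing in for the inode lock:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/*
 * Keep retrying until the lock is taken: the object may not be skipped,
 * so on contention back off for a moment and try again (cf. delay(1)
 * followed by "goto retry" above).
 */
static void lock_or_retry(pthread_mutex_t *lock)
{
        while (pthread_mutex_trylock(lock) == EBUSY)
                usleep(1000);           /* back off ~1ms, then retry */
}

int main(void)
{
        pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

        lock_or_retry(&lock);
        puts("locked");
        pthread_mutex_unlock(&lock);
        return 0;
}
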
 
index 925d572bf0f405e9a94be11b74549c5f760b3ec4..33f718f92a4849df234880dfe190069791d7bcb6 100644 (file)
@@ -3015,7 +3015,8 @@ _xfs_log_force(
 
        XFS_STATS_INC(xs_log_force);
 
-       xlog_cil_push(log, 1);
+       if (log->l_cilp)
+               xlog_cil_force(log);
 
        spin_lock(&log->l_icloglock);
 
@@ -3167,7 +3168,7 @@ _xfs_log_force_lsn(
        XFS_STATS_INC(xs_log_force);
 
        if (log->l_cilp) {
-               lsn = xlog_cil_push_lsn(log, lsn);
+               lsn = xlog_cil_force_lsn(log, lsn);
                if (lsn == NULLCOMMITLSN)
                        return 0;
        }
@@ -3724,7 +3725,7 @@ xfs_log_force_umount(
         * call below.
         */
        if (!logerror && (mp->m_flags & XFS_MOUNT_DELAYLOG))
-               xlog_cil_push(log, 1);
+               xlog_cil_force(log);
 
        /*
         * We must hold both the GRANT lock and the LOG lock,
index 31e4ea2d19acfc08f069813dffa367e6b2420973..ed575fb4b49597806200f676680ff787d5be9d12 100644 (file)
@@ -68,6 +68,7 @@ xlog_cil_init(
        ctx->sequence = 1;
        ctx->cil = cil;
        cil->xc_ctx = ctx;
+       cil->xc_current_sequence = ctx->sequence;
 
        cil->xc_log = log;
        log->l_cilp = cil;
@@ -269,15 +270,10 @@ xlog_cil_insert(
 static void
 xlog_cil_format_items(
        struct log              *log,
-       struct xfs_log_vec      *log_vector,
-       struct xlog_ticket      *ticket,
-       xfs_lsn_t               *start_lsn)
+       struct xfs_log_vec      *log_vector)
 {
        struct xfs_log_vec *lv;
 
-       if (start_lsn)
-               *start_lsn = log->l_cilp->xc_ctx->sequence;
-
        ASSERT(log_vector);
        for (lv = log_vector; lv; lv = lv->lv_next) {
                void    *ptr;
@@ -301,9 +297,24 @@ xlog_cil_format_items(
                        ptr += vec->i_len;
                }
                ASSERT(ptr == lv->lv_buf + lv->lv_buf_len);
+       }
+}
+
+static void
+xlog_cil_insert_items(
+       struct log              *log,
+       struct xfs_log_vec      *log_vector,
+       struct xlog_ticket      *ticket,
+       xfs_lsn_t               *start_lsn)
+{
+       struct xfs_log_vec *lv;
+
+       if (start_lsn)
+               *start_lsn = log->l_cilp->xc_ctx->sequence;
 
+       ASSERT(log_vector);
+       for (lv = log_vector; lv; lv = lv->lv_next)
                xlog_cil_insert(log, ticket, lv->lv_item, lv);
-       }
 }
 
 static void
@@ -320,80 +331,6 @@ xlog_cil_free_logvec(
        }
 }
 
-/*
- * Commit a transaction with the given vector to the Committed Item List.
- *
- * To do this, we need to format the item, pin it in memory if required and
- * account for the space used by the transaction. Once we have done that we
- * need to release the unused reservation for the transaction, attach the
- * transaction to the checkpoint context so we carry the busy extents through
- * to checkpoint completion, and then unlock all the items in the transaction.
- *
- * For more specific information about the order of operations in
- * xfs_log_commit_cil() please refer to the comments in
- * xfs_trans_commit_iclog().
- *
- * Called with the context lock already held in read mode to lock out
- * background commit, returns without it held once background commits are
- * allowed again.
- */
-int
-xfs_log_commit_cil(
-       struct xfs_mount        *mp,
-       struct xfs_trans        *tp,
-       struct xfs_log_vec      *log_vector,
-       xfs_lsn_t               *commit_lsn,
-       int                     flags)
-{
-       struct log              *log = mp->m_log;
-       int                     log_flags = 0;
-       int                     push = 0;
-
-       if (flags & XFS_TRANS_RELEASE_LOG_RES)
-               log_flags = XFS_LOG_REL_PERM_RESERV;
-
-       if (XLOG_FORCED_SHUTDOWN(log)) {
-               xlog_cil_free_logvec(log_vector);
-               return XFS_ERROR(EIO);
-       }
-
-       /* lock out background commit */
-       down_read(&log->l_cilp->xc_ctx_lock);
-       xlog_cil_format_items(log, log_vector, tp->t_ticket, commit_lsn);
-
-       /* check we didn't blow the reservation */
-       if (tp->t_ticket->t_curr_res < 0)
-               xlog_print_tic_res(log->l_mp, tp->t_ticket);
-
-       /* attach the transaction to the CIL if it has any busy extents */
-       if (!list_empty(&tp->t_busy)) {
-               spin_lock(&log->l_cilp->xc_cil_lock);
-               list_splice_init(&tp->t_busy,
-                                       &log->l_cilp->xc_ctx->busy_extents);
-               spin_unlock(&log->l_cilp->xc_cil_lock);
-       }
-
-       tp->t_commit_lsn = *commit_lsn;
-       xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
-       xfs_trans_unreserve_and_mod_sb(tp);
-
-       /* check for background commit before unlock */
-       if (log->l_cilp->xc_ctx->space_used > XLOG_CIL_SPACE_LIMIT(log))
-               push = 1;
-       up_read(&log->l_cilp->xc_ctx_lock);
-
-       /*
-        * We need to push CIL every so often so we don't cache more than we
-        * can fit in the log. The limit really is that a checkpoint can't be
-        * more than half the log (the current checkpoint is not allowed to
-        * overwrite the previous checkpoint), but commit latency and memory
-        * usage limit this to a smaller size in most cases.
-        */
-       if (push)
-               xlog_cil_push(log, 0);
-       return 0;
-}
-
 /*
  * Mark all items committed and clear busy extents. We free the log vector
  * chains in a separate pass so that we unpin the log items as quickly as
@@ -427,13 +364,23 @@ xlog_cil_committed(
 }
 
 /*
- * Push the Committed Item List to the log. If the push_now flag is not set,
- * then it is a background flush and so we can chose to ignore it.
+ * Push the Committed Item List to the log. If @push_seq is zero, then it
+ * is a background flush and so we can choose to ignore it. Otherwise, if the
+ * current sequence is the same as @push_seq we need to do a flush. If
+ * @push_seq is less than the current sequence, then it has already been
+ * flushed and we don't need to do anything - the caller will wait for it to
+ * complete if necessary.
+ *
+ * @push_seq is a value rather than a flag because that allows us to do an
+ * unlocked check of the sequence number for a match. Hence we can allow log
+ * forces to run racily and not issue pushes for the same sequence twice. If we
+ * get a race between multiple pushes for the same sequence they will block on
+ * the first one and then abort, hence avoiding needless pushes.
  */
-int
+STATIC int
 xlog_cil_push(
        struct log              *log,
-       int                     push_now)
+       xfs_lsn_t               push_seq)
 {
        struct xfs_cil          *cil = log->l_cilp;
        struct xfs_log_vec      *lv;
@@ -453,12 +400,14 @@ xlog_cil_push(
        if (!cil)
                return 0;
 
+       ASSERT(!push_seq || push_seq <= cil->xc_ctx->sequence);
+
        new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS);
        new_ctx->ticket = xlog_cil_ticket_alloc(log);
 
        /* lock out transaction commit, but don't block on background push */
        if (!down_write_trylock(&cil->xc_ctx_lock)) {
-               if (!push_now)
+               if (!push_seq)
                        goto out_free_ticket;
                down_write(&cil->xc_ctx_lock);
        }
@@ -469,7 +418,11 @@ xlog_cil_push(
                goto out_skip;
 
        /* check for spurious background flush */
-       if (!push_now && cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log))
+       if (!push_seq && cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log))
+               goto out_skip;
+
+       /* check for a previously pushed sequence */
+       if (push_seq < cil->xc_ctx->sequence)
                goto out_skip;
 
        /*
@@ -514,6 +467,13 @@ xlog_cil_push(
        new_ctx->cil = cil;
        cil->xc_ctx = new_ctx;
 
+       /*
+        * mirror the new sequence into the cil structure so that we can do
+        * unlocked checks against the current sequence in log forces without
+        * risking dereferencing a freed context pointer.
+        */
+       cil->xc_current_sequence = new_ctx->sequence;
+
        /*
         * The switch is now done, so we can drop the context lock and move out
         * of a shared context. We can't just go straight to the commit record,
@@ -625,6 +585,102 @@ out_abort:
        return XFS_ERROR(EIO);
 }
 
+/*
+ * Commit a transaction with the given vector to the Committed Item List.
+ *
+ * To do this, we need to format the item, pin it in memory if required and
+ * account for the space used by the transaction. Once we have done that we
+ * need to release the unused reservation for the transaction, attach the
+ * transaction to the checkpoint context so we carry the busy extents through
+ * to checkpoint completion, and then unlock all the items in the transaction.
+ *
+ * For more specific information about the order of operations in
+ * xfs_log_commit_cil() please refer to the comments in
+ * xfs_trans_commit_iclog().
+ *
+ * Called with the context lock already held in read mode to lock out
+ * background commit, returns without it held once background commits are
+ * allowed again.
+ */
+int
+xfs_log_commit_cil(
+       struct xfs_mount        *mp,
+       struct xfs_trans        *tp,
+       struct xfs_log_vec      *log_vector,
+       xfs_lsn_t               *commit_lsn,
+       int                     flags)
+{
+       struct log              *log = mp->m_log;
+       int                     log_flags = 0;
+       int                     push = 0;
+
+       if (flags & XFS_TRANS_RELEASE_LOG_RES)
+               log_flags = XFS_LOG_REL_PERM_RESERV;
+
+       if (XLOG_FORCED_SHUTDOWN(log)) {
+               xlog_cil_free_logvec(log_vector);
+               return XFS_ERROR(EIO);
+       }
+
+       /*
+        * do all the hard work of formatting items (including memory
+        * allocation) outside the CIL context lock. This prevents stalling CIL
+        * pushes when we are low on memory and a transaction commit spends a
+        * lot of time in memory reclaim.
+        */
+       xlog_cil_format_items(log, log_vector);
+
+       /* lock out background commit */
+       down_read(&log->l_cilp->xc_ctx_lock);
+       xlog_cil_insert_items(log, log_vector, tp->t_ticket, commit_lsn);
+
+       /* check we didn't blow the reservation */
+       if (tp->t_ticket->t_curr_res < 0)
+               xlog_print_tic_res(log->l_mp, tp->t_ticket);
+
+       /* attach the transaction to the CIL if it has any busy extents */
+       if (!list_empty(&tp->t_busy)) {
+               spin_lock(&log->l_cilp->xc_cil_lock);
+               list_splice_init(&tp->t_busy,
+                                       &log->l_cilp->xc_ctx->busy_extents);
+               spin_unlock(&log->l_cilp->xc_cil_lock);
+       }
+
+       tp->t_commit_lsn = *commit_lsn;
+       xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
+       xfs_trans_unreserve_and_mod_sb(tp);
+
+       /*
+        * Once all the items of the transaction have been copied to the CIL,
+        * the items can be unlocked and freed.
+        *
+        * This needs to be done before we drop the CIL context lock because we
+        * have to update state in the log items and unlock them before they go
+        * to disk. If we don't, then the CIL checkpoint can race with us and
+        * we can run checkpoint completion before we've updated and unlocked
+        * the log items. This affects (at least) processing of stale buffers,
+        * inodes and EFIs.
+        */
+       xfs_trans_free_items(tp, *commit_lsn, 0);
+
+       /* check for background commit before unlock */
+       if (log->l_cilp->xc_ctx->space_used > XLOG_CIL_SPACE_LIMIT(log))
+               push = 1;
+
+       up_read(&log->l_cilp->xc_ctx_lock);
+
+       /*
+        * We need to push CIL every so often so we don't cache more than we
+        * can fit in the log. The limit really is that a checkpoint can't be
+        * more than half the log (the current checkpoint is not allowed to
+        * overwrite the previous checkpoint), but commit latency and memory
+        * usage limit this to a smaller size in most cases.
+        */
+       if (push)
+               xlog_cil_push(log, 0);
+       return 0;
+}
+
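
The relocated xfs_log_commit_cil() now does all item formatting (including memory allocation) before taking the CIL context lock and performs only the cheap list insertion under it, so a commit stuck in memory reclaim can no longer stall CIL pushes. A hedged user-space sketch of that expensive-work-outside-the-lock split, with pthread locking standing in for the context lock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;

struct item { char *payload; struct item *next; };
static struct item *cil_head;

/* expensive part: allocation and formatting, done with no locks held */
static struct item *format_item(const char *text)
{
        struct item *it = malloc(sizeof(*it));

        if (!it)
                return NULL;
        it->payload = strdup(text);
        it->next = NULL;
        return it;
}

/* cheap part: list insertion, done under the context lock */
static void insert_item(struct item *it)
{
        if (!it)
                return;
        pthread_mutex_lock(&ctx_lock);
        it->next = cil_head;
        cil_head = it;
        pthread_mutex_unlock(&ctx_lock);
}

int main(void)
{
        insert_item(format_item("transaction 1"));
        if (cil_head)
                printf("head: %s\n", cil_head->payload);
        return 0;
}
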
 /*
  * Conditionally push the CIL based on the sequence passed in.
  *
@@ -639,39 +695,34 @@ out_abort:
  * commit lsn is there. It'll be empty, so this is broken for now.
  */
 xfs_lsn_t
-xlog_cil_push_lsn(
+xlog_cil_force_lsn(
        struct log      *log,
-       xfs_lsn_t       push_seq)
+       xfs_lsn_t       sequence)
 {
        struct xfs_cil          *cil = log->l_cilp;
        struct xfs_cil_ctx      *ctx;
        xfs_lsn_t               commit_lsn = NULLCOMMITLSN;
 
-restart:
-       down_write(&cil->xc_ctx_lock);
-       ASSERT(push_seq <= cil->xc_ctx->sequence);
-
-       /* check to see if we need to force out the current context */
-       if (push_seq == cil->xc_ctx->sequence) {
-               up_write(&cil->xc_ctx_lock);
-               xlog_cil_push(log, 1);
-               goto restart;
-       }
+       ASSERT(sequence <= cil->xc_current_sequence);
+
+       /*
+        * check to see if we need to force out the current context.
+        * xlog_cil_push() handles racing pushes for the same sequence,
+        * so no need to deal with it here.
+        */
+       if (sequence == cil->xc_current_sequence)
+               xlog_cil_push(log, sequence);
 
        /*
         * See if we can find a previous sequence still committing.
-        * We can drop the flush lock as soon as we have the cil lock
-        * because we are now only comparing contexts protected by
-        * the cil lock.
-        *
         * We need to wait for all previous sequence commits to complete
         * before allowing the force of push_seq to go ahead. Hence block
         * on commits for those as well.
         */
+restart:
        spin_lock(&cil->xc_cil_lock);
-       up_write(&cil->xc_ctx_lock);
        list_for_each_entry(ctx, &cil->xc_committing, committing) {
-               if (ctx->sequence > push_seq)
+               if (ctx->sequence > sequence)
                        continue;
                if (!ctx->commit_lsn) {
                        /*
@@ -681,7 +732,7 @@ restart:
                        sv_wait(&cil->xc_commit_wait, 0, &cil->xc_cil_lock, 0);
                        goto restart;
                }
-               if (ctx->sequence != push_seq)
+               if (ctx->sequence != sequence)
                        continue;
                /* found it! */
                commit_lsn = ctx->commit_lsn;
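
xlog_cil_force_lsn() above only triggers a push when the requested sequence is still the current one, then sleeps on xc_commit_wait and rescans the committing list after every wakeup until all earlier sequences have a commit LSN. The sleep-and-rescan shape of that wait, reduced to a pthread condition-variable sketch with illustrative sequence bookkeeping:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cil_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t commit_wait = PTHREAD_COND_INITIALIZER;
static unsigned long lowest_uncommitted = 5;    /* advanced by committers */

/* block until every sequence <= seq has committed, rechecking after wakeups */
static void wait_for_sequence(unsigned long seq)
{
        pthread_mutex_lock(&cil_lock);
        while (lowest_uncommitted <= seq)       /* the "restart" loop above */
                pthread_cond_wait(&commit_wait, &cil_lock);
        pthread_mutex_unlock(&cil_lock);
}

/* committers call this as checkpoints complete */
static void sequence_committed(unsigned long seq)
{
        pthread_mutex_lock(&cil_lock);
        lowest_uncommitted = seq + 1;
        pthread_cond_broadcast(&commit_wait);
        pthread_mutex_unlock(&cil_lock);
}

int main(void)
{
        sequence_committed(5);          /* pretend sequence 5 just finished */
        wait_for_sequence(5);           /* returns immediately now */
        puts("sequence 5 committed");
        return 0;
}
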
index 8c072618965caf80476a562e7e38cae026089f88..ced52b98b322e3eb1be0e0c7dfc70f6096d0cd80 100644 (file)
@@ -422,6 +422,7 @@ struct xfs_cil {
        struct rw_semaphore     xc_ctx_lock;
        struct list_head        xc_committing;
        sv_t                    xc_commit_wait;
+       xfs_lsn_t               xc_current_sequence;
 };
 
 /*
@@ -562,8 +563,16 @@ int        xlog_cil_init(struct log *log);
 void   xlog_cil_init_post_recovery(struct log *log);
 void   xlog_cil_destroy(struct log *log);
 
-int    xlog_cil_push(struct log *log, int push_now);
-xfs_lsn_t xlog_cil_push_lsn(struct log *log, xfs_lsn_t push_sequence);
+/*
+ * CIL force routines
+ */
+xfs_lsn_t xlog_cil_force_lsn(struct log *log, xfs_lsn_t sequence);
+
+static inline void
+xlog_cil_force(struct log *log)
+{
+       xlog_cil_force_lsn(log, log->l_cilp->xc_current_sequence);
+}
 
 /*
  * Unmount record type is used as a pseudo transaction type for the ticket.
index fdca7416c754636a26dae9a312003932f7306d9f..1c47edaea0d28f4def851e87f664bd9339ac19b7 100644 (file)
@@ -1167,7 +1167,7 @@ xfs_trans_del_item(
  * Unlock all of the items of a transaction and free all the descriptors
  * of that transaction.
  */
-STATIC void
+void
 xfs_trans_free_items(
        struct xfs_trans        *tp,
        xfs_lsn_t               commit_lsn,
@@ -1653,9 +1653,6 @@ xfs_trans_commit_cil(
                return error;
 
        current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
-
-       /* xfs_trans_free_items() unlocks them first */
-       xfs_trans_free_items(tp, *commit_lsn, 0);
        xfs_trans_free(tp);
        return 0;
 }
index e2d93d8ead7b68b9b6ab74b1869a3f8e10721981..62da86c90de53bb36b3fd2928d539ae9fc339478 100644 (file)
@@ -25,7 +25,8 @@ struct xfs_trans;
 
 void   xfs_trans_add_item(struct xfs_trans *, struct xfs_log_item *);
 void   xfs_trans_del_item(struct xfs_log_item *);
-
+void   xfs_trans_free_items(struct xfs_trans *tp, xfs_lsn_t commit_lsn,
+                               int flags);
 void   xfs_trans_item_committed(struct xfs_log_item *lip,
                                xfs_lsn_t commit_lsn, int aborted);
 void   xfs_trans_unreserve_and_mod_sb(struct xfs_trans *tp);
index 66d585c6917cfd55e50447b8048a2adc3f60d68c..4c7c7bfb2b2fc0aa8cc3cb3c98334392eb902c00 100644 (file)
@@ -2299,15 +2299,22 @@ xfs_alloc_file_space(
                        e = allocatesize_fsb;
                }
 
+               /*
+                * The transaction reservation is limited to a 32-bit block
+                * count, hence we need to limit the number of blocks we are
+                * trying to reserve to avoid an overflow. We can't allocate
+                * more than @nimaps extents, and an extent is limited on disk
+                * to MAXEXTLEN (21 bits), so use that to enforce the limit.
+                */
+               resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
                if (unlikely(rt)) {
-                       resrtextents = qblocks = (uint)(e - s);
+                       resrtextents = qblocks = resblks;
                        resrtextents /= mp->m_sb.sb_rextsize;
                        resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
                        quota_flag = XFS_QMOPT_RES_RTBLKS;
                } else {
                        resrtextents = 0;
-                       resblks = qblocks = \
-                               XFS_DIOSTRAT_SPACE_RES(mp, (uint)(e - s));
+                       resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
                        quota_flag = XFS_QMOPT_RES_REGBLKS;
                }
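
The comment in this hunk explains the clamp: the reservation interface takes a 32-bit block count, and one pass can allocate at most @nimaps extents of MAXEXTLEN blocks each, so asking for more is pointless and risks overflow. A stand-alone sketch of that min_t()-style clamp (MAXEXTLEN per the comment's 21 bits, everything else illustrative):

#include <inttypes.h>
#include <stdio.h>

#define MAXEXTLEN ((1u << 21) - 1)      /* largest on-disk extent, in blocks */

static uint32_t clamp_resblks(uint64_t want_blocks, unsigned int nimaps)
{
        uint64_t limit = (uint64_t)MAXEXTLEN * nimaps;

        /* never reserve more than one pass can possibly allocate; this also
         * keeps the value inside a 32-bit reservation field */
        return (uint32_t)(want_blocks < limit ? want_blocks : limit);
}

int main(void)
{
        printf("%" PRIu32 "\n", clamp_resblks(1ull << 40, 1));  /* clamped to 2097151 */
        printf("%" PRIu32 "\n", clamp_resblks(1000, 1));        /* 1000 */
        return 0;
}
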
 
index baacd98e7cc6244b795b5db1b09b440292ee3dac..4de84ce3a927123f118861df950202a8dba1a536 100644 (file)
@@ -377,9 +377,6 @@ struct acpi_pci_root {
 
        u32 osc_support_set;    /* _OSC state of support bits */
        u32 osc_control_set;    /* _OSC state of control bits */
-       u32 osc_control_qry;    /* the latest _OSC query result */
-
-       u32 osc_queried:1;      /* has _OSC control been queried? */
 };
 
 /* helper */
index c7376bf80b0604bf8f8b9394989178d6de45ecc2..8ca18e26d7e39fe429a8179d48f2f9f17f58a589 100644 (file)
  * While the GPIO programming interface defines valid GPIO numbers
  * to be in the range 0..MAX_INT, this library restricts them to the
  * smaller range 0..ARCH_NR_GPIOS-1.
+ *
+ * ARCH_NR_GPIOS is somewhat arbitrary; it usually reflects the sum of
+ * builtin/SoC GPIOs plus a number of GPIOs on expanders; the latter is
+ * actually an estimate of a board-specific value.
  */
 
 #ifndef ARCH_NR_GPIOS
 #define ARCH_NR_GPIOS          256
 #endif
 
+/*
+ * "valid" GPIO numbers are nonnegative and may be passed to
+ * setup routines like gpio_request().  Only some valid numbers
+ * can successfully be requested and used.
+ *
+ * Invalid GPIO numbers are useful for indicating no-such-GPIO in
+ * platform data and other tables.
+ */
+
 static inline int gpio_is_valid(int number)
 {
-       /* only some non-negative numbers are valid */
        return ((unsigned)number) < ARCH_NR_GPIOS;
 }
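
gpio_is_valid() folds "number is nonnegative" and "number is below ARCH_NR_GPIOS" into a single comparison by casting to unsigned: a negative int becomes a huge unsigned value and fails the bound check. The idiom in isolation:

#include <stdio.h>

#define ARCH_NR_GPIOS 256

/* one compare covers both "number >= 0" and "number < ARCH_NR_GPIOS" */
static int gpio_is_valid(int number)
{
        return ((unsigned int)number) < ARCH_NR_GPIOS;
}

int main(void)
{
        printf("%d %d %d\n", gpio_is_valid(-1), gpio_is_valid(0), gpio_is_valid(300));
        /* prints: 0 1 0 */
        return 0;
}
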
 
index b5043a9890d85a38d4e63d3ae40f546675639e30..08923b6847681f364557fd52a43b076eaf4d9a2e 100644 (file)
@@ -70,11 +70,16 @@ extern void setup_per_cpu_areas(void);
 
 #else /* ! SMP */
 
-#define per_cpu(var, cpu)                      (*((void)(cpu), &(var)))
-#define __get_cpu_var(var)                     (var)
-#define __raw_get_cpu_var(var)                 (var)
-#define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
-#define __this_cpu_ptr(ptr) this_cpu_ptr(ptr)
+#define VERIFY_PERCPU_PTR(__p) ({                      \
+       __verify_pcpu_ptr((__p));                       \
+       (typeof(*(__p)) __kernel __force *)(__p);       \
+})
+
+#define per_cpu(var, cpu)      (*((void)(cpu), VERIFY_PERCPU_PTR(&(var))))
+#define __get_cpu_var(var)     (*VERIFY_PERCPU_PTR(&(var)))
+#define __raw_get_cpu_var(var) (*VERIFY_PERCPU_PTR(&(var)))
+#define this_cpu_ptr(ptr)      per_cpu_ptr(ptr, 0)
+#define __this_cpu_ptr(ptr)    this_cpu_ptr(ptr)
 
 #endif /* SMP */
 
index 2a512bc0d4ab74f7c56bc9460b7df3e4e3c0f8d9..7809d230adee3f90c9537f6c00ec66bb53c1bd27 100644 (file)
@@ -305,14 +305,16 @@ struct drm_ioctl_desc {
        unsigned int cmd;
        int flags;
        drm_ioctl_t *func;
+       unsigned int cmd_drv;
 };
 
 /**
  * Creates a driver or general drm_ioctl_desc array entry for the given
  * ioctl, for use by drm_ioctl().
  */
-#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
-       [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags}
+
+#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags)                        \
+       [DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl}
 
 struct drm_magic_entry {
        struct list_head head;
index 4b00d2dd4f684f7dfde4256c84f09bf08844e2b6..61315c29b8f3d53dcb903487e13825bd507e48bf 100644 (file)
@@ -264,20 +264,20 @@ typedef struct _drm_i830_sarea {
 #define DRM_I830_GETPARAM      0x0c
 #define DRM_I830_SETPARAM      0x0d
 
-#define DRM_IOCTL_I830_INIT            DRM_IOW( DRM_COMMAND_BASE + DRM_IOCTL_I830_INIT, drm_i830_init_t)
-#define DRM_IOCTL_I830_VERTEX          DRM_IOW( DRM_COMMAND_BASE + DRM_IOCTL_I830_VERTEX, drm_i830_vertex_t)
-#define DRM_IOCTL_I830_CLEAR           DRM_IOW( DRM_COMMAND_BASE + DRM_IOCTL_I830_CLEAR, drm_i830_clear_t)
-#define DRM_IOCTL_I830_FLUSH           DRM_IO ( DRM_COMMAND_BASE + DRM_IOCTL_I830_FLUSH)
-#define DRM_IOCTL_I830_GETAGE          DRM_IO ( DRM_COMMAND_BASE + DRM_IOCTL_I830_GETAGE)
-#define DRM_IOCTL_I830_GETBUF          DRM_IOWR(DRM_COMMAND_BASE + DRM_IOCTL_I830_GETBUF, drm_i830_dma_t)
-#define DRM_IOCTL_I830_SWAP            DRM_IO ( DRM_COMMAND_BASE + DRM_IOCTL_I830_SWAP)
-#define DRM_IOCTL_I830_COPY            DRM_IOW( DRM_COMMAND_BASE + DRM_IOCTL_I830_COPY, drm_i830_copy_t)
-#define DRM_IOCTL_I830_DOCOPY          DRM_IO ( DRM_COMMAND_BASE + DRM_IOCTL_I830_DOCOPY)
-#define DRM_IOCTL_I830_FLIP            DRM_IO ( DRM_COMMAND_BASE + DRM_IOCTL_I830_FLIP)
-#define DRM_IOCTL_I830_IRQ_EMIT         DRM_IOWR(DRM_COMMAND_BASE + DRM_IOCTL_I830_IRQ_EMIT, drm_i830_irq_emit_t)
-#define DRM_IOCTL_I830_IRQ_WAIT         DRM_IOW( DRM_COMMAND_BASE + DRM_IOCTL_I830_IRQ_WAIT, drm_i830_irq_wait_t)
-#define DRM_IOCTL_I830_GETPARAM         DRM_IOWR(DRM_COMMAND_BASE + DRM_IOCTL_I830_GETPARAM, drm_i830_getparam_t)
-#define DRM_IOCTL_I830_SETPARAM         DRM_IOWR(DRM_COMMAND_BASE + DRM_IOCTL_I830_SETPARAM, drm_i830_setparam_t)
+#define DRM_IOCTL_I830_INIT            DRM_IOW( DRM_COMMAND_BASE + DRM_I830_INIT, drm_i830_init_t)
+#define DRM_IOCTL_I830_VERTEX          DRM_IOW( DRM_COMMAND_BASE + DRM_I830_VERTEX, drm_i830_vertex_t)
+#define DRM_IOCTL_I830_CLEAR           DRM_IOW( DRM_COMMAND_BASE + DRM_I830_CLEAR, drm_i830_clear_t)
+#define DRM_IOCTL_I830_FLUSH           DRM_IO ( DRM_COMMAND_BASE + DRM_I830_FLUSH)
+#define DRM_IOCTL_I830_GETAGE          DRM_IO ( DRM_COMMAND_BASE + DRM_I830_GETAGE)
+#define DRM_IOCTL_I830_GETBUF          DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_GETBUF, drm_i830_dma_t)
+#define DRM_IOCTL_I830_SWAP            DRM_IO ( DRM_COMMAND_BASE + DRM_I830_SWAP)
+#define DRM_IOCTL_I830_COPY            DRM_IOW( DRM_COMMAND_BASE + DRM_I830_COPY, drm_i830_copy_t)
+#define DRM_IOCTL_I830_DOCOPY          DRM_IO ( DRM_COMMAND_BASE + DRM_I830_DOCOPY)
+#define DRM_IOCTL_I830_FLIP            DRM_IO ( DRM_COMMAND_BASE + DRM_I830_FLIP)
+#define DRM_IOCTL_I830_IRQ_EMIT         DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_IRQ_EMIT, drm_i830_irq_emit_t)
+#define DRM_IOCTL_I830_IRQ_WAIT         DRM_IOW( DRM_COMMAND_BASE + DRM_I830_IRQ_WAIT, drm_i830_irq_wait_t)
+#define DRM_IOCTL_I830_GETPARAM         DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_GETPARAM, drm_i830_getparam_t)
+#define DRM_IOCTL_I830_SETPARAM         DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_SETPARAM, drm_i830_setparam_t)
 
 typedef struct _drm_i830_clear {
        int clear_color;
index 8f8b072c4c7b6013e12c10e35bbf1da24f6a9d79..e41c74facb6a3e41e84e3c66df6395d1a94add76 100644 (file)
@@ -215,6 +215,7 @@ typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
 #define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
 #define DRM_IOCTL_I915_VBLANK_SWAP     DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
+#define DRM_IOCTL_I915_HWS_ADDR                DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
 #define DRM_IOCTL_I915_GEM_INIT                DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
 #define DRM_IOCTL_I915_GEM_EXECBUFFER  DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
 #define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
index 3ffbc4798afacaec3f521eac11b607269cfd7c20..c16097f99be090f557d21405f8eeee1446be0324 100644 (file)
@@ -248,7 +248,7 @@ typedef struct _drm_mga_sarea {
 #define DRM_MGA_DMA_BOOTSTRAP  0x0c
 
 #define DRM_IOCTL_MGA_INIT     DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t)
-#define DRM_IOCTL_MGA_FLUSH    DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, drm_lock_t)
+#define DRM_IOCTL_MGA_FLUSH    DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, struct drm_lock)
 #define DRM_IOCTL_MGA_RESET    DRM_IO(  DRM_COMMAND_BASE + DRM_MGA_RESET)
 #define DRM_IOCTL_MGA_SWAP     DRM_IO(  DRM_COMMAND_BASE + DRM_MGA_SWAP)
 #define DRM_IOCTL_MGA_CLEAR    DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_CLEAR, drm_mga_clear_t)
index fe917dee723a7357f44c285626189ae7acd5e863..01a7141195068c8e0fd384c0dde661adfb9601b6 100644 (file)
@@ -197,4 +197,17 @@ struct drm_nouveau_sarea {
 #define DRM_NOUVEAU_GEM_CPU_FINI       0x43
 #define DRM_NOUVEAU_GEM_INFO           0x44
 
+#define DRM_IOCTL_NOUVEAU_GETPARAM           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GETPARAM, struct drm_nouveau_getparam)
+#define DRM_IOCTL_NOUVEAU_SETPARAM           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SETPARAM, struct drm_nouveau_setparam)
+#define DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC      DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_ALLOC, struct drm_nouveau_channel_alloc)
+#define DRM_IOCTL_NOUVEAU_CHANNEL_FREE       DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_FREE, struct drm_nouveau_channel_free)
+#define DRM_IOCTL_NOUVEAU_GROBJ_ALLOC        DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GROBJ_ALLOC, struct drm_nouveau_grobj_alloc)
+#define DRM_IOCTL_NOUVEAU_NOTIFIEROBJ_ALLOC  DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, struct drm_nouveau_notifierobj_alloc)
+#define DRM_IOCTL_NOUVEAU_GPUOBJ_FREE        DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GPUOBJ_FREE, struct drm_nouveau_gpuobj_free)
+#define DRM_IOCTL_NOUVEAU_GEM_NEW            DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_NEW, struct drm_nouveau_gem_new)
+#define DRM_IOCTL_NOUVEAU_GEM_PUSHBUF        DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_PUSHBUF, struct drm_nouveau_gem_pushbuf)
+#define DRM_IOCTL_NOUVEAU_GEM_CPU_PREP       DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_PREP, struct drm_nouveau_gem_cpu_prep)
+#define DRM_IOCTL_NOUVEAU_GEM_CPU_FINI       DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_FINI, struct drm_nouveau_gem_cpu_fini)
+#define DRM_IOCTL_NOUVEAU_GEM_INFO           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_INFO, struct drm_nouveau_gem_info)
+
 #endif /* __NOUVEAU_DRM_H__ */
index 0acaf8f9143751791f54b3a2da0be0c40653ad19..10f8b53bdd404d47c80cd8d9ee876c57b1c2b8a5 100644 (file)
@@ -547,8 +547,8 @@ typedef struct {
 #define DRM_IOCTL_RADEON_GEM_WAIT_IDLE DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_GEM_WAIT_IDLE, struct drm_radeon_gem_wait_idle)
 #define DRM_IOCTL_RADEON_CS            DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_CS, struct drm_radeon_cs)
 #define DRM_IOCTL_RADEON_INFO          DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INFO, struct drm_radeon_info)
-#define DRM_IOCTL_RADEON_SET_TILING    DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_TILING, struct drm_radeon_gem_set_tiling)
-#define DRM_IOCTL_RADEON_GET_TILING    DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_GET_TILING, struct drm_radeon_gem_get_tiling)
+#define DRM_IOCTL_RADEON_GEM_SET_TILING        DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_TILING, struct drm_radeon_gem_set_tiling)
+#define DRM_IOCTL_RADEON_GEM_GET_TILING        DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_GET_TILING, struct drm_radeon_gem_get_tiling)
 #define DRM_IOCTL_RADEON_GEM_BUSY      DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy)
 
 typedef struct drm_radeon_init {
index 8a576ef01821715099d7f16ef991571f290749ce..4863cf6bf96fb23c96d3eab9d7cde5d794fe89bc 100644 (file)
@@ -63,10 +63,10 @@ typedef struct _drm_savage_sarea {
 #define DRM_SAVAGE_BCI_EVENT_EMIT      0x02
 #define DRM_SAVAGE_BCI_EVENT_WAIT      0x03
 
-#define DRM_IOCTL_SAVAGE_INIT          DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_INIT, drm_savage_init_t)
-#define DRM_IOCTL_SAVAGE_CMDBUF                DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_CMDBUF, drm_savage_cmdbuf_t)
-#define DRM_IOCTL_SAVAGE_EVENT_EMIT    DRM_IOWR(DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_EMIT, drm_savage_event_emit_t)
-#define DRM_IOCTL_SAVAGE_EVENT_WAIT    DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_WAIT, drm_savage_event_wait_t)
+#define DRM_IOCTL_SAVAGE_BCI_INIT              DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_INIT, drm_savage_init_t)
+#define DRM_IOCTL_SAVAGE_BCI_CMDBUF            DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_CMDBUF, drm_savage_cmdbuf_t)
+#define DRM_IOCTL_SAVAGE_BCI_EVENT_EMIT        DRM_IOWR(DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_EMIT, drm_savage_event_emit_t)
+#define DRM_IOCTL_SAVAGE_BCI_EVENT_WAIT        DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_WAIT, drm_savage_event_wait_t)
 
 #define SAVAGE_DMA_PCI 1
 #define SAVAGE_DMA_AGP 3
index ccf94dc5acdf0ba8cc4f836968f103f542ff43b6..c227757feb06b4c22eb15ca3faf82c1dfa9f107e 100644 (file)
@@ -304,8 +304,8 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
                                OSC_PCI_EXPRESS_PME_CONTROL |           \
                                OSC_PCI_EXPRESS_AER_CONTROL |           \
                                OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL)
-
-extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 flags);
+extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
+                                            u32 *mask, u32 req);
 extern void acpi_early_init(void);
 
 #else  /* !CONFIG_ACPI */
index ed3e92e41c6e5683ad3dbb823e48259f5150ac33..0c991023ee475fad85136a04b00cb8d2699a382b 100644 (file)
@@ -578,7 +578,12 @@ struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
 void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
 int cgroup_scan_tasks(struct cgroup_scanner *scan);
 int cgroup_attach_task(struct cgroup *, struct task_struct *);
-int cgroup_attach_task_current_cg(struct task_struct *);
+int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
+
+static inline int cgroup_attach_task_current_cg(struct task_struct *tsk)
+{
+       return cgroup_attach_task_all(current, tsk);
+}
 
 /*
  * CSS ID is ID for cgroup_subsys_state structs under subsys. This only works
@@ -636,6 +641,11 @@ static inline int cgroupstats_build(struct cgroupstats *stats,
 }
 
 /* No cgroups - nothing to do */
+static inline int cgroup_attach_task_all(struct task_struct *from,
+                                        struct task_struct *t)
+{
+       return 0;
+}
 static inline int cgroup_attach_task_current_cg(struct task_struct *t)
 {
        return 0;
index f0949a57ca9d72cc7cb47df5717ab6fef250427b..63531a6b4d2a0e42759dbe1a224a103afec1327a 100644 (file)
                                 FAN_ALL_PERM_EVENTS |\
                                 FAN_Q_OVERFLOW)
 
-#define FANOTIFY_METADATA_VERSION      1
+#define FANOTIFY_METADATA_VERSION      2
 
 struct fanotify_event_metadata {
        __u32 event_len;
        __u32 vers;
-       __s32 fd;
        __u64 mask;
-       __s64 pid;
+       __s32 fd;
+       __s32 pid;
 } __attribute__ ((packed));
 
 struct fanotify_response {
@@ -95,11 +95,4 @@ struct fanotify_response {
                                (long)(meta)->event_len >= (long)FAN_EVENT_METADATA_LEN && \
                                (long)(meta)->event_len <= (long)(len))
 
-#ifdef __KERNEL__
-
-struct fanotify_wait {
-       struct fsnotify_event *event;
-       __s32 fd;
-};
-#endif /* __KERNEL__ */
 #endif /* _LINUX_FANOTIFY_H */
index ed36fb57c426b13ce2b9ec3df0574779607949f5..e40190d1687813d5c86e2a1b9e414e48fcf22595 100644 (file)
@@ -156,6 +156,7 @@ struct fsnotify_group {
                        struct mutex access_mutex;
                        struct list_head access_list;
                        wait_queue_head_t access_waitq;
+                       bool bypass_perm; /* protected by access_mutex */
 #endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */
                        int f_flags;
                } fanotify_data;
index ee3049cb9ba5782e27447d3984a914a2d39ae87b..52baa79d69a763f7f94f728b115fe0337190addd 100644 (file)
@@ -63,6 +63,9 @@
  *            IRQ lines will appear.  Similarly to gpio_base, the expander
  *            will create a block of irqs beginning at this number.
  *            This value is ignored if irq_summary is < 0.
+ * @reset_during_probe: If set to true, the driver will trigger a full
+ *                      reset of the chip at the beginning of the probe
+ *                      in order to place it in a known state.
  */
 struct sx150x_platform_data {
        unsigned gpio_base;
@@ -73,6 +76,7 @@ struct sx150x_platform_data {
        u16      io_polarity;
        int      irq_summary;
        unsigned irq_base;
+       bool     reset_during_probe;
 };
 
 #endif /* __LINUX_I2C_SX150X_H */
index c831467774d0e71963ccf774d310bd4f6dd91b8e..bed7a4682b90734935e3dd6680d5e646b0b895c0 100644 (file)
@@ -119,7 +119,7 @@ struct ethhdr {
        unsigned char   h_dest[ETH_ALEN];       /* destination eth addr */
        unsigned char   h_source[ETH_ALEN];     /* source ether addr    */
        __be16          h_proto;                /* packet type ID field */
-} __packed;
+} __attribute__((packed));
 
 #ifdef __KERNEL__
 #include <linux/skbuff.h>
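
This and the following header hunks spell out __attribute__((packed)) rather than the kernel-internal __packed shorthand, presumably because these structures are exported to user space, where the shorthand is not defined. What the attribute actually buys, shown with a generic two-field struct rather than any of the headers above:

#include <stdio.h>

struct hdr_packed {
        unsigned char code;
        unsigned short proto;
} __attribute__((packed));

struct hdr_default {
        unsigned char code;
        unsigned short proto;
};

int main(void)
{
        /* packed: 3 bytes, laid out exactly as declared;
         * default: typically 4 bytes, with alignment padding inserted */
        printf("packed=%zu default=%zu\n",
               sizeof(struct hdr_packed), sizeof(struct hdr_default));
        return 0;
}
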
index 9947c39e62f6fa49ea95505d0afc8053a27fbebb..e6dc11e7f9a54613f3700af8fe9d6557d8bd6135 100644 (file)
@@ -67,7 +67,7 @@ struct fddi_8022_1_hdr {
        __u8    dsap;                                   /* destination service access point */
        __u8    ssap;                                   /* source service access point */
        __u8    ctrl;                                   /* control byte #1 */
-} __packed;
+} __attribute__((packed));
 
 /* Define 802.2 Type 2 header */
 struct fddi_8022_2_hdr {
@@ -75,7 +75,7 @@ struct fddi_8022_2_hdr {
        __u8    ssap;                                   /* source service access point */
        __u8    ctrl_1;                                 /* control byte #1 */
        __u8    ctrl_2;                                 /* control byte #2 */
-} __packed;
+} __attribute__((packed));
 
 /* Define 802.2 SNAP header */
 #define FDDI_K_OUI_LEN 3
@@ -85,7 +85,7 @@ struct fddi_snap_hdr {
        __u8    ctrl;                                   /* always 0x03 */
        __u8    oui[FDDI_K_OUI_LEN];    /* organizational universal id */
        __be16  ethertype;                              /* packet type ID field */
-} __packed;
+} __attribute__((packed));
 
 /* Define FDDI LLC frame header */
 struct fddihdr {
@@ -98,7 +98,7 @@ struct fddihdr {
                struct fddi_8022_2_hdr          llc_8022_2;
                struct fddi_snap_hdr            llc_snap;
                } hdr;
-} __packed;
+} __attribute__((packed));
 
 #ifdef __KERNEL__
 #include <linux/netdevice.h>
index 5fe5f307c6f560f424f16252ad61a1a9bf4b7b45..cdc049f1829a8ca6ccc9aabb2103b1aacd3fb990 100644 (file)
@@ -104,7 +104,7 @@ struct hippi_fp_hdr {
        __be32          fixed;
 #endif
        __be32          d2_size;
-} __packed;
+} __attribute__((packed));
 
 struct hippi_le_hdr {
 #if defined (__BIG_ENDIAN_BITFIELD)
@@ -129,7 +129,7 @@ struct hippi_le_hdr {
        __u8            daddr[HIPPI_ALEN];
        __u16           locally_administered;
        __u8            saddr[HIPPI_ALEN];
-} __packed;
+} __attribute__((packed));
 
 #define HIPPI_OUI_LEN  3
 /*
@@ -142,12 +142,12 @@ struct hippi_snap_hdr {
        __u8    ctrl;                   /* always 0x03 */
        __u8    oui[HIPPI_OUI_LEN];     /* organizational universal id (zero)*/
        __be16  ethertype;              /* packet type ID field */
-} __packed;
+} __attribute__((packed));
 
 struct hippi_hdr {
        struct hippi_fp_hdr     fp;
        struct hippi_le_hdr     le;
        struct hippi_snap_hdr   snap;
-} __packed;
+} __attribute__((packed));
 
 #endif /* _LINUX_IF_HIPPI_H */
index 1925e0c3f1623e0d515a22ac6cbc01074fd83e68..27741e05446f97dfad3e5f35e83f328d02520588 100644 (file)
@@ -59,7 +59,7 @@ struct sockaddr_pppox {
        union{ 
                struct pppoe_addr       pppoe; 
        }sa_addr; 
-} __packed;
+} __attribute__((packed));
 
 /* The use of the above union isn't viable because the size of this
  * struct must stay fixed over time -- applications use sizeof(struct
@@ -70,7 +70,7 @@ struct sockaddr_pppol2tp {
        sa_family_t     sa_family;      /* address family, AF_PPPOX */
        unsigned int    sa_protocol;    /* protocol identifier */
        struct pppol2tp_addr pppol2tp;
-} __packed;
+} __attribute__((packed));
 
 /* The L2TPv3 protocol changes tunnel and session ids from 16 to 32
  * bits. So we need a different sockaddr structure.
@@ -79,7 +79,7 @@ struct sockaddr_pppol2tpv3 {
        sa_family_t     sa_family;      /* address family, AF_PPPOX */
        unsigned int    sa_protocol;    /* protocol identifier */
        struct pppol2tpv3_addr pppol2tp;
-} __packed;
+} __attribute__((packed));
 
 /*********************************************************************
  *
@@ -101,7 +101,7 @@ struct pppoe_tag {
        __be16 tag_type;
        __be16 tag_len;
        char tag_data[0];
-} __attribute ((packed));
+} __attribute__ ((packed));
 
 /* Tag identifiers */
 #define PTT_EOL                __cpu_to_be16(0x0000)
@@ -129,7 +129,7 @@ struct pppoe_hdr {
        __be16 sid;
        __be16 length;
        struct pppoe_tag tag[0];
-} __packed;
+} __attribute__((packed));
 
 /* Length of entire PPPoE + PPP header */
 #define PPPOE_SES_HLEN 8
diff --git a/include/linux/intel-gtt.h b/include/linux/intel-gtt.h
new file mode 100644 (file)
index 0000000..1d19ab2
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+ * Common Intel AGPGART and GTT definitions.
+ */
+#ifndef _INTEL_GTT_H
+#define _INTEL_GTT_H
+
+#include <linux/agp_backend.h>
+
+/* This is for Intel-only GTT controls.
+ *
+ * Sandybridge: AGP_USER_CACHED_MEMORY defaults to LLC only
+ */
+
+#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2)
+#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4)
+
+/* flag for GFDT type */
+#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
+
+#endif
index 0a6b3d5c490ccfcd3ab9a3dbdba9b41f88e1c4b8..7fb59279373823339f6fdda86158952e3e6fac45 100644 (file)
@@ -79,7 +79,7 @@ io_mapping_free(struct io_mapping *mapping)
 }
 
 /* Atomic map/unmap */
-static inline void *
+static inline void __iomem *
 io_mapping_map_atomic_wc(struct io_mapping *mapping,
                         unsigned long offset,
                         int slot)
@@ -94,12 +94,12 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping,
 }
 
 static inline void
-io_mapping_unmap_atomic(void *vaddr, int slot)
+io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
 {
        iounmap_atomic(vaddr, slot);
 }
 
-static inline void *
+static inline void __iomem *
 io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
 {
        resource_size_t phys_addr;
@@ -111,7 +111,7 @@ io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
 }
 
 static inline void
-io_mapping_unmap(void *vaddr)
+io_mapping_unmap(void __iomem *vaddr)
 {
        iounmap(vaddr);
 }
@@ -125,38 +125,38 @@ struct io_mapping;
 static inline struct io_mapping *
 io_mapping_create_wc(resource_size_t base, unsigned long size)
 {
-       return (struct io_mapping *) ioremap_wc(base, size);
+       return (struct io_mapping __force *) ioremap_wc(base, size);
 }
 
 static inline void
 io_mapping_free(struct io_mapping *mapping)
 {
-       iounmap(mapping);
+       iounmap((void __force __iomem *) mapping);
 }
 
 /* Atomic map/unmap */
-static inline void *
+static inline void __iomem *
 io_mapping_map_atomic_wc(struct io_mapping *mapping,
                         unsigned long offset,
                         int slot)
 {
-       return ((char *) mapping) + offset;
+       return ((char __force __iomem *) mapping) + offset;
 }
 
 static inline void
-io_mapping_unmap_atomic(void *vaddr, int slot)
+io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
 {
 }
 
 /* Non-atomic map/unmap */
-static inline void *
+static inline void __iomem *
 io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
 {
-       return ((char *) mapping) + offset;
+       return ((char __force __iomem *) mapping) + offset;
 }
 
 static inline void
-io_mapping_unmap(void *vaddr)
+io_mapping_unmap(void __iomem *vaddr)
 {
 }
 
index ab9e9e89e4074318405a595c1147642e344255c0..e62683ba88e6824e72b3e8c81c7315998868f606 100644 (file)
@@ -58,7 +58,7 @@ struct ipv6_opt_hdr {
        /* 
         * TLV encoded option data follows.
         */
-} __packed;    /* required for some archs */
+} __attribute__((packed));     /* required for some archs */
 
 #define ipv6_destopt_hdr ipv6_opt_hdr
 #define ipv6_hopopt_hdr  ipv6_opt_hdr
@@ -99,7 +99,7 @@ struct ipv6_destopt_hao {
        __u8                    type;
        __u8                    length;
        struct in6_addr         addr;
-} __packed;
+} __attribute__((packed));
 
 /*
  *     IPv6 fixed header
index 4aa95f203f3ee773a6ab4bbdbb632efaf970785e..62dbee554f608c91fe7b2b3bf01f390260821c52 100644 (file)
@@ -214,7 +214,7 @@ __kfifo_must_check_helper(unsigned int val)
  */
 #define kfifo_reset(fifo) \
 (void)({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        __tmp->kfifo.in = __tmp->kfifo.out = 0; \
 })
 
@@ -228,7 +228,7 @@ __kfifo_must_check_helper(unsigned int val)
  */
 #define kfifo_reset_out(fifo)  \
 (void)({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        __tmp->kfifo.out = __tmp->kfifo.in; \
 })
 
@@ -238,7 +238,7 @@ __kfifo_must_check_helper(unsigned int val)
  */
 #define kfifo_len(fifo) \
 ({ \
-       typeof(fifo + 1) __tmpl = (fifo); \
+       typeof((fifo) + 1) __tmpl = (fifo); \
        __tmpl->kfifo.in - __tmpl->kfifo.out; \
 })
 
@@ -248,7 +248,7 @@ __kfifo_must_check_helper(unsigned int val)
  */
 #define        kfifo_is_empty(fifo) \
 ({ \
-       typeof(fifo + 1) __tmpq = (fifo); \
+       typeof((fifo) + 1) __tmpq = (fifo); \
        __tmpq->kfifo.in == __tmpq->kfifo.out; \
 })
 
@@ -258,7 +258,7 @@ __kfifo_must_check_helper(unsigned int val)
  */
 #define        kfifo_is_full(fifo) \
 ({ \
-       typeof(fifo + 1) __tmpq = (fifo); \
+       typeof((fifo) + 1) __tmpq = (fifo); \
        kfifo_len(__tmpq) > __tmpq->kfifo.mask; \
 })
 
@@ -269,7 +269,7 @@ __kfifo_must_check_helper(unsigned int val)
 #define        kfifo_avail(fifo) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmpq = (fifo); \
+       typeof((fifo) + 1) __tmpq = (fifo); \
        const size_t __recsize = sizeof(*__tmpq->rectype); \
        unsigned int __avail = kfifo_size(__tmpq) - kfifo_len(__tmpq); \
        (__recsize) ? ((__avail <= __recsize) ? 0 : \
@@ -284,7 +284,7 @@ __kfifo_must_check_helper( \
  */
 #define        kfifo_skip(fifo) \
 (void)({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
        if (__recsize) \
@@ -302,7 +302,7 @@ __kfifo_must_check_helper( \
 #define kfifo_peek_len(fifo) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
        (!__recsize) ? kfifo_len(__tmp) * sizeof(*__tmp->type) : \
@@ -325,7 +325,7 @@ __kfifo_must_check_helper( \
 #define kfifo_alloc(fifo, size, gfp_mask) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
        __is_kfifo_ptr(__tmp) ? \
        __kfifo_alloc(__kfifo, size, sizeof(*__tmp->type), gfp_mask) : \
@@ -339,7 +339,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_free(fifo) \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
        if (__is_kfifo_ptr(__tmp)) \
                __kfifo_free(__kfifo); \
@@ -358,7 +358,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_init(fifo, buffer, size) \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
        __is_kfifo_ptr(__tmp) ? \
        __kfifo_init(__kfifo, buffer, size, sizeof(*__tmp->type)) : \
@@ -379,8 +379,8 @@ __kfifo_must_check_helper( \
  */
 #define        kfifo_put(fifo, val) \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
-       typeof(val + 1) __val = (val); \
+       typeof((fifo) + 1) __tmp = (fifo); \
+       typeof((val) + 1) __val = (val); \
        unsigned int __ret; \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -421,8 +421,8 @@ __kfifo_must_check_helper( \
 #define        kfifo_get(fifo, val) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
-       typeof(val + 1) __val = (val); \
+       typeof((fifo) + 1) __tmp = (fifo); \
+       typeof((val) + 1) __val = (val); \
        unsigned int __ret; \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -462,8 +462,8 @@ __kfifo_must_check_helper( \
 #define        kfifo_peek(fifo, val) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
-       typeof(val + 1) __val = (val); \
+       typeof((fifo) + 1) __tmp = (fifo); \
+       typeof((val) + 1) __val = (val); \
        unsigned int __ret; \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -501,8 +501,8 @@ __kfifo_must_check_helper( \
  */
 #define        kfifo_in(fifo, buf, n) \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
-       typeof(buf + 1) __buf = (buf); \
+       typeof((fifo) + 1) __tmp = (fifo); \
+       typeof((buf) + 1) __buf = (buf); \
        unsigned long __n = (n); \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -554,8 +554,8 @@ __kfifo_must_check_helper( \
 #define        kfifo_out(fifo, buf, n) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
-       typeof(buf + 1) __buf = (buf); \
+       typeof((fifo) + 1) __tmp = (fifo); \
+       typeof((buf) + 1) __buf = (buf); \
        unsigned long __n = (n); \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -611,7 +611,7 @@ __kfifo_must_check_helper( \
 #define        kfifo_from_user(fifo, from, len, copied) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        const void __user *__from = (from); \
        unsigned int __len = (len); \
        unsigned int *__copied = (copied); \
@@ -639,7 +639,7 @@ __kfifo_must_check_helper( \
 #define        kfifo_to_user(fifo, to, len, copied) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        void __user *__to = (to); \
        unsigned int __len = (len); \
        unsigned int *__copied = (copied); \
@@ -666,7 +666,7 @@ __kfifo_must_check_helper( \
  */
 #define        kfifo_dma_in_prepare(fifo, sgl, nents, len) \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        struct scatterlist *__sgl = (sgl); \
        int __nents = (nents); \
        unsigned int __len = (len); \
@@ -690,7 +690,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_dma_in_finish(fifo, len) \
 (void)({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        unsigned int __len = (len); \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -717,7 +717,7 @@ __kfifo_must_check_helper( \
  */
 #define        kfifo_dma_out_prepare(fifo, sgl, nents, len) \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo);  \
        struct scatterlist *__sgl = (sgl); \
        int __nents = (nents); \
        unsigned int __len = (len); \
@@ -741,7 +741,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_dma_out_finish(fifo, len) \
 (void)({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        unsigned int __len = (len); \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -766,8 +766,8 @@ __kfifo_must_check_helper( \
 #define        kfifo_out_peek(fifo, buf, n) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
-       typeof(buf + 1) __buf = (buf); \
+       typeof((fifo) + 1) __tmp = (fifo); \
+       typeof((buf) + 1) __buf = (buf); \
        unsigned long __n = (n); \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
index cf343a852534bee2f3ed02ffd9cce21210caac8f..7950a37a71466b773ac72d1880e834d20b1d0c14 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/compiler.h>
 #include <linux/spinlock.h>
 #include <linux/kref.h>
+#include <linux/kobject_ns.h>
 #include <linux/kernel.h>
 #include <linux/wait.h>
 #include <asm/atomic.h>
@@ -136,42 +137,8 @@ struct kobj_attribute {
 
 extern const struct sysfs_ops kobj_sysfs_ops;
 
-/*
- * Namespace types which are used to tag kobjects and sysfs entries.
- * Network namespace will likely be the first.
- */
-enum kobj_ns_type {
-       KOBJ_NS_TYPE_NONE = 0,
-       KOBJ_NS_TYPE_NET,
-       KOBJ_NS_TYPES
-};
-
 struct sock;
 
-/*
- * Callbacks so sysfs can determine namespaces
- *   @current_ns: return calling task's namespace
- *   @netlink_ns: return namespace to which a sock belongs (right?)
- *   @initial_ns: return the initial namespace (i.e. init_net_ns)
- */
-struct kobj_ns_type_operations {
-       enum kobj_ns_type type;
-       const void *(*current_ns)(void);
-       const void *(*netlink_ns)(struct sock *sk);
-       const void *(*initial_ns)(void);
-};
-
-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
-int kobj_ns_type_registered(enum kobj_ns_type type);
-const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent);
-const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj);
-
-const void *kobj_ns_current(enum kobj_ns_type type);
-const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk);
-const void *kobj_ns_initial(enum kobj_ns_type type);
-void kobj_ns_exit(enum kobj_ns_type type, const void *ns);
-
-
 /**
  * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
  *
diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
new file mode 100644 (file)
index 0000000..82cb5bf
--- /dev/null
+++ b/include/linux/kobject_ns.h
@@ -0,0 +1,56 @@
+/* Kernel object name space definitions
+ *
+ * Copyright (c) 2002-2003 Patrick Mochel
+ * Copyright (c) 2002-2003 Open Source Development Labs
+ * Copyright (c) 2006-2008 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (c) 2006-2008 Novell Inc.
+ *
+ * Split from kobject.h by David Howells (dhowells@redhat.com)
+ *
+ * This file is released under the GPLv2.
+ *
+ * Please read Documentation/kobject.txt before using the kobject
+ * interface, ESPECIALLY the parts about reference counts and object
+ * destructors.
+ */
+
+#ifndef _LINUX_KOBJECT_NS_H
+#define _LINUX_KOBJECT_NS_H
+
+struct sock;
+struct kobject;
+
+/*
+ * Namespace types which are used to tag kobjects and sysfs entries.
+ * Network namespace will likely be the first.
+ */
+enum kobj_ns_type {
+       KOBJ_NS_TYPE_NONE = 0,
+       KOBJ_NS_TYPE_NET,
+       KOBJ_NS_TYPES
+};
+
+/*
+ * Callbacks so sysfs can determine namespaces
+ *   @current_ns: return calling task's namespace
+ *   @netlink_ns: return namespace to which a sock belongs (right?)
+ *   @initial_ns: return the initial namespace (i.e. init_net_ns)
+ */
+struct kobj_ns_type_operations {
+       enum kobj_ns_type type;
+       const void *(*current_ns)(void);
+       const void *(*netlink_ns)(struct sock *sk);
+       const void *(*initial_ns)(void);
+};
+
+int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
+int kobj_ns_type_registered(enum kobj_ns_type type);
+const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent);
+const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj);
+
+const void *kobj_ns_current(enum kobj_ns_type type);
+const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk);
+const void *kobj_ns_initial(enum kobj_ns_type type);
+void kobj_ns_exit(enum kobj_ns_type type, const void *ns);
+
+#endif /* _LINUX_KOBJECT_NS_H */
index 74d691ee9121c5bb3aa8336d7eeffcc03d88cc46..3319a6967626e02f91c340080b21950a8310a67f 100644 (file)
@@ -16,6 +16,9 @@
 struct stable_node;
 struct mem_cgroup;
 
+struct page *ksm_does_need_to_copy(struct page *page,
+                       struct vm_area_struct *vma, unsigned long address);
+
 #ifdef CONFIG_KSM
 int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
                unsigned long end, int advice, unsigned long *vm_flags);
@@ -70,19 +73,14 @@ static inline void set_page_stable_node(struct page *page,
  * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
  * but what if the vma was unmerged while the page was swapped out?
  */
-struct page *ksm_does_need_to_copy(struct page *page,
-                       struct vm_area_struct *vma, unsigned long address);
-static inline struct page *ksm_might_need_to_copy(struct page *page,
+static inline int ksm_might_need_to_copy(struct page *page,
                        struct vm_area_struct *vma, unsigned long address)
 {
        struct anon_vma *anon_vma = page_anon_vma(page);
 
-       if (!anon_vma ||
-           (anon_vma->root == vma->anon_vma->root &&
-            page->index == linear_page_index(vma, address)))
-               return page;
-
-       return ksm_does_need_to_copy(page, vma, address);
+       return anon_vma &&
+               (anon_vma->root != vma->anon_vma->root ||
+                page->index != linear_page_index(vma, address));
 }
 
 int page_referenced_ksm(struct page *page,
@@ -115,10 +113,10 @@ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
        return 0;
 }
 
-static inline struct page *ksm_might_need_to_copy(struct page *page,
+static inline int ksm_might_need_to_copy(struct page *page,
                        struct vm_area_struct *vma, unsigned long address)
 {
-       return page;
+       return 0;
 }
 
 static inline int page_referenced_ksm(struct page *page,
index b288cb713b902182cca71156e5d9f75c7452a117..f549056fb20bd5533555918cc1b1f9805c2cdcc3 100644 (file)
        int i;                                                          \
        preempt_disable();                                              \
        rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);           \
-       for_each_online_cpu(i) {                                        \
+       for_each_possible_cpu(i) {                                      \
                arch_spinlock_t *lock;                                  \
                lock = &per_cpu(name##_lock, i);                        \
                arch_spin_lock(lock);                                   \
  void name##_global_unlock(void) {                                     \
        int i;                                                          \
        rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);              \
-       for_each_online_cpu(i) {                                        \
+       for_each_possible_cpu(i) {                                      \
                arch_spinlock_t *lock;                                  \
                lock = &per_cpu(name##_lock, i);                        \
                arch_spin_unlock(lock);                                 \
index f010f18a0f863f39e139d65469debae94232b259..45fb2967b66d6949094289a467ae120d12014c0d 100644 (file)
@@ -335,6 +335,7 @@ enum {
        ATA_EHI_HOTPLUGGED      = (1 << 0),  /* could have been hotplugged */
        ATA_EHI_NO_AUTOPSY      = (1 << 2),  /* no autopsy */
        ATA_EHI_QUIET           = (1 << 3),  /* be quiet */
+       ATA_EHI_NO_RECOVERY     = (1 << 4),  /* no recovery */
 
        ATA_EHI_DID_SOFTRESET   = (1 << 16), /* already soft-reset this port */
        ATA_EHI_DID_HARDRESET   = (1 << 17), /* already soft-reset this port */
@@ -723,6 +724,7 @@ struct ata_port {
        struct ata_ioports      ioaddr; /* ATA cmd/ctl/dma register blocks */
        u8                      ctl;    /* cache of ATA control register */
        u8                      last_ctl;       /* Cache last written value */
+       struct ata_link*        sff_pio_task_link; /* link currently used */
        struct delayed_work     sff_pio_task;
 #ifdef CONFIG_ATA_BMDMA
        struct ata_bmdma_prd    *bmdma_prd;     /* BMDMA SG list */
@@ -1594,7 +1596,7 @@ extern void ata_sff_irq_on(struct ata_port *ap);
 extern void ata_sff_irq_clear(struct ata_port *ap);
 extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
                            u8 status, int in_wq);
-extern void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay);
+extern void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay);
 extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc);
 extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc);
 extern unsigned int ata_sff_port_intr(struct ata_port *ap,
index bafffc737903971934037fc334f7d4158d9eda48..18fd13028ba1aab8207a1baa9c5b1527435d3a01 100644 (file)
@@ -33,6 +33,7 @@
 #define MWAVE_MINOR            219     /* ACP/Mwave Modem */
 #define MPT_MINOR              220
 #define MPT2SAS_MINOR          221
+#define UINPUT_MINOR           223
 #define HPET_MINOR             228
 #define FUSE_MINOR             229
 #define KVM_MINOR              232
index 709f6728fc90e223c9f41092d626b7b7fc4458bd..74949fbef8c608b9c5ef6dab3b508041a11243f3 100644 (file)
@@ -78,7 +78,11 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_MAYSHARE    0x00000080
 
 #define VM_GROWSDOWN   0x00000100      /* general info on the segment */
+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
 #define VM_GROWSUP     0x00000200
+#else
+#define VM_GROWSUP     0x00000000
+#endif
 #define VM_PFNMAP      0x00000400      /* Page-ranges managed without "struct page", just pure PFN */
 #define VM_DENYWRITE   0x00000800      /* ETXTBSY on write attempts.. */
 
@@ -860,6 +864,12 @@ int set_page_dirty(struct page *page);
 int set_page_dirty_lock(struct page *page);
 int clear_page_dirty_for_io(struct page *page);
 
+/* Is the vma a continuation of the stack vma above it? */
+static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
+{
+       return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
+}
+
 extern unsigned long move_page_tables(struct vm_area_struct *vma,
                unsigned long old_addr, struct vm_area_struct *new_vma,
                unsigned long new_addr, unsigned long len);
@@ -1330,8 +1340,10 @@ unsigned long ra_submit(struct file_ra_state *ra,
 
 /* Do stack extension */
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
-#ifdef CONFIG_IA64
+#if VM_GROWSUP
 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
+#else
+  #define expand_upwards(vma, address) do { } while (0)
 #endif
 extern int expand_stack_downwards(struct vm_area_struct *vma,
                                  unsigned long address);
@@ -1357,7 +1369,15 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
        return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 }
 
+#ifdef CONFIG_MMU
 pgprot_t vm_get_page_prot(unsigned long vm_flags);
+#else
+static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+       return __pgprot(0);
+}
+#endif
+
 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
                        unsigned long pfn, unsigned long size, pgprot_t);
index 329a8faa6e37bb32bd6e65f1b8fd758ac45baa36..245cdacee5443791eb5418d10bde3daa190351a9 100644 (file)
@@ -38,6 +38,8 @@
  *      [8:0] Byte/block count
  */
 
+#define R4_MEMORY_PRESENT (1 << 27)
+
 /*
   SDIO status in R5
   Type
index 6e6e62648a4d4a6d792fe207d42105563b112aaa..3984c4eb41fdc9c85759dbffe308df5a806391f4 100644 (file)
@@ -283,6 +283,13 @@ struct zone {
        /* zone watermarks, access with *_wmark_pages(zone) macros */
        unsigned long watermark[NR_WMARK];
 
+       /*
+        * When free pages are below this point, additional steps are taken
+        * when reading the number of free pages to avoid per-cpu counter
+        * drift allowing watermarks to be breached
+        */
+       unsigned long percpu_drift_mark;
+
        /*
         * We don't know if the memory that we're going to allocate will be freeable
         * or/and it will be released eventually, so to avoid totally wasting several
@@ -441,6 +448,12 @@ static inline int zone_is_oom_locked(const struct zone *zone)
        return test_bit(ZONE_OOM_LOCKED, &zone->flags);
 }
 
+#ifdef CONFIG_SMP
+unsigned long zone_nr_free_pages(struct zone *zone);
+#else
+#define zone_nr_free_pages(zone) zone_page_state(zone, NR_FREE_PAGES)
+#endif /* CONFIG_SMP */
+
 /*
  * The "priority" of VM scanning is how much of the queues we will scan in one
  * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
index 878cab4f5fcc5db95585184c22aea5d905a34295..f363bc8fdc74c821c99aa59d5bfcb9554c012c9a 100644 (file)
@@ -78,6 +78,14 @@ struct mutex_waiter {
 # include <linux/mutex-debug.h>
 #else
 # define __DEBUG_MUTEX_INITIALIZER(lockname)
+/**
+ * mutex_init - initialize the mutex
+ * @mutex: the mutex to be initialized
+ *
+ * Initialize the mutex to unlocked state.
+ *
+ * It is not allowed to initialize an already locked mutex.
+ */
 # define mutex_init(mutex) \
 do {                                                   \
        static struct lock_class_key __key;             \
index bb58854a806196d8dea6ab9599daa244012ef5d8..d146ca10c0f52449dd528e9fec61be47e0c85221 100644 (file)
@@ -88,7 +88,7 @@ struct nbd_request {
        char handle[8];
        __be64 from;
        __be32 len;
-} __packed;
+} __attribute__((packed));
 
 /*
  * This is the reply packet that nbd-server sends back to the client after
index 3ace8370e61e9855cfda5d7237362c1538c701a6..99f0adeeb3f348e58c65312133217055a6ccf3c0 100644 (file)
@@ -27,7 +27,7 @@ struct ncp_request_header {
        __u8 conn_high;
        __u8 function;
        __u8 data[0];
-} __packed;
+} __attribute__((packed));
 
 #define NCP_REPLY                (0x3333)
 #define NCP_WATCHDOG            (0x3E3E)
@@ -42,7 +42,7 @@ struct ncp_reply_header {
        __u8 completion_code;
        __u8 connection_state;
        __u8 data[0];
-} __packed;
+} __attribute__((packed));
 
 #define NCP_VOLNAME_LEN (16)
 #define NCP_NUMBER_OF_VOLUMES (256)
@@ -158,7 +158,7 @@ struct nw_info_struct {
 #ifdef __KERNEL__
        struct nw_nfs_info nfs;
 #endif
-} __packed;
+} __attribute__((packed));
 
 /* modify mask - use with MODIFY_DOS_INFO structure */
 #define DM_ATTRIBUTES            (cpu_to_le32(0x02))
@@ -190,12 +190,12 @@ struct nw_modify_dos_info {
        __u16 inheritanceGrantMask;
        __u16 inheritanceRevokeMask;
        __u32 maximumSpace;
-} __packed;
+} __attribute__((packed));
 
 struct nw_search_sequence {
        __u8 volNumber;
        __u32 dirBase;
        __u32 sequence;
-} __packed;
+} __attribute__((packed));
 
 #endif                         /* _LINUX_NCP_H */
index 3e1aa1be942ef2297637d0bf4a778eec605d7040..208ae938733143ce0ba2117378423d443a3d8312 100644 (file)
@@ -39,7 +39,7 @@ struct idletimer_tg_info {
        char label[MAX_IDLETIMER_LABEL_SIZE];
 
        /* for kernel module internal use only */
-       struct idletimer_tg *timer __attribute((aligned(8)));
+       struct idletimer_tg *timer __attribute__((aligned(8)));
 };
 
 #endif
index 1167aeb7a34793ff288aff2b7190a966378e5080..eff34ac1880883f70d88282a8ac881a1e8bff8a5 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _XT_IPVS_H
 #define _XT_IPVS_H
 
+#include <linux/types.h>
+
 enum {
        XT_IPVS_IPVS_PROPERTY = 1 << 0, /* all other options imply this one */
        XT_IPVS_PROTO =         1 << 1,
index b1d17956a1536ff9f1e913cc6f33410f2e54cfa8..c8d95e369ff441fe54760192ce4a0d9922f0508a 100644 (file)
@@ -1214,6 +1214,9 @@ static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
                                                unsigned int devfn)
 { return NULL; }
 
+static inline int pci_domain_nr(struct pci_bus *bus)
+{ return 0; }
+
 #define dev_is_pci(d) (false)
 #define dev_is_pf(d) (false)
 #define dev_num_vf(d) (0)
index f6a3b2d36cadde8e1a6684d1c9fdc408eebef9b2..10d33309e9a61351aa4d3597540f201c97dc9294 100644 (file)
 #define PCI_DEVICE_ID_P2010            0x0079
 #define PCI_DEVICE_ID_P1020E           0x0100
 #define PCI_DEVICE_ID_P1020            0x0101
+#define PCI_DEVICE_ID_P1021E           0x0102
+#define PCI_DEVICE_ID_P1021            0x0103
 #define PCI_DEVICE_ID_P1011E           0x0108
 #define PCI_DEVICE_ID_P1011            0x0109
 #define PCI_DEVICE_ID_P1022E           0x0110
index b8b9084527b18029f602c02fe9db5d7c5754364b..49466b13c5c6b310f36b31c052573864df6f02a0 100644 (file)
@@ -149,7 +149,7 @@ extern void __init percpu_init_late(void);
 
 #else /* CONFIG_SMP */
 
-#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
+#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); })
 
 /* can't distinguish from other static vars, always false */
 static inline bool is_kernel_percpu_address(unsigned long addr)
index 24426c3d6b5ab34a463cf3ee650b497d9e545522..76edadf046d3d3b68215c23ef2a30ce18a86992a 100644 (file)
@@ -56,7 +56,7 @@ struct phonethdr {
        __be16  pn_length;
        __u8    pn_robj;
        __u8    pn_sobj;
-} __packed;
+} __attribute__((packed));
 
 /* Common Phonet payload header */
 struct phonetmsg {
@@ -98,7 +98,7 @@ struct sockaddr_pn {
        __u8 spn_dev;
        __u8 spn_resource;
        __u8 spn_zero[sizeof(struct sockaddr) - sizeof(sa_family_t) - 3];
-} __packed;
+} __attribute__((packed));
 
 /* Well known address */
 #define PN_DEV_PC      0x10
diff --git a/include/linux/pxa168_eth.h b/include/linux/pxa168_eth.h
new file mode 100644 (file)
index 0000000..18d75e7
--- /dev/null
+++ b/include/linux/pxa168_eth.h
@@ -0,0 +1,30 @@
+/*
+ *pxa168 ethernet platform device data definition file.
+ */
+#ifndef __LINUX_PXA168_ETH_H
+#define __LINUX_PXA168_ETH_H
+
+struct pxa168_eth_platform_data {
+       int     port_number;
+       int     phy_addr;
+
+       /*
+        * If speed is 0, then speed and duplex are autonegotiated.
+        */
+       int     speed;          /* 0, SPEED_10, SPEED_100 */
+       int     duplex;         /* DUPLEX_HALF or DUPLEX_FULL */
+
+       /*
+        * Override default RX/TX queue sizes if nonzero.
+        */
+       int     rx_queue_size;
+       int     tx_queue_size;
+
+       /*
+        * init callback is used for board specific initialization
+        * e.g on Aspenite its used to initialize the PHY transceiver.
+        */
+       int (*init)(void);
+};
+
+#endif /* __LINUX_PXA168_ETH_H */
index 4f82326eb2945f2cd275bbcff64151a06ca6ccb4..08c32e4f261aca004ac06d895a1a8bbd73cfd887 100644 (file)
@@ -81,7 +81,7 @@ struct rfkill_event {
        __u8  type;
        __u8  op;
        __u8  soft, hard;
-} __packed;
+} __attribute__((packed));
 
 /*
  * We are planning to be backward and forward compatible with changes
index 7415839ac890f538b88611f7843b5b770e73292f..5310d27abd2a503ad523059ea4832f34ecfbb194 100644 (file)
@@ -26,6 +26,9 @@ struct semaphore {
        .wait_list      = LIST_HEAD_INIT((name).wait_list),             \
 }
 
+#define DEFINE_SEMAPHORE(name) \
+       struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
+
 #define DECLARE_MUTEX(name)    \
        struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
 
index 1ebc694a6d521575165ad8443a472e0268e36dff..ef914061511ec833c706a38ddf47cac8ad52d93a 100644 (file)
@@ -77,8 +77,7 @@ struct serial_struct {
 #define PORT_16654     11
 #define PORT_16850     12
 #define PORT_RSA       13      /* RSA-DV II/S card */
-#define PORT_U6_16550A 14
-#define PORT_MAX       14
+#define PORT_MAX       13
 
 #define SERIAL_IO_PORT 0
 #define SERIAL_IO_HUB6 1
index 3c2ad99fed347e282d913ea2ad43082b6606b248..563e234009130ec557d1d6cb7d58b1875488d035 100644 (file)
@@ -44,7 +44,8 @@
 #define PORT_RM9000    16      /* PMC-Sierra RM9xxx internal UART */
 #define PORT_OCTEON    17      /* Cavium OCTEON internal UART */
 #define PORT_AR7       18      /* Texas Instruments AR7 internal UART */
-#define PORT_MAX_8250  18      /* max port ID */
+#define PORT_U6_16550A 19      /* ST-Ericsson U6xxx internal UART */
+#define PORT_MAX_8250  19      /* max port ID */
 
 /*
  * ARM specific type numbers.  These are not currently guaranteed
@@ -465,7 +466,7 @@ uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
 #ifdef SUPPORT_SYSRQ
        if (port->sysrq) {
                if (ch && time_before(jiffies, port->sysrq)) {
-                       handle_sysrq(ch, port->state->port.tty);
+                       handle_sysrq(ch);
                        port->sysrq = 0;
                        return 1;
                }
index 2fee51a11b7399aea7ea7427ae32777b45cf8903..7cdd63366f883a164a7f5d5b74ff8882c62b4f8c 100644 (file)
@@ -19,6 +19,7 @@ struct bio;
 #define SWAP_FLAG_PREFER       0x8000  /* set if swap priority specified */
 #define SWAP_FLAG_PRIO_MASK    0x7fff
 #define SWAP_FLAG_PRIO_SHIFT   0
+#define SWAP_FLAG_DISCARD      0x10000 /* discard swap cluster after use */
 
 static inline int current_is_kswapd(void)
 {
@@ -142,7 +143,7 @@ struct swap_extent {
 enum {
        SWP_USED        = (1 << 0),     /* is slot in swap_info[] used? */
        SWP_WRITEOK     = (1 << 1),     /* ok to write to this swap?    */
-       SWP_DISCARDABLE = (1 << 2),     /* blkdev supports discard */
+       SWP_DISCARDABLE = (1 << 2),     /* swapon+blkdev support discard */
        SWP_DISCARDING  = (1 << 3),     /* now discarding a free cluster */
        SWP_SOLIDSTATE  = (1 << 4),     /* blkdev seeks are cheap */
        SWP_CONTINUED   = (1 << 5),     /* swap_map has count continuation */
@@ -315,6 +316,7 @@ extern long nr_swap_pages;
 extern long total_swap_pages;
 extern void si_swapinfo(struct sysinfo *);
 extern swp_entry_t get_swap_page(void);
+extern swp_entry_t get_swap_page_of_type(int);
 extern int valid_swaphandles(swp_entry_t, unsigned long *);
 extern int add_swap_count_continuation(swp_entry_t, gfp_t);
 extern void swap_shmem_alloc(swp_entry_t);
@@ -331,13 +333,6 @@ extern int reuse_swap_page(struct page *);
 extern int try_to_free_swap(struct page *);
 struct backing_dev_info;
 
-#ifdef CONFIG_HIBERNATION
-void hibernation_freeze_swap(void);
-void hibernation_thaw_swap(void);
-swp_entry_t get_swap_for_hibernation(int type);
-void swap_free_for_hibernation(swp_entry_t val);
-#endif
-
 /* linux/mm/thrash.c */
 extern struct mm_struct *swap_token_mm;
 extern void grab_swap_token(struct mm_struct *);
index 3c92121ba9afb3f75700e540e24c52f3aae8b974..96eb576d82fdadbd6a3cce0253325c733f6603f3 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/errno.h>
 #include <linux/list.h>
 #include <linux/lockdep.h>
+#include <linux/kobject_ns.h>
 #include <asm/atomic.h>
 
 struct kobject;
index 609e8ca5f53429bddfc2c29aee5e0411cfe464af..387fa7d05c982b758942f83395e328949324fc1a 100644 (file)
@@ -15,9 +15,7 @@
 #define _LINUX_SYSRQ_H
 
 #include <linux/errno.h>
-
-struct pt_regs;
-struct tty_struct;
+#include <linux/types.h>
 
 /* Possible values of bitmask for enabling sysrq functions */
 /* 0x0001 is reserved for enable everything */
@@ -31,7 +29,7 @@ struct tty_struct;
 #define SYSRQ_ENABLE_RTNICE    0x0100
 
 struct sysrq_key_op {
-       void (*handler)(int, struct tty_struct *);
+       void (*handler)(int);
        char *help_msg;
        char *action_msg;
        int enable_mask;
@@ -44,8 +42,8 @@ struct sysrq_key_op {
  * are available -- else NULL's).
  */
 
-void handle_sysrq(int key, struct tty_struct *tty);
-void __handle_sysrq(int key, struct tty_struct *tty, int check_mask);
+void handle_sysrq(int key);
+void __handle_sysrq(int key, bool check_mask);
 int register_sysrq_key(int key, struct sysrq_key_op *op);
 int unregister_sysrq_key(int key, struct sysrq_key_op *op);
 struct sysrq_key_op *__sysrq_get_key_op(int key);
@@ -54,7 +52,11 @@ int sysrq_toggle_support(int enable_mask);
 
 #else
 
-static inline void handle_sysrq(int key, struct tty_struct *tty)
+static inline void handle_sysrq(int key)
+{
+}
+
+static inline void __handle_sysrq(int key, bool check_mask)
 {
 }
 
index 60c81da77f0f36b94bde7d83562db55dfa707d92..05f7fed2b173eb8205b0754ee3a00de730215e8e 100644 (file)
@@ -37,7 +37,6 @@
 #define UINPUT_VERSION         3
 
 #ifdef __KERNEL__
-#define UINPUT_MINOR           223
 #define UINPUT_NAME            "uinput"
 #define UINPUT_BUFFER_SIZE     16
 #define UINPUT_NUM_REQUESTS    16
index 890bc1472190f2189026e17792750f0f14706ff3..617068134ae8a91cadc1f7ccf42761cf212b5adf 100644 (file)
@@ -247,6 +247,7 @@ int usb_add_config(struct usb_composite_dev *,
  *     value; it should return zero on successful initialization.
  * @unbind: Reverses @bind(); called as a side effect of unregistering
  *     this driver.
+ * @disconnect: optional driver disconnect method
  * @suspend: Notifies when the host stops sending USB traffic,
  *     after function notifications
  * @resume: Notifies configuration when the host restarts USB traffic,
index 84a4c44c208b78a7eb9602091bc5a1401afdab8d..55675b1efb28659b37ff7b165fc6aa6ab0835fd7 100644 (file)
@@ -342,8 +342,7 @@ extern int usb_serial_generic_submit_read_urb(struct usb_serial_port *port,
 extern void usb_serial_generic_process_read_urb(struct urb *urb);
 extern int usb_serial_generic_prepare_write_buffer(struct usb_serial_port *port,
                                                void *dest, size_t size);
-extern int usb_serial_handle_sysrq_char(struct tty_struct *tty,
-                                       struct usb_serial_port *port,
+extern int usb_serial_handle_sysrq_char(struct usb_serial_port *port,
                                        unsigned int ch);
 extern int usb_serial_handle_break(struct usb_serial_port *port);
 
index 6228b5b77d35860019adcf84747819e0e9e43ea6..e9e1524b582cbde9b1988107d9c50a86a68f08ae 100644 (file)
@@ -93,8 +93,11 @@ extern void vga_set_legacy_decoding(struct pci_dev *pdev,
  *     Nested calls are supported (a per-resource counter is maintained)
  */
 
-extern int vga_get(struct pci_dev *pdev, unsigned int rsrc,
-                                                                                       int interruptible);
+#if defined(CONFIG_VGA_ARB)
+extern int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible);
+#else
+static inline int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible) { return 0; }
+#endif
 
 /**
  *     vga_get_interruptible
@@ -131,7 +134,11 @@ static inline int vga_get_uninterruptible(struct pci_dev *pdev,
  *     are already locked by another card. It can be called in any context
  */
 
+#if defined(CONFIG_VGA_ARB)
 extern int vga_tryget(struct pci_dev *pdev, unsigned int rsrc);
+#else
+static inline int vga_tryget(struct pci_dev *pdev, unsigned int rsrc) { return 0; }
+#endif
 
 /**
  *     vga_put         - release lock on legacy VGA resources
@@ -146,7 +153,11 @@ extern int vga_tryget(struct pci_dev *pdev, unsigned int rsrc);
  *     released if the counter reaches 0.
  */
 
+#if defined(CONFIG_VGA_ARB)
 extern void vga_put(struct pci_dev *pdev, unsigned int rsrc);
+#else
+#define vga_put(pdev, rsrc)
+#endif
 
 
 /**
index 7f43ccdc1d38c0eb919efe4891e1b91ec19c3705..eaaea37b3b75dd64b73a34a0e3beb31417bdd0d6 100644 (file)
@@ -170,6 +170,28 @@ static inline unsigned long zone_page_state(struct zone *zone,
        return x;
 }
 
+/*
+ * More accurate version that also considers the currently pending
+ * deltas. For that we need to loop over all cpus to find the current
+ * deltas. There is no synchronization so the result cannot be
+ * exactly accurate either.
+ */
+static inline unsigned long zone_page_state_snapshot(struct zone *zone,
+                                       enum zone_stat_item item)
+{
+       long x = atomic_long_read(&zone->vm_stat[item]);
+
+#ifdef CONFIG_SMP
+       int cpu;
+       for_each_online_cpu(cpu)
+               x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];
+
+       if (x < 0)
+               x = 0;
+#endif
+       return x;
+}
+
 extern unsigned long global_reclaimable_pages(void);
 extern unsigned long zone_reclaimable_pages(struct zone *zone);
 
index 4f9d277bcd9a5cd32ef35bc304bdbe3f6339a38a..f11100f964824c250b4eacb21e96e36640527ac6 100644 (file)
@@ -25,18 +25,20 @@ typedef void (*work_func_t)(struct work_struct *work);
 
 enum {
        WORK_STRUCT_PENDING_BIT = 0,    /* work item is pending execution */
-       WORK_STRUCT_CWQ_BIT     = 1,    /* data points to cwq */
-       WORK_STRUCT_LINKED_BIT  = 2,    /* next work is linked to this one */
+       WORK_STRUCT_DELAYED_BIT = 1,    /* work item is delayed */
+       WORK_STRUCT_CWQ_BIT     = 2,    /* data points to cwq */
+       WORK_STRUCT_LINKED_BIT  = 3,    /* next work is linked to this one */
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
-       WORK_STRUCT_STATIC_BIT  = 3,    /* static initializer (debugobjects) */
-       WORK_STRUCT_COLOR_SHIFT = 4,    /* color for workqueue flushing */
+       WORK_STRUCT_STATIC_BIT  = 4,    /* static initializer (debugobjects) */
+       WORK_STRUCT_COLOR_SHIFT = 5,    /* color for workqueue flushing */
 #else
-       WORK_STRUCT_COLOR_SHIFT = 3,    /* color for workqueue flushing */
+       WORK_STRUCT_COLOR_SHIFT = 4,    /* color for workqueue flushing */
 #endif
 
        WORK_STRUCT_COLOR_BITS  = 4,
 
        WORK_STRUCT_PENDING     = 1 << WORK_STRUCT_PENDING_BIT,
+       WORK_STRUCT_DELAYED     = 1 << WORK_STRUCT_DELAYED_BIT,
        WORK_STRUCT_CWQ         = 1 << WORK_STRUCT_CWQ_BIT,
        WORK_STRUCT_LINKED      = 1 << WORK_STRUCT_LINKED_BIT,
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
@@ -59,8 +61,8 @@ enum {
 
        /*
         * Reserve 7 bits off of cwq pointer w/ debugobjects turned
-        * off.  This makes cwqs aligned to 128 bytes which isn't too
-        * excessive while allowing 15 workqueue flush colors.
+        * off.  This makes cwqs aligned to 256 bytes and allows 15
+        * workqueue flush colors.
         */
        WORK_STRUCT_FLAG_BITS   = WORK_STRUCT_COLOR_SHIFT +
                                  WORK_STRUCT_COLOR_BITS,
@@ -241,6 +243,8 @@ enum {
        WQ_HIGHPRI              = 1 << 4, /* high priority */
        WQ_CPU_INTENSIVE        = 1 << 5, /* cpu instensive workqueue */
 
+       WQ_DYING                = 1 << 6, /* internal: workqueue is dying */
+
        WQ_MAX_ACTIVE           = 512,    /* I like 512, better ideas? */
        WQ_MAX_UNBOUND_PER_CPU  = 4,      /* 4 * #cpus for unbound wq */
        WQ_DFL_ACTIVE           = WQ_MAX_ACTIVE / 2,
index df6a2eb20193f935bc72204ca22c7f2f21b5070f..eaa9582779d029a9362c32b3816fed8c157e4d3e 100644 (file)
@@ -268,11 +268,21 @@ static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
        return seq3 - seq2 >= seq1 - seq2;
 }
 
-static inline int tcp_too_many_orphans(struct sock *sk, int num)
+static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
 {
-       return (num > sysctl_tcp_max_orphans) ||
-               (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
-                atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]);
+       struct percpu_counter *ocp = sk->sk_prot->orphan_count;
+       int orphans = percpu_counter_read_positive(ocp);
+
+       if (orphans << shift > sysctl_tcp_max_orphans) {
+               orphans = percpu_counter_sum_positive(ocp);
+               if (orphans << shift > sysctl_tcp_max_orphans)
+                       return true;
+       }
+
+       if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
+           atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])
+               return true;
+       return false;
 }
 
 /* syncookies: remember time of last synqueue overflow */
index c624126a9c8a6fb889fe596ea261f649236d51a7..425bcfe56c620211678d94f6d3923a9451305db2 100644 (file)
@@ -81,14 +81,16 @@ TRACE_EVENT(timer_expire_entry,
        TP_STRUCT__entry(
                __field( void *,        timer   )
                __field( unsigned long, now     )
+               __field( void *,        function)
        ),
 
        TP_fast_assign(
                __entry->timer          = timer;
                __entry->now            = jiffies;
+               __entry->function       = timer->function;
        ),
 
-       TP_printk("timer=%p now=%lu", __entry->timer, __entry->now)
+       TP_printk("timer=%p function=%pf now=%lu", __entry->timer, __entry->function,__entry->now)
 );
 
 /**
@@ -200,14 +202,16 @@ TRACE_EVENT(hrtimer_expire_entry,
        TP_STRUCT__entry(
                __field( void *,        hrtimer )
                __field( s64,           now     )
+               __field( void *,        function)
        ),
 
        TP_fast_assign(
                __entry->hrtimer        = hrtimer;
                __entry->now            = now->tv64;
+               __entry->function       = hrtimer->function;
        ),
 
-       TP_printk("hrtimer=%p now=%llu", __entry->hrtimer,
+       TP_printk("hrtimer=%p function=%pf now=%llu", __entry->hrtimer, __entry->function,
                  (unsigned long long)ktime_to_ns((ktime_t) { .tv64 = __entry->now }))
  );
 
index ce9d671c636c85499d7077361710baab3d50b519..a785a3b0c8c7d89d9a46e1a1d90300cb91da5308 100644 (file)
 #define XEN_IOPORT_PROTOVER    (XEN_IOPORT_BASE + 2) /* 1 byte access (R) */
 #define XEN_IOPORT_PRODNUM     (XEN_IOPORT_BASE + 2) /* 2 byte access (W) */
 
-#define XEN_UNPLUG_ALL_IDE_DISKS 1
-#define XEN_UNPLUG_ALL_NICS 2
-#define XEN_UNPLUG_AUX_IDE_DISKS 4
-#define XEN_UNPLUG_ALL 7
-#define XEN_UNPLUG_IGNORE 8
+#define XEN_UNPLUG_ALL_IDE_DISKS       (1<<0)
+#define XEN_UNPLUG_ALL_NICS            (1<<1)
+#define XEN_UNPLUG_AUX_IDE_DISKS       (1<<2)
+#define XEN_UNPLUG_ALL                 (XEN_UNPLUG_ALL_IDE_DISKS|\
+                                        XEN_UNPLUG_ALL_NICS|\
+                                        XEN_UNPLUG_AUX_IDE_DISKS)
+
+#define XEN_UNPLUG_UNNECESSARY                 (1<<16)
+#define XEN_UNPLUG_NEVER                       (1<<17)
 
 static inline int xen_must_unplug_nics(void) {
 #if (defined(CONFIG_XEN_NETDEV_FRONTEND) || \
index 192f88c5b0f9df29b80d51d4c5ceba5388d099eb..c9483d8f6140ed6cb4e06fa6139e2aeb907b3d47 100644 (file)
@@ -1791,19 +1791,20 @@ out:
 }
 
 /**
- * cgroup_attach_task_current_cg - attach task 'tsk' to current task's cgroup
+ * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
+ * @from: attach to all cgroups of a given task
  * @tsk: the task to be attached
  */
-int cgroup_attach_task_current_cg(struct task_struct *tsk)
+int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
 {
        struct cgroupfs_root *root;
-       struct cgroup *cur_cg;
        int retval = 0;
 
        cgroup_lock();
        for_each_active_root(root) {
-               cur_cg = task_cgroup_from_root(current, root);
-               retval = cgroup_attach_task(cur_cg, tsk);
+               struct cgroup *from_cg = task_cgroup_from_root(from, root);
+
+               retval = cgroup_attach_task(from_cg, tsk);
                if (retval)
                        break;
        }
@@ -1811,7 +1812,7 @@ int cgroup_attach_task_current_cg(struct task_struct *tsk)
 
        return retval;
 }
-EXPORT_SYMBOL_GPL(cgroup_attach_task_current_cg);
+EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
 
 /*
  * Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex
index 3c2d4972d2352ad1bff0fb31a676f467420b6ddd..de407c78178d9014332255f2d8ae412f9fbd5e5a 100644 (file)
@@ -741,7 +741,7 @@ static struct console kgdbcons = {
 };
 
 #ifdef CONFIG_MAGIC_SYSRQ
-static void sysrq_handle_dbg(int key, struct tty_struct *tty)
+static void sysrq_handle_dbg(int key)
 {
        if (!dbg_io_ops) {
                printk(KERN_CRIT "ERROR: No KGDB I/O module available\n");
index 75bd9b3ebbb7cf501800115a531e86e7c30c5a28..20059ef4459a4ff293428337d9936ff438a8eb96 100644 (file)
@@ -274,7 +274,6 @@ static int kdb_bp(int argc, const char **argv)
        int i, bpno;
        kdb_bp_t *bp, *bp_check;
        int diag;
-       int free;
        char *symname = NULL;
        long offset = 0ul;
        int nextarg;
@@ -305,7 +304,6 @@ static int kdb_bp(int argc, const char **argv)
        /*
         * Find an empty bp structure to allocate
         */
-       free = KDB_MAXBPT;
        for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) {
                if (bp->bp_free)
                        break;
index 28b844118bbd9d27f1e6bede2fd2502c6fd4f1b8..caf057a3de0e8457cb35e92a4b371f4f4597df3a 100644 (file)
@@ -1929,7 +1929,7 @@ static int kdb_sr(int argc, const char **argv)
        if (argc != 1)
                return KDB_ARGCOUNT;
        kdb_trap_printk++;
-       __handle_sysrq(*argv[1], NULL, 0);
+       __handle_sysrq(*argv[1], false);
        kdb_trap_printk--;
 
        return 0;
index ef3c3f88a7a35e36d8f1fb551c56e534cea52687..f83972b16564d00676154900f09d3c70affd773c 100644 (file)
  * @children: child nodes
  * @all: list head for list of all nodes
  * @parent: parent node
- * @info: associated profiling data structure if not a directory
- * @ghost: when an object file containing profiling data is unloaded we keep a
- *         copy of the profiling data here to allow collecting coverage data
- *         for cleanup code. Such a node is called a "ghost".
+ * @loaded_info: array of pointers to profiling data sets for loaded object
+ *   files.
+ * @num_loaded: number of profiling data sets for loaded object files.
+ * @unloaded_info: accumulated copy of profiling data sets for unloaded
+ *   object files. Used only when gcov_persist=1.
  * @dentry: main debugfs entry, either a directory or data file
  * @links: associated symbolic links
  * @name: data file basename
@@ -51,10 +52,11 @@ struct gcov_node {
        struct list_head children;
        struct list_head all;
        struct gcov_node *parent;
-       struct gcov_info *info;
-       struct gcov_info *ghost;
+       struct gcov_info **loaded_info;
+       struct gcov_info *unloaded_info;
        struct dentry *dentry;
        struct dentry **links;
+       int num_loaded;
        char name[0];
 };
 
@@ -136,16 +138,37 @@ static const struct seq_operations gcov_seq_ops = {
 };
 
 /*
- * Return the profiling data set for a given node. This can either be the
- * original profiling data structure or a duplicate (also called "ghost")
- * in case the associated object file has been unloaded.
+ * Return a profiling data set associated with the given node. This is
+ * either a data set for a loaded object file or a data set copy in case
+ * all associated object files have been unloaded.
  */
 static struct gcov_info *get_node_info(struct gcov_node *node)
 {
-       if (node->info)
-               return node->info;
+       if (node->num_loaded > 0)
+               return node->loaded_info[0];
 
-       return node->ghost;
+       return node->unloaded_info;
+}
+
+/*
+ * Return a newly allocated profiling data set which contains the sum of
+ * all profiling data associated with the given node.
+ */
+static struct gcov_info *get_accumulated_info(struct gcov_node *node)
+{
+       struct gcov_info *info;
+       int i = 0;
+
+       if (node->unloaded_info)
+               info = gcov_info_dup(node->unloaded_info);
+       else
+               info = gcov_info_dup(node->loaded_info[i++]);
+       if (!info)
+               return NULL;
+       for (; i < node->num_loaded; i++)
+               gcov_info_add(info, node->loaded_info[i]);
+
+       return info;
 }
 
 /*
@@ -163,9 +186,10 @@ static int gcov_seq_open(struct inode *inode, struct file *file)
        mutex_lock(&node_lock);
        /*
         * Read from a profiling data copy to minimize reference tracking
-        * complexity and concurrent access.
+        * complexity and concurrent access and to keep accumulating multiple
+        * profiling data sets associated with one node simple.
         */
-       info = gcov_info_dup(get_node_info(node));
+       info = get_accumulated_info(node);
        if (!info)
                goto out_unlock;
        iter = gcov_iter_new(info);
@@ -225,12 +249,25 @@ static struct gcov_node *get_node_by_name(const char *name)
        return NULL;
 }
 
+/*
+ * Reset all profiling data associated with the specified node.
+ */
+static void reset_node(struct gcov_node *node)
+{
+       int i;
+
+       if (node->unloaded_info)
+               gcov_info_reset(node->unloaded_info);
+       for (i = 0; i < node->num_loaded; i++)
+               gcov_info_reset(node->loaded_info[i]);
+}
+
 static void remove_node(struct gcov_node *node);
 
 /*
  * write() implementation for gcov data files. Reset profiling data for the
- * associated file. If the object file has been unloaded (i.e. this is
- * a "ghost" node), remove the debug fs node as well.
+ * corresponding file. If all associated object files have been unloaded,
+ * remove the debug fs node as well.
  */
 static ssize_t gcov_seq_write(struct file *file, const char __user *addr,
                              size_t len, loff_t *pos)
@@ -245,10 +282,10 @@ static ssize_t gcov_seq_write(struct file *file, const char __user *addr,
        node = get_node_by_name(info->filename);
        if (node) {
                /* Reset counts or remove node for unloaded modules. */
-               if (node->ghost)
+               if (node->num_loaded == 0)
                        remove_node(node);
                else
-                       gcov_info_reset(node->info);
+                       reset_node(node);
        }
        /* Reset counts for open file. */
        gcov_info_reset(info);
@@ -378,7 +415,10 @@ static void init_node(struct gcov_node *node, struct gcov_info *info,
        INIT_LIST_HEAD(&node->list);
        INIT_LIST_HEAD(&node->children);
        INIT_LIST_HEAD(&node->all);
-       node->info = info;
+       if (node->loaded_info) {
+               node->loaded_info[0] = info;
+               node->num_loaded = 1;
+       }
        node->parent = parent;
        if (name)
                strcpy(node->name, name);
@@ -394,9 +434,13 @@ static struct gcov_node *new_node(struct gcov_node *parent,
        struct gcov_node *node;
 
        node = kzalloc(sizeof(struct gcov_node) + strlen(name) + 1, GFP_KERNEL);
-       if (!node) {
-               pr_warning("out of memory\n");
-               return NULL;
+       if (!node)
+               goto err_nomem;
+       if (info) {
+               node->loaded_info = kcalloc(1, sizeof(struct gcov_info *),
+                                          GFP_KERNEL);
+               if (!node->loaded_info)
+                       goto err_nomem;
        }
        init_node(node, info, name, parent);
        /* Differentiate between gcov data file nodes and directory nodes. */
@@ -416,6 +460,11 @@ static struct gcov_node *new_node(struct gcov_node *parent,
        list_add(&node->all, &all_head);
 
        return node;
+
+err_nomem:
+       kfree(node);
+       pr_warning("out of memory\n");
+       return NULL;
 }
 
 /* Remove symbolic links associated with node. */
@@ -441,8 +490,9 @@ static void release_node(struct gcov_node *node)
        list_del(&node->all);
        debugfs_remove(node->dentry);
        remove_links(node);
-       if (node->ghost)
-               gcov_info_free(node->ghost);
+       kfree(node->loaded_info);
+       if (node->unloaded_info)
+               gcov_info_free(node->unloaded_info);
        kfree(node);
 }
 
@@ -477,7 +527,7 @@ static struct gcov_node *get_child_by_name(struct gcov_node *parent,
 
 /*
  * write() implementation for reset file. Reset all profiling data to zero
- * and remove ghost nodes.
+ * and remove nodes for which all associated object files are unloaded.
  */
 static ssize_t reset_write(struct file *file, const char __user *addr,
                           size_t len, loff_t *pos)
@@ -487,8 +537,8 @@ static ssize_t reset_write(struct file *file, const char __user *addr,
        mutex_lock(&node_lock);
 restart:
        list_for_each_entry(node, &all_head, all) {
-               if (node->info)
-                       gcov_info_reset(node->info);
+               if (node->num_loaded > 0)
+                       reset_node(node);
                else if (list_empty(&node->children)) {
                        remove_node(node);
                        /* Several nodes may have gone - restart loop. */
@@ -564,37 +614,115 @@ err_remove:
 }
 
 /*
- * The profiling data set associated with this node is being unloaded. Store a
- * copy of the profiling data and turn this node into a "ghost".
+ * Associate a profiling data set with an existing node. Needs to be called
+ * with node_lock held.
  */
-static int ghost_node(struct gcov_node *node)
+static void add_info(struct gcov_node *node, struct gcov_info *info)
 {
-       node->ghost = gcov_info_dup(node->info);
-       if (!node->ghost) {
-               pr_warning("could not save data for '%s' (out of memory)\n",
-                          node->info->filename);
-               return -ENOMEM;
+       struct gcov_info **loaded_info;
+       int num = node->num_loaded;
+
+       /*
+        * Prepare new array. This is done first to simplify cleanup in
+        * case the new data set is incompatible, the node only contains
+        * unloaded data sets and there's not enough memory for the array.
+        */
+       loaded_info = kcalloc(num + 1, sizeof(struct gcov_info *), GFP_KERNEL);
+       if (!loaded_info) {
+               pr_warning("could not add '%s' (out of memory)\n",
+                          info->filename);
+               return;
+       }
+       memcpy(loaded_info, node->loaded_info,
+              num * sizeof(struct gcov_info *));
+       loaded_info[num] = info;
+       /* Check if the new data set is compatible. */
+       if (num == 0) {
+               /*
+                * A module was unloaded, modified and reloaded. The new
+                * data set replaces the copy of the last one.
+                */
+               if (!gcov_info_is_compatible(node->unloaded_info, info)) {
+                       pr_warning("discarding saved data for %s "
+                                  "(incompatible version)\n", info->filename);
+                       gcov_info_free(node->unloaded_info);
+                       node->unloaded_info = NULL;
+               }
+       } else {
+               /*
+                * Two different versions of the same object file are loaded.
+                * The initial one takes precedence.
+                */
+               if (!gcov_info_is_compatible(node->loaded_info[0], info)) {
+                       pr_warning("could not add '%s' (incompatible "
+                                  "version)\n", info->filename);
+                       kfree(loaded_info);
+                       return;
+               }
        }
-       node->info = NULL;
+       /* Overwrite previous array. */
+       kfree(node->loaded_info);
+       node->loaded_info = loaded_info;
+       node->num_loaded = num + 1;
+}
 
-       return 0;
+/*
+ * Return the index of a profiling data set associated with a node.
+ */
+static int get_info_index(struct gcov_node *node, struct gcov_info *info)
+{
+       int i;
+
+       for (i = 0; i < node->num_loaded; i++) {
+               if (node->loaded_info[i] == info)
+                       return i;
+       }
+       return -ENOENT;
 }
 
 /*
- * Profiling data for this node has been loaded again. Add profiling data
- * from previous instantiation and turn this node into a regular node.
+ * Save the data of a profiling data set which is being unloaded.
  */
-static void revive_node(struct gcov_node *node, struct gcov_info *info)
+static void save_info(struct gcov_node *node, struct gcov_info *info)
 {
-       if (gcov_info_is_compatible(node->ghost, info))
-               gcov_info_add(info, node->ghost);
+       if (node->unloaded_info)
+               gcov_info_add(node->unloaded_info, info);
        else {
-               pr_warning("discarding saved data for '%s' (version changed)\n",
+               node->unloaded_info = gcov_info_dup(info);
+               if (!node->unloaded_info) {
+                       pr_warning("could not save data for '%s' "
+                                  "(out of memory)\n", info->filename);
+               }
+       }
+}
+
+/*
+ * Disassociate a profiling data set from a node. Needs to be called with
+ * node_lock held.
+ */
+static void remove_info(struct gcov_node *node, struct gcov_info *info)
+{
+       int i;
+
+       i = get_info_index(node, info);
+       if (i < 0) {
+               pr_warning("could not remove '%s' (not found)\n",
                           info->filename);
+               return;
        }
-       gcov_info_free(node->ghost);
-       node->ghost = NULL;
-       node->info = info;
+       if (gcov_persist)
+               save_info(node, info);
+       /* Shrink array. */
+       node->loaded_info[i] = node->loaded_info[node->num_loaded - 1];
+       node->num_loaded--;
+       if (node->num_loaded > 0)
+               return;
+       /* Last loaded data set was removed. */
+       kfree(node->loaded_info);
+       node->loaded_info = NULL;
+       node->num_loaded = 0;
+       if (!node->unloaded_info)
+               remove_node(node);
 }
 
 /*
@@ -609,30 +737,18 @@ void gcov_event(enum gcov_action action, struct gcov_info *info)
        node = get_node_by_name(info->filename);
        switch (action) {
        case GCOV_ADD:
-               /* Add new node or revive ghost. */
-               if (!node) {
+               if (node)
+                       add_info(node, info);
+               else
                        add_node(info);
-                       break;
-               }
-               if (gcov_persist)
-                       revive_node(node, info);
-               else {
-                       pr_warning("could not add '%s' (already exists)\n",
-                                  info->filename);
-               }
                break;
        case GCOV_REMOVE:
-               /* Remove node or turn into ghost. */
-               if (!node) {
+               if (node)
+                       remove_info(node, info);
+               else {
                        pr_warning("could not remove '%s' (not found)\n",
                                   info->filename);
-                       break;
                }
-               if (gcov_persist) {
-                       if (!ghost_node(node))
-                               break;
-               }
-               remove_node(node);
                break;
        }
        mutex_unlock(&node_lock);
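
The add_info()/remove_info() pair above replaces the old single "ghost" pointer with a growable array of loaded data sets: additions build the larger array first so that an allocation failure leaves the node untouched, and removals shrink the array by moving the last entry into the freed slot. A minimal userspace sketch of that pattern (names and types here are illustrative, not the kernel's):

#include <stdlib.h>
#include <string.h>

struct node {
        void **loaded;          /* array of loaded data-set pointers */
        int num_loaded;
};

/* Grow by one: allocate and copy first, so failure leaves the node intact. */
static int node_add(struct node *n, void *info)
{
        void **grown = calloc(n->num_loaded + 1, sizeof(*grown));

        if (!grown)
                return -1;
        if (n->num_loaded)
                memcpy(grown, n->loaded, n->num_loaded * sizeof(*grown));
        grown[n->num_loaded] = info;
        free(n->loaded);
        n->loaded = grown;
        n->num_loaded++;
        return 0;
}

/* Shrink by one: the last entry fills the hole, ordering is not preserved. */
static void node_remove(struct node *n, int i)
{
        n->loaded[i] = n->loaded[n->num_loaded - 1];
        if (--n->num_loaded == 0) {
                free(n->loaded);
                n->loaded = NULL;
        }
}

int main(void)
{
        struct node n = { NULL, 0 };
        int x = 1, y = 2;

        node_add(&n, &x);
        node_add(&n, &y);
        node_remove(&n, 0);     /* &y is moved into slot 0 */
        node_remove(&n, 0);
        return 0;
}

The swap-with-last removal trades ordering for O(1) deletion, which is acceptable here since get_info_index() does a linear search anyway.
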
index 53b1916c94926c245aacd17ccb420fc00d06a19b..253dc0f35cf4c30786d1ff3565427d1374762a04 100644 (file)
@@ -143,10 +143,9 @@ int groups_search(const struct group_info *group_info, gid_t grp)
        right = group_info->ngroups;
        while (left < right) {
                unsigned int mid = (left+right)/2;
-               int cmp = grp - GROUP_AT(group_info, mid);
-               if (cmp > 0)
+               if (grp > GROUP_AT(group_info, mid))
                        left = mid + 1;
-               else if (cmp < 0)
+               else if (grp < GROUP_AT(group_info, mid))
                        right = mid;
                else
                        return 1;
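
The groups_search() change above is a subtle correctness fix: gid_t is unsigned, so the old "int cmp = grp - GROUP_AT(group_info, mid)" can wrap around and its sign no longer says which value is larger. Comparing directly avoids the problem. A standalone sketch (the typedef is a stand-in, not the kernel's gid_t):

#include <stdio.h>

typedef unsigned int ugid;      /* stand-in for the kernel's unsigned gid_t */

static int search(const ugid *groups, unsigned int ngroups, ugid grp)
{
        unsigned int left = 0, right = ngroups;

        while (left < right) {
                unsigned int mid = (left + right) / 2;

                if (grp > groups[mid])          /* compare, never subtract */
                        left = mid + 1;
                else if (grp < groups[mid])
                        right = mid;
                else
                        return 1;
        }
        return 0;
}

int main(void)
{
        ugid groups[] = { 5, 100, 4000000000u };

        /*
         * 5 - 4000000000 wraps to a huge unsigned value, and truncating
         * it to int gives an implementation-defined sign, so the old
         * subtraction-based test could walk the wrong half of the array.
         */
        printf("found: %d\n", search(groups, 3, 4000000000u));
        return 0;
}
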
index ce669174f355c7dd1e1893903bb4d18a25f17c34..1decafbb6b1a28197b768021cc987e230d352b5b 100644 (file)
@@ -1091,11 +1091,10 @@ EXPORT_SYMBOL_GPL(hrtimer_cancel);
  */
 ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
 {
-       struct hrtimer_clock_base *base;
        unsigned long flags;
        ktime_t rem;
 
-       base = lock_hrtimer_base(timer, &flags);
+       lock_hrtimer_base(timer, &flags);
        rem = hrtimer_expires_remaining(timer);
        unlock_hrtimer_base(timer, &flags);
 
index 4c0b7b3e6d2e9a483c6cb4cc384e979911ed03bb..200407c1502f509ee3f9d8a665bc4d3b78a27f74 100644 (file)
 # include <asm/mutex.h>
 #endif
 
-/***
- * mutex_init - initialize the mutex
- * @lock: the mutex to be initialized
- * @key: the lock_class_key for the class; used by mutex lock debugging
- *
- * Initialize the mutex to unlocked state.
- *
- * It is not allowed to initialize an already locked mutex.
- */
 void
 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 {
@@ -68,7 +59,7 @@ EXPORT_SYMBOL(__mutex_init);
 static __used noinline void __sched
 __mutex_lock_slowpath(atomic_t *lock_count);
 
-/***
+/**
  * mutex_lock - acquire the mutex
  * @lock: the mutex to be acquired
  *
@@ -105,7 +96,7 @@ EXPORT_SYMBOL(mutex_lock);
 
 static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
 
-/***
+/**
  * mutex_unlock - release the mutex
  * @lock: the mutex to be released
  *
@@ -364,8 +355,8 @@ __mutex_lock_killable_slowpath(atomic_t *lock_count);
 static noinline int __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
 
-/***
- * mutex_lock_interruptible - acquire the mutex, interruptable
+/**
+ * mutex_lock_interruptible - acquire the mutex, interruptible
  * @lock: the mutex to be acquired
  *
  * Lock the mutex like mutex_lock(), and return 0 if the mutex has
@@ -456,15 +447,15 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
        return prev == 1;
 }
 
-/***
- * mutex_trylock - try acquire the mutex, without waiting
+/**
+ * mutex_trylock - try to acquire the mutex, without waiting
  * @lock: the mutex to be acquired
  *
  * Try to acquire the mutex atomically. Returns 1 if the mutex
  * has been acquired successfully, and 0 on contention.
  *
  * NOTE: this function follows the spin_trylock() convention, so
- * it is negated to the down_trylock() return values! Be careful
+ * it is negated from the down_trylock() return values! Be careful
  * about this when converting semaphore users to mutexes.
  *
  * This function must not be used in interrupt context. The
index 403d1804b198140e4f1355c70c0b25e6efa9e5d8..657555a5f30fc9399513e9538a205d2e8bf0fff0 100644 (file)
@@ -402,11 +402,31 @@ static void perf_group_detach(struct perf_event *event)
        }
 }
 
+static inline int
+event_filter_match(struct perf_event *event)
+{
+       return event->cpu == -1 || event->cpu == smp_processor_id();
+}
+
 static void
 event_sched_out(struct perf_event *event,
                  struct perf_cpu_context *cpuctx,
                  struct perf_event_context *ctx)
 {
+       u64 delta;
+       /*
+        * An event which could not be activated because of
+        * filter mismatch still needs to have its timings
+        * maintained, otherwise bogus information is returned
+        * via read() for time_enabled, time_running:
+        */
+       if (event->state == PERF_EVENT_STATE_INACTIVE
+           && !event_filter_match(event)) {
+               delta = ctx->time - event->tstamp_stopped;
+               event->tstamp_running += delta;
+               event->tstamp_stopped = ctx->time;
+       }
+
        if (event->state != PERF_EVENT_STATE_ACTIVE)
                return;
 
@@ -432,9 +452,7 @@ group_sched_out(struct perf_event *group_event,
                struct perf_event_context *ctx)
 {
        struct perf_event *event;
-
-       if (group_event->state != PERF_EVENT_STATE_ACTIVE)
-               return;
+       int state = group_event->state;
 
        event_sched_out(group_event, cpuctx, ctx);
 
@@ -444,7 +462,7 @@ group_sched_out(struct perf_event *group_event,
        list_for_each_entry(event, &group_event->sibling_list, group_entry)
                event_sched_out(event, cpuctx, ctx);
 
-       if (group_event->attr.exclusive)
+       if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
                cpuctx->exclusive = 0;
 }
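
The event_sched_out() hunk above keeps the timestamps of a CPU-filtered, never-activated event moving with the context clock, so that read() reports sensible time_enabled/time_running instead of stale values. Roughly, the reported running time is the gap between the two stamps, so advancing both by the same delta leaves it unchanged while catching the stamps up. A toy sketch of that bookkeeping (field names mirror the diff, everything else is made up):

#include <stdio.h>

struct toy_event {
        unsigned long long tstamp_running;      /* base stamp for running time */
        unsigned long long tstamp_stopped;      /* context time at last stop */
};

/*
 * Advance both stamps by the same delta: the derived running time
 * (tstamp_stopped - tstamp_running) is unchanged, but the stamps no
 * longer lag behind the context clock.
 */
static void account_inactive(struct toy_event *ev, unsigned long long ctx_time)
{
        unsigned long long delta = ctx_time - ev->tstamp_stopped;

        ev->tstamp_running += delta;
        ev->tstamp_stopped = ctx_time;
}

int main(void)
{
        struct toy_event ev = { .tstamp_running = 40, .tstamp_stopped = 100 };

        account_inactive(&ev, 250);     /* running time stays 100 - 40 = 60 */
        printf("running time = %llu\n", ev.tstamp_stopped - ev.tstamp_running);
        return 0;
}
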
 
index 996a4dec5f968406aae7ef9b79240768da1ffddb..b7e4c362361bcf46fe34992e9bba1852dd478b71 100644 (file)
@@ -212,15 +212,17 @@ EXPORT_SYMBOL_GPL(pm_qos_request_active);
 
 /**
  * pm_qos_add_request - inserts new qos request into the list
- * @pm_qos_class: identifies which list of qos request to us
+ * @dep: pointer to a preallocated handle
+ * @pm_qos_class: identifies which list of qos request to use
  * @value: defines the qos request
  *
  * This function inserts a new entry in the pm_qos_class list of requested qos
  * performance characteristics.  It recomputes the aggregate QoS expectations
- * for the pm_qos_class of parameters, and returns the pm_qos_request list
- * element as a handle for use in updating and removal.  Call needs to save
- * this handle for later use.
+ * for the pm_qos_class of parameters and initializes the pm_qos_request_list
+ * handle.  Caller needs to save this handle for later use in updates and
+ * removal.
  */
+
 void pm_qos_add_request(struct pm_qos_request_list *dep,
                        int pm_qos_class, s32 value)
 {
@@ -348,7 +350,7 @@ static int pm_qos_power_open(struct inode *inode, struct file *filp)
 
        pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
        if (pm_qos_class >= 0) {
-               struct pm_qos_request_list *req = kzalloc(GFP_KERNEL, sizeof(*req));
+               struct pm_qos_request_list *req = kzalloc(sizeof(*req), GFP_KERNEL);
                if (!req)
                        return -ENOMEM;
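
The last pm_qos hunk only swaps two arguments, but it matters: kzalloc() takes (size, flags), so the old call requested GFP_KERNEL bytes, a small constant, and passed the structure size as allocation flags. Both parameters are integer types, so the compiler cannot catch the transposition. A trivial userspace illustration (the flag value and names below are made up):

#include <stdio.h>
#include <stdlib.h>

#define DEMO_GFP_KERNEL 0xd0u           /* made-up stand-in, not the real value */

struct demo_req { char payload[128]; };

/* Toy allocator with kzalloc's (size, flags) parameter order. */
static void *demo_kzalloc(size_t size, unsigned int flags)
{
        (void)flags;
        return calloc(1, size);
}

int main(void)
{
        /*
         * Both arguments are plain integers, so swapping them still
         * compiles; the requested size silently becomes the flag value.
         */
        printf("size if swapped: %u\n", DEMO_GFP_KERNEL);
        printf("size intended:   %zu\n", sizeof(struct demo_req));

        struct demo_req *req = demo_kzalloc(sizeof(*req), DEMO_GFP_KERNEL);
        free(req);
        return 0;
}
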
 
index c77963938bca440a90423952d6d85cf4d66abd83..8dc31e02ae129e8f042804b67c38ab02f997d94c 100644 (file)
@@ -338,7 +338,6 @@ int hibernation_snapshot(int platform_mode)
                goto Close;
 
        suspend_console();
-       hibernation_freeze_swap();
        saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
        error = dpm_suspend_start(PMSG_FREEZE);
        if (error)
index e8b337006276e71e782876db74cb94a4bbcb41a8..d52359374e8501e8c1f4e8a88427468249509fb3 100644 (file)
@@ -24,7 +24,7 @@ static void do_poweroff(struct work_struct *dummy)
 
 static DECLARE_WORK(poweroff_work, do_poweroff);
 
-static void handle_poweroff(int key, struct tty_struct *tty)
+static void handle_poweroff(int key)
 {
        /* run sysrq poweroff on boot cpu */
        schedule_work_on(cpumask_first(cpu_online_mask), &poweroff_work);
index 5e7edfb05e66cff0d2c99d5fc8fddfde03e372c3..f6cd6faf84fdb516323e4257f53a61778c7fd60b 100644 (file)
@@ -1086,7 +1086,6 @@ void swsusp_free(void)
        buffer = NULL;
        alloc_normal = 0;
        alloc_highmem = 0;
-       hibernation_thaw_swap();
 }
 
 /* Helper functions used for the shrinking of memory. */
index 5d0059eed3e4e3ce0bc38ad072bd7b1430d9f712..e6a5bdf61a375c309c1f9ea356e9123d79699037 100644 (file)
@@ -136,10 +136,10 @@ sector_t alloc_swapdev_block(int swap)
 {
        unsigned long offset;
 
-       offset = swp_offset(get_swap_for_hibernation(swap));
+       offset = swp_offset(get_swap_page_of_type(swap));
        if (offset) {
                if (swsusp_extents_insert(offset))
-                       swap_free_for_hibernation(swp_entry(swap, offset));
+                       swap_free(swp_entry(swap, offset));
                else
                        return swapdev_block(swap, offset);
        }
@@ -163,7 +163,7 @@ void free_all_swap_pages(int swap)
                ext = container_of(node, struct swsusp_extent, node);
                rb_erase(node, &swsusp_extents);
                for (offset = ext->start; offset <= ext->end; offset++)
-                       swap_free_for_hibernation(swp_entry(swap, offset));
+                       swap_free(swp_entry(swap, offset));
 
                kfree(ext);
        }
index 41541d79e3c85ac7d367f3ddffc4e51a0f63e755..09b574e7f4df7c14615d104c3fe736f7c1be0847 100644 (file)
@@ -3865,8 +3865,16 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
                /*
                 * Owner changed, break to re-assess state.
                 */
-               if (lock->owner != owner)
+               if (lock->owner != owner) {
+                       /*
+                        * If the lock has switched to a different owner,
+                        * we likely have heavy contention. Return 0 to quit
+                        * optimistic spinning and not contend further:
+                        */
+                       if (lock->owner)
+                               return 0;
                        break;
+               }
 
                /*
                 * Is that owner really running on that cpu?
index 806d1b227a21060aac100994a8992266c00b59b5..134f7edb30c6ce4dc45548a25f75cb13f67a269d 100644 (file)
@@ -1313,7 +1313,7 @@ static struct sched_group *
 find_idlest_group(struct sched_domain *sd, struct task_struct *p,
                  int this_cpu, int load_idx)
 {
-       struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
+       struct sched_group *idlest = NULL, *group = sd->groups;
        unsigned long min_load = ULONG_MAX, this_load = 0;
        int imbalance = 100 + (sd->imbalance_pct-100)/2;
 
@@ -1348,7 +1348,6 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 
                if (local_group) {
                        this_load = avg_load;
-                       this = group;
                } else if (avg_load < min_load) {
                        min_load = avg_load;
                        idlest = group;
@@ -3752,6 +3751,8 @@ static void task_fork_fair(struct task_struct *p)
 
        raw_spin_lock_irqsave(&rq->lock, flags);
 
+       update_rq_clock(rq);
+
        if (unlikely(task_cpu(p) != this_cpu))
                __set_task_cpu(p, this_cpu);
 
index e9ad4448982860af9919df53c3368156a4bf2445..7f5a0cd296a96ca44e43f0db028026094dbbb57a 100644 (file)
@@ -931,6 +931,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
                pgid = pid;
        if (pgid < 0)
                return -EINVAL;
+       rcu_read_lock();
 
        /* From this point forward we keep holding onto the tasklist lock
         * so that our parent does not change from under us. -DaveM
@@ -984,6 +985,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
 out:
        /* All paths lead to here, thus we are safe. -DaveM */
        write_unlock_irq(&tasklist_lock);
+       rcu_read_unlock();
        return err;
 }
 
index ca38e8e3e907557f74faaad7ddb57d330bd43d2d..f88552c6d2275be1216187f07b1e0e1b22b93af2 100644 (file)
@@ -1713,10 +1713,7 @@ static __init int sysctl_init(void)
 {
        sysctl_set_parent(NULL, root_table);
 #ifdef CONFIG_SYSCTL_SYSCALL_CHECK
-       {
-               int err;
-               err = sysctl_check_table(current->nsproxy, root_table);
-       }
+       sysctl_check_table(current->nsproxy, root_table);
 #endif
        return 0;
 }
index 0d88ce9b9fb8828c9a81fdffcd47763ae5cc2543..7cb1f45a1de1ccf61e9cfcf59e0abe0642f12cf2 100644 (file)
@@ -381,12 +381,19 @@ static int function_stat_show(struct seq_file *m, void *v)
 {
        struct ftrace_profile *rec = v;
        char str[KSYM_SYMBOL_LEN];
+       int ret = 0;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       static DEFINE_MUTEX(mutex);
        static struct trace_seq s;
        unsigned long long avg;
        unsigned long long stddev;
 #endif
+       mutex_lock(&ftrace_profile_lock);
+
+       /* we raced with function_profile_reset() */
+       if (unlikely(rec->counter == 0)) {
+               ret = -EBUSY;
+               goto out;
+       }
 
        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
        seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
@@ -408,7 +415,6 @@ static int function_stat_show(struct seq_file *m, void *v)
                do_div(stddev, (rec->counter - 1) * 1000);
        }
 
-       mutex_lock(&mutex);
        trace_seq_init(&s);
        trace_print_graph_duration(rec->time, &s);
        trace_seq_puts(&s, "    ");
@@ -416,11 +422,12 @@ static int function_stat_show(struct seq_file *m, void *v)
        trace_seq_puts(&s, "    ");
        trace_print_graph_duration(stddev, &s);
        trace_print_seq(m, &s);
-       mutex_unlock(&mutex);
 #endif
        seq_putc(m, '\n');
+out:
+       mutex_unlock(&ftrace_profile_lock);
 
-       return 0;
+       return ret;
 }
 
 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
index 19cccc3c302871beae5fd39ad937b0791a2e785d..492197e2f86cda2792603186b59ad3fdd17c448d 100644 (file)
@@ -2985,13 +2985,11 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
 
 static void rb_advance_iter(struct ring_buffer_iter *iter)
 {
-       struct ring_buffer *buffer;
        struct ring_buffer_per_cpu *cpu_buffer;
        struct ring_buffer_event *event;
        unsigned length;
 
        cpu_buffer = iter->cpu_buffer;
-       buffer = cpu_buffer->buffer;
 
        /*
         * Check if we are at the end of the buffer.
index 056468eae7cfce7a76bf5c2364f71c722adeb325..a6b7e0e0f3eb092aac06e8bc017dcf5ea80105af 100644 (file)
@@ -249,7 +249,7 @@ static int trace_lookup_stack(struct seq_file *m, long i)
 {
        unsigned long addr = stack_dump_trace[i];
 
-       return seq_printf(m, "%pF\n", (void *)addr);
+       return seq_printf(m, "%pS\n", (void *)addr);
 }
 
 static void print_disabled(struct seq_file *m)
index 613bc1f046108ec52b8f7372fe2e7e66aa8fe4c7..7f9c3c52ecc12ef5d0de1728839218ff87c2dc7b 100644 (file)
@@ -122,7 +122,7 @@ static void __touch_watchdog(void)
 
 void touch_softlockup_watchdog(void)
 {
-       __get_cpu_var(watchdog_touch_ts) = 0;
+       __raw_get_cpu_var(watchdog_touch_ts) = 0;
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
@@ -142,7 +142,14 @@ void touch_all_softlockup_watchdogs(void)
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
 void touch_nmi_watchdog(void)
 {
-       __get_cpu_var(watchdog_nmi_touch) = true;
+       if (watchdog_enabled) {
+               unsigned cpu;
+
+               for_each_present_cpu(cpu) {
+                       if (per_cpu(watchdog_nmi_touch, cpu) != true)
+                               per_cpu(watchdog_nmi_touch, cpu) = true;
+               }
+       }
        touch_softlockup_watchdog();
 }
 EXPORT_SYMBOL(touch_nmi_watchdog);
@@ -206,6 +213,9 @@ void watchdog_overflow_callback(struct perf_event *event, int nmi,
                 struct perf_sample_data *data,
                 struct pt_regs *regs)
 {
+       /* Ensure the watchdog never gets throttled */
+       event->hw.interrupts = 0;
+
        if (__get_cpu_var(watchdog_nmi_touch) == true) {
                __get_cpu_var(watchdog_nmi_touch) = false;
                return;
@@ -430,6 +440,9 @@ static int watchdog_enable(int cpu)
                wake_up_process(p);
        }
 
+       /* if any cpu succeeds, watchdog is considered enabled for the system */
+       watchdog_enabled = 1;
+
        return 0;
 }
 
@@ -452,9 +465,6 @@ static void watchdog_disable(int cpu)
                per_cpu(softlockup_watchdog, cpu) = NULL;
                kthread_stop(p);
        }
-
-       /* if any cpu succeeds, watchdog is considered enabled for the system */
-       watchdog_enabled = 1;
 }
 
 static void watchdog_enable_all_cpus(void)
index 8bd600c020e5cdf5f2454681bc2e89fcc99c47d0..727f24e563aef326b8eba951d2a31a9aa864d32b 100644 (file)
@@ -90,7 +90,8 @@ enum {
 /*
  * Structure fields follow one of the following exclusion rules.
  *
- * I: Set during initialization and read-only afterwards.
+ * I: Modifiable by initialization/destruction paths and read-only for
+ *    everyone else.
  *
  * P: Preemption protected.  Disabling preemption is enough and should
  *    only be modified and accessed from the local cpu.
@@ -198,7 +199,7 @@ typedef cpumask_var_t mayday_mask_t;
        cpumask_test_and_set_cpu((cpu), (mask))
 #define mayday_clear_cpu(cpu, mask)            cpumask_clear_cpu((cpu), (mask))
 #define for_each_mayday_cpu(cpu, mask)         for_each_cpu((cpu), (mask))
-#define alloc_mayday_mask(maskp, gfp)          alloc_cpumask_var((maskp), (gfp))
+#define alloc_mayday_mask(maskp, gfp)          zalloc_cpumask_var((maskp), (gfp))
 #define free_mayday_mask(mask)                 free_cpumask_var((mask))
 #else
 typedef unsigned long mayday_mask_t;
@@ -943,10 +944,14 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
        struct global_cwq *gcwq;
        struct cpu_workqueue_struct *cwq;
        struct list_head *worklist;
+       unsigned int work_flags;
        unsigned long flags;
 
        debug_work_activate(work);
 
+       if (WARN_ON_ONCE(wq->flags & WQ_DYING))
+               return;
+
        /* determine gcwq to use */
        if (!(wq->flags & WQ_UNBOUND)) {
                struct global_cwq *last_gcwq;
@@ -989,14 +994,17 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
        BUG_ON(!list_empty(&work->entry));
 
        cwq->nr_in_flight[cwq->work_color]++;
+       work_flags = work_color_to_flags(cwq->work_color);
 
        if (likely(cwq->nr_active < cwq->max_active)) {
                cwq->nr_active++;
                worklist = gcwq_determine_ins_pos(gcwq, cwq);
-       } else
+       } else {
+               work_flags |= WORK_STRUCT_DELAYED;
                worklist = &cwq->delayed_works;
+       }
 
-       insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color));
+       insert_work(cwq, work, worklist, work_flags);
 
        spin_unlock_irqrestore(&gcwq->lock, flags);
 }
@@ -1215,6 +1223,7 @@ static void worker_leave_idle(struct worker *worker)
  * bound), %false if offline.
  */
 static bool worker_maybe_bind_and_lock(struct worker *worker)
+__acquires(&gcwq->lock)
 {
        struct global_cwq *gcwq = worker->gcwq;
        struct task_struct *task = worker->task;
@@ -1488,6 +1497,8 @@ static void gcwq_mayday_timeout(unsigned long __gcwq)
  * otherwise.
  */
 static bool maybe_create_worker(struct global_cwq *gcwq)
+__releases(&gcwq->lock)
+__acquires(&gcwq->lock)
 {
        if (!need_to_create_worker(gcwq))
                return false;
@@ -1662,6 +1673,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
        struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
 
        move_linked_works(work, pos, NULL);
+       __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
        cwq->nr_active++;
 }
 
@@ -1669,6 +1681,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
  * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
  * @cwq: cwq of interest
  * @color: color of work which left the queue
+ * @delayed: for a delayed work
  *
  * A work either has completed or is removed from pending queue,
  * decrement nr_in_flight of its cwq and handle workqueue flushing.
@@ -1676,19 +1689,22 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
  * CONTEXT:
  * spin_lock_irq(gcwq->lock).
  */
-static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
+static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
+                                bool delayed)
 {
        /* ignore uncolored works */
        if (color == WORK_NO_COLOR)
                return;
 
        cwq->nr_in_flight[color]--;
-       cwq->nr_active--;
 
-       if (!list_empty(&cwq->delayed_works)) {
-               /* one down, submit a delayed one */
-               if (cwq->nr_active < cwq->max_active)
-                       cwq_activate_first_delayed(cwq);
+       if (!delayed) {
+               cwq->nr_active--;
+               if (!list_empty(&cwq->delayed_works)) {
+                       /* one down, submit a delayed one */
+                       if (cwq->nr_active < cwq->max_active)
+                               cwq_activate_first_delayed(cwq);
+               }
        }
 
        /* is flush in progress and are we at the flushing tip? */
@@ -1725,6 +1741,8 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
  * spin_lock_irq(gcwq->lock) which is released and regrabbed.
  */
 static void process_one_work(struct worker *worker, struct work_struct *work)
+__releases(&gcwq->lock)
+__acquires(&gcwq->lock)
 {
        struct cpu_workqueue_struct *cwq = get_work_cwq(work);
        struct global_cwq *gcwq = cwq->gcwq;
@@ -1823,7 +1841,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
        hlist_del_init(&worker->hentry);
        worker->current_work = NULL;
        worker->current_cwq = NULL;
-       cwq_dec_nr_in_flight(cwq, work_color);
+       cwq_dec_nr_in_flight(cwq, work_color, false);
 }
 
 /**
@@ -2388,7 +2406,8 @@ static int try_to_grab_pending(struct work_struct *work)
                        debug_work_deactivate(work);
                        list_del_init(&work->entry);
                        cwq_dec_nr_in_flight(get_work_cwq(work),
-                                            get_work_color(work));
+                               get_work_color(work),
+                               *work_data_bits(work) & WORK_STRUCT_DELAYED);
                        ret = 1;
                }
        }
@@ -2791,7 +2810,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
                if (IS_ERR(rescuer->task))
                        goto err;
 
-               wq->rescuer = rescuer;
                rescuer->task->flags |= PF_THREAD_BOUND;
                wake_up_process(rescuer->task);
        }
@@ -2833,6 +2851,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 {
        unsigned int cpu;
 
+       wq->flags |= WQ_DYING;
        flush_workqueue(wq);
 
        /*
@@ -2857,6 +2876,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
        if (wq->flags & WQ_RESCUER) {
                kthread_stop(wq->rescuer->task);
                free_mayday_mask(wq->mayday_mask);
+               kfree(wq->rescuer);
        }
 
        free_cwqs(wq);
@@ -3239,6 +3259,8 @@ static int __cpuinit trustee_thread(void *__gcwq)
  * multiple times.  To be used by cpu_callback.
  */
 static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
+__releases(&gcwq->lock)
+__acquires(&gcwq->lock)
 {
        if (!(gcwq->trustee_state == state ||
              gcwq->trustee_state == TRUSTEE_DONE)) {
@@ -3545,8 +3567,7 @@ static int __init init_workqueues(void)
                spin_lock_init(&gcwq->lock);
                INIT_LIST_HEAD(&gcwq->worklist);
                gcwq->cpu = cpu;
-               if (cpu == WORK_CPU_UNBOUND)
-                       gcwq->flags |= GCWQ_DISASSOCIATED;
+               gcwq->flags |= GCWQ_DISASSOCIATED;
 
                INIT_LIST_HEAD(&gcwq->idle_list);
                for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
@@ -3570,6 +3591,8 @@ static int __init init_workqueues(void)
                struct global_cwq *gcwq = get_gcwq(cpu);
                struct worker *worker;
 
+               if (cpu != WORK_CPU_UNBOUND)
+                       gcwq->flags &= ~GCWQ_DISASSOCIATED;
                worker = create_worker(gcwq, true);
                BUG_ON(!worker);
                spin_lock_irq(&gcwq->lock);
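
The workqueue hunks above do two things: __queue_work() now refuses work once destroy_workqueue() has marked the queue WQ_DYING, and cwq_dec_nr_in_flight() learns a "delayed" flag so that a work item removed while still parked on the delayed list does not give back an nr_active slot it never held. A rough userspace sketch of the max_active/delayed accounting only (illustrative, not the kernel's locking or data structures):

#include <stdio.h>

#define MAX_ACTIVE 2

struct toy_cwq {
        int nr_active;          /* items currently holding an active slot */
        int nr_delayed;         /* items parked past the max_active limit */
};

static void toy_queue(struct toy_cwq *cwq)
{
        if (cwq->nr_active < MAX_ACTIVE)
                cwq->nr_active++;               /* runs immediately */
        else
                cwq->nr_delayed++;              /* marked delayed, parked */
}

static void toy_remove(struct toy_cwq *cwq, int was_delayed)
{
        if (was_delayed)
                return;                         /* never held an active slot */
        cwq->nr_active--;
        if (cwq->nr_delayed > 0 && cwq->nr_active < MAX_ACTIVE) {
                cwq->nr_delayed--;              /* promote one delayed item */
                cwq->nr_active++;
        }
}

int main(void)
{
        struct toy_cwq cwq = { 0, 0 };
        int i;

        for (i = 0; i < 4; i++)
                toy_queue(&cwq);
        printf("active=%d delayed=%d\n", cwq.nr_active, cwq.nr_delayed);
        toy_remove(&cwq, 0);                    /* an active item completes */
        printf("active=%d delayed=%d\n", cwq.nr_active, cwq.nr_delayed);
        return 0;
}
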
index b93579504dfaaafee092156c113b36130b0758a7..70af0a7f97c0eb4801e177458d182ab6baad2767 100644 (file)
@@ -123,7 +123,7 @@ static int kobj_usermode_filter(struct kobject *kobj)
  * @kobj: struct kobject that the action is happening to
  * @envp_ext: pointer to environmental data
  *
- * Returns 0 if kobject_uevent() is completed with success or the
+ * Returns 0 if kobject_uevent_env() is completed with success or the
  * corresponding error when it fails.
  */
 int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
@@ -317,7 +317,7 @@ exit:
 EXPORT_SYMBOL_GPL(kobject_uevent_env);
 
 /**
- * kobject_uevent - notify userspace by ending an uevent
+ * kobject_uevent - notify userspace by sending an uevent
  *
  * @action: action that is happening
  * @kobj: struct kobject that the action is happening to
diff --git a/lib/raid6/.gitignore b/lib/raid6/.gitignore
new file mode 100644 (file)
index 0000000..162beca
--- /dev/null
@@ -0,0 +1,4 @@
+mktables
+altivec*.c
+int*.c
+tables.c
index f4e516e9c37cc4c62f93faf92131a632098d2ca5..f0fb9124e410c436c0f240d69f089f4935af2e92 100644 (file)
@@ -189,7 +189,7 @@ config COMPACTION
 config MIGRATION
        bool "Page migration"
        def_bool y
-       depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE
+       depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION
        help
          Allows the migration of the physical location of pages of processes
          while the virtual addresses are not changed. This is useful in
index 13b6dad1eed272bec61a388d17f116be62cb1bb5..1481de68184bce6d8fae978d3b6be8e3223319a5 100644 (file)
@@ -116,8 +116,8 @@ static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
                 */
                vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;
 
-               flush_dcache_page(tovec->bv_page);
                bounce_copy_vec(tovec, vfrom);
+               flush_dcache_page(tovec->bv_page);
        }
 }
 
index 94cce51b0b3535af75c20f29ecb86a11aba32a71..4d709ee5901370842534224a9f81e7d13943e196 100644 (file)
@@ -214,15 +214,16 @@ static void acct_isolated(struct zone *zone, struct compact_control *cc)
 /* Similar to reclaim, but different enough that they don't share logic */
 static bool too_many_isolated(struct zone *zone)
 {
-
-       unsigned long inactive, isolated;
+       unsigned long active, inactive, isolated;
 
        inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
                                        zone_page_state(zone, NR_INACTIVE_ANON);
+       active = zone_page_state(zone, NR_ACTIVE_FILE) +
+                                       zone_page_state(zone, NR_ACTIVE_ANON);
        isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
                                        zone_page_state(zone, NR_ISOLATED_ANON);
 
-       return isolated > inactive;
+       return isolated > (inactive + active) / 2;
 }
 
 /*
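
The too_many_isolated() change above relaxes compaction's throttle: instead of backing off as soon as isolated pages outnumber the inactive list, it backs off only when isolation exceeds half of the combined active plus inactive LRU, i.e. throttle when isolated > (inactive + active) / 2. The arithmetic in isolation (numbers below are arbitrary):

#include <stdbool.h>
#include <stdio.h>

/* Back off only when isolation exceeds half of the whole LRU. */
static bool toy_too_many_isolated(unsigned long active, unsigned long inactive,
                                  unsigned long isolated)
{
        return isolated > (inactive + active) / 2;
}

int main(void)
{
        /*
         * Small inactive list, large active list: the old "isolated >
         * inactive" test would throttle here, the new one does not.
         */
        printf("throttle: %d\n", toy_too_many_isolated(10000, 100, 500));
        return 0;
}
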
index e2ae00458320786a380a1ab370efe0dc6cfd6e1a..b1873cf03ed986bcb062259da8f1c4093a97160b 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1504,8 +1504,6 @@ struct page *ksm_does_need_to_copy(struct page *page,
 {
        struct page *new_page;
 
-       unlock_page(page);      /* any racers will COW it, not modify it */
-
        new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
        if (new_page) {
                copy_user_highpage(new_page, page, address, vma);
@@ -1521,7 +1519,6 @@ struct page *ksm_does_need_to_copy(struct page *page,
                        add_page_to_unevictable_list(new_page);
        }
 
-       page_cache_release(page);
        return new_page;
 }
 
index 2ed2267439df0f894011ab62adef6d7efe68db9a..71b161b73bb503be50556e9ff302ca0c7eaba396 100644 (file)
@@ -2623,7 +2623,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                unsigned int flags, pte_t orig_pte)
 {
        spinlock_t *ptl;
-       struct page *page;
+       struct page *page, *swapcache = NULL;
        swp_entry_t entry;
        pte_t pte;
        struct mem_cgroup *ptr = NULL;
@@ -2679,10 +2679,23 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        lock_page(page);
        delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 
-       page = ksm_might_need_to_copy(page, vma, address);
-       if (!page) {
-               ret = VM_FAULT_OOM;
-               goto out;
+       /*
+        * Make sure try_to_free_swap didn't release the swapcache
+        * from under us. The page pin isn't enough to prevent that.
+        */
+       if (unlikely(!PageSwapCache(page)))
+               goto out_page;
+
+       if (ksm_might_need_to_copy(page, vma, address)) {
+               swapcache = page;
+               page = ksm_does_need_to_copy(page, vma, address);
+
+               if (unlikely(!page)) {
+                       ret = VM_FAULT_OOM;
+                       page = swapcache;
+                       swapcache = NULL;
+                       goto out_page;
+               }
        }
 
        if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
@@ -2735,6 +2748,18 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
                try_to_free_swap(page);
        unlock_page(page);
+       if (swapcache) {
+               /*
+                * Hold the lock to avoid the swap entry to be reused
+                * until we take the PT lock for the pte_same() check
+                * (to avoid false positives from pte_same). For
+                * further safety release the lock after the swap_free
+                * so that the swap count won't change under a
+                * parallel locked swapcache.
+                */
+               unlock_page(swapcache);
+               page_cache_release(swapcache);
+       }
 
        if (flags & FAULT_FLAG_WRITE) {
                ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
@@ -2756,15 +2781,17 @@ out_page:
        unlock_page(page);
 out_release:
        page_cache_release(page);
+       if (swapcache) {
+               unlock_page(swapcache);
+               page_cache_release(swapcache);
+       }
        return ret;
 }
 
 /*
- * This is like a special single-page "expand_downwards()",
- * except we must first make sure that 'address-PAGE_SIZE'
+ * This is like a special single-page "expand_{down|up}wards()",
+ * except we must first make sure that 'address{-|+}PAGE_SIZE'
  * doesn't hit another vma.
- *
- * The "find_vma()" will do the right thing even if we wrap
  */
 static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
 {
@@ -2783,6 +2810,15 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
 
                expand_stack(vma, address - PAGE_SIZE);
        }
+       if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
+               struct vm_area_struct *next = vma->vm_next;
+
+               /* As VM_GROWSDOWN but s/below/above/ */
+               if (next && next->vm_start == address + PAGE_SIZE)
+                       return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
+
+               expand_upwards(vma, address + PAGE_SIZE);
+       }
        return 0;
 }
 
index a4cfcdc00455de4be15fcec98c76e45f8de5feab..dd186c1a5d53f9ebd27de4c1bedcaac471d6c93e 100644 (file)
@@ -584,19 +584,19 @@ static inline int pageblock_free(struct page *page)
 /* Return the start of the next active pageblock after a given page */
 static struct page *next_active_pageblock(struct page *page)
 {
-       int pageblocks_stride;
-
        /* Ensure the starting page is pageblock-aligned */
        BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
 
-       /* Move forward by at least 1 * pageblock_nr_pages */
-       pageblocks_stride = 1;
-
        /* If the entire pageblock is free, move to the end of free page */
-       if (pageblock_free(page))
-               pageblocks_stride += page_order(page) - pageblock_order;
+       if (pageblock_free(page)) {
+               int order;
+               /* be careful. we don't have locks, page_order can be changed.*/
+               order = page_order(page);
+               if ((order < MAX_ORDER) && (order >= pageblock_order))
+                       return page + (1 << order);
+       }
 
-       return page + (pageblocks_stride * pageblock_nr_pages);
+       return page + pageblock_nr_pages;
 }
 
 /* Checks if this range of memory is likely to be hot-removable. */
index cbae7c5b95680a1bfca1df7e11a215bfce15b57c..b70919ce4f72e6941f67b1a5462f5f270c231536 100644 (file)
@@ -135,12 +135,6 @@ void munlock_vma_page(struct page *page)
        }
 }
 
-/* Is the vma a continuation of the stack vma above it? */
-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
-{
-       return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
-}
-
 static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
 {
        return (vma->vm_flags & VM_GROWSDOWN) &&
index 331e51af38c9c950c8295fa125c67d00ba540025..6128dc8e5ede709cada129438fbac101895aa09d 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1716,9 +1716,6 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
  * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
  * vma is the last one with address > vma->vm_end.  Have to extend vma.
  */
-#ifndef CONFIG_IA64
-static
-#endif
 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
        int error;
index f5b7d1760213e53db3c46e84dde56daf219ea0cd..e35bfb82c8555b7377334dbea42bfcf588b0bab8 100644 (file)
@@ -87,3 +87,24 @@ int memmap_valid_within(unsigned long pfn,
        return 1;
 }
 #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
+
+#ifdef CONFIG_SMP
+/* Called when a more accurate view of NR_FREE_PAGES is needed */
+unsigned long zone_nr_free_pages(struct zone *zone)
+{
+       unsigned long nr_free_pages = zone_page_state(zone, NR_FREE_PAGES);
+
+       /*
+        * While kswapd is awake, the zone is considered to be under some
+        * memory pressure. Under pressure, there is a risk that
+        * per-cpu-counter-drift will allow the min watermark to be breached
+        * potentially causing a live-lock. While kswapd is awake and
+        * free pages are low, get a better estimate for free pages
+        */
+       if (nr_free_pages < zone->percpu_drift_mark &&
+                       !waitqueue_active(&zone->zone_pgdat->kswapd_wait))
+               return zone_page_state_snapshot(zone, NR_FREE_PAGES);
+
+       return nr_free_pages;
+}
+#endif /* CONFIG_SMP */
index c09ef5219cbe36f267a37f55d6fc670815082522..e3bccac1f0255bb78373e6268aab787a370ee520 100644 (file)
@@ -985,22 +985,16 @@ continue_unlock:
                                }
                        }
 
-                       if (wbc->nr_to_write > 0) {
-                               if (--wbc->nr_to_write == 0 &&
-                                   wbc->sync_mode == WB_SYNC_NONE) {
-                                       /*
-                                        * We stop writing back only if we are
-                                        * not doing integrity sync. In case of
-                                        * integrity sync we have to keep going
-                                        * because someone may be concurrently
-                                        * dirtying pages, and we might have
-                                        * synced a lot of newly appeared dirty
-                                        * pages, but have not synced all of the
-                                        * old dirty pages.
-                                        */
-                                       done = 1;
-                                       break;
-                               }
+                       /*
+                        * We stop writing back only if we are not doing
+                        * integrity sync. In case of integrity sync we have to
+                        * keep going until we have written all the pages
+                        * we tagged for writeback prior to entering this loop.
+                        */
+                       if (--wbc->nr_to_write <= 0 &&
+                           wbc->sync_mode == WB_SYNC_NONE) {
+                               done = 1;
+                               break;
                        }
                }
                pagevec_release(&pvec);
@@ -1132,6 +1126,7 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
                task_io_account_write(PAGE_CACHE_SIZE);
        }
 }
+EXPORT_SYMBOL(account_page_dirtied);
 
 /*
  * For address_spaces which do not use buffers.  Just tag the page as dirty in
index a9649f4b261e6b3c01632939c46a77f19f447de1..a8cfa9cc6e86e5d6912a39bc3f5c9d18fb97b7cf 100644 (file)
@@ -588,13 +588,13 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 {
        int migratetype = 0;
        int batch_free = 0;
+       int to_free = count;
 
        spin_lock(&zone->lock);
        zone->all_unreclaimable = 0;
        zone->pages_scanned = 0;
 
-       __mod_zone_page_state(zone, NR_FREE_PAGES, count);
-       while (count) {
+       while (to_free) {
                struct page *page;
                struct list_head *list;
 
@@ -619,8 +619,9 @@ static void free_pcppages_bulk(struct zone *zone, int count,
                        /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
                        __free_one_page(page, zone, 0, page_private(page));
                        trace_mm_page_pcpu_drain(page, 0, page_private(page));
-               } while (--count && --batch_free && !list_empty(list));
+               } while (--to_free && --batch_free && !list_empty(list));
        }
+       __mod_zone_page_state(zone, NR_FREE_PAGES, count);
        spin_unlock(&zone->lock);
 }
 
@@ -631,8 +632,8 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
        zone->all_unreclaimable = 0;
        zone->pages_scanned = 0;
 
-       __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
        __free_one_page(page, zone, order, migratetype);
+       __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
        spin_unlock(&zone->lock);
 }
 
@@ -1461,7 +1462,7 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 {
        /* free_pages my go negative - that's OK */
        long min = mark;
-       long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
+       long free_pages = zone_nr_free_pages(z) - (1 << order) + 1;
        int o;
 
        if (alloc_flags & ALLOC_HIGH)
@@ -1846,6 +1847,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
        struct page *page = NULL;
        struct reclaim_state reclaim_state;
        struct task_struct *p = current;
+       bool drained = false;
 
        cond_resched();
 
@@ -1864,14 +1866,25 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 
        cond_resched();
 
-       if (order != 0)
-               drain_all_pages();
+       if (unlikely(!(*did_some_progress)))
+               return NULL;
 
-       if (likely(*did_some_progress))
-               page = get_page_from_freelist(gfp_mask, nodemask, order,
+retry:
+       page = get_page_from_freelist(gfp_mask, nodemask, order,
                                        zonelist, high_zoneidx,
                                        alloc_flags, preferred_zone,
                                        migratetype);
+
+       /*
+        * If an allocation failed after direct reclaim, it could be because
+        * pages are pinned on the per-cpu lists. Drain them and try again
+        */
+       if (!page && !drained) {
+               drain_all_pages();
+               drained = true;
+               goto retry;
+       }
+
        return page;
 }
 
@@ -2423,7 +2436,7 @@ void show_free_areas(void)
                        " all_unreclaimable? %s"
                        "\n",
                        zone->name,
-                       K(zone_page_state(zone, NR_FREE_PAGES)),
+                       K(zone_nr_free_pages(zone)),
                        K(min_wmark_pages(zone)),
                        K(low_wmark_pages(zone)),
                        K(high_wmark_pages(zone)),
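
The __alloc_pages_direct_reclaim() hunk above turns the unconditional order > 0 drain into a retry path: try the allocation first, and only if it fails drain the per-cpu page lists once and try again, since pages freed by reclaim may still be sitting on those lists. The control flow reduced to a self-contained sketch (the allocator and drain below are stand-ins):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the page allocator internals; purely illustrative. */
static int stuck_on_percpu = 1;         /* pretend the free page is stuck */

static void *try_alloc(void)
{
        return stuck_on_percpu ? NULL : malloc(64);
}

static void drain_percpu_lists(void)
{
        stuck_on_percpu = 0;            /* pretend the drain released it */
}

/* Shape of the new logic: allocate first, drain once only on failure. */
static void *alloc_after_reclaim(bool made_progress)
{
        void *page;
        bool drained = false;

        if (!made_progress)
                return NULL;
retry:
        page = try_alloc();
        if (!page && !drained) {
                drain_percpu_lists();
                drained = true;
                goto retry;
        }
        return page;
}

int main(void)
{
        void *page = alloc_after_reclaim(true);

        printf("%s\n", page ? "allocated after drain" : "failed");
        free(page);
        return 0;
}
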
index e61dc2cc5873d7d89efc45609a4f4f2a8ae75732..58c572b18b07ffbca4e2120d4e1600705db5fc0e 100644 (file)
@@ -393,7 +393,9 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
                goto out_unlock;
 
        old_size = chunk->map_alloc * sizeof(chunk->map[0]);
-       memcpy(new, chunk->map, old_size);
+       old = chunk->map;
+
+       memcpy(new, old, old_size);
 
        chunk->map_alloc = new_alloc;
        chunk->map = new;
@@ -1162,7 +1164,7 @@ static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
                }
 
                /*
-                * Don't accept if wastage is over 25%.  The
+                * Don't accept if wastage is over 1/3.  The
                 * greater-than comparison ensures upa==1 always
                 * passes the following check.
                 */
index c4351c7f57d21db63487d0c06728a64d0c0f1318..db884fae5721749e57990d9f9ac33398efb1ca37 100644 (file)
@@ -14,13 +14,13 @@ void __percpu *__alloc_percpu(size_t size, size_t align)
         * percpu sections on SMP for which this path isn't used.
         */
        WARN_ON_ONCE(align > SMP_CACHE_BYTES);
-       return kzalloc(size, GFP_KERNEL);
+       return (void __percpu __force *)kzalloc(size, GFP_KERNEL);
 }
 EXPORT_SYMBOL_GPL(__alloc_percpu);
 
 void free_percpu(void __percpu *p)
 {
-       kfree(p);
+       kfree(this_cpu_ptr(p));
 }
 EXPORT_SYMBOL_GPL(free_percpu);
 
index 87b9e8ad450962afa1159b763f0aa0a977ab9a88..f6f0d2dda2eae8480860cf57f5a9cfce69820716 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -316,7 +316,7 @@ void __init anon_vma_init(void)
  */
 struct anon_vma *page_lock_anon_vma(struct page *page)
 {
-       struct anon_vma *anon_vma;
+       struct anon_vma *anon_vma, *root_anon_vma;
        unsigned long anon_mapping;
 
        rcu_read_lock();
@@ -327,8 +327,21 @@ struct anon_vma *page_lock_anon_vma(struct page *page)
                goto out;
 
        anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
-       anon_vma_lock(anon_vma);
-       return anon_vma;
+       root_anon_vma = ACCESS_ONCE(anon_vma->root);
+       spin_lock(&root_anon_vma->lock);
+
+       /*
+        * If this page is still mapped, then its anon_vma cannot have been
+        * freed.  But if it has been unmapped, we have no security against
+        * the anon_vma structure being freed and reused (for another anon_vma:
+        * SLAB_DESTROY_BY_RCU guarantees that - so the spin_lock above cannot
+        * corrupt): with anon_vma_prepare() or anon_vma_fork() redirecting
+        * anon_vma->root before page_unlock_anon_vma() is called to unlock.
+        */
+       if (page_mapped(page))
+               return anon_vma;
+
+       spin_unlock(&root_anon_vma->lock);
 out:
        rcu_read_unlock();
        return NULL;
index 1f3f9c59a73ab5be4ff4bb37f428364df7544706..7c703ff2f36f0b760b79eb36149084f07621a0a1 100644 (file)
@@ -47,8 +47,6 @@ long nr_swap_pages;
 long total_swap_pages;
 static int least_priority;
 
-static bool swap_for_hibernation;
-
 static const char Bad_file[] = "Bad swap file entry ";
 static const char Unused_file[] = "Unused swap file entry ";
 static const char Bad_offset[] = "Bad swap offset entry ";
@@ -141,8 +139,7 @@ static int discard_swap(struct swap_info_struct *si)
        nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
        if (nr_blocks) {
                err = blkdev_issue_discard(si->bdev, start_block,
-                               nr_blocks, GFP_KERNEL,
-                               BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
+                               nr_blocks, GFP_KERNEL, BLKDEV_IFL_WAIT);
                if (err)
                        return err;
                cond_resched();
@@ -153,8 +150,7 @@ static int discard_swap(struct swap_info_struct *si)
                nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
 
                err = blkdev_issue_discard(si->bdev, start_block,
-                               nr_blocks, GFP_KERNEL,
-                               BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
+                               nr_blocks, GFP_KERNEL, BLKDEV_IFL_WAIT);
                if (err)
                        break;
 
@@ -193,8 +189,7 @@ static void discard_swap_cluster(struct swap_info_struct *si,
                        start_block <<= PAGE_SHIFT - 9;
                        nr_blocks <<= PAGE_SHIFT - 9;
                        if (blkdev_issue_discard(si->bdev, start_block,
-                                   nr_blocks, GFP_NOIO, BLKDEV_IFL_WAIT |
-                                                       BLKDEV_IFL_BARRIER))
+                                   nr_blocks, GFP_NOIO, BLKDEV_IFL_WAIT))
                                break;
                }
 
@@ -320,10 +315,8 @@ checks:
        if (offset > si->highest_bit)
                scan_base = offset = si->lowest_bit;
 
-       /* reuse swap entry of cache-only swap if not hibernation. */
-       if (vm_swap_full()
-               && usage == SWAP_HAS_CACHE
-               && si->swap_map[offset] == SWAP_HAS_CACHE) {
+       /* reuse swap entry of cache-only swap if not busy. */
+       if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
                int swap_was_freed;
                spin_unlock(&swap_lock);
                swap_was_freed = __try_to_reclaim_swap(si, offset);
@@ -453,8 +446,6 @@ swp_entry_t get_swap_page(void)
        spin_lock(&swap_lock);
        if (nr_swap_pages <= 0)
                goto noswap;
-       if (swap_for_hibernation)
-               goto noswap;
        nr_swap_pages--;
 
        for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
@@ -487,6 +478,28 @@ noswap:
        return (swp_entry_t) {0};
 }
 
+/* The only caller of this function is now the suspend routine */
+swp_entry_t get_swap_page_of_type(int type)
+{
+       struct swap_info_struct *si;
+       pgoff_t offset;
+
+       spin_lock(&swap_lock);
+       si = swap_info[type];
+       if (si && (si->flags & SWP_WRITEOK)) {
+               nr_swap_pages--;
+               /* This is called for allocating swap entry, not cache */
+               offset = scan_swap_map(si, 1);
+               if (offset) {
+                       spin_unlock(&swap_lock);
+                       return swp_entry(type, offset);
+               }
+               nr_swap_pages++;
+       }
+       spin_unlock(&swap_lock);
+       return (swp_entry_t) {0};
+}
+
 static struct swap_info_struct *swap_info_get(swp_entry_t entry)
 {
        struct swap_info_struct *p;
@@ -670,6 +683,24 @@ int try_to_free_swap(struct page *page)
        if (page_swapcount(page))
                return 0;
 
+       /*
+        * Once hibernation has begun to create its image of memory,
+        * there's a danger that one of the calls to try_to_free_swap()
+        * - most probably a call from __try_to_reclaim_swap() while
+        * hibernation is allocating its own swap pages for the image,
+        * but conceivably even a call from memory reclaim - will free
+        * the swap from a page which has already been recorded in the
+        * image as a clean swapcache page, and then reuse its swap for
+        * another page of the image.  On waking from hibernation, the
+        * original page might be freed under memory pressure, then
+        * later read back in from swap, now with the wrong data.
+        *
+        * Hibernation clears bits from gfp_allowed_mask to prevent
+        * memory reclaim from writing to disk, so check that here.
+        */
+       if (!(gfp_allowed_mask & __GFP_IO))
+               return 0;
+
        delete_from_swap_cache(page);
        SetPageDirty(page);
        return 1;
@@ -746,74 +777,6 @@ int mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep)
 #endif
 
 #ifdef CONFIG_HIBERNATION
-
-static pgoff_t hibernation_offset[MAX_SWAPFILES];
-/*
- * Once hibernation starts to use swap, we freeze swap_map[]. Otherwise,
- * saved swap_map[] image to the disk will be an incomplete because it's
- * changing without synchronization with hibernation snap shot.
- * At resume, we just make swap_for_hibernation=false. We can forget
- * used maps easily.
- */
-void hibernation_freeze_swap(void)
-{
-       int i;
-
-       spin_lock(&swap_lock);
-
-       printk(KERN_INFO "PM: Freeze Swap\n");
-       swap_for_hibernation = true;
-       for (i = 0; i < MAX_SWAPFILES; i++)
-               hibernation_offset[i] = 1;
-       spin_unlock(&swap_lock);
-}
-
-void hibernation_thaw_swap(void)
-{
-       spin_lock(&swap_lock);
-       if (swap_for_hibernation) {
-               printk(KERN_INFO "PM: Thaw Swap\n");
-               swap_for_hibernation = false;
-       }
-       spin_unlock(&swap_lock);
-}
-
-/*
- * Because updateing swap_map[] can make not-saved-status-change,
- * we use our own easy allocator.
- * Please see kernel/power/swap.c, Used swaps are recorded into
- * RB-tree.
- */
-swp_entry_t get_swap_for_hibernation(int type)
-{
-       pgoff_t off;
-       swp_entry_t val = {0};
-       struct swap_info_struct *si;
-
-       spin_lock(&swap_lock);
-
-       si = swap_info[type];
-       if (!si || !(si->flags & SWP_WRITEOK))
-               goto done;
-
-       for (off = hibernation_offset[type]; off < si->max; ++off) {
-               if (!si->swap_map[off])
-                       break;
-       }
-       if (off < si->max) {
-               val = swp_entry(type, off);
-               hibernation_offset[type] = off + 1;
-       }
-done:
-       spin_unlock(&swap_lock);
-       return val;
-}
-
-void swap_free_for_hibernation(swp_entry_t ent)
-{
-       /* Nothing to do */
-}
-
 /*
  * Find the swap type that corresponds to given device (if any).
  *
@@ -2084,7 +2047,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
                        p->flags |= SWP_SOLIDSTATE;
                        p->cluster_next = 1 + (random32() % p->highest_bit);
                }
-               if (discard_swap(p) == 0)
+               if (discard_swap(p) == 0 && (swap_flags & SWAP_FLAG_DISCARD))
                        p->flags |= SWP_DISCARDABLE;
        }
 
index f389168f9a837b9c6be4e1f9bb3d0892396315de..355a9e669aaa800d62fa31d2b83110bf76cce9d7 100644 (file)
@@ -138,11 +138,24 @@ static void refresh_zone_stat_thresholds(void)
        int threshold;
 
        for_each_populated_zone(zone) {
+               unsigned long max_drift, tolerate_drift;
+
                threshold = calculate_threshold(zone);
 
                for_each_online_cpu(cpu)
                        per_cpu_ptr(zone->pageset, cpu)->stat_threshold
                                                        = threshold;
+
+               /*
+                * Only set percpu_drift_mark if there is a danger that
+                * NR_FREE_PAGES reports the low watermark is ok when in fact
+                * the min watermark could be breached by an allocation
+                */
+               tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
+               max_drift = num_online_cpus() * threshold;
+               if (max_drift > tolerate_drift)
+                       zone->percpu_drift_mark = high_wmark_pages(zone) +
+                                       max_drift;
        }
 }
 
@@ -813,7 +826,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
                   "\n        scanned  %lu"
                   "\n        spanned  %lu"
                   "\n        present  %lu",
-                  zone_page_state(zone, NR_FREE_PAGES),
+                  zone_nr_free_pages(zone),
                   min_wmark_pages(zone),
                   low_wmark_pages(zone),
                   high_wmark_pages(zone),
@@ -998,6 +1011,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
+               refresh_zone_stat_thresholds();
                start_cpu_timer(cpu);
                node_set_state(cpu_to_node(cpu), N_CPU);
                break;
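
The refresh_zone_stat_thresholds() hunk earlier in this file bounds how far the per-cpu NR_FREE_PAGES counters can drift: max_drift = num_online_cpus() * threshold. percpu_drift_mark is set only when that drift could make the low watermark look safe while the min watermark is actually breached (max_drift > low_wmark - min_wmark), and zone_nr_free_pages() then falls back to the exact snapshot. The arithmetic with made-up numbers:

#include <stdio.h>

int main(void)
{
        /* Illustrative numbers only; the real values are per-zone. */
        unsigned long min_wmark = 1000, low_wmark = 1250, high_wmark = 1500;
        unsigned long threshold = 64;           /* per-cpu stat_threshold */
        unsigned long online_cpus = 8;

        unsigned long tolerate_drift = low_wmark - min_wmark;  /* 250 */
        unsigned long max_drift = online_cpus * threshold;     /* 512 */

        if (max_drift > tolerate_drift)
                printf("percpu_drift_mark = %lu\n", high_wmark + max_drift);
        else
                printf("drift cannot breach the min watermark\n");
        return 0;
}
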
index 3d59c9bf8febf5064150ff50d22db9d569453131..3bccdd12a2642a06e1c5078b5c4065419a57428a 100644 (file)
@@ -510,7 +510,8 @@ static int vlan_dev_open(struct net_device *dev)
        if (vlan->flags & VLAN_FLAG_GVRP)
                vlan_gvrp_request_join(dev);
 
-       netif_carrier_on(dev);
+       if (netif_carrier_ok(real_dev))
+               netif_carrier_on(dev);
        return 0;
 
 clear_allmulti:
index 2ce79df00680eee3d9d290fd1156ac1d2ab74557..c7d81436213d13e24e67edf2630930e5c4ed08c0 100644 (file)
@@ -112,8 +112,8 @@ void ax25_ds_heartbeat_expiry(ax25_cb *ax25)
                        if (sk) {
                                sock_hold(sk);
                                ax25_destroy_socket(ax25);
-                               sock_put(sk);
                                bh_unlock_sock(sk);
+                               sock_put(sk);
                        } else
                                ax25_destroy_socket(ax25);
                        return;
index 2c911c0759c27bb6e4867b3bce9af0af3946d288..137f23259a93947ce581a9d14c8f6048f3ca2c19 100644 (file)
@@ -162,8 +162,8 @@ static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb)
                if (tmp) {
                        memcpy(tmp, nf_bridge, sizeof(struct nf_bridge_info));
                        atomic_set(&tmp->use, 1);
-                       nf_bridge_put(nf_bridge);
                }
+               nf_bridge_put(nf_bridge);
                nf_bridge = tmp;
        }
        return nf_bridge;
@@ -761,9 +761,11 @@ static int br_nf_dev_queue_xmit(struct sk_buff *skb)
 {
        if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) &&
            skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
-           !skb_is_gso(skb))
+           !skb_is_gso(skb)) {
+               /* BUG: Should really parse the IP options here. */
+               memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
                return ip_fragment(skb, br_dev_queue_push_xmit);
-       else
+       } else
                return br_dev_queue_push_xmit(skb);
 }
 #else
index eb1602022ac0643af4e4ad655c3de4ae42a200f4..9a699242d104be7fbef70ed4560d6ca3a63ebda3 100644 (file)
@@ -7,7 +7,7 @@
 #include <linux/stddef.h>
 #include <linux/spinlock.h>
 #include <linux/slab.h>
-#include <linux/unaligned/le_byteshift.h>
+#include <asm/unaligned.h>
 #include <net/caif/caif_layer.h>
 #include <net/caif/cfsrvl.h>
 #include <net/caif/cfpkt.h>
index 9fbe7f7429b0efd036cc9e4a7df06293f85a8f0b..6743146e4d6b49a61204395b49e4ded721de2703 100644 (file)
@@ -232,7 +232,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
        est->last_packets = bstats->packets;
        est->avpps = rate_est->pps<<10;
 
-       spin_lock(&est_tree_lock);
+       spin_lock_bh(&est_tree_lock);
        if (!elist[idx].timer.function) {
                INIT_LIST_HEAD(&elist[idx].list);
                setup_timer(&elist[idx].timer, est_timer, idx);
@@ -243,7 +243,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 
        list_add_rcu(&est->list, &elist[idx].list);
        gen_add_node(est);
-       spin_unlock(&est_tree_lock);
+       spin_unlock_bh(&est_tree_lock);
 
        return 0;
 }
@@ -270,7 +270,7 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
 {
        struct gen_estimator *e;
 
-       spin_lock(&est_tree_lock);
+       spin_lock_bh(&est_tree_lock);
        while ((e = gen_find_node(bstats, rate_est))) {
                rb_erase(&e->node, &est_root);
 
@@ -281,7 +281,7 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
                list_del_rcu(&e->list);
                call_rcu(&e->e_rcu, __gen_kill_estimator);
        }
-       spin_unlock(&est_tree_lock);
+       spin_unlock_bh(&est_tree_lock);
 }
 EXPORT_SYMBOL(gen_kill_estimator);
 
@@ -320,9 +320,9 @@ bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
 
        ASSERT_RTNL();
 
-       spin_lock(&est_tree_lock);
+       spin_lock_bh(&est_tree_lock);
        res = gen_find_node(bstats, rate_est) != NULL;
-       spin_unlock(&est_tree_lock);
+       spin_unlock_bh(&est_tree_lock);
 
        return res;
 }
index 3a2513f0d0c3036bf6c3f688f7e94b3e1d8f197c..26396ff67cf9b2f374e2e593128e88a861934a3e 100644 (file)
@@ -2573,6 +2573,10 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
                __copy_skb_header(nskb, skb);
                nskb->mac_len = skb->mac_len;
 
+               /* nskb and skb might have different headroom */
+               if (nskb->ip_summed == CHECKSUM_PARTIAL)
+                       nskb->csum_start += skb_headroom(nskb) - headroom;
+
                skb_reset_mac_header(nskb);
                skb_set_network_header(nskb, skb->mac_len);
                nskb->transport_header = (nskb->network_header +
@@ -2702,8 +2706,8 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
        } else if (skb_gro_len(p) != pinfo->gso_size)
                return -E2BIG;
 
-       headroom = skb_headroom(p);
-       nskb = netdev_alloc_skb(p->dev, headroom + skb_gro_offset(p));
+       headroom = NET_SKB_PAD + NET_IP_ALIGN;
+       nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC);
        if (unlikely(!nskb))
                return -ENOMEM;
 
index 7c3a7d19124995fd89034a4db2254566aab4cdfd..571f8950ed06f585f4dca482037d4b7985c256af 100644 (file)
@@ -46,7 +46,7 @@ config IP_ADVANCED_ROUTER
          rp_filter on use:
 
          echo 1 > /proc/sys/net/ipv4/conf/<device>/rp_filter
-          and
+          or
          echo 1 > /proc/sys/net/ipv4/conf/all/rp_filter
 
          Note that some distributions enable it in startup scripts.
index 51d6c31679757f58e62368a42edba16d9bfc4d09..e8f4f9a57f1258fb1a589c87a956797019c67504 100644 (file)
@@ -1420,6 +1420,9 @@ static int translate_compat_table(const char *name,
                if (ret != 0)
                        break;
                ++i;
+               if (strcmp(arpt_get_target(iter1)->u.user.name,
+                   XT_ERROR_TARGET) == 0)
+                       ++newinfo->stacksize;
        }
        if (ret) {
                /*
index 97b64b22c41214858d6ddb1fb9cf8e292468e23d..d163f2e3b2e99e5f18ae9997d3c74867b3e79354 100644 (file)
@@ -1751,6 +1751,9 @@ translate_compat_table(struct net *net,
                if (ret != 0)
                        break;
                ++i;
+               if (strcmp(ipt_get_target(iter1)->u.user.name,
+                   XT_ERROR_TARGET) == 0)
+                       ++newinfo->stacksize;
        }
        if (ret) {
                /*
index 176e11aaea771795b21c0be6b1453b46c6349f0c..3fb1428e526eedb521057a49624fa28dde8b41cd 100644 (file)
@@ -451,7 +451,8 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
                                if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
                                        mask |= POLLOUT | POLLWRNORM;
                        }
-               }
+               } else
+                       mask |= POLLOUT | POLLWRNORM;
 
                if (tp->urg_data & TCP_URG_VALID)
                        mask |= POLLPRI;
@@ -2011,11 +2012,8 @@ adjudge_to_death:
                }
        }
        if (sk->sk_state != TCP_CLOSE) {
-               int orphan_count = percpu_counter_read_positive(
-                                               sk->sk_prot->orphan_count);
-
                sk_mem_reclaim(sk);
-               if (tcp_too_many_orphans(sk, orphan_count)) {
+               if (tcp_too_many_orphans(sk, 0)) {
                        if (net_ratelimit())
                                printk(KERN_INFO "TCP: too many of orphaned "
                                       "sockets\n");
@@ -3212,7 +3210,7 @@ void __init tcp_init(void)
 {
        struct sk_buff *skb = NULL;
        unsigned long nr_pages, limit;
-       int order, i, max_share;
+       int i, max_share, cnt;
        unsigned long jiffy = jiffies;
 
        BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
@@ -3261,22 +3259,12 @@ void __init tcp_init(void)
                INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
        }
 
-       /* Try to be a bit smarter and adjust defaults depending
-        * on available memory.
-        */
-       for (order = 0; ((1 << order) << PAGE_SHIFT) <
-                       (tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
-                       order++)
-               ;
-       if (order >= 4) {
-               tcp_death_row.sysctl_max_tw_buckets = 180000;
-               sysctl_tcp_max_orphans = 4096 << (order - 4);
-               sysctl_max_syn_backlog = 1024;
-       } else if (order < 3) {
-               tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
-               sysctl_tcp_max_orphans >>= (3 - order);
-               sysctl_max_syn_backlog = 128;
-       }
+
+       cnt = tcp_hashinfo.ehash_mask + 1;
+
+       tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
+       sysctl_tcp_max_orphans = cnt / 2;
+       sysctl_max_syn_backlog = max(128, cnt / 256);
 
        /* Set the pressure threshold to be a fraction of global memory that
         * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
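
The tcp_init() hunk above drops the old page-order heuristic and instead scales the time-wait, orphan and SYN-backlog limits from the size of the established hash table (cnt = tcp_hashinfo.ehash_mask + 1). A standalone sketch of the new arithmetic, with a hypothetical bucket count:

    /* userspace sketch of the sizing above; 65536 is an example ehash size */
    #include <stdio.h>

    static int imax(int a, int b) { return a > b ? a : b; }

    int main(void)
    {
            int cnt = 65536;
            int max_tw_buckets  = cnt / 2;
            int max_orphans     = cnt / 2;
            int max_syn_backlog = imax(128, cnt / 256);

            printf("tw=%d orphans=%d syn_backlog=%d\n",
                   max_tw_buckets, max_orphans, max_syn_backlog);
            return 0;
    }
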
index 0ec9bd0ae94f2ef37024ac11c5e411f0179c816a..850c737e08e2a4a9185c64c8776711d83b8c7b08 100644 (file)
@@ -196,10 +196,10 @@ void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
 int tcp_set_allowed_congestion_control(char *val)
 {
        struct tcp_congestion_ops *ca;
-       char *clone, *name;
+       char *saved_clone, *clone, *name;
        int ret = 0;
 
-       clone = kstrdup(val, GFP_USER);
+       saved_clone = clone = kstrdup(val, GFP_USER);
        if (!clone)
                return -ENOMEM;
 
@@ -226,6 +226,7 @@ int tcp_set_allowed_congestion_control(char *val)
        }
 out:
        spin_unlock(&tcp_cong_list_lock);
+       kfree(saved_clone);
 
        return ret;
 }
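
The fix above keeps the pointer returned by kstrdup() in saved_clone because the parsing loop (which tokenizes with strsep() in code not shown in this hunk) advances clone, so freeing clone at the end would free the wrong address. The same saved-pointer pattern in plain userspace C, with glibc's strdup()/strsep() standing in for the kernel helpers:

    /* sketch of the saved-pointer pattern; not the kernel code itself */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
            char *saved, *cursor, *name;

            saved = cursor = strdup("reno cubic vegas");
            if (!saved)
                    return 1;

            while ((name = strsep(&cursor, " ")) != NULL)
                    printf("token: %s\n", name);

            free(saved);    /* 'cursor' is NULL here; only 'saved' may be freed */
            return 0;
    }
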
index 808bb920c9f5e67249f6747c90cdeb7dddd9ddea..c35b469e851c298814d69583bd593ec1c580dde5 100644 (file)
@@ -66,18 +66,18 @@ static void tcp_write_err(struct sock *sk)
 static int tcp_out_of_resources(struct sock *sk, int do_reset)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       int orphans = percpu_counter_read_positive(&tcp_orphan_count);
+       int shift = 0;
 
        /* If peer does not open window for long time, or did not transmit
         * anything for long time, penalize it. */
        if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
-               orphans <<= 1;
+               shift++;
 
        /* If some dubious ICMP arrived, penalize even more. */
        if (sk->sk_err_soft)
-               orphans <<= 1;
+               shift++;
 
-       if (tcp_too_many_orphans(sk, orphans)) {
+       if (tcp_too_many_orphans(sk, shift)) {
                if (net_ratelimit())
                        printk(KERN_INFO "Out of socket memory\n");
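
The hunk above stops shifting a sampled orphan count and instead passes a shift, so each penalty doubles the effective orphan pressure when tcp_too_many_orphans() compares against the current counter. A rough standalone sketch of that idea (the in-kernel helper layers further memory-pressure checks on top):

    /* toy sketch only; 'limit' stands in for sysctl_tcp_max_orphans */
    #include <stdio.h>

    static int too_many_orphans(long orphans, int shift, long limit)
    {
            return (orphans << shift) > limit;
    }

    int main(void)
    {
            long limit = 32768;
            printf("%d\n", too_many_orphans(20000, 0, limit)); /* 0: within limit */
            printf("%d\n", too_many_orphans(20000, 1, limit)); /* 1: doubled, over */
            return 0;
    }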
 
index 29a7bca29e3fddec24a55ea2e6ca228bc7a452ba..8e754be92c2450e7142a958466b493dc077f5772 100644 (file)
@@ -1766,6 +1766,9 @@ translate_compat_table(struct net *net,
                if (ret != 0)
                        break;
                ++i;
+               if (strcmp(ip6t_get_target(iter1)->u.user.name,
+                   XT_ERROR_TARGET) == 0)
+                       ++newinfo->stacksize;
        }
        if (ret) {
                /*
index 79986a674f6ea23329fb5a3960f1d3ece82b9a9d..fd55b5135de5aad91f547281a9c7a07c44f676b0 100644 (file)
@@ -824,8 +824,8 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 
        err = irda_open_tsap(self, addr->sir_lsap_sel, addr->sir_name);
        if (err < 0) {
-               kfree(self->ias_obj->name);
-               kfree(self->ias_obj);
+               irias_delete_object(self->ias_obj);
+               self->ias_obj = NULL;
                goto out;
        }
 
index 9616c32d1076dda982fff4c6da5c6e1057cf39d1..5bb8353105cca7c761647b94c346cc1da82c42e9 100644 (file)
@@ -169,6 +169,7 @@ static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
 {
        struct irlan_cb *self = netdev_priv(dev);
        int ret;
+       unsigned int len;
 
        /* skb headroom large enough to contain all IrDA-headers? */
        if ((skb_headroom(skb) < self->max_header_size) || (skb_shared(skb))) {
@@ -188,6 +189,7 @@ static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
 
        dev->trans_start = jiffies;
 
+       len = skb->len;
        /* Now queue the packet in the transport layer */
        if (self->use_udata)
                ret = irttp_udata_request(self->tsap_data, skb);
@@ -209,7 +211,7 @@ static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
                self->stats.tx_dropped++;
        } else {
                self->stats.tx_packets++;
-               self->stats.tx_bytes += skb->len;
+               self->stats.tx_bytes += len;
        }
 
        return NETDEV_TX_OK;
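
The irlan_eth_xmit() change above records skb->len before the skb is handed to the irttp_*_request() calls, since the transport layer may consume (and free) the buffer before the statistics are updated. The same capture-before-handoff pattern in a self-contained toy form:

    /* toy illustration; 'struct buf' and xmit() are stand-ins, not IrDA code */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct buf { size_t len; char *data; };

    static int xmit(struct buf *b)          /* consumes and frees the buffer */
    {
            free(b->data);
            free(b);
            return 0;
    }

    int main(void)
    {
            struct buf *b = malloc(sizeof(*b));
            if (!b)
                    return 1;
            b->data = strdup("payload");
            if (!b->data) {
                    free(b);
                    return 1;
            }
            b->len = strlen(b->data);

            size_t len = b->len;            /* save before xmit() frees b */
            if (xmit(b) == 0)
                    printf("tx_bytes += %zu\n", len);
            return 0;
    }
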
index 58c6c4cda73b576cd65d3121a9abdf67bc6f7668..1ae697681bc735f3f4ed8cf98f9f63745157be36 100644 (file)
@@ -132,7 +132,7 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
                printk("\n");
        }
 
-       if (data_len < ETH_HLEN)
+       if (!pskb_may_pull(skb, sizeof(ETH_HLEN)))
                goto error;
 
        secpath_reset(skb);
index 798a91b100cc277d154bde089eb6f4745f47b6b6..ded5c3843e061a13e1d82be4666b746c829c467c 100644 (file)
@@ -732,6 +732,12 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
 
        rtnl_unlock();
 
+       /*
+        * Now all work items will be gone, but the
+        * timer might still be armed, so delete it
+        */
+       del_timer_sync(&local->work_timer);
+
        cancel_work_sync(&local->reconfig_filter);
 
        ieee80211_clear_tx_pending(local);
index f228a17ec6499b1440cae8a9dd9a1d8e312c4a00..33b329bfc2d24e6da813a1a6a546a96ea6b3bdbe 100644 (file)
@@ -45,6 +45,7 @@
 #include <linux/netfilter.h>
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_nat.h>
 #include <net/netfilter/nf_nat_helper.h>
 #include <linux/gfp.h>
 #include <net/protocol.h>
@@ -359,7 +360,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
                buf_len = strlen(buf);
 
                ct = nf_ct_get(skb, &ctinfo);
-               if (ct && !nf_ct_is_untracked(ct)) {
+               if (ct && !nf_ct_is_untracked(ct) && nfct_nat(ct)) {
                        /* If mangling fails this function will return 0
                         * which will cause the packet to be dropped.
                         * Mangling can only fail under memory pressure,
index 8648a9922aabace0231de8eec18b88e30239fd5f..cd96ed3ccee4602a9fee20464e4a54d3fb0783b2 100644 (file)
@@ -1406,7 +1406,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
        struct netlink_sock *nlk = nlk_sk(sk);
        int noblock = flags&MSG_DONTWAIT;
        size_t copied;
-       struct sk_buff *skb, *frag __maybe_unused = NULL;
+       struct sk_buff *skb, *data_skb;
        int err;
 
        if (flags&MSG_OOB)
@@ -1418,45 +1418,35 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
        if (skb == NULL)
                goto out;
 
+       data_skb = skb;
+
 #ifdef CONFIG_COMPAT_NETLINK_MESSAGES
        if (unlikely(skb_shinfo(skb)->frag_list)) {
-               bool need_compat = !!(flags & MSG_CMSG_COMPAT);
-
                /*
-                * If this skb has a frag_list, then here that means that
-                * we will have to use the frag_list skb for compat tasks
-                * and the regular skb for non-compat tasks.
+                * If this skb has a frag_list, then here that means that we
+                * will have to use the frag_list skb's data for compat tasks
+                * and the regular skb's data for normal (non-compat) tasks.
                 *
-                * The skb might (and likely will) be cloned, so we can't
-                * just reset frag_list and go on with things -- we need to
-                * keep that. For the compat case that's easy -- simply get
-                * a reference to the compat skb and free the regular one
-                * including the frag. For the non-compat case, we need to
-                * avoid sending the frag to the user -- so assign NULL but
-                * restore it below before freeing the skb.
+                * If we need to send the compat skb, assign it to the
+                * 'data_skb' variable so that it will be used below for data
+                * copying. We keep 'skb' for everything else, including
+                * freeing both later.
                 */
-               if (need_compat) {
-                       struct sk_buff *compskb = skb_shinfo(skb)->frag_list;
-                       skb_get(compskb);
-                       kfree_skb(skb);
-                       skb = compskb;
-               } else {
-                       frag = skb_shinfo(skb)->frag_list;
-                       skb_shinfo(skb)->frag_list = NULL;
-               }
+               if (flags & MSG_CMSG_COMPAT)
+                       data_skb = skb_shinfo(skb)->frag_list;
        }
 #endif
 
        msg->msg_namelen = 0;
 
-       copied = skb->len;
+       copied = data_skb->len;
        if (len < copied) {
                msg->msg_flags |= MSG_TRUNC;
                copied = len;
        }
 
-       skb_reset_transport_header(skb);
-       err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+       skb_reset_transport_header(data_skb);
+       err = skb_copy_datagram_iovec(data_skb, 0, msg->msg_iov, copied);
 
        if (msg->msg_name) {
                struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
@@ -1476,11 +1466,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
        }
        siocb->scm->creds = *NETLINK_CREDS(skb);
        if (flags & MSG_TRUNC)
-               copied = skb->len;
-
-#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
-       skb_shinfo(skb)->frag_list = frag;
-#endif
+               copied = data_skb->len;
 
        skb_free_datagram(sk, skb);
 
@@ -2116,6 +2102,26 @@ static void __net_exit netlink_net_exit(struct net *net)
 #endif
 }
 
+static void __init netlink_add_usersock_entry(void)
+{
+       unsigned long *listeners;
+       int groups = 32;
+
+       listeners = kzalloc(NLGRPSZ(groups) + sizeof(struct listeners_rcu_head),
+                           GFP_KERNEL);
+       if (!listeners)
+               panic("netlink_add_usersock_entry: Cannot allocate listneres\n");
+
+       netlink_table_grab();
+
+       nl_table[NETLINK_USERSOCK].groups = groups;
+       nl_table[NETLINK_USERSOCK].listeners = listeners;
+       nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
+       nl_table[NETLINK_USERSOCK].registered = 1;
+
+       netlink_table_ungrab();
+}
+
 static struct pernet_operations __net_initdata netlink_net_ops = {
        .init = netlink_net_init,
        .exit = netlink_net_exit,
@@ -2164,6 +2170,8 @@ static int __init netlink_proto_init(void)
                hash->rehash_time = jiffies;
        }
 
+       netlink_add_usersock_entry();
+
        sock_register(&netlink_family_ops);
        register_pernet_subsys(&netlink_net_ops);
        /* The netlink device handler may be needed early. */
index 795a00b7f2cb7aa6a539adb3c86a2cad180575f1..c93588c2d553cf6b162ab500cf1fd72dbbc9c26c 100644 (file)
@@ -297,7 +297,7 @@ static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
 int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr)
 {
        struct rds_notifier *notifier;
-       struct rds_rdma_notify cmsg;
+       struct rds_rdma_notify cmsg = { 0 }; /* fill holes with zero */
        unsigned int count = 0, max_messages = ~0U;
        unsigned long flags;
        LIST_HEAD(copy);
index 537a48732e9e0b8513dd324801c342ec00cd3820..7ebf7439b478d24b4246527c7313fb8cb774496b 100644 (file)
@@ -350,22 +350,19 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
 {
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_police *police = a->priv;
-       struct tc_police opt;
-
-       opt.index = police->tcf_index;
-       opt.action = police->tcf_action;
-       opt.mtu = police->tcfp_mtu;
-       opt.burst = police->tcfp_burst;
-       opt.refcnt = police->tcf_refcnt - ref;
-       opt.bindcnt = police->tcf_bindcnt - bind;
+       struct tc_police opt = {
+               .index = police->tcf_index,
+               .action = police->tcf_action,
+               .mtu = police->tcfp_mtu,
+               .burst = police->tcfp_burst,
+               .refcnt = police->tcf_refcnt - ref,
+               .bindcnt = police->tcf_bindcnt - bind,
+       };
+
        if (police->tcfp_R_tab)
                opt.rate = police->tcfp_R_tab->rate;
-       else
-               memset(&opt.rate, 0, sizeof(opt.rate));
        if (police->tcfp_P_tab)
                opt.peakrate = police->tcfp_P_tab->rate;
-       else
-               memset(&opt.peakrate, 0, sizeof(opt.peakrate));
        NLA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
        if (police->tcfp_result)
                NLA_PUT_U32(skb, TCA_POLICE_RESULT, police->tcfp_result);
index abd904be428717462395d62adb682023e64e17a5..47496098d35c2876cd0b5a88e1365922cc369ce9 100644 (file)
@@ -761,8 +761,8 @@ init_vf(struct hfsc_class *cl, unsigned int len)
                if (f != cl->cl_f) {
                        cl->cl_f = f;
                        cftree_update(cl);
-                       update_cfmin(cl->cl_parent);
                }
+               update_cfmin(cl->cl_parent);
        }
 }
 
index 541e2fff5e9c5a0fab346fa55ac9f078630dcf43..d6d046b9f6f2d7d85f8a10680cf662519838e877 100644 (file)
@@ -475,12 +475,10 @@ int wiphy_register(struct wiphy *wiphy)
        mutex_lock(&cfg80211_mutex);
 
        res = device_add(&rdev->wiphy.dev);
-       if (res)
-               goto out_unlock;
-
-       res = rfkill_register(rdev->rfkill);
-       if (res)
-               goto out_rm_dev;
+       if (res) {
+               mutex_unlock(&cfg80211_mutex);
+               return res;
+       }
 
        /* set up regulatory info */
        wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE);
@@ -509,13 +507,18 @@ int wiphy_register(struct wiphy *wiphy)
        cfg80211_debugfs_rdev_add(rdev);
        mutex_unlock(&cfg80211_mutex);
 
+       /*
+        * due to a locking dependency this has to be outside of the
+        * cfg80211_mutex lock
+        */
+       res = rfkill_register(rdev->rfkill);
+       if (res)
+               goto out_rm_dev;
+
        return 0;
 
 out_rm_dev:
        device_del(&rdev->wiphy.dev);
-
-out_unlock:
-       mutex_unlock(&cfg80211_mutex);
        return res;
 }
 EXPORT_SYMBOL(wiphy_register);
index bb5e0a5ecfa1c2f999034bf4ceb34e69fbec855d..7e5c3a45f811d1a951ebee2486cfd8ffa70e5482 100644 (file)
@@ -1420,6 +1420,9 @@ int cfg80211_wext_giwessid(struct net_device *dev,
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
 
+       data->flags = 0;
+       data->length = 0;
+
        switch (wdev->iftype) {
        case NL80211_IFTYPE_ADHOC:
                return cfg80211_ibss_wext_giwessid(dev, info, data, ssid);
index 0ef17bc42bac05996eab6e2dada02fc67953c3dc..8f5116f5af19988555316eb5b97c7465a0890677 100644 (file)
@@ -782,6 +782,22 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
                }
        }
 
+       if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
+               /*
+                * If this is a GET, but not NOMAX, it means that the extra
+                * data is not bounded by userspace, but by max_tokens. Thus
+                * set the length to max_tokens. This matches the extra data
+                * allocation.
+                * The driver should fill it with the number of tokens it
+                * provided, and it may check iwp->length rather than having
+                * knowledge of max_tokens. If the driver doesn't change the
+                * iwp->length, this ioctl just copies back max_token tokens
+                * filled with zeroes. Hopefully the driver isn't claiming
+                * them to be valid data.
+                */
+               iwp->length = descr->max_tokens;
+       }
+
        err = handler(dev, info, (union iwreq_data *) iwp, extra);
 
        iwp->length += essid_compat;
index b14ed4b1f27c3bd70f5837c2f032baf11a88e9a1..8bae6b22c8461c7a03fd15f86bd672a662b0b437 100644 (file)
@@ -1801,7 +1801,7 @@ static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
        struct xfrm_user_expire *ue = nlmsg_data(nlh);
        struct xfrm_usersa_info *p = &ue->state;
        struct xfrm_mark m;
-       u32 mark = xfrm_mark_get(attrs, &m);;
+       u32 mark = xfrm_mark_get(attrs, &m);
 
        x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family);
 
index c39327e60ea499919937fae08a8d5bc5dc9c4fd9..515253fe46cfdb3d034f983090abfeff7c8c0aa2 100644 (file)
@@ -497,7 +497,9 @@ int conf_write_defconfig(const char *filename)
                        /*
                         * If symbol is a choice value and equals to the
                         * default for a choice - skip.
-                        * But only if value is bool and equal to "y" .
+                        * But only if value is bool and equal to "y" and
+                        * choice is not "optional".
+                        * (If choice is "optional" then all values can be "n")
                         */
                        if (sym_is_choice_value(sym)) {
                                struct symbol *cs;
@@ -505,7 +507,7 @@ int conf_write_defconfig(const char *filename)
 
                                cs = prop_get_symbol(sym_get_choice_prop(sym));
                                ds = sym_choice_default(cs);
-                               if (sym == ds) {
+                               if (!sym_is_optional(cs) && sym == ds) {
                                        if ((sym->type == S_BOOLEAN) &&
                                            sym_get_tristate_value(sym) == yes)
                                                goto next_menu;
index e95718fea3555de1b8db125b16add3cf9a350719..943712ca6c0a6a0eb5bbf97dbba2a2d20e495b47 100644 (file)
@@ -937,6 +937,8 @@ static void sym_check_print_recursive(struct symbol *last_sym)
                sym = stack->sym;
                next_sym = stack->next ? stack->next->sym : last_sym;
                prop = stack->prop;
+               if (prop == NULL)
+                       prop = stack->sym->prop;
 
                /* for choice values find the menu entry (used below) */
                if (sym_is_choice(sym) || sym_is_choice_value(sym)) {
index 67d59c7a18dc57b1580a833ed37e832eff227c85..5325423ceab483833c8f09efb37a336aaa8f9588 100644 (file)
@@ -44,7 +44,9 @@ all:
 
 Makefile:;
 
-\$(all) %/: all
+\$(all): all
        @:
 
+%/: all
+       @:
 EOF
index e90a91cc5185709d31a9a128b4790b676a35599e..057b6b3c5dfb4cf0677add9a7c6f04c5029d2e15 100755 (executable)
@@ -43,7 +43,7 @@ scm_version()
        fi
 
        # Check for git and a git repo.
-       if head=`git rev-parse --verify --short HEAD 2>/dev/null`; then
+       if test -d .git && head=`git rev-parse --verify --short HEAD 2>/dev/null`; then
 
                # If we are at a tagged commit (like "v2.6.30-rc6"), we ignore
                # it, because this version is defined in the top level Makefile.
@@ -85,7 +85,7 @@ scm_version()
        fi
 
        # Check for mercurial and a mercurial repo.
-       if hgid=`hg id 2>/dev/null`; then
+       if test -d .hg && hgid=`hg id 2>/dev/null`; then
                tag=`printf '%s' "$hgid" | cut -s -d' ' -f2`
 
                # Do we have an untagged version?
index 3c88be94649408b412c0193f0bf4186273c6a972..02baec732bb512c77fed7d5ede247b72d788a147 100644 (file)
@@ -33,8 +33,8 @@ struct aa_rlimit {
 };
 
 int aa_map_resource(int resource);
-int aa_task_setrlimit(struct aa_profile *profile, unsigned int resource,
-                     struct rlimit *new_rlim);
+int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *,
+                     unsigned int resource, struct rlimit *new_rlim);
 
 void __aa_transition_rlimits(struct aa_profile *old, struct aa_profile *new);
 
index 6e85cdb4303f69fc1911c94aefd5f1c5b695239c..506d2baf614797624fc4b9450c0d12c9f56e8ae4 100644 (file)
@@ -40,6 +40,7 @@ char *aa_split_fqname(char *fqname, char **ns_name)
        *ns_name = NULL;
        if (name[0] == ':') {
                char *split = strchr(&name[1], ':');
+               *ns_name = skip_spaces(&name[1]);
                if (split) {
                        /* overwrite ':' with \0 */
                        *split = 0;
@@ -47,7 +48,6 @@ char *aa_split_fqname(char *fqname, char **ns_name)
                } else
                        /* a ns name without a following profile is allowed */
                        name = NULL;
-               *ns_name = &name[1];
        }
        if (name && *name == 0)
                name = NULL;
index f73e2c2042185fff2d079dbdb3f4b8b828371e72..cf1de4462ccd3fb297f48bf351dd3494804f22c1 100644 (file)
@@ -614,7 +614,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
        int error = 0;
 
        if (!unconfined(profile))
-               error = aa_task_setrlimit(profile, resource, new_rlim);
+               error = aa_task_setrlimit(profile, task, resource, new_rlim);
 
        return error;
 }
index 19358dc14605bae1422ae00226291751695ba44c..82396050f18646ac0519352321637e930c05e367 100644 (file)
@@ -59,8 +59,7 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
 {
        struct path root, tmp;
        char *res;
-       int deleted, connected;
-       int error = 0;
+       int connected, error = 0;
 
        /* Get the root we want to resolve too, released below */
        if (flags & PATH_CHROOT_REL) {
@@ -74,19 +73,8 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
        }
 
        spin_lock(&dcache_lock);
-       /* There is a race window between path lookup here and the
-        * need to strip the " (deleted) string that __d_path applies
-        * Detect the race and relookup the path
-        *
-        * The stripping of (deleted) is a hack that could be removed
-        * with an updated __d_path
-        */
-       do {
-               tmp = root;
-               deleted = d_unlinked(path->dentry);
-               res = __d_path(path, &tmp, buf, buflen);
-
-       } while (deleted != d_unlinked(path->dentry));
+       tmp = root;
+       res = __d_path(path, &tmp, buf, buflen);
        spin_unlock(&dcache_lock);
 
        *name = res;
@@ -98,21 +86,17 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
                *name = buf;
                goto out;
        }
-       if (deleted) {
-               /* On some filesystems, newly allocated dentries appear to the
-                * security_path hooks as a deleted dentry except without an
-                * inode allocated.
-                *
-                * Remove the appended deleted text and return as string for
-                * normal mediation, or auditing.  The (deleted) string is
-                * guaranteed to be added in this case, so just strip it.
-                */
-               buf[buflen - 11] = 0;   /* - (len(" (deleted)") +\0) */
 
-               if (path->dentry->d_inode && !(flags & PATH_MEDIATE_DELETED)) {
+       /* Handle two cases:
+        * 1. A deleted dentry && profile is not allowing mediation of deleted
+        * 2. On some filesystems, newly allocated dentries appear to the
+        *    security_path hooks as a deleted dentry except without an inode
+        *    allocated.
+        */
+       if (d_unlinked(path->dentry) && path->dentry->d_inode &&
+           !(flags & PATH_MEDIATE_DELETED)) {
                        error = -ENOENT;
                        goto out;
-               }
        }
 
        /* Determine if the path is connected to the expected root */
index 3cdc1ad0787ec9c4769455f8aeb004a417d246bf..52cc865f1464574e696fd28eca6a6e0eed326d68 100644 (file)
@@ -1151,12 +1151,14 @@ ssize_t aa_remove_profiles(char *fqname, size_t size)
                /* released below */
                ns = aa_get_namespace(root);
 
-       write_lock(&ns->lock);
        if (!name) {
                /* remove namespace - can only happen if fqname[0] == ':' */
+               write_lock(&ns->parent->lock);
                __remove_namespace(ns);
+               write_unlock(&ns->parent->lock);
        } else {
                /* remove profile */
+               write_lock(&ns->lock);
                profile = aa_get_profile(__lookup_profile(&ns->base, name));
                if (!profile) {
                        error = -ENOENT;
@@ -1165,8 +1167,8 @@ ssize_t aa_remove_profiles(char *fqname, size_t size)
                }
                name = profile->base.hname;
                __remove_profile(profile);
+               write_unlock(&ns->lock);
        }
-       write_unlock(&ns->lock);
 
        /* don't fail removal if audit fails */
        (void) audit_policy(OP_PROF_RM, GFP_KERNEL, name, info, error);
index 4a368f1fd36ddf02af7204d30ee1b136f1d57bf7..a4136c10b1c6292edbdadae7285803583fb74241 100644 (file)
@@ -72,6 +72,7 @@ int aa_map_resource(int resource)
 /**
  * aa_task_setrlimit - test permission to set an rlimit
  * @profile - profile confining the task  (NOT NULL)
+ * @task - task the resource is being set on
  * @resource - the resource being set
  * @new_rlim - the new resource limit  (NOT NULL)
  *
@@ -79,18 +80,21 @@ int aa_map_resource(int resource)
  *
  * Returns: 0 or error code if setting resource failed
  */
-int aa_task_setrlimit(struct aa_profile *profile, unsigned int resource,
-                     struct rlimit *new_rlim)
+int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *task,
+                     unsigned int resource, struct rlimit *new_rlim)
 {
        int error = 0;
 
-       if (profile->rlimits.mask & (1 << resource) &&
-           new_rlim->rlim_max > profile->rlimits.limits[resource].rlim_max)
-
-               error = audit_resource(profile, resource, new_rlim->rlim_max,
-                       -EACCES);
+       /* TODO: extend resource control to handle other (non current)
+        * processes.  AppArmor rules currently have the implicit assumption
+        * that the task is setting the resource of the current process
+        */
+       if ((task != current->group_leader) ||
+           (profile->rlimits.mask & (1 << resource) &&
+            new_rlim->rlim_max > profile->rlimits.limits[resource].rlim_max))
+               error = -EACCES;
 
-       return error;
+       return audit_resource(profile, resource, new_rlim->rlim_max, error);
 }
 
 /**
index 16d100d3fc38de931de8e1a2679bc025359f3d31..3fbcd1dda0ef6e06da4a5b4b9c23a240a6378a35 100644 (file)
@@ -35,6 +35,7 @@ enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
 #define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS)
 
 /* set during initialization */
+extern int iint_initialized;
 extern int ima_initialized;
 extern int ima_used_chip;
 extern char *ima_hash;
index 7625b85c2274f457fc0d260a21e2d9039758d12c..afba4aef812f699134f7c7bc66c32251d2f12c69 100644 (file)
 
 RADIX_TREE(ima_iint_store, GFP_ATOMIC);
 DEFINE_SPINLOCK(ima_iint_lock);
-
 static struct kmem_cache *iint_cache __read_mostly;
 
+int iint_initialized = 0;
+
 /* ima_iint_find_get - return the iint associated with an inode
  *
  * ima_iint_find_get gets a reference to the iint. Caller must
@@ -141,6 +142,7 @@ static int __init ima_iintcache_init(void)
        iint_cache =
            kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0,
                              SLAB_PANIC, init_once);
+       iint_initialized = 1;
        return 0;
 }
 security_initcall(ima_iintcache_init);
index f93641382e9f9483576578a3ba5f41286f4cc3ab..e662b89d407944103dc121b9ccb37f7e68ac62e1 100644 (file)
@@ -148,12 +148,14 @@ void ima_counts_get(struct file *file)
        struct ima_iint_cache *iint;
        int rc;
 
-       if (!ima_initialized || !S_ISREG(inode->i_mode))
+       if (!iint_initialized || !S_ISREG(inode->i_mode))
                return;
        iint = ima_iint_find_get(inode);
        if (!iint)
                return;
        mutex_lock(&iint->mutex);
+       if (!ima_initialized)
+               goto out;
        rc = ima_must_measure(iint, inode, MAY_READ, FILE_CHECK);
        if (rc < 0)
                goto out;
@@ -213,7 +215,7 @@ void ima_file_free(struct file *file)
        struct inode *inode = file->f_dentry->d_inode;
        struct ima_iint_cache *iint;
 
-       if (!ima_initialized || !S_ISREG(inode->i_mode))
+       if (!iint_initialized || !S_ISREG(inode->i_mode))
                return;
        iint = ima_iint_find_get(inode);
        if (!iint)
@@ -230,7 +232,7 @@ static int process_measurement(struct file *file, const unsigned char *filename,
 {
        struct inode *inode = file->f_dentry->d_inode;
        struct ima_iint_cache *iint;
-       int rc;
+       int rc = 0;
 
        if (!ima_initialized || !S_ISREG(inode->i_mode))
                return 0;
index cbe815dfbdc8c3bd3f417ba960e7aee37cf5a0e9..204af48c5cc17f2f59632bb2089aa03c7b593664 100644 (file)
@@ -203,10 +203,16 @@ static char *snd_pcm_format_names[] = {
        FORMAT(S18_3BE),
        FORMAT(U18_3LE),
        FORMAT(U18_3BE),
+       FORMAT(G723_24),
+       FORMAT(G723_24_1B),
+       FORMAT(G723_40),
+       FORMAT(G723_40_1B),
 };
 
 const char *snd_pcm_format_name(snd_pcm_format_t format)
 {
+       if (format >= ARRAY_SIZE(snd_pcm_format_names))
+               return "Unknown";
        return snd_pcm_format_names[format];
 }
 EXPORT_SYMBOL_GPL(snd_pcm_format_name);
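
The snd_pcm_format_name() hunk above guards the name-table lookup so a format value beyond the table returns "Unknown" instead of reading past the array. A generic, compilable sketch of that bounds-checked lookup pattern:

    /* generic pattern only; the table contents here are made up */
    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const char *names[] = { "S8", "U8", "S16_LE", "S16_BE" };

    static const char *name_of(unsigned int idx)
    {
            if (idx >= ARRAY_SIZE(names))
                    return "Unknown";
            return names[idx];
    }

    int main(void)
    {
            printf("%s %s\n", name_of(2), name_of(99)); /* S16_LE Unknown */
            return 0;
    }
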
index eb68326c37d47626b53d7970fe6e88027edc0e13..a7868ad4d530fd40b3a9bd608bd7860a7d35ce44 100644 (file)
@@ -829,6 +829,8 @@ static int snd_rawmidi_control_ioctl(struct snd_card *card,
                
                if (get_user(device, (int __user *)argp))
                        return -EFAULT;
+               if (device >= SNDRV_RAWMIDI_DEVICES) /* next device is -1 */
+                       device = SNDRV_RAWMIDI_DEVICES - 1;
                mutex_lock(&register_mutex);
                device = device < 0 ? 0 : device + 1;
                while (device < SNDRV_RAWMIDI_DEVICES) {
index 685712276ac95ab0d57985489b86f4f840bcf9b1..69cd7b3c362d19f4b0ac989b8bc107e49e59ff1b 100644 (file)
@@ -281,13 +281,10 @@ snd_seq_oss_open(struct file *file, int level)
        return 0;
 
  _error:
-       snd_seq_oss_writeq_delete(dp->writeq);
-       snd_seq_oss_readq_delete(dp->readq);
        snd_seq_oss_synth_cleanup(dp);
        snd_seq_oss_midi_cleanup(dp);
-       delete_port(dp);
        delete_seq_queue(dp->queue);
-       kfree(dp);
+       delete_port(dp);
 
        return rc;
 }
@@ -350,8 +347,10 @@ create_port(struct seq_oss_devinfo *dp)
 static int
 delete_port(struct seq_oss_devinfo *dp)
 {
-       if (dp->port < 0)
+       if (dp->port < 0) {
+               kfree(dp);
                return 0;
+       }
 
        debug_printk(("delete_port %i\n", dp->port));
        return snd_seq_event_port_detach(dp->cseq, dp->port);
index 5f3e68401f905dbf2b8a03bc12fd657450b739d4..91d6023a63e57c6b14227e158c171b8edf4b8087 100644 (file)
@@ -764,9 +764,9 @@ static long io[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
 static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ;
 static long mem[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
 
+#ifndef MSND_CLASSIC
 static long cfg[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
 
-#ifndef MSND_CLASSIC
 /* Extra Peripheral Configuration (Default: Disable) */
 static long ide_io0[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
 static long ide_io1[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
@@ -894,7 +894,11 @@ static int __devinit snd_msnd_isa_probe(struct device *pdev, unsigned int idx)
        struct snd_card *card;
        struct snd_msnd *chip;
 
-       if (has_isapnp(idx) || cfg[idx] == SNDRV_AUTO_PORT) {
+       if (has_isapnp(idx)
+#ifndef MSND_CLASSIC
+           || cfg[idx] == SNDRV_AUTO_PORT
+#endif
+           ) {
                printk(KERN_INFO LOGNAME ": Assuming PnP mode\n");
                return -ENODEV;
        }
index f0f0c19fbff7a75c4e5ad561d18ce9f970775e97..48cda6c4c257b34d191d3512ff52658de4329669 100644 (file)
@@ -26,7 +26,7 @@ static unsigned long prev_event_time;
 static volatile unsigned long usecs_per_tmr;   /* Length of the current interval */
 
 static struct sound_lowlev_timer *tmr;
-static spinlock_t lock;
+static DEFINE_SPINLOCK(lock);
 
 static unsigned long tmr2ticks(int tmr_value)
 {
index 3b44134482266ff5f17ddeb301b258846a34eb77..22c5fc6255335ac94aca5ac297432f16b0950c4e 100644 (file)
@@ -941,8 +941,7 @@ static void outstream_host_buffer_free(struct hpi_adapter_obj *pao,
 
 }
 
-static u32 outstream_get_space_available(struct hpi_hostbuffer_status
-       *status)
+static u32 outstream_get_space_available(struct hpi_hostbuffer_status *status)
 {
        return status->size_in_bytes - (status->host_index -
                status->dSP_index);
@@ -987,6 +986,10 @@ static void outstream_write(struct hpi_adapter_obj *pao,
                /* write it */
                phm->function = HPI_OSTREAM_WRITE;
                hw_message(pao, phm, phr);
+
+               if (phr->error)
+                       return;
+
                /* update status information that the DSP would typically
                 * update (and will update next time the DSP
                 * buffer update task reads data from the host BBM buffer)
index dd8fb86c842b85feef9a3018f2dcf42b2a06dba4..14829210ef0bf4b1bac018d4f5e31a1d1c4728b9 100644 (file)
@@ -589,6 +589,7 @@ int /*__devinit*/ snd_hda_bus_new(struct snd_card *card,
        bus->ops = temp->ops;
 
        mutex_init(&bus->cmd_mutex);
+       mutex_init(&bus->prepare_mutex);
        INIT_LIST_HEAD(&bus->codec_list);
 
        snprintf(bus->workq_name, sizeof(bus->workq_name),
@@ -1068,7 +1069,6 @@ int /*__devinit*/ snd_hda_codec_new(struct hda_bus *bus,
        codec->addr = codec_addr;
        mutex_init(&codec->spdif_mutex);
        mutex_init(&codec->control_mutex);
-       mutex_init(&codec->prepare_mutex);
        init_hda_cache(&codec->amp_cache, sizeof(struct hda_amp_info));
        init_hda_cache(&codec->cmd_cache, sizeof(struct hda_cache_head));
        snd_array_init(&codec->mixers, sizeof(struct hda_nid_item), 32);
@@ -1213,6 +1213,7 @@ void snd_hda_codec_setup_stream(struct hda_codec *codec, hda_nid_t nid,
                                u32 stream_tag,
                                int channel_id, int format)
 {
+       struct hda_codec *c;
        struct hda_cvt_setup *p;
        unsigned int oldval, newval;
        int i;
@@ -1253,10 +1254,12 @@ void snd_hda_codec_setup_stream(struct hda_codec *codec, hda_nid_t nid,
        p->dirty = 0;
 
        /* make other inactive cvts with the same stream-tag dirty */
-       for (i = 0; i < codec->cvt_setups.used; i++) {
-               p = snd_array_elem(&codec->cvt_setups, i);
-               if (!p->active && p->stream_tag == stream_tag)
-                       p->dirty = 1;
+       list_for_each_entry(c, &codec->bus->codec_list, list) {
+               for (i = 0; i < c->cvt_setups.used; i++) {
+                       p = snd_array_elem(&c->cvt_setups, i);
+                       if (!p->active && p->stream_tag == stream_tag)
+                               p->dirty = 1;
+               }
        }
 }
 EXPORT_SYMBOL_HDA(snd_hda_codec_setup_stream);
@@ -1306,12 +1309,16 @@ static void really_cleanup_stream(struct hda_codec *codec,
 /* clean up the all conflicting obsolete streams */
 static void purify_inactive_streams(struct hda_codec *codec)
 {
+       struct hda_codec *c;
        int i;
 
-       for (i = 0; i < codec->cvt_setups.used; i++) {
-               struct hda_cvt_setup *p = snd_array_elem(&codec->cvt_setups, i);
-               if (p->dirty)
-                       really_cleanup_stream(codec, p);
+       list_for_each_entry(c, &codec->bus->codec_list, list) {
+               for (i = 0; i < c->cvt_setups.used; i++) {
+                       struct hda_cvt_setup *p;
+                       p = snd_array_elem(&c->cvt_setups, i);
+                       if (p->dirty)
+                               really_cleanup_stream(c, p);
+               }
        }
 }
 
@@ -3502,11 +3509,11 @@ int snd_hda_codec_prepare(struct hda_codec *codec,
                          struct snd_pcm_substream *substream)
 {
        int ret;
-       mutex_lock(&codec->prepare_mutex);
+       mutex_lock(&codec->bus->prepare_mutex);
        ret = hinfo->ops.prepare(hinfo, codec, stream, format, substream);
        if (ret >= 0)
                purify_inactive_streams(codec);
-       mutex_unlock(&codec->prepare_mutex);
+       mutex_unlock(&codec->bus->prepare_mutex);
        return ret;
 }
 EXPORT_SYMBOL_HDA(snd_hda_codec_prepare);
@@ -3515,9 +3522,9 @@ void snd_hda_codec_cleanup(struct hda_codec *codec,
                           struct hda_pcm_stream *hinfo,
                           struct snd_pcm_substream *substream)
 {
-       mutex_lock(&codec->prepare_mutex);
+       mutex_lock(&codec->bus->prepare_mutex);
        hinfo->ops.cleanup(hinfo, codec, substream);
-       mutex_unlock(&codec->prepare_mutex);
+       mutex_unlock(&codec->bus->prepare_mutex);
 }
 EXPORT_SYMBOL_HDA(snd_hda_codec_cleanup);
 
@@ -4529,7 +4536,7 @@ int snd_hda_parse_pin_def_config(struct hda_codec *codec,
                        cfg->hp_outs--;
                        memmove(cfg->hp_pins + i, cfg->hp_pins + i + 1,
                                sizeof(cfg->hp_pins[0]) * (cfg->hp_outs - i));
-                       memmove(sequences_hp + i - 1, sequences_hp + i,
+                       memmove(sequences_hp + i, sequences_hp + i + 1,
                                sizeof(sequences_hp[0]) * (cfg->hp_outs - i));
                }
        }
index 4303353feda99af99b63b63ed0a9bb6a404f8fdc..62c70224010808dca1b7a8330e28dfa791bbe61e 100644 (file)
@@ -648,6 +648,7 @@ struct hda_bus {
        struct hda_codec *caddr_tbl[HDA_MAX_CODEC_ADDRESS + 1];
 
        struct mutex cmd_mutex;
+       struct mutex prepare_mutex;
 
        /* unsolicited event queue */
        struct hda_bus_unsolicited *unsol;
@@ -826,7 +827,6 @@ struct hda_codec {
 
        struct mutex spdif_mutex;
        struct mutex control_mutex;
-       struct mutex prepare_mutex;
        unsigned int spdif_status;      /* IEC958 status bits */
        unsigned short spdif_ctls;      /* SPDIF control bits */
        unsigned int spdif_in_enable;   /* SPDIF input enable? */
index 803b298f741101065b5966f17a84cddf01028ef6..26c3ade735838624c9f6d2ca889b4ca724df96db 100644 (file)
@@ -596,6 +596,8 @@ void snd_hda_eld_proc_free(struct hda_codec *codec, struct hdmi_eld *eld)
 }
 EXPORT_SYMBOL_HDA(snd_hda_eld_proc_free);
 
+#endif /* CONFIG_PROC_FS */
+
 /* update PCM info based on ELD */
 void hdmi_eld_update_pcm_info(struct hdmi_eld *eld, struct hda_pcm_stream *pcm,
                              struct hda_pcm_stream *codec_pars)
@@ -644,5 +646,3 @@ void hdmi_eld_update_pcm_info(struct hdmi_eld *eld, struct hda_pcm_stream *pcm,
        pcm->maxbps = min(pcm->maxbps, codec_pars->maxbps);
 }
 EXPORT_SYMBOL_HDA(hdmi_eld_update_pcm_info);
-
-#endif /* CONFIG_PROC_FS */
index 4ef5efaaaef1a81d29290d5d9618097634957c10..488fd9ade1ba2bf7b306bf6e48ae106ef823294b 100644 (file)
@@ -972,6 +972,53 @@ static struct hda_verb cs_coef_init_verbs[] = {
        {} /* terminator */
 };
 
+/* Errata: CS4207 rev C0/C1/C2 Silicon
+ *
+ * http://www.cirrus.com/en/pubs/errata/ER880C3.pdf
+ *
+ * 6. At high temperature (TA > +85°C), the digital supply current (IVD)
+ * may be excessive (up to an additional 200 μA), which is most easily
+ * observed while the part is being held in reset (RESET# active low).
+ *
+ * Root Cause: At initial powerup of the device, the logic that drives
+ * the clock and write enable to the S/PDIF SRC RAMs is not properly
+ * initialized.
+ * Certain random patterns will cause a steady leakage current in those
+ * RAM cells. The issue will resolve once the SRCs are used (turned on).
+ *
+ * Workaround: The following verb sequence briefly turns on the S/PDIF SRC
+ * blocks, which will alleviate the issue.
+ */
+
+static struct hda_verb cs_errata_init_verbs[] = {
+       {0x01, AC_VERB_SET_POWER_STATE, 0x00}, /* AFG: D0 */
+       {0x11, AC_VERB_SET_PROC_STATE, 0x01},  /* VPW: processing on */
+
+       {0x11, AC_VERB_SET_COEF_INDEX, 0x0008},
+       {0x11, AC_VERB_SET_PROC_COEF, 0x9999},
+       {0x11, AC_VERB_SET_COEF_INDEX, 0x0017},
+       {0x11, AC_VERB_SET_PROC_COEF, 0xa412},
+       {0x11, AC_VERB_SET_COEF_INDEX, 0x0001},
+       {0x11, AC_VERB_SET_PROC_COEF, 0x0009},
+
+       {0x07, AC_VERB_SET_POWER_STATE, 0x00}, /* S/PDIF Rx: D0 */
+       {0x08, AC_VERB_SET_POWER_STATE, 0x00}, /* S/PDIF Tx: D0 */
+
+       {0x11, AC_VERB_SET_COEF_INDEX, 0x0017},
+       {0x11, AC_VERB_SET_PROC_COEF, 0x2412},
+       {0x11, AC_VERB_SET_COEF_INDEX, 0x0008},
+       {0x11, AC_VERB_SET_PROC_COEF, 0x0000},
+       {0x11, AC_VERB_SET_COEF_INDEX, 0x0001},
+       {0x11, AC_VERB_SET_PROC_COEF, 0x0008},
+       {0x11, AC_VERB_SET_PROC_STATE, 0x00},
+
+       {0x07, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Rx: D3 */
+       {0x08, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Tx: D3 */
+       /*{0x01, AC_VERB_SET_POWER_STATE, 0x03},*/ /* AFG: D3 This is already handled */
+
+       {} /* terminator */
+};
+
 /* SPDIF setup */
 static void init_digital(struct hda_codec *codec)
 {
@@ -991,6 +1038,9 @@ static int cs_init(struct hda_codec *codec)
 {
        struct cs_spec *spec = codec->spec;
 
+       /* init_verb sequence for C0/C1/C2 errata*/
+       snd_hda_sequence_write(codec, cs_errata_init_verbs);
+
        snd_hda_sequence_write(codec, cs_coef_init_verbs);
 
        if (spec->gpio_mask) {
index c424952a734e0e48546518fe6b47ccbf959e9aee..71f9d6475b09e2b4d52d38b60130918dff8235b2 100644 (file)
@@ -116,6 +116,7 @@ struct conexant_spec {
        unsigned int dell_vostro:1;
        unsigned int ideapad:1;
        unsigned int thinkpad:1;
+       unsigned int hp_laptop:1;
 
        unsigned int ext_mic_present;
        unsigned int recording;
@@ -2299,6 +2300,18 @@ static void cxt5066_ideapad_automic(struct hda_codec *codec)
        }
 }
 
+/* toggle input of built-in digital mic and mic jack appropriately */
+static void cxt5066_hp_laptop_automic(struct hda_codec *codec)
+{
+       unsigned int present;
+
+       present = snd_hda_jack_detect(codec, 0x1b);
+       snd_printdd("CXT5066: external microphone present=%d\n", present);
+       snd_hda_codec_write(codec, 0x17, 0, AC_VERB_SET_CONNECT_SEL,
+                           present ? 1 : 3);
+}
+
+
 /* toggle input of built-in digital mic and mic jack appropriately
    order is: external mic -> dock mic -> interal mic */
 static void cxt5066_thinkpad_automic(struct hda_codec *codec)
@@ -2407,6 +2420,20 @@ static void cxt5066_ideapad_event(struct hda_codec *codec, unsigned int res)
        }
 }
 
+/* unsolicited event for jack sensing */
+static void cxt5066_hp_laptop_event(struct hda_codec *codec, unsigned int res)
+{
+       snd_printdd("CXT5066_hp_laptop: unsol event %x (%x)\n", res, res >> 26);
+       switch (res >> 26) {
+       case CONEXANT_HP_EVENT:
+               cxt5066_hp_automute(codec);
+               break;
+       case CONEXANT_MIC_EVENT:
+               cxt5066_hp_laptop_automic(codec);
+               break;
+       }
+}
+
 /* unsolicited event for jack sensing */
 static void cxt5066_thinkpad_event(struct hda_codec *codec, unsigned int res)
 {
@@ -2989,6 +3016,14 @@ static struct hda_verb cxt5066_init_verbs_portd_lo[] = {
        { } /* end */
 };
 
+
+static struct hda_verb cxt5066_init_verbs_hp_laptop[] = {
+       {0x14, AC_VERB_SET_CONNECT_SEL, 0x0},
+       {0x19, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_HP_EVENT},
+       {0x1b, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_MIC_EVENT},
+       { } /* end */
+};
+
 /* initialize jack-sensing, too */
 static int cxt5066_init(struct hda_codec *codec)
 {
@@ -3004,6 +3039,8 @@ static int cxt5066_init(struct hda_codec *codec)
                        cxt5066_ideapad_automic(codec);
                else if (spec->thinkpad)
                        cxt5066_thinkpad_automic(codec);
+               else if (spec->hp_laptop)
+                       cxt5066_hp_laptop_automic(codec);
        }
        cxt5066_set_mic_boost(codec);
        return 0;
@@ -3031,6 +3068,7 @@ enum {
        CXT5066_DELL_VOSTO,     /* Dell Vostro 1015i */
        CXT5066_IDEAPAD,        /* Lenovo IdeaPad U150 */
        CXT5066_THINKPAD,       /* Lenovo ThinkPad T410s, others? */
+       CXT5066_HP_LAPTOP,      /* HP Laptop */
        CXT5066_MODELS
 };
 
@@ -3041,6 +3079,7 @@ static const char *cxt5066_models[CXT5066_MODELS] = {
        [CXT5066_DELL_VOSTO]    = "dell-vostro",
        [CXT5066_IDEAPAD]       = "ideapad",
        [CXT5066_THINKPAD]      = "thinkpad",
+       [CXT5066_HP_LAPTOP]     = "hp-laptop",
 };
 
 static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
@@ -3052,13 +3091,16 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTO),
        SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTO),
        SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
+       SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP),
        SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5),
        SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5),
+       SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400s", CXT5066_THINKPAD),
        SND_PCI_QUIRK(0x17aa, 0x21b2, "Thinkpad X100e", CXT5066_IDEAPAD),
        SND_PCI_QUIRK(0x17aa, 0x21b3, "Thinkpad Edge 13 (197)", CXT5066_IDEAPAD),
        SND_PCI_QUIRK(0x17aa, 0x21b4, "Thinkpad Edge", CXT5066_IDEAPAD),
        SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD),
        SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G series", CXT5066_IDEAPAD),
+       SND_PCI_QUIRK(0x17aa, 0x390a, "Lenovo S10-3t", CXT5066_IDEAPAD),
        SND_PCI_QUIRK(0x17aa, 0x3938, "Lenovo G series (AMD)", CXT5066_IDEAPAD),
        SND_PCI_QUIRK(0x17aa, 0x3a0d, "ideapad", CXT5066_IDEAPAD),
        {}
@@ -3115,6 +3157,23 @@ static int patch_cxt5066(struct hda_codec *codec)
                spec->num_init_verbs++;
                spec->dell_automute = 1;
                break;
+       case CXT5066_HP_LAPTOP:
+               codec->patch_ops.init = cxt5066_init;
+               codec->patch_ops.unsol_event = cxt5066_hp_laptop_event;
+               spec->init_verbs[spec->num_init_verbs] =
+                       cxt5066_init_verbs_hp_laptop;
+               spec->num_init_verbs++;
+               spec->hp_laptop = 1;
+               spec->mixers[spec->num_mixers++] = cxt5066_mixer_master;
+               spec->mixers[spec->num_mixers++] = cxt5066_mixers;
+               /* no S/PDIF out */
+               spec->multiout.dig_out_nid = 0;
+               /* input source automatically selected */
+               spec->input_mux = NULL;
+               spec->port_d_mode = 0;
+               spec->mic_boost = 3; /* default 30dB gain */
+               break;
+
        case CXT5066_OLPC_XO_1_5:
                codec->patch_ops.init = cxt5066_olpc_init;
                codec->patch_ops.unsol_event = cxt5066_olpc_unsol_event;
index 2bc0f07cf33fe4f67720fc3c9487a817b3e77823..afd6022a96a75bda3b24b652973dccbe591c625f 100644 (file)
@@ -707,8 +707,6 @@ static int hdmi_setup_stream(struct hda_codec *codec, hda_nid_t nid,
                              u32 stream_tag, int format)
 {
        struct hdmi_spec *spec = codec->spec;
-       int tag;
-       int fmt;
        int pinctl;
        int new_pinctl = 0;
        int i;
@@ -745,24 +743,7 @@ static int hdmi_setup_stream(struct hda_codec *codec, hda_nid_t nid,
                return -EINVAL;
        }
 
-       tag = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0) >> 4;
-       fmt = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_STREAM_FORMAT, 0);
-
-       snd_printdd("hdmi_setup_stream: "
-                   "NID=0x%x, %sstream=0x%x, %sformat=0x%x\n",
-                   nid,
-                   tag == stream_tag ? "" : "new-",
-                   stream_tag,
-                   fmt == format ? "" : "new-",
-                   format);
-
-       if (tag != stream_tag)
-               snd_hda_codec_write(codec, nid, 0,
-                                   AC_VERB_SET_CHANNEL_STREAMID,
-                                   stream_tag << 4);
-       if (fmt != format)
-               snd_hda_codec_write(codec, nid, 0,
-                                   AC_VERB_SET_STREAM_FORMAT, format);
+       snd_hda_codec_setup_stream(codec, nid, stream_tag, 0, format);
        return 0;
 }
 
index d382d3c81c0fc5bb0f320fcad17b96d67f6ef473..36a9b83a6174d93f703bf76795e7ba140901c739 100644 (file)
@@ -69,20 +69,12 @@ static int intel_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
        return hdmi_setup_stream(codec, hinfo->nid, stream_tag, format);
 }
 
-static int intel_hdmi_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
-                                          struct hda_codec *codec,
-                                          struct snd_pcm_substream *substream)
-{
-       return 0;
-}
-
 static struct hda_pcm_stream intel_hdmi_pcm_playback = {
        .substreams = 1,
        .channels_min = 2,
        .ops = {
                .open = hdmi_pcm_open,
                .prepare = intel_hdmi_playback_pcm_prepare,
-               .cleanup = intel_hdmi_playback_pcm_cleanup,
        },
 };
 
index f636870dc718d4445ed400924f3075b11b186159..69b950d527c31d966846a3c6e8f0ff30bdbe886c 100644 (file)
@@ -326,13 +326,6 @@ static int nvhdmi_dig_playback_pcm_prepare_8ch(struct hda_pcm_stream *hinfo,
        return 0;
 }
 
-static int nvhdmi_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
-                                          struct hda_codec *codec,
-                                          struct snd_pcm_substream *substream)
-{
-       return 0;
-}
-
 static int nvhdmi_dig_playback_pcm_prepare_2ch(struct hda_pcm_stream *hinfo,
                                        struct hda_codec *codec,
                                        unsigned int stream_tag,
@@ -350,7 +343,6 @@ static struct hda_pcm_stream nvhdmi_pcm_digital_playback_8ch_89 = {
        .ops = {
                .open = hdmi_pcm_open,
                .prepare = nvhdmi_dig_playback_pcm_prepare_8ch_89,
-               .cleanup = nvhdmi_playback_pcm_cleanup,
        },
 };
 
index a4dd04524e4391ce7d47f76196b6eeca5e6e9f85..bcbf9160ed81af7a805007f152067908f9970840 100644 (file)
@@ -5334,6 +5334,7 @@ static void fillup_priv_adc_nids(struct hda_codec *codec, hda_nid_t *nids,
 
 static struct snd_pci_quirk beep_white_list[] = {
        SND_PCI_QUIRK(0x1043, 0x829f, "ASUS", 1),
+       SND_PCI_QUIRK(0x1043, 0x83ce, "EeePC", 1),
        SND_PCI_QUIRK(0x8086, 0xd613, "Intel", 1),
        {}
 };
@@ -14467,6 +14468,7 @@ static const struct alc_fixup alc269_fixups[] = {
 
 static struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x104d, 0x9071, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
+       SND_PCI_QUIRK(0x104d, 0x9077, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
        {}
 };
 
index f3f861bd1bf880f650d4361df30092f2b034d09c..95148e58026cfb045793d3ba26d570f26108c284 100644 (file)
@@ -6303,6 +6303,21 @@ static struct hda_codec_preset snd_hda_preset_sigmatel[] = {
        { .id = 0x111d76b5, .name = "92HD71B6X", .patch = patch_stac92hd71bxx },
        { .id = 0x111d76b6, .name = "92HD71B5X", .patch = patch_stac92hd71bxx },
        { .id = 0x111d76b7, .name = "92HD71B5X", .patch = patch_stac92hd71bxx },
+       { .id = 0x111d76c0, .name = "92HD89C3", .patch = patch_stac92hd73xx },
+       { .id = 0x111d76c1, .name = "92HD89C2", .patch = patch_stac92hd73xx },
+       { .id = 0x111d76c2, .name = "92HD89C1", .patch = patch_stac92hd73xx },
+       { .id = 0x111d76c3, .name = "92HD89B3", .patch = patch_stac92hd73xx },
+       { .id = 0x111d76c4, .name = "92HD89B2", .patch = patch_stac92hd73xx },
+       { .id = 0x111d76c5, .name = "92HD89B1", .patch = patch_stac92hd73xx },
+       { .id = 0x111d76c6, .name = "92HD89E3", .patch = patch_stac92hd73xx },
+       { .id = 0x111d76c7, .name = "92HD89E2", .patch = patch_stac92hd73xx },
+       { .id = 0x111d76c8, .name = "92HD89E1", .patch = patch_stac92hd73xx },
+       { .id = 0x111d76c9, .name = "92HD89D3", .patch = patch_stac92hd73xx },
+       { .id = 0x111d76ca, .name = "92HD89D2", .patch = patch_stac92hd73xx },
+       { .id = 0x111d76cb, .name = "92HD89D1", .patch = patch_stac92hd73xx },
+       { .id = 0x111d76cc, .name = "92HD89F3", .patch = patch_stac92hd73xx },
+       { .id = 0x111d76cd, .name = "92HD89F2", .patch = patch_stac92hd73xx },
+       { .id = 0x111d76ce, .name = "92HD89F1", .patch = patch_stac92hd73xx },
        {} /* terminator */
 };
 
index 6433e65c9507d20a65be6c0b14af1275c30e0c40..46774924957643874171c1c4e0dfd9a20672d2f3 100644 (file)
@@ -1774,6 +1774,12 @@ static struct ac97_quirk ac97_quirks[] __devinitdata = {
                .name = "HP/Compaq nx7010",
                .type = AC97_TUNE_MUTE_LED
         },
+       {
+               .subvendor = 0x1014,
+               .subdevice = 0x0534,
+               .name = "ThinkPad X31",
+               .type = AC97_TUNE_INV_EAPD
+       },
        {
                .subvendor = 0x1014,
                .subdevice = 0x1f00,
index 6147216af74412f5ff47af36b04ce9fd51013f6b..a3409edcfb5094791c33563f8ad3911826ce3e26 100644 (file)
@@ -155,6 +155,7 @@ void oxygen_pci_remove(struct pci_dev *pci);
 int oxygen_pci_suspend(struct pci_dev *pci, pm_message_t state);
 int oxygen_pci_resume(struct pci_dev *pci);
 #endif
+void oxygen_pci_shutdown(struct pci_dev *pci);
 
 /* oxygen_mixer.c */
 
index fad03d64e3ad0c936aca79a257f6c484b067f657..7e93cf884437d0b5844c9515250c49994390e54c 100644 (file)
@@ -519,16 +519,21 @@ static void oxygen_init(struct oxygen *chip)
        }
 }
 
-static void oxygen_card_free(struct snd_card *card)
+static void oxygen_shutdown(struct oxygen *chip)
 {
-       struct oxygen *chip = card->private_data;
-
        spin_lock_irq(&chip->reg_lock);
        chip->interrupt_mask = 0;
        chip->pcm_running = 0;
        oxygen_write16(chip, OXYGEN_DMA_STATUS, 0);
        oxygen_write16(chip, OXYGEN_INTERRUPT_MASK, 0);
        spin_unlock_irq(&chip->reg_lock);
+}
+
+static void oxygen_card_free(struct snd_card *card)
+{
+       struct oxygen *chip = card->private_data;
+
+       oxygen_shutdown(chip);
        if (chip->irq >= 0)
                free_irq(chip->irq, chip);
        flush_scheduled_work();
@@ -778,3 +783,13 @@ int oxygen_pci_resume(struct pci_dev *pci)
 }
 EXPORT_SYMBOL(oxygen_pci_resume);
 #endif /* CONFIG_PM */
+
+void oxygen_pci_shutdown(struct pci_dev *pci)
+{
+       struct snd_card *card = pci_get_drvdata(pci);
+       struct oxygen *chip = card->private_data;
+
+       oxygen_shutdown(chip);
+       chip->model.cleanup(chip);
+}
+EXPORT_SYMBOL(oxygen_pci_shutdown);
index f03a2f2cffee88911e9c11db08c5dd7dd8087b55..06c863e86e3d3d24a5dfd0f7b43f56a47b120719 100644 (file)
@@ -95,6 +95,7 @@ static struct pci_driver xonar_driver = {
        .suspend = oxygen_pci_suspend,
        .resume = oxygen_pci_resume,
 #endif
+       .shutdown = oxygen_pci_shutdown,
 };
 
 static int __init alsa_card_xonar_init(void)
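The two oxygen hunks above factor the register-quiescing code out of oxygen_card_free() into a shared oxygen_shutdown() helper and export oxygen_pci_shutdown(), which the xonar driver then wires into its pci_driver .shutdown hook so DMA and interrupts are also silenced on reboot, not just on card teardown. A minimal userspace sketch of that refactoring pattern, assuming toy types and names that are purely illustrative and not part of the driver:

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for the driver's chip state; fields are illustrative only. */
struct toy_chip {
	int irq_enabled;
	int dma_running;
};

/* Shared quiescing step, analogous to the factored-out oxygen_shutdown(). */
static void toy_quiesce(struct toy_chip *chip)
{
	chip->dma_running = 0;
	chip->irq_enabled = 0;
	printf("DMA and interrupts silenced\n");
}

/* Full teardown path, analogous to oxygen_card_free(). */
static void toy_free(struct toy_chip *chip)
{
	toy_quiesce(chip);
	free(chip);
}

/* Reboot hook, analogous to oxygen_pci_shutdown(): quiesce the hardware
 * but leave resource release to the normal teardown path. */
static void toy_shutdown(struct toy_chip *chip)
{
	toy_quiesce(chip);
}

int main(void)
{
	struct toy_chip *chip = calloc(1, sizeof(*chip));
	if (!chip)
		return 1;
	chip->irq_enabled = 1;
	chip->dma_running = 1;

	toy_shutdown(chip);  /* e.g. on system reboot */
	toy_free(chip);      /* e.g. on driver removal */
	return 0;
}
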
index dbc4b89d74e43ffa3311af7c9386fb5aae38d9a9..b82c1cfa96f5334554435a6eeebc6e82fa63857d 100644 (file)
@@ -53,6 +53,8 @@ struct xonar_wm87x6 {
        struct xonar_generic generic;
        u16 wm8776_regs[0x17];
        u16 wm8766_regs[0x10];
+       struct snd_kcontrol *line_adcmux_control;
+       struct snd_kcontrol *mic_adcmux_control;
        struct snd_kcontrol *lc_controls[13];
 };
 
@@ -193,6 +195,7 @@ static void xonar_ds_init(struct oxygen *chip)
 static void xonar_ds_cleanup(struct oxygen *chip)
 {
        xonar_disable_output(chip);
+       wm8776_write(chip, WM8776_RESET, 0);
 }
 
 static void xonar_ds_suspend(struct oxygen *chip)
@@ -603,6 +606,7 @@ static int wm8776_input_mux_put(struct snd_kcontrol *ctl,
 {
        struct oxygen *chip = ctl->private_data;
        struct xonar_wm87x6 *data = chip->model_data;
+       struct snd_kcontrol *other_ctl;
        unsigned int mux_bit = ctl->private_value;
        u16 reg;
        int changed;
@@ -610,8 +614,18 @@ static int wm8776_input_mux_put(struct snd_kcontrol *ctl,
        mutex_lock(&chip->mutex);
        reg = data->wm8776_regs[WM8776_ADCMUX];
        if (value->value.integer.value[0]) {
-               reg &= ~0x003;
                reg |= mux_bit;
+               /* line-in and mic-in are exclusive */
+               mux_bit ^= 3;
+               if (reg & mux_bit) {
+                       reg &= ~mux_bit;
+                       if (mux_bit == 1)
+                               other_ctl = data->line_adcmux_control;
+                       else
+                               other_ctl = data->mic_adcmux_control;
+                       snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
+                                      &other_ctl->id);
+               }
        } else
                reg &= ~mux_bit;
        changed = reg != data->wm8776_regs[WM8776_ADCMUX];
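In the hunk above, mux_bit is 1 for line-in and 2 for mic-in, so `mux_bit ^= 3` turns it into the other source's bit; the driver clears that bit to keep the two capture switches mutually exclusive and notifies the other control. A small standalone demonstration of the XOR idiom, with illustrative names rather than the driver's:

#include <stdio.h>

#define LINE_BIT 0x1
#define MIC_BIT  0x2

/* Enable one capture source and clear the other, mirroring the
 * `mux_bit ^= 3` trick used in wm8776_input_mux_put(). */
static unsigned int select_source(unsigned int reg, unsigned int mux_bit)
{
	reg |= mux_bit;                   /* set the requested source */
	mux_bit ^= (LINE_BIT | MIC_BIT);  /* now names the other source's bit */
	reg &= ~mux_bit;                  /* clear it: line and mic are exclusive */
	return reg;
}

int main(void)
{
	unsigned int reg = 0;

	reg = select_source(reg, LINE_BIT);
	printf("after line select: %#x\n", reg);  /* prints 0x1 */

	reg = select_source(reg, MIC_BIT);
	printf("after mic select:  %#x\n", reg);  /* prints 0x2 */
	return 0;
}
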
@@ -963,7 +977,13 @@ static int xonar_ds_mixer_init(struct oxygen *chip)
                err = snd_ctl_add(chip->card, ctl);
                if (err < 0)
                        return err;
+               if (!strcmp(ctl->id.name, "Line Capture Switch"))
+                       data->line_adcmux_control = ctl;
+               else if (!strcmp(ctl->id.name, "Mic Capture Switch"))
+                       data->mic_adcmux_control = ctl;
        }
+       if (!data->line_adcmux_control || !data->mic_adcmux_control)
+               return -ENXIO;
        BUILD_BUG_ON(ARRAY_SIZE(lc_controls) != ARRAY_SIZE(data->lc_controls));
        for (i = 0; i < ARRAY_SIZE(lc_controls); ++i) {
                ctl = snd_ctl_new1(&lc_controls[i], chip);
index a11daa1e905b3552ed2c1a8dc178e29e5491cca6..c81da05a4f11ee0ada1ad170dea17aea207eb969 100644 (file)
@@ -254,6 +254,9 @@ static int imx_ssi_hw_params(struct snd_pcm_substream *substream,
                dma_data = &ssi->dma_params_rx;
        }
 
+       if (ssi->flags & IMX_SSI_SYN)
+               reg = SSI_STCCR;
+
        snd_soc_dai_set_dma_data(cpu_dai, substream, dma_data);
 
        sccr = readl(ssi->base + reg) & ~SSI_STCCR_WL_MASK;
index 844ae8221a3af65be95dcd848f925b26651a0640..acc91daa1c5509df6a7844f684a0640ff7198109 100644 (file)
@@ -251,7 +251,7 @@ static void soc_init_codec_debugfs(struct snd_soc_codec *codec)
                printk(KERN_WARNING
                       "ASoC: Failed to create codec register debugfs file\n");
 
-       codec->debugfs_pop_time = debugfs_create_u32("dapm_pop_time", 0744,
+       codec->debugfs_pop_time = debugfs_create_u32("dapm_pop_time", 0644,
                                                     codec->debugfs_codec_root,
                                                     &codec->pop_time);
        if (!codec->debugfs_pop_time)
index 9feb00c831a02b791227b75294e6dc8e15975f1f..4eabafa5b037db66b250db64cc0ce05aa783f595 100644 (file)
@@ -126,7 +126,7 @@ static void snd_usb_stream_disconnect(struct list_head *head)
        for (idx = 0; idx < 2; idx++) {
                subs = &as->substream[idx];
                if (!subs->num_formats)
-                       return;
+                       continue;
                snd_usb_release_substream_urbs(subs, 1);
                subs->interface = -1;
        }
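The return-to-continue change above matters because the loop walks both substreams of a stream: returning on the first substream that has no formats would leave the second one's URBs unreleased, while `continue` only skips the empty entry. A tiny standalone illustration of the difference, using made-up data:

#include <stdio.h>

struct toy_substream {
	int num_formats;
	int released;
};

/* Release every substream that actually has formats; skip the rest.
 * Using `return` instead of `continue` here would leak later entries. */
static void disconnect_all(struct toy_substream *subs, int count)
{
	for (int i = 0; i < count; i++) {
		if (!subs[i].num_formats)
			continue;
		subs[i].released = 1;
	}
}

int main(void)
{
	struct toy_substream subs[2] = {
		{ .num_formats = 0 },  /* e.g. one direction unused */
		{ .num_formats = 2 },  /* the other direction in use */
	};

	disconnect_all(subs, 2);
	printf("second substream released: %d\n", subs[1].released);  /* prints 1 */
	return 0;
}
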
@@ -216,6 +216,11 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
        }
 
        switch (protocol) {
+       default:
+               snd_printdd(KERN_WARNING "unknown interface protocol %#02x, assuming v1\n",
+                           protocol);
+               /* fall through */
+
        case UAC_VERSION_1: {
                struct uac1_ac_header_descriptor *h1 = control_header;
 
@@ -253,10 +258,6 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
 
                break;
        }
-
-       default:
-               snd_printk(KERN_ERR "unknown protocol version 0x%02x\n", protocol);
-               return -EINVAL;
        }
 
        return 0;
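Several of the USB-audio hunks in this merge move `default:` in front of `case UAC_VERSION_1:` so an unrecognised bInterfaceProtocol is logged and then handled as UAC v1 instead of aborting with -EINVAL; the fall-through only works because the default label sits immediately above the v1 case. A compact standalone sketch of the idiom, with illustrative handler names (the constant values are meant to mirror the kernel's UAC_VERSION_* definitions):

#include <stdio.h>

enum { UAC_V1 = 0x00, UAC_V2 = 0x20 };

static int handle_v1(void) { puts("using v1 parser"); return 0; }
static int handle_v2(void) { puts("using v2 parser"); return 0; }

static int dispatch(int protocol)
{
	switch (protocol) {
	default:
		/* Unknown protocol: warn, then fall through to the v1 path. */
		fprintf(stderr, "unknown protocol %#02x, assuming v1\n", protocol);
		/* fall through */
	case UAC_V1:
		return handle_v1();
	case UAC_V2:
		return handle_v2();
	}
}

int main(void)
{
	dispatch(UAC_V1);
	dispatch(UAC_V2);
	dispatch(0x42);  /* unknown: falls back to the v1 handler */
	return 0;
}
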
@@ -465,7 +466,13 @@ static void *snd_usb_audio_probe(struct usb_device *dev,
                        goto __error;
        }
 
-       chip->ctrl_intf = alts;
+       /*
+        * For devices with more than one control interface, we assume the
+        * first contains the audio controls. We might need a more specific
+        * check here in the future.
+        */
+       if (!chip->ctrl_intf)
+               chip->ctrl_intf = alts;
 
        if (err > 0) {
                /* create normal USB audio interfaces */
index b853f8df794f6a35ceb8e39576abf3d1cb91e36c..7754a10345451109a9ff9bd03d271148160861b9 100644 (file)
@@ -295,12 +295,11 @@ int snd_usb_init_sample_rate(struct snd_usb_audio *chip, int iface,
 
        switch (altsd->bInterfaceProtocol) {
        case UAC_VERSION_1:
+       default:
                return set_sample_rate_v1(chip, iface, alts, fmt, rate);
 
        case UAC_VERSION_2:
                return set_sample_rate_v2(chip, iface, alts, fmt, rate);
        }
-
-       return -EINVAL;
 }
 
index 1a701f1e8f501b358029f84bedcbe1268e9f543f..ef0a07e34844ae4d53ffd12e54b16a203da02a19 100644 (file)
@@ -275,6 +275,12 @@ int snd_usb_parse_audio_endpoints(struct snd_usb_audio *chip, int iface_no)
 
                /* get audio formats */
                switch (protocol) {
+               default:
+                       snd_printdd(KERN_WARNING "%d:%u:%d: unknown interface protocol %#02x, assuming v1\n",
+                                   dev->devnum, iface_no, altno, protocol);
+                       protocol = UAC_VERSION_1;
+                       /* fall through */
+
                case UAC_VERSION_1: {
                        struct uac1_as_header_descriptor *as =
                                snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, UAC_AS_GENERAL);
@@ -336,11 +342,6 @@ int snd_usb_parse_audio_endpoints(struct snd_usb_audio *chip, int iface_no)
                                   dev->devnum, iface_no, altno, as->bTerminalLink);
                        continue;
                }
-
-               default:
-                       snd_printk(KERN_ERR "%d:%u:%d : unknown interface protocol %04x\n",
-                                  dev->devnum, iface_no, altno, protocol);
-                       continue;
                }
 
                /* get format type */
index 3a1375459c06a49d0d48a309460af30a0fcfd8c0..69148212aa70e66f9e1193040bfb41aad5548b6b 100644 (file)
@@ -49,7 +49,8 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip,
        u64 pcm_formats;
 
        switch (protocol) {
-       case UAC_VERSION_1: {
+       case UAC_VERSION_1:
+       default: {
                struct uac_format_type_i_discrete_descriptor *fmt = _fmt;
                sample_width = fmt->bBitResolution;
                sample_bytes = fmt->bSubframeSize;
@@ -64,9 +65,6 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip,
                format <<= 1;
                break;
        }
-
-       default:
-               return -EINVAL;
        }
 
        pcm_formats = 0;
@@ -384,6 +382,10 @@ static int parse_audio_format_i(struct snd_usb_audio *chip,
         * audio class v2 uses class specific EP0 range requests for that.
         */
        switch (protocol) {
+       default:
+               snd_printdd(KERN_WARNING "%d:%u:%d : invalid protocol version %d, assuming v1\n",
+                          chip->dev->devnum, fp->iface, fp->altsetting, protocol);
+               /* fall through */
        case UAC_VERSION_1:
                fp->channels = fmt->bNrChannels;
                ret = parse_audio_format_rates_v1(chip, fp, (unsigned char *) fmt, 7);
@@ -392,10 +394,6 @@ static int parse_audio_format_i(struct snd_usb_audio *chip,
                /* fp->channels is already set in this case */
                ret = parse_audio_format_rates_v2(chip, fp);
                break;
-       default:
-               snd_printk(KERN_ERR "%d:%u:%d : invalid protocol version %d\n",
-                          chip->dev->devnum, fp->iface, fp->altsetting, protocol);
-               return -EINVAL;
        }
 
        if (fp->channels < 1) {
@@ -438,6 +436,10 @@ static int parse_audio_format_ii(struct snd_usb_audio *chip,
        fp->channels = 1;
 
        switch (protocol) {
+       default:
+               snd_printdd(KERN_WARNING "%d:%u:%d : invalid protocol version %d, assuming v1\n",
+                          chip->dev->devnum, fp->iface, fp->altsetting, protocol);
+               /* fall through */
        case UAC_VERSION_1: {
                struct uac_format_type_ii_discrete_descriptor *fmt = _fmt;
                brate = le16_to_cpu(fmt->wMaxBitRate);
@@ -456,10 +458,6 @@ static int parse_audio_format_ii(struct snd_usb_audio *chip,
                ret = parse_audio_format_rates_v2(chip, fp);
                break;
        }
-       default:
-               snd_printk(KERN_ERR "%d:%u:%d : invalid protocol version %d\n",
-                          chip->dev->devnum, fp->iface, fp->altsetting, protocol);
-               return -EINVAL;
        }
 
        return ret;
index c166db0057d3e0e613954b21f8ca5274f0242741..3ed3901369ce1a9ca58473ecef450623cf95a7b2 100644 (file)
@@ -2175,7 +2175,15 @@ int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif,
        }
 
        host_iface = &usb_ifnum_to_if(chip->dev, ctrlif)->altsetting[0];
-       mixer->protocol = get_iface_desc(host_iface)->bInterfaceProtocol;
+       switch (get_iface_desc(host_iface)->bInterfaceProtocol) {
+       case UAC_VERSION_1:
+       default:
+               mixer->protocol = UAC_VERSION_1;
+               break;
+       case UAC_VERSION_2:
+               mixer->protocol = UAC_VERSION_2;
+               break;
+       }
 
        if ((err = snd_usb_mixer_controls(mixer)) < 0 ||
            (err = snd_usb_mixer_status_create(mixer)) < 0)
index 3634cedf93061629619125690fb19dc7d0beae64..3b5135c930628fc092cad54e67ce2a7645c59c02 100644 (file)
@@ -173,13 +173,12 @@ int snd_usb_init_pitch(struct snd_usb_audio *chip, int iface,
 
        switch (altsd->bInterfaceProtocol) {
        case UAC_VERSION_1:
+       default:
                return init_pitch_v1(chip, iface, alts, fmt);
 
        case UAC_VERSION_2:
                return init_pitch_v2(chip, iface, alts, fmt);
        }
-
-       return -EINVAL;
 }
 
 /*
index 624a96c636fdbc36a472ecaadf5fcb72c226bf38..6de4313924fb5c510f354cb3a8e2a73061e103f6 100644 (file)
@@ -50,6 +50,7 @@ static inline void callchain_init(struct callchain_node *node)
        INIT_LIST_HEAD(&node->children);
        INIT_LIST_HEAD(&node->val);
 
+       node->children_hit = 0;
        node->parent = NULL;
        node->hit = 0;
 }
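The perf fix above adds the missing `children_hit = 0` so callchain nodes start with a clean accumulator even when they are not zero-allocated. The general rule is that an init helper should reset every counter the structure accumulates into; a minimal illustration with a toy structure (not perf's):

#include <stdio.h>
#include <stdlib.h>

struct toy_node {
	unsigned long hit;
	unsigned long children_hit;
};

/* Explicitly reset every accumulator, mirroring callchain_init():
 * relying on the allocator to zero memory is only safe with calloc(). */
static void toy_node_init(struct toy_node *node)
{
	node->hit = 0;
	node->children_hit = 0;
}

int main(void)
{
	struct toy_node *node = malloc(sizeof(*node));
	if (!node)
		return 1;

	toy_node_init(node);
	node->hit += 3;
	node->children_hit += 5;
	printf("hit=%lu children_hit=%lu\n", node->hit, node->children_hit);

	free(node);
	return 0;
}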