Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
author Linus Torvalds <torvalds@linux-foundation.org>
Thu, 28 Oct 2010 14:53:04 +0000 (07:53 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 28 Oct 2010 14:53:04 +0000 (07:53 -0700)
* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  cciss: remove overlapping PCI IDs
  block: cciss: fix information leak to userland
  drivers/block/aoe/aoeblk.c: ratelimit a warning printk
  drivers/block/z2ram.c: correct printing of sector_t
  aoe: don't use flush_scheduled_work()
  drivers/block/drbd/drbd_main.c: fix error path
  loop: Properly clear sysfs in autoclear mode

634 files changed:
Documentation/accounting/getdelays.c
Documentation/cgroups/cgroups.txt
Documentation/fb/viafb.txt
Documentation/feature-removal-schedule.txt
Documentation/filesystems/ext4.txt
Documentation/filesystems/proc.txt
Documentation/networking/phy.txt
Documentation/sysctl/vm.txt
Kbuild
MAINTAINERS
arch/alpha/kernel/pci_iommu.c
arch/alpha/kernel/ptrace.c
arch/arm/kernel/ptrace.c
arch/arm/mach-imx/include/mach/dma-v1.h
arch/arm/mach-tegra/timer.c
arch/arm/mach-ux500/devices-db8500.c
arch/arm/mm/highmem.c
arch/arm/plat-mxc/include/mach/dma.h [new file with mode: 0644]
arch/arm/plat-mxc/include/mach/sdma.h [new file with mode: 0644]
arch/arm/plat-nomadik/include/plat/ste_dma40.h
arch/avr32/kernel/ptrace.c
arch/blackfin/kernel/ptrace.c
arch/cris/arch-v10/kernel/ptrace.c
arch/cris/arch-v32/kernel/ptrace.c
arch/frv/kernel/ptrace.c
arch/frv/mm/highmem.c
arch/h8300/kernel/ptrace.c
arch/ia64/include/asm/cputime.h
arch/ia64/kernel/ptrace.c
arch/m32r/kernel/ptrace.c
arch/m68k/kernel/ptrace.c
arch/m68knommu/kernel/ptrace.c
arch/microblaze/kernel/ptrace.c
arch/mips/include/asm/pci/bridge.h
arch/mips/kernel/ptrace.c
arch/mips/mm/highmem.c
arch/mn10300/Kconfig
arch/mn10300/Makefile
arch/mn10300/boot/compressed/head.S
arch/mn10300/configs/asb2303_defconfig
arch/mn10300/configs/asb2364_defconfig [new file with mode: 0644]
arch/mn10300/include/asm/atomic.h
arch/mn10300/include/asm/bitops.h
arch/mn10300/include/asm/cache.h
arch/mn10300/include/asm/cacheflush.h
arch/mn10300/include/asm/cpu-regs.h
arch/mn10300/include/asm/dmactl-regs.h
arch/mn10300/include/asm/elf.h
arch/mn10300/include/asm/exceptions.h
arch/mn10300/include/asm/fpu.h
arch/mn10300/include/asm/frame.inc
arch/mn10300/include/asm/gdb-stub.h
arch/mn10300/include/asm/hardirq.h
arch/mn10300/include/asm/highmem.h
arch/mn10300/include/asm/intctl-regs.h
arch/mn10300/include/asm/io.h
arch/mn10300/include/asm/irq.h
arch/mn10300/include/asm/irq_regs.h
arch/mn10300/include/asm/irqflags.h
arch/mn10300/include/asm/mmu_context.h
arch/mn10300/include/asm/pgalloc.h
arch/mn10300/include/asm/pgtable.h
arch/mn10300/include/asm/processor.h
arch/mn10300/include/asm/ptrace.h
arch/mn10300/include/asm/reset-regs.h
arch/mn10300/include/asm/rtc.h
arch/mn10300/include/asm/rwlock.h [new file with mode: 0644]
arch/mn10300/include/asm/serial-regs.h
arch/mn10300/include/asm/serial.h
arch/mn10300/include/asm/smp.h
arch/mn10300/include/asm/smsc911x.h [new file with mode: 0644]
arch/mn10300/include/asm/spinlock.h
arch/mn10300/include/asm/spinlock_types.h [new file with mode: 0644]
arch/mn10300/include/asm/system.h
arch/mn10300/include/asm/thread_info.h
arch/mn10300/include/asm/timer-regs.h
arch/mn10300/include/asm/timex.h
arch/mn10300/include/asm/tlbflush.h
arch/mn10300/include/asm/uaccess.h
arch/mn10300/kernel/Makefile
arch/mn10300/kernel/asm-offsets.c
arch/mn10300/kernel/cevt-mn10300.c [new file with mode: 0644]
arch/mn10300/kernel/csrc-mn10300.c [new file with mode: 0644]
arch/mn10300/kernel/entry.S
arch/mn10300/kernel/fpu-low.S
arch/mn10300/kernel/fpu-nofpu-low.S [new file with mode: 0644]
arch/mn10300/kernel/fpu-nofpu.c [new file with mode: 0644]
arch/mn10300/kernel/fpu.c
arch/mn10300/kernel/gdb-io-serial-low.S
arch/mn10300/kernel/gdb-io-serial.c
arch/mn10300/kernel/gdb-io-ttysm.c
arch/mn10300/kernel/gdb-stub.c
arch/mn10300/kernel/head.S
arch/mn10300/kernel/internal.h
arch/mn10300/kernel/irq.c
arch/mn10300/kernel/kprobes.c
arch/mn10300/kernel/mn10300-serial-low.S
arch/mn10300/kernel/mn10300-serial.c
arch/mn10300/kernel/mn10300-watchdog-low.S
arch/mn10300/kernel/mn10300-watchdog.c
arch/mn10300/kernel/process.c
arch/mn10300/kernel/profile.c
arch/mn10300/kernel/ptrace.c
arch/mn10300/kernel/rtc.c
arch/mn10300/kernel/setup.c
arch/mn10300/kernel/signal.c
arch/mn10300/kernel/smp-low.S [new file with mode: 0644]
arch/mn10300/kernel/smp.c [new file with mode: 0644]
arch/mn10300/kernel/switch_to.S
arch/mn10300/kernel/time.c
arch/mn10300/kernel/traps.c
arch/mn10300/lib/bitops.c
arch/mn10300/lib/delay.c
arch/mn10300/lib/do_csum.S
arch/mn10300/mm/Kconfig.cache [new file with mode: 0644]
arch/mn10300/mm/Makefile
arch/mn10300/mm/cache-flush-by-reg.S [new file with mode: 0644]
arch/mn10300/mm/cache-flush-by-tag.S [new file with mode: 0644]
arch/mn10300/mm/cache-flush-icache.c [new file with mode: 0644]
arch/mn10300/mm/cache-flush-mn10300.S [deleted file]
arch/mn10300/mm/cache-inv-by-reg.S [new file with mode: 0644]
arch/mn10300/mm/cache-inv-by-tag.S [new file with mode: 0644]
arch/mn10300/mm/cache-inv-icache.c [new file with mode: 0644]
arch/mn10300/mm/cache-mn10300.S [deleted file]
arch/mn10300/mm/cache-smp-flush.c [new file with mode: 0644]
arch/mn10300/mm/cache-smp-inv.c [new file with mode: 0644]
arch/mn10300/mm/cache-smp.c [new file with mode: 0644]
arch/mn10300/mm/cache-smp.h [new file with mode: 0644]
arch/mn10300/mm/cache.c
arch/mn10300/mm/fault.c
arch/mn10300/mm/init.c
arch/mn10300/mm/misalignment.c
arch/mn10300/mm/mmu-context.c
arch/mn10300/mm/pgtable.c
arch/mn10300/mm/tlb-mn10300.S
arch/mn10300/mm/tlb-smp.c [new file with mode: 0644]
arch/mn10300/proc-mn103e010/include/proc/cache.h
arch/mn10300/proc-mn103e010/include/proc/clock.h
arch/mn10300/proc-mn103e010/include/proc/dmactl-regs.h [new file with mode: 0644]
arch/mn10300/proc-mn103e010/include/proc/intctl-regs.h [new file with mode: 0644]
arch/mn10300/proc-mn103e010/include/proc/proc.h
arch/mn10300/proc-mn103e010/proc-init.c
arch/mn10300/proc-mn2ws0050/Makefile [new file with mode: 0644]
arch/mn10300/proc-mn2ws0050/include/proc/cache.h [new file with mode: 0644]
arch/mn10300/proc-mn2ws0050/include/proc/clock.h [new file with mode: 0644]
arch/mn10300/proc-mn2ws0050/include/proc/dmactl-regs.h [new file with mode: 0644]
arch/mn10300/proc-mn2ws0050/include/proc/intctl-regs.h [new file with mode: 0644]
arch/mn10300/proc-mn2ws0050/include/proc/irq.h [new file with mode: 0644]
arch/mn10300/proc-mn2ws0050/include/proc/nand-regs.h [new file with mode: 0644]
arch/mn10300/proc-mn2ws0050/include/proc/proc.h [new file with mode: 0644]
arch/mn10300/proc-mn2ws0050/include/proc/smp-regs.h [new file with mode: 0644]
arch/mn10300/proc-mn2ws0050/proc-init.c [new file with mode: 0644]
arch/mn10300/unit-asb2303/include/unit/clock.h
arch/mn10300/unit-asb2303/include/unit/serial.h
arch/mn10300/unit-asb2303/include/unit/timex.h
arch/mn10300/unit-asb2303/unit-init.c
arch/mn10300/unit-asb2305/include/unit/clock.h
arch/mn10300/unit-asb2305/include/unit/serial.h
arch/mn10300/unit-asb2305/include/unit/timex.h
arch/mn10300/unit-asb2305/pci-asb2305.c
arch/mn10300/unit-asb2305/pci.c
arch/mn10300/unit-asb2305/unit-init.c
arch/mn10300/unit-asb2364/Makefile [new file with mode: 0644]
arch/mn10300/unit-asb2364/include/unit/clock.h [new file with mode: 0644]
arch/mn10300/unit-asb2364/include/unit/fpga-regs.h [new file with mode: 0644]
arch/mn10300/unit-asb2364/include/unit/irq.h [new file with mode: 0644]
arch/mn10300/unit-asb2364/include/unit/leds.h [new file with mode: 0644]
arch/mn10300/unit-asb2364/include/unit/serial.h [new file with mode: 0644]
arch/mn10300/unit-asb2364/include/unit/smsc911x.h [new file with mode: 0644]
arch/mn10300/unit-asb2364/include/unit/timex.h [new file with mode: 0644]
arch/mn10300/unit-asb2364/irq-fpga.c [new file with mode: 0644]
arch/mn10300/unit-asb2364/leds.c [new file with mode: 0644]
arch/mn10300/unit-asb2364/smsc911x.c [new file with mode: 0644]
arch/mn10300/unit-asb2364/unit-init.c [new file with mode: 0644]
arch/parisc/kernel/ptrace.c
arch/powerpc/include/asm/cputime.h
arch/powerpc/include/asm/fsldma.h [deleted file]
arch/powerpc/kernel/ptrace.c
arch/powerpc/mm/highmem.c
arch/powerpc/sysdev/fsl_rio.c
arch/s390/include/asm/cputime.h
arch/s390/kernel/ptrace.c
arch/score/kernel/ptrace.c
arch/sh/kernel/ptrace_32.c
arch/sh/kernel/ptrace_64.c
arch/sparc/include/asm/io_32.h
arch/sparc/include/asm/io_64.h
arch/sparc/include/asm/pci_64.h
arch/sparc/kernel/ptrace_32.c
arch/sparc/kernel/ptrace_64.c
arch/sparc/mm/highmem.c
arch/tile/Kconfig
arch/tile/kernel/ptrace.c
arch/tile/kernel/setup.c
arch/tile/mm/highmem.c
arch/um/kernel/ptrace.c
arch/um/sys-i386/ptrace.c
arch/um/sys-x86_64/ptrace.c
arch/x86/Kconfig
arch/x86/include/asm/irq.h
arch/x86/include/asm/msr-index.h
arch/x86/include/asm/perf_event.h
arch/x86/include/asm/smp.h
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event_intel_ds.c
arch/x86/kernel/dumpstack_32.c
arch/x86/kernel/dumpstack_64.c
arch/x86/kernel/irq_32.c
arch/x86/kernel/ptrace.c
arch/x86/kernel/reboot.c
arch/x86/kernel/smp.c
arch/x86/kernel/smpboot.c
arch/x86/mm/highmem_32.c
arch/x86/mm/init_64.c
arch/x86/mm/iomap_32.c
arch/x86/oprofile/nmi_int.c
arch/x86/oprofile/op_model_amd.c
arch/x86/xen/enlighten.c
arch/x86/xen/smp.c
arch/xtensa/kernel/ptrace.c
crypto/async_tx/Kconfig
drivers/atm/eni.c
drivers/char/applicom.c
drivers/char/hvc_console.c
drivers/char/ip2/Makefile
drivers/char/ipmi/Makefile
drivers/char/ipmi/ipmi_devintf.c
drivers/char/ipmi/ipmi_msghandler.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/char/mmtimer.c
drivers/char/mwave/Makefile
drivers/char/mxser.c
drivers/char/pcmcia/ipwireless/Makefile
drivers/char/ppdev.c
drivers/char/ramoops.c
drivers/char/rio/Makefile
drivers/char/rocket.c
drivers/char/synclink_gt.c
drivers/char/vt_ioctl.c
drivers/connector/cn_queue.c
drivers/connector/connector.c
drivers/dma/Kconfig
drivers/dma/Makefile
drivers/dma/amba-pl08x.c [new file with mode: 0644]
drivers/dma/coh901318.c
drivers/dma/dmaengine.c
drivers/dma/fsldma.c
drivers/dma/imx-dma.c [new file with mode: 0644]
drivers/dma/imx-sdma.c [new file with mode: 0644]
drivers/dma/intel_mid_dma.c
drivers/dma/intel_mid_dma_regs.h
drivers/dma/pch_dma.c
drivers/dma/ste_dma40.c
drivers/dma/ste_dma40_ll.c
drivers/dma/ste_dma40_ll.h
drivers/dma/timb_dma.c
drivers/firmware/dmi_scan.c
drivers/gpio/74x164.c [new file with mode: 0644]
drivers/gpio/Kconfig
drivers/gpio/Makefile
drivers/gpio/adp5588-gpio.c
drivers/gpio/basic_mmio_gpio.c [new file with mode: 0644]
drivers/gpio/langwell_gpio.c
drivers/gpio/pch_gpio.c [new file with mode: 0644]
drivers/gpio/timbgpio.c
drivers/gpu/drm/radeon/evergreen_blit_kms.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r100_track.h
drivers/gpu/drm/radeon/r200.c
drivers/gpu/drm/radeon/r600_cs.c
drivers/gpu/drm/radeon/radeon_reg.h
drivers/i2c/busses/scx200_acb.c
drivers/isdn/hardware/mISDN/mISDNinfineon.c
drivers/isdn/hisax/l3_1tr6.c
drivers/media/IR/lirc_dev.c
drivers/net/Kconfig
drivers/net/atl1c/atl1c.h
drivers/net/atl1c/atl1c_main.c
drivers/net/atlx/atl1.c
drivers/net/atlx/atl1.h
drivers/net/atlx/atlx.c
drivers/net/benet/be_cmds.c
drivers/net/benet/be_cmds.h
drivers/net/benet/be_main.c
drivers/net/bnx2x/bnx2x.h
drivers/net/bnx2x/bnx2x_cmn.c
drivers/net/bnx2x/bnx2x_cmn.h
drivers/net/bnx2x/bnx2x_init_ops.h
drivers/net/bnx2x/bnx2x_link.c
drivers/net/bnx2x/bnx2x_link.h
drivers/net/bnx2x/bnx2x_main.c
drivers/net/bonding/bond_main.c
drivers/net/caif/Kconfig
drivers/net/caif/Makefile
drivers/net/caif/caif_shm_u5500.c [new file with mode: 0644]
drivers/net/caif/caif_shmcore.c [new file with mode: 0644]
drivers/net/can/Kconfig
drivers/net/can/Makefile
drivers/net/can/at91_can.c
drivers/net/can/flexcan.c
drivers/net/can/mcp251x.c
drivers/net/can/pch_can.c [new file with mode: 0644]
drivers/net/can/sja1000/Kconfig
drivers/net/can/sja1000/Makefile
drivers/net/can/sja1000/tscan1.c [new file with mode: 0644]
drivers/net/cxgb3/cxgb3_main.c
drivers/net/cxgb4/cxgb4.h
drivers/net/cxgb4/cxgb4_main.c
drivers/net/cxgb4/sge.c
drivers/net/e1000/e1000_main.c
drivers/net/ehea/ehea.h
drivers/net/ehea/ehea_main.c
drivers/net/gianfar.c
drivers/net/jme.c
drivers/net/macb.c
drivers/net/mlx4/icm.c
drivers/net/mlx4/icm.h
drivers/net/mlx4/port.c
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/qlcnic/qlcnic.h
drivers/net/qlcnic/qlcnic_ethtool.c
drivers/net/qlcnic/qlcnic_main.c
drivers/net/qlge/qlge.h
drivers/net/qlge/qlge_main.c
drivers/net/qlge/qlge_mpi.c
drivers/net/sb1000.c
drivers/net/sgiseeq.c
drivers/net/slhc.c
drivers/net/smsc911x.c
drivers/net/smsc911x.h
drivers/net/tg3.c
drivers/net/tokenring/tms380tr.c
drivers/net/typhoon.c
drivers/net/vmxnet3/upt1_defs.h
drivers/net/vmxnet3/vmxnet3_defs.h
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vmxnet3/vmxnet3_ethtool.c
drivers/net/vmxnet3/vmxnet3_int.h
drivers/net/vxge/vxge-config.c
drivers/net/vxge/vxge-config.h
drivers/net/vxge/vxge-ethtool.c
drivers/net/vxge/vxge-main.c
drivers/net/vxge/vxge-main.h
drivers/net/vxge/vxge-traffic.c
drivers/net/vxge/vxge-traffic.h
drivers/net/wireless/ath/ath5k/base.c
drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
drivers/net/wireless/ath/ath9k/ar9003_paprd.c
drivers/net/wireless/ath/ath9k/beacon.c
drivers/net/wireless/ath/ath9k/init.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/carl9170/cmd.h
drivers/net/wireless/ath/carl9170/main.c
drivers/net/wireless/ath/carl9170/usb.c
drivers/net/wireless/b43/phy_n.c
drivers/net/wireless/iwlwifi/iwl-agn-tx.c
drivers/net/wireless/wl1251/Makefile
drivers/pci/proc.c
drivers/platform/x86/intel_pmic_gpio.c
drivers/rapidio/rio-driver.c
drivers/rapidio/rio-scan.c
drivers/rapidio/rio-sysfs.c
drivers/rapidio/rio.c
drivers/rapidio/rio.h
drivers/rapidio/switches/Kconfig
drivers/rapidio/switches/Makefile
drivers/rapidio/switches/idt_gen2.c [new file with mode: 0644]
drivers/rapidio/switches/idtcps.c
drivers/rapidio/switches/tsi568.c
drivers/rapidio/switches/tsi57x.c
drivers/rtc/Kconfig
drivers/rtc/Makefile
drivers/rtc/class.c
drivers/rtc/rtc-bfin.c
drivers/rtc/rtc-ds3232.c
drivers/rtc/rtc-jz4740.c
drivers/rtc/rtc-lpc32xx.c [new file with mode: 0644]
drivers/rtc/rtc-omap.c
drivers/rtc/rtc-s3c.c
drivers/video/fbmem.c
drivers/video/gbefb.c
drivers/video/matrox/matroxfb_DAC1064.c
drivers/video/matrox/matroxfb_maven.c
drivers/video/omap/blizzard.c
drivers/video/savage/savagefb-i2c.c
drivers/video/via/Makefile
drivers/video/via/accel.c
drivers/video/via/accel.h
drivers/video/via/chip.h
drivers/video/via/dvi.c
drivers/video/via/dvi.h
drivers/video/via/global.h
drivers/video/via/hw.c
drivers/video/via/hw.h
drivers/video/via/ioctl.c
drivers/video/via/lcd.c
drivers/video/via/lcd.h
drivers/video/via/lcdtbl.h [deleted file]
drivers/video/via/tbl1636.c [deleted file]
drivers/video/via/tbl1636.h [deleted file]
drivers/video/via/via-core.c
drivers/video/via/via_i2c.c
drivers/video/via/viafbdev.c
drivers/video/via/viafbdev.h
drivers/video/via/vt1636.c
drivers/w1/w1.c
fs/Kconfig
fs/Kconfig.binfmt
fs/eventpoll.c
fs/exec.c
fs/ext2/balloc.c
fs/ext3/balloc.c
fs/ext3/ialloc.c
fs/ext3/inode.c
fs/ext3/resize.c
fs/ext3/super.c
fs/ext4/Makefile
fs/ext4/balloc.c
fs/ext4/block_validity.c
fs/ext4/dir.c
fs/ext4/ext4.h
fs/ext4/ext4_extents.h
fs/ext4/extents.c
fs/ext4/file.c
fs/ext4/fsync.c
fs/ext4/ialloc.c
fs/ext4/inode.c
fs/ext4/mballoc.c
fs/ext4/migrate.c
fs/ext4/move_extent.c
fs/ext4/namei.c
fs/ext4/page-io.c [new file with mode: 0644]
fs/ext4/resize.c
fs/ext4/super.c
fs/ext4/xattr.c
fs/ext4/xattr.h
fs/fcntl.c
fs/fuse/dev.c
fs/ioctl.c
fs/isofs/inode.c
fs/jbd/checkpoint.c
fs/jbd/commit.c
fs/jbd/journal.c
fs/jbd/recovery.c
fs/jbd/transaction.c
fs/jbd2/checkpoint.c
fs/jbd2/commit.c
fs/jbd2/journal.c
fs/jbd2/transaction.c
fs/lockd/svc.c
fs/lockd/svclock.c
fs/lockd/svcsubs.c
fs/locks.c
fs/nfs/Kconfig
fs/nfsd/Kconfig
fs/nfsd/nfs4state.c
fs/proc/base.c
fs/proc/softirqs.c
fs/proc/stat.c
fs/proc/task_mmu.c
fs/quota/Kconfig
fs/quota/dquot.c
fs/select.c
fs/xfs/Kconfig
include/asm-generic/cputime.h
include/asm-generic/gpio.h
include/linux/amba/pl08x.h [new file with mode: 0644]
include/linux/basic_mmio_gpio.h [new file with mode: 0644]
include/linux/blkdev.h
include/linux/cgroup.h
include/linux/connector.h
include/linux/dmaengine.h
include/linux/fb.h
include/linux/fs.h
include/linux/highmem.h
include/linux/i2c/adp5588.h
include/linux/init_task.h
include/linux/intel_mid_dma.h
include/linux/interrupt.h
include/linux/jbd2.h
include/linux/kernel_stat.h
include/linux/kfifo.h
include/linux/netdevice.h
include/linux/percpu-defs.h
include/linux/percpu_counter.h
include/linux/phy.h
include/linux/poll.h
include/linux/ptrace.h
include/linux/ramoops.h [new file with mode: 0644]
include/linux/ring_buffer.h
include/linux/rio.h
include/linux/rio_ids.h
include/linux/rio_regs.h
include/linux/sched.h
include/linux/smp.h
include/linux/spi/74x164.h [new file with mode: 0644]
include/linux/synclink.h
include/linux/syscalls.h
include/linux/tracehook.h
include/linux/virtio_9p.h
include/linux/writeback.h
include/net/caif/caif_shm.h [new file with mode: 0644]
include/net/dst.h
include/net/fib_rules.h
include/net/garp.h
include/net/inetpeer.h
include/net/ip.h
include/net/ip6_tunnel.h
include/net/ipip.h
include/net/net_namespace.h
include/net/protocol.h
include/net/sock.h
include/net/xfrm.h
include/trace/events/ext4.h
include/trace/events/irq.h
include/trace/events/jbd2.h
init/Kconfig
ipc/compat.c
ipc/compat_mq.c
ipc/shm.c
kernel/cgroup.c
kernel/cgroup_freezer.c
kernel/cred.c
kernel/exit.c
kernel/fork.c
kernel/irq/irqdesc.c
kernel/kprobes.c
kernel/module.c
kernel/ns_cgroup.c
kernel/perf_event.c
kernel/ptrace.c
kernel/resource.c
kernel/signal.c
kernel/smp.c
kernel/softirq.c
kernel/taskstats.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace_kprobe.c
kernel/tsacct.c
lib/Kconfig.debug
mm/highmem.c
mm/maccess.c
mm/memcontrol.c
mm/swap.c
net/802/garp.c
net/802/stp.c
net/8021q/vlan.c
net/core/dev.c
net/core/fib_rules.c
net/core/filter.c
net/core/net-sysfs.c
net/core/net_namespace.c
net/core/pktgen.c
net/core/sock.c
net/core/sysctl_net_core.c
net/ipv4/fib_hash.c
net/ipv4/gre.c
net/ipv4/inetpeer.c
net/ipv4/ip_gre.c
net/ipv4/ip_sockglue.c
net/ipv4/ipip.c
net/ipv4/protocol.c
net/ipv4/route.c
net/ipv4/tunnel4.c
net/ipv4/udp.c
net/ipv6/addrconf.c
net/ipv6/ip6_tunnel.c
net/ipv6/ipv6_sockglue.c
net/ipv6/netfilter/Kconfig
net/ipv6/netfilter/Makefile
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/protocol.c
net/ipv6/raw.c
net/ipv6/sit.c
net/ipv6/tunnel6.c
net/ipv6/udp.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_core.h
net/l2tp/l2tp_ip.c
net/mac80211/ibss.c
net/mac80211/main.c
net/mac80211/rate.c
net/netfilter/Kconfig
net/netfilter/xt_TPROXY.c
net/netfilter/xt_socket.c
net/netlink/af_netlink.c
net/wireless/reg.c
sound/oss/sb_ess.c
sound/pci/hda/patch_sigmatel.c
sound/soc/codecs/ad73311.c
sound/soc/codecs/max98088.c
sound/soc/codecs/wm9090.c
sound/soc/fsl/pcm030-audio-fabric.c
sound/usb/card.h
sound/usb/pcm.c
sound/usb/proc.c
sound/usb/urb.c
tools/perf/Documentation/perf-list.txt
tools/perf/Documentation/perf-probe.txt
tools/perf/Documentation/perf-record.txt
tools/perf/builtin-probe.c
tools/perf/builtin-record.c
tools/perf/builtin-trace.c
tools/perf/scripts/perl/bin/failed-syscalls-report
tools/perf/scripts/perl/bin/rw-by-file-report
tools/perf/scripts/perl/bin/rw-by-pid-report
tools/perf/scripts/perl/bin/rwtop-report
tools/perf/scripts/perl/bin/wakeup-latency-report
tools/perf/scripts/perl/bin/workqueue-stats-report
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
tools/perf/scripts/python/bin/failed-syscalls-by-pid-report
tools/perf/scripts/python/bin/futex-contention-record [new file with mode: 0644]
tools/perf/scripts/python/bin/futex-contention-report [new file with mode: 0644]
tools/perf/scripts/python/bin/netdev-times-report
tools/perf/scripts/python/bin/sched-migration-report
tools/perf/scripts/python/bin/sctop-report
tools/perf/scripts/python/bin/syscall-counts-by-pid-report
tools/perf/scripts/python/bin/syscall-counts-report
tools/perf/scripts/python/failed-syscalls-by-pid.py
tools/perf/scripts/python/futex-contention.py [new file with mode: 0644]
tools/perf/scripts/python/sctop.py
tools/perf/scripts/python/syscall-counts-by-pid.py
tools/perf/scripts/python/syscall-counts.py
tools/perf/util/debug.c
tools/perf/util/debug.h
tools/perf/util/map.h
tools/perf/util/probe-event.c
tools/perf/util/probe-event.h
tools/perf/util/probe-finder.c
tools/perf/util/probe-finder.h
tools/perf/util/ui/browser.c

index 6e25c2659e0af2ab38b1c2cd1d700b129a96a064..a2976a6de033df2b4247f69b48f56f44abed3378 100644 (file)
@@ -21,6 +21,7 @@
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <sys/socket.h>
+#include <sys/wait.h>
 #include <signal.h>
 
 #include <linux/genetlink.h>
@@ -266,11 +267,13 @@ int main(int argc, char *argv[])
        int containerset = 0;
        char containerpath[1024];
        int cfd = 0;
+       int forking = 0;
+       sigset_t sigset;
 
        struct msgtemplate msg;
 
-       while (1) {
-               c = getopt(argc, argv, "qdiw:r:m:t:p:vlC:");
+       while (!forking) {
+               c = getopt(argc, argv, "qdiw:r:m:t:p:vlC:c:");
                if (c < 0)
                        break;
 
@@ -319,6 +322,28 @@ int main(int argc, char *argv[])
                                err(1, "Invalid pid\n");
                        cmd_type = TASKSTATS_CMD_ATTR_PID;
                        break;
+               case 'c':
+
+                       /* Block SIGCHLD for sigwait() later */
+                       if (sigemptyset(&sigset) == -1)
+                               err(1, "Failed to empty sigset");
+                       if (sigaddset(&sigset, SIGCHLD))
+                               err(1, "Failed to set sigchld in sigset");
+                       sigprocmask(SIG_BLOCK, &sigset, NULL);
+
+                       /* fork/exec a child */
+                       tid = fork();
+                       if (tid < 0)
+                               err(1, "Fork failed\n");
+                       if (tid == 0)
+                               if (execvp(argv[optind - 1],
+                                   &argv[optind - 1]) < 0)
+                                       exit(-1);
+
+                       /* Set the command type and avoid further processing */
+                       cmd_type = TASKSTATS_CMD_ATTR_PID;
+                       forking = 1;
+                       break;
                case 'v':
                        printf("debug on\n");
                        dbg = 1;
@@ -370,6 +395,15 @@ int main(int argc, char *argv[])
                goto err;
        }
 
+       /*
+        * If we forked a child, wait for it to exit. Cannot use waitpid()
+        * as all the delicious data would be reaped as part of the wait
+        */
+       if (tid && forking) {
+               int sig_received;
+               sigwait(&sigset, &sig_received);
+       }
+
        if (tid) {
                rc = send_cmd(nl_sd, id, mypid, TASKSTATS_CMD_GET,
                              cmd_type, &tid, sizeof(__u32));
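The getdelays.c hunks above add a '-c' mode that blocks SIGCHLD before
fork()/exec() and later waits with sigwait() instead of waitpid(), so the
child is left unreaped and its taskstats can still be fetched by pid. A
minimal standalone sketch of that pattern (illustrative userspace code, not
part of the patch; the exec'd command is arbitrary):

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	sigset_t sigset;
	pid_t tid;
	int sig_received;

	/* Block SIGCHLD before forking so it stays pending for sigwait() */
	if (sigemptyset(&sigset) == -1 || sigaddset(&sigset, SIGCHLD) == -1)
		exit(1);
	if (sigprocmask(SIG_BLOCK, &sigset, NULL) == -1)
		exit(1);

	tid = fork();
	if (tid < 0)
		exit(1);
	if (tid == 0) {
		/* child: exec the command to be measured */
		execlp("true", "true", (char *)NULL);
		_exit(1);
	}

	/*
	 * Wait for SIGCHLD without calling waitpid(); the child is not
	 * reaped, so its pid (and taskstats) can still be queried here.
	 */
	if (sigwait(&sigset, &sig_received) == 0)
		printf("child %d exited, stats still available\n", (int)tid);
	return 0;
}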
index b34823ff16469a0e6d57bbca9d659f5968d1e410..190018b0c64944e3463548c87e4c2d8226dcb71d 100644 (file)
@@ -18,7 +18,8 @@ CONTENTS:
   1.2 Why are cgroups needed ?
   1.3 How are cgroups implemented ?
   1.4 What does notify_on_release do ?
-  1.5 How do I use cgroups ?
+  1.5 What does clone_children do ?
+  1.6 How do I use cgroups ?
 2. Usage Examples and Syntax
   2.1 Basic Usage
   2.2 Attaching processes
@@ -293,7 +294,16 @@ notify_on_release in the root cgroup at system boot is disabled
 value of their parents notify_on_release setting. The default value of
 a cgroup hierarchy's release_agent path is empty.
 
-1.5 How do I use cgroups ?
+1.5 What does clone_children do ?
+---------------------------------
+
+If the clone_children flag is enabled (1) in a cgroup, then all
+cgroups created beneath it will call the post_clone callbacks for each
+subsystem of the newly created cgroup. Usually when this callback is
+implemented for a subsystem, it copies the values of the parent
+subsystem; this is the case for the cpuset.
+
+1.6 How do I use cgroups ?
 --------------------------
 
 To start a new job that is to be contained within a cgroup, using
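The clone_children flag is driven entirely through the cgroup filesystem. A
minimal sketch of enabling it from C, assuming a hierarchy mounted at
/dev/cgroup and the per-cgroup flag file named cgroup.clone_children (both
paths are illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	int fd;

	/* Enable clone_children in the parent cgroup */
	fd = open("/dev/cgroup/parent/cgroup.clone_children", O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);

	/*
	 * cgroups created beneath now trigger each subsystem's post_clone
	 * callback, e.g. cpuset copies the parent's cpus and mems.
	 */
	if (mkdir("/dev/cgroup/parent/child", 0755) < 0)
		perror("mkdir");
	return 0;
}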
index f3e046a6a987a685910bafc171c2cc9cf243df1b..1a2e8aa3fbb199cea43ad7f5f214b92cf1694fa1 100644 (file)
@@ -197,6 +197,54 @@ Notes:
        example,
            # fbset -depth 16
 
+
+[Configure viafb via /proc]
+---------------------------
+    The following files exist in /proc/viafb
+
+    supported_output_devices
+
+        This read-only file contains a full ',' separated list containing all
+        output devices that could be available on your platform. It is likely
+        that not all of those have a connector on your hardware but it should
+        provide a good starting point to figure out which of those names match
+        a real connector.
+        Example:
+        # cat /proc/viafb/supported_output_devices
+
+    iga1/output_devices
+    iga2/output_devices
+
+        These two files are readable and writable. iga1 and iga2 are the two
+        independent units that produce the screen image. Those images can be
+        forwarded to one or more output devices. Reading those files is a way
+        to query which output devices are currently used by an iga.
+        Example:
+        # cat /proc/viafb/iga1/output_devices
+        If no output devices are printed, the output of this iga is lost.
+        This can happen for example if only one (the other) iga is used.
+        Writing to these files allows adjusting the output devices during
+        runtime. One can add new devices, remove existing ones or switch
+        between igas. Essentially you can write a ',' separated list of device
+        names (or a single one) in the same format as the output to those
+        files. You can add a '+' or '-' as a prefix allowing simple addition
+        and removal of devices. So a prefix '+' adds the devices from your list
+        to the already existing ones, '-' removes the listed devices from the
+        existing ones and if no prefix is given it replaces all existing ones
+        with the listed ones. If you remove devices they are expected to turn
+        off. If you add devices that are already part of the other iga they are
+        removed there and added to the new one.
+        Examples:
+        Add CRT as output device to iga1
+        # echo +CRT > /proc/viafb/iga1/output_devices
+
+        Remove (turn off) DVP1 and LVDS1 as output devices of iga2
+        # echo -DVP1,LVDS1 > /proc/viafb/iga2/output_devices
+
+        Replace all iga1 output devices by CRT
+        # echo CRT > /proc/viafb/iga1/output_devices
+
+
 [Bootup with viafb]:
 --------------------
     Add the following line to your grub.conf:
index d2af87ba96e11ed5edc7ac4a81e081cff23ee00c..f3da8c0a3af21ca7b2d958e415f9e83ceae01f7f 100644 (file)
@@ -526,6 +526,23 @@ Who:       FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
 
 ----------------------------
 
+What:   namespace cgroup (ns_cgroup)
+When:   2.6.38
+Why:    The ns_cgroup leads to some problems:
+       * cgroup creation is out-of-control
+       * cgroup name can conflict when pids are looping
+       * it is not possible to have a single process handling
+       a lot of namespaces without falling into exponential creation time
+       * we may want to create a namespace without creating a cgroup
+
+       The ns_cgroup is replaced by a compatibility flag 'clone_children',
+       where a newly created cgroup will copy the parent cgroup values.
+       The userspace has to manually create a cgroup and add a task to
+       the 'tasks' file.
+Who:    Daniel Lezcano <daniel.lezcano@free.fr>
+
+----------------------------
+
 What:  iwlwifi disable_hw_scan module parameters
 When:  2.6.40
Why:   Hardware scan is the preferred method for iwlwifi devices for
index e1def1786e5074e5d8f3cf89b5f14cbfc9fab4af..6ab9442d7eeb666e496e79acba6471b813207434 100644 (file)
@@ -353,6 +353,20 @@ noauto_da_alloc            replacing existing files via patterns such as
                        system crashes before the delayed allocation
                        blocks are forced to disk.
 
+noinit_itable          Do not initialize any uninitialized inode table
+                       blocks in the background.  This feature may be
+                       used by installation CDs so that the install
+                       process can complete as quickly as possible; the
+                       inode table initialization process would then be
+                       deferred until the next time the file system
+                       is unmounted.
+
+init_itable=n          The lazy itable init code will wait n times the
+                       number of milliseconds it took to zero out the
+                       previous block group's inode table.  This
+                       minimizes the impact on the system performance
+                       while the file system's inode table is being initialized.
+
 discard                Controls whether ext4 should issue discard/TRIM
 nodiscard(*)           commands to the underlying block device when
                        blocks are freed.  This is useful for SSD devices
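Both itable options are ordinary ext4 mount options. A minimal sketch of
passing them via mount(2), assuming root privileges; the device and mount
point are illustrative:

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* Skip background inode table zeroing entirely */
	if (mount("/dev/sda1", "/mnt", "ext4", 0, "noinit_itable") < 0)
		perror("mount noinit_itable");

	/* Or rate-limit it: wait 10x the time each group took to zero */
	if (mount("/dev/sda1", "/mnt", "ext4", MS_REMOUNT, "init_itable=10") < 0)
		perror("remount init_itable=10");
	return 0;
}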
index a563b74c7aef400ccfcd8e5e0a10cd03b94523f1..e73df2722ff35ccdea2dee328b592ee01d13965a 100644 (file)
@@ -136,6 +136,7 @@ Table 1-1: Process specific entries in /proc
  statm         Process memory status information
  status                Process status in human readable form
  wchan         If CONFIG_KALLSYMS is set, a pre-decoded wchan
+ pagemap       Page table
  stack         Report full stack trace, enable via CONFIG_STACKTRACE
 smaps         an extension based on maps, showing the memory consumption of
                each mapping
@@ -370,6 +371,7 @@ Shared_Dirty:          0 kB
 Private_Clean:         0 kB
 Private_Dirty:         0 kB
 Referenced:          892 kB
+Anonymous:             0 kB
 Swap:                  0 kB
 KernelPageSize:        4 kB
 MMUPageSize:           4 kB
@@ -378,9 +380,15 @@ The first of these lines shows the same information as is displayed for the
 mapping in /proc/PID/maps.  The remaining lines show the size of the mapping
 (size), the amount of the mapping that is currently resident in RAM (RSS), the
 process' proportional share of this mapping (PSS), the number of clean and
-dirty shared pages in the mapping, and the number of clean and dirty private
-pages in the mapping.  The "Referenced" indicates the amount of memory
-currently marked as referenced or accessed.
+dirty private pages in the mapping.  Note that even a page which is part of a
+MAP_SHARED mapping, but has only a single pte mapped, i.e. is currently used
+by only one process, is accounted as private and not as shared.  "Referenced"
+indicates the amount of memory currently marked as referenced or accessed.
+"Anonymous" shows the amount of memory that does not belong to any file.  Even
+a mapping associated with a file may contain anonymous pages: when MAP_PRIVATE
+and a page is modified, the file page is replaced by a private anonymous copy.
+"Swap" shows how much would-be-anonymous memory is also used, but out on
+swap.
 
 This file is only present if the CONFIG_MMU kernel configuration option is
 enabled.
@@ -397,6 +405,9 @@ To clear the bits for the file mapped pages associated with the process
     > echo 3 > /proc/PID/clear_refs
 Any other value written to /proc/PID/clear_refs will have no effect.
 
+The /proc/pid/pagemap gives the PFN, which can be used to find the pageflags
+using /proc/kpageflags and the number of times a page is mapped using
+/proc/kpagecount. For detailed explanation, see Documentation/vm/pagemap.txt.
 
 1.2 Kernel data
 ---------------
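A minimal userspace sketch of the lookup described above: read the 64-bit
pagemap entry for a virtual page, extract the PFN (bits 0-54, valid when
the present bit 63 is set), and use it as an index into /proc/kpagecount or
/proc/kpageflags (reading those two typically requires root):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	long pagesize = sysconf(_SC_PAGESIZE);
	char *page = malloc(pagesize);
	uint64_t entry, pfn;
	int fd;

	page[0] = 1;	/* touch the page so it is actually mapped */

	fd = open("/proc/self/pagemap", O_RDONLY);
	if (fd < 0) {
		perror("open pagemap");
		return 1;
	}
	/* one 64-bit entry per virtual page */
	if (pread(fd, &entry, sizeof(entry),
		  ((uintptr_t)page / pagesize) * sizeof(entry)) != sizeof(entry)) {
		perror("pread");
		return 1;
	}
	close(fd);

	if (entry & (1ULL << 63)) {		/* page present */
		pfn = entry & ((1ULL << 55) - 1);
		printf("vaddr %p -> pfn 0x%llx\n", (void *)page,
		       (unsigned long long)pfn);
		/* pfn * 8 is the offset of this page's entry in
		 * /proc/kpagecount and /proc/kpageflags */
	}
	return 0;
}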
index 88bb71b46da4217b8704bf10b8ccae68d1ad832d..9eb1ba52013df53040af34f44b95745a049f8e39 100644 (file)
@@ -177,18 +177,6 @@ Doing it all yourself
  
    A convenience function to print out the PHY status neatly.
 
- int phy_clear_interrupt(struct phy_device *phydev);
- int phy_config_interrupt(struct phy_device *phydev, u32 interrupts);
-   
-   Clear the PHY's interrupt, and configure which ones are allowed,
-   respectively.  Currently only supports all on, or all off.
- int phy_enable_interrupts(struct phy_device *phydev);
- int phy_disable_interrupts(struct phy_device *phydev);
-
-   Functions which enable/disable PHY interrupts, clearing them
-   before and after, respectively.
-
  int phy_start_interrupts(struct phy_device *phydev);
  int phy_stop_interrupts(struct phy_device *phydev);
 
@@ -213,12 +201,6 @@ Doing it all yourself
    Fills the phydev structure with up-to-date information about the current
    settings in the PHY.
 
- void phy_sanitize_settings(struct phy_device *phydev)
-   
-   Resolves differences between currently desired settings, and
-   supported settings for the given PHY device.  Does not make
-   the changes in the hardware, though.
-
  int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
  int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
 
index b606c2c4dd37c4f2e5f5caf7803d66adadd59e6a..30289fab86ebb3598466822c36c59481b30f60d3 100644 (file)
@@ -80,8 +80,10 @@ dirty_background_bytes
 Contains the amount of dirty memory at which the pdflush background writeback
 daemon will start writeback.
 
-If dirty_background_bytes is written, dirty_background_ratio becomes a function
-of its value (dirty_background_bytes / the amount of dirtyable system memory).
+Note: dirty_background_bytes is the counterpart of dirty_background_ratio. Only
+one of them may be specified at a time. When one sysctl is written it is
+immediately taken into account to evaluate the dirty memory limits and the
+other appears as 0 when read.
 
 ==============================================================
 
@@ -97,8 +99,10 @@ dirty_bytes
 Contains the amount of dirty memory at which a process generating disk writes
 will itself start writeback.
 
-If dirty_bytes is written, dirty_ratio becomes a function of its value
-(dirty_bytes / the amount of dirtyable system memory).
+Note: dirty_bytes is the counterpart of dirty_ratio. Only one of them may be
+specified at a time. When one sysctl is written it is immediately taken into
+account to evaluate the dirty memory limits and the other appears as 0 when
+read.
 
 Note: the minimum value allowed for dirty_bytes is two pages (in bytes); any
 value lower than this limit will be ignored and the old configuration will be
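The counterpart behaviour is easy to observe: writing one of the pair
zeroes the other on the next read. A minimal sketch, assuming root
privileges:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static void show(const char *path)
{
	char buf[32];
	ssize_t n;
	int fd = open(path, O_RDONLY);

	if (fd < 0) {
		perror(path);
		return;
	}
	n = read(fd, buf, sizeof(buf) - 1);
	buf[n > 0 ? n : 0] = '\0';
	printf("%s = %s", path, buf);
	close(fd);
}

int main(void)
{
	int fd = open("/proc/sys/vm/dirty_bytes", O_WRONLY);

	if (fd < 0 || write(fd, "268435456\n", 10) != 10)
		perror("dirty_bytes");		/* needs root */
	if (fd >= 0)
		close(fd);

	show("/proc/sys/vm/dirty_bytes");	/* 268435456 */
	show("/proc/sys/vm/dirty_ratio");	/* now reads back as 0 */
	return 0;
}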
diff --git a/Kbuild b/Kbuild
index 431f7ca2404cb2cf5bbc5b40050f032a8986888a..b00037ad7e03b2d5953572a440b5c6f1a1c5708e 100644 (file)
--- a/Kbuild
+++ b/Kbuild
@@ -53,7 +53,7 @@ targets += arch/$(SRCARCH)/kernel/asm-offsets.s
 # Default sed regexp - multiline due to syntax constraints
 define sed-y
        "/^->/{s:->#\(.*\):/* \1 */:; \
-       s:^->\([^ ]*\) [\$$#]*\([-0-9]*\) \(.*\):#define \1 (\2) /* \3 */:; \
+       s:^->\([^ ]*\) [\$$#]*\([-0-9]*\) \(.*\):#define \1 \2 /* \3 */:; \
        s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; \
        s:->::; p;}"
 endef
index debde0128cd0f661937d759923d2dd0790cc2fac..1e6b6bdf63403950bc7cc573971f2399fe2c94ec 100644 (file)
@@ -4448,7 +4448,7 @@ L:        platform-driver-x86@vger.kernel.org
 S:     Maintained
 F:     drivers/platform/x86/panasonic-laptop.c
 
-PANASONIC MN10300/AM33 PORT
+PANASONIC MN10300/AM33/AM34 PORT
 M:     David Howells <dhowells@redhat.com>
 M:     Koichi Yasutake <yasutake.koichi@jp.panasonic.com>
 L:     linux-am33-list@redhat.com (moderated for non-subscribers)
index d1dbd9acd1df47d8bbf2046720a3b5c2792d4076..022c2748fa410569230094097bb491c8d8595839 100644 (file)
@@ -223,7 +223,7 @@ iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
  */
 static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
 {
-       dma64_addr_t dac_offset = alpha_mv.pci_dac_offset;
+       dma_addr_t dac_offset = alpha_mv.pci_dac_offset;
        int ok = 1;
 
        /* If this is not set, the machine doesn't support DAC at all.  */
@@ -756,7 +756,7 @@ static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg,
        spin_lock_irqsave(&arena->lock, flags);
 
        for (end = sg + nents; sg < end; ++sg) {
-               dma64_addr_t addr;
+               dma_addr_t addr;
                size_t size;
                long npages, ofs;
                dma_addr_t tend;
index baa903602f6a03c86a8d841ba3ce751c3ece0cb9..e2af5eb59bb432ad870b2d2a08d46a74e97c82fe 100644 (file)
@@ -269,7 +269,8 @@ void ptrace_disable(struct task_struct *child)
        user_disable_single_step(child);
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        unsigned long tmp;
        size_t copied;
@@ -292,7 +293,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
        case PTRACE_PEEKUSR:
                force_successful_syscall_return();
                ret = get_reg(child, addr);
-               DBG(DBG_MEM, ("peek $%ld->%#lx\n", addr, ret));
+               DBG(DBG_MEM, ("peek $%lu->%#lx\n", addr, ret));
                break;
 
        /* When I and D space are separate, this will have to be fixed.  */
@@ -302,7 +303,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                break;
 
        case PTRACE_POKEUSR: /* write the specified register */
-               DBG(DBG_MEM, ("poke $%ld<-%#lx\n", addr, data));
+               DBG(DBG_MEM, ("poke $%lu<-%#lx\n", addr, data));
                ret = put_reg(child, addr, data);
                break;
        default:
index e0cb6370ed148d9c24d6e856bc6c5c46128f38cb..3e97483abcf029ea15eee056d4fa400b2e170a34 100644 (file)
@@ -1075,13 +1075,15 @@ out:
 }
 #endif
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        int ret;
+       unsigned long __user *datap = (unsigned long __user *) data;
 
        switch (request) {
                case PTRACE_PEEKUSR:
-                       ret = ptrace_read_user(child, addr, (unsigned long __user *)data);
+                       ret = ptrace_read_user(child, addr, datap);
                        break;
 
                case PTRACE_POKEUSR:
@@ -1089,34 +1091,34 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                        break;
 
                case PTRACE_GETREGS:
-                       ret = ptrace_getregs(child, (void __user *)data);
+                       ret = ptrace_getregs(child, datap);
                        break;
 
                case PTRACE_SETREGS:
-                       ret = ptrace_setregs(child, (void __user *)data);
+                       ret = ptrace_setregs(child, datap);
                        break;
 
                case PTRACE_GETFPREGS:
-                       ret = ptrace_getfpregs(child, (void __user *)data);
+                       ret = ptrace_getfpregs(child, datap);
                        break;
                
                case PTRACE_SETFPREGS:
-                       ret = ptrace_setfpregs(child, (void __user *)data);
+                       ret = ptrace_setfpregs(child, datap);
                        break;
 
 #ifdef CONFIG_IWMMXT
                case PTRACE_GETWMMXREGS:
-                       ret = ptrace_getwmmxregs(child, (void __user *)data);
+                       ret = ptrace_getwmmxregs(child, datap);
                        break;
 
                case PTRACE_SETWMMXREGS:
-                       ret = ptrace_setwmmxregs(child, (void __user *)data);
+                       ret = ptrace_setwmmxregs(child, datap);
                        break;
 #endif
 
                case PTRACE_GET_THREAD_AREA:
                        ret = put_user(task_thread_info(child)->tp_value,
-                                      (unsigned long __user *) data);
+                                      datap);
                        break;
 
                case PTRACE_SET_SYSCALL:
@@ -1126,21 +1128,21 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 
 #ifdef CONFIG_CRUNCH
                case PTRACE_GETCRUNCHREGS:
-                       ret = ptrace_getcrunchregs(child, (void __user *)data);
+                       ret = ptrace_getcrunchregs(child, datap);
                        break;
 
                case PTRACE_SETCRUNCHREGS:
-                       ret = ptrace_setcrunchregs(child, (void __user *)data);
+                       ret = ptrace_setcrunchregs(child, datap);
                        break;
 #endif
 
 #ifdef CONFIG_VFP
                case PTRACE_GETVFPREGS:
-                       ret = ptrace_getvfpregs(child, (void __user *)data);
+                       ret = ptrace_getvfpregs(child, datap);
                        break;
 
                case PTRACE_SETVFPREGS:
-                       ret = ptrace_setvfpregs(child, (void __user *)data);
+                       ret = ptrace_setvfpregs(child, datap);
                        break;
 #endif
 
index 287431cc13e59d32f4e592d6e1d58eb247c378b2..ac6fd713828ad522cb0182c0ff9991406a8b49aa 100644 (file)
@@ -27,6 +27,8 @@
 
 #define imx_has_dma_v1()       (cpu_is_mx1() || cpu_is_mx21() || cpu_is_mx27())
 
+#include <mach/dma.h>
+
 #define IMX_DMA_CHANNELS  16
 
 #define DMA_MODE_READ          0
@@ -96,12 +98,6 @@ int imx_dma_request(int channel, const char *name);
 
 void imx_dma_free(int channel);
 
-enum imx_dma_prio {
-       DMA_PRIO_HIGH = 0,
-       DMA_PRIO_MEDIUM = 1,
-       DMA_PRIO_LOW = 2
-};
-
 int imx_dma_request_by_prio(const char *name, enum imx_dma_prio prio);
 
 #endif /* __MACH_DMA_V1_H__ */
index 2f420210d4069cbfd299d7413cd527e4540a742b..9057d6fd1d31d441dcc99c234c2d838c91e65a72 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/io.h>
 #include <linux/cnt32_to_63.h>
 
-#include <asm/mach/time.h>
 #include <asm/mach/time.h>
 #include <asm/localtimer.h>
 
index cbbe69a76a7ccf773c4ea408bd1f5d268bbf6fbf..4a94be3304b91a852a7732d5b6e8eeda5b37866d 100644 (file)
@@ -208,35 +208,25 @@ static struct resource dma40_resources[] = {
 
 /* Default configuration for physical memcpy */
 struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
-       .channel_type = (STEDMA40_CHANNEL_IN_PHY_MODE |
-                        STEDMA40_LOW_PRIORITY_CHANNEL |
-                        STEDMA40_PCHAN_BASIC_MODE),
+       .mode = STEDMA40_MODE_PHYSICAL,
        .dir = STEDMA40_MEM_TO_MEM,
 
-       .src_info.endianess = STEDMA40_LITTLE_ENDIAN,
        .src_info.data_width = STEDMA40_BYTE_WIDTH,
        .src_info.psize = STEDMA40_PSIZE_PHY_1,
        .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
 
-       .dst_info.endianess = STEDMA40_LITTLE_ENDIAN,
        .dst_info.data_width = STEDMA40_BYTE_WIDTH,
        .dst_info.psize = STEDMA40_PSIZE_PHY_1,
        .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
 };
 /* Default configuration for logical memcpy */
 struct stedma40_chan_cfg dma40_memcpy_conf_log = {
-       .channel_type = (STEDMA40_CHANNEL_IN_LOG_MODE |
-                        STEDMA40_LOW_PRIORITY_CHANNEL |
-                        STEDMA40_LCHAN_SRC_LOG_DST_LOG |
-                        STEDMA40_NO_TIM_FOR_LINK),
        .dir = STEDMA40_MEM_TO_MEM,
 
-       .src_info.endianess = STEDMA40_LITTLE_ENDIAN,
        .src_info.data_width = STEDMA40_BYTE_WIDTH,
        .src_info.psize = STEDMA40_PSIZE_LOG_1,
        .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
 
-       .dst_info.endianess = STEDMA40_LITTLE_ENDIAN,
        .dst_info.data_width = STEDMA40_BYTE_WIDTH,
        .dst_info.psize = STEDMA40_PSIZE_LOG_1,
        .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
@@ -269,7 +259,6 @@ static struct stedma40_platform_data dma40_plat_data = {
        .memcpy_len = ARRAY_SIZE(dma40_memcpy_event),
        .memcpy_conf_phy = &dma40_memcpy_conf_phy,
        .memcpy_conf_log = &dma40_memcpy_conf_log,
-       .llis_per_log = 8,
        .disabled_channels = {-1},
 };
 
index c00f119babbfe59a5d51a87fd5ba13f1e4c0f190..c435fd9e1da95c9fdc9d7fab83b3a42caef1b905 100644 (file)
@@ -89,7 +89,7 @@ void __kunmap_atomic(void *kvaddr)
        int idx, type;
 
        if (kvaddr >= (void *)FIXADDR_START) {
-               type = kmap_atomic_idx_pop();
+               type = kmap_atomic_idx();
                idx = type + KM_TYPE_NR * smp_processor_id();
 
                if (cache_is_vivt())
@@ -101,6 +101,7 @@ void __kunmap_atomic(void *kvaddr)
 #else
                (void) idx;  /* to kill a warning */
 #endif
+               kmap_atomic_idx_pop();
        } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
                /* this address was obtained through kmap_high_get() */
                kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
diff --git a/arch/arm/plat-mxc/include/mach/dma.h b/arch/arm/plat-mxc/include/mach/dma.h
new file mode 100644 (file)
index 0000000..ef77515
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARCH_MXC_DMA_H__
+#define __ASM_ARCH_MXC_DMA_H__
+
+#include <linux/scatterlist.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+
+/*
+ * This enumerates peripheral types. Used for SDMA.
+ */
+enum sdma_peripheral_type {
+       IMX_DMATYPE_SSI,        /* MCU domain SSI */
+       IMX_DMATYPE_SSI_SP,     /* Shared SSI */
+       IMX_DMATYPE_MMC,        /* MMC */
+       IMX_DMATYPE_SDHC,       /* SDHC */
+       IMX_DMATYPE_UART,       /* MCU domain UART */
+       IMX_DMATYPE_UART_SP,    /* Shared UART */
+       IMX_DMATYPE_FIRI,       /* FIRI */
+       IMX_DMATYPE_CSPI,       /* MCU domain CSPI */
+       IMX_DMATYPE_CSPI_SP,    /* Shared CSPI */
+       IMX_DMATYPE_SIM,        /* SIM */
+       IMX_DMATYPE_ATA,        /* ATA */
+       IMX_DMATYPE_CCM,        /* CCM */
+       IMX_DMATYPE_EXT,        /* External peripheral */
+       IMX_DMATYPE_MSHC,       /* Memory Stick Host Controller */
+       IMX_DMATYPE_MSHC_SP,    /* Shared Memory Stick Host Controller */
+       IMX_DMATYPE_DSP,        /* DSP */
+       IMX_DMATYPE_MEMORY,     /* Memory */
+       IMX_DMATYPE_FIFO_MEMORY,/* FIFO type Memory */
+       IMX_DMATYPE_SPDIF,      /* SPDIF */
+       IMX_DMATYPE_IPU_MEMORY, /* IPU Memory */
+       IMX_DMATYPE_ASRC,       /* ASRC */
+       IMX_DMATYPE_ESAI,       /* ESAI */
+};
+
+enum imx_dma_prio {
+       DMA_PRIO_HIGH = 0,
+       DMA_PRIO_MEDIUM = 1,
+       DMA_PRIO_LOW = 2
+};
+
+struct imx_dma_data {
+       int dma_request; /* DMA request line */
+       enum sdma_peripheral_type peripheral_type;
+       int priority;
+};
+
+static inline int imx_dma_is_ipu(struct dma_chan *chan)
+{
+       return !strcmp(dev_name(chan->device->dev), "ipu-core");
+}
+
+static inline int imx_dma_is_general_purpose(struct dma_chan *chan)
+{
+       return !strcmp(dev_name(chan->device->dev), "imx-sdma") ||
+               !strcmp(dev_name(chan->device->dev), "imx-dma");
+}
+
+#endif
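A sketch of how a client might claim one of these channels through
dmaengine, handing struct imx_dma_data to the driver via chan->private in
a filter function. The request line and peripheral type are illustrative,
and the chan->private convention is an assumption about how the imx-dma
and imx-sdma drivers of this era consume the data:

#include <linux/dmaengine.h>
#include <mach/dma.h>

static bool imx_filter(struct dma_chan *chan, void *param)
{
	if (!imx_dma_is_general_purpose(chan))
		return false;
	chan->private = param;	/* pass imx_dma_data to the driver */
	return true;
}

static struct dma_chan *claim_channel(void)
{
	static struct imx_dma_data data = {
		.dma_request	= 0,	/* request line: board specific */
		.peripheral_type = IMX_DMATYPE_SSI,
		.priority	= DMA_PRIO_HIGH,
	};
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	return dma_request_channel(mask, imx_filter, &data);
}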
diff --git a/arch/arm/plat-mxc/include/mach/sdma.h b/arch/arm/plat-mxc/include/mach/sdma.h
new file mode 100644 (file)
index 0000000..9be1122
--- /dev/null
@@ -0,0 +1,17 @@
+#ifndef __MACH_MXC_SDMA_H__
+#define __MACH_MXC_SDMA_H__
+
+/**
+ * struct sdma_platform_data - platform specific data for SDMA engine
+ *
+ * @sdma_version:      The version of this SDMA engine
+ * @cpu_name:          used to generate the firmware name
+ * @to_version:        CPU Tape out version
+ */
+struct sdma_platform_data {
+       int sdma_version;
+       char *cpu_name;
+       int to_version;
+};
+
+#endif /* __MACH_MXC_SDMA_H__ */
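A board-code sketch of filling this platform data (all values
illustrative):

#include <mach/sdma.h>

static struct sdma_platform_data imx51_sdma_pdata = {
	.sdma_version	= 2,
	.cpu_name	= "imx51",	/* used to build the firmware name */
	.to_version	= 3,		/* tape-out revision */
};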
index 5fbde4b8dc1236f9599611909264b104eddaf548..74b62f10d07f8addaf0ad6d5ad7c40d5cadfd12f 100644 (file)
@@ -1,10 +1,8 @@
 /*
- * arch/arm/plat-nomadik/include/plat/ste_dma40.h
- *
- * Copyright (C) ST-Ericsson 2007-2010
+ * Copyright (C) ST-Ericsson SA 2007-2010
+ * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
  * License terms: GNU General Public License (GPL) version 2
- * Author: Per Friden <per.friden@stericsson.com>
- * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
  */
 
 
 #include <linux/dmaengine.h>
 #include <linux/workqueue.h>
 #include <linux/interrupt.h>
-#include <linux/dmaengine.h>
 
 /* dev types for memcpy */
 #define STEDMA40_DEV_DST_MEMORY (-1)
 #define        STEDMA40_DEV_SRC_MEMORY (-1)
 
-/*
- * Description of bitfields of channel_type variable is available in
- * the info structure.
- */
+enum stedma40_mode {
+       STEDMA40_MODE_LOGICAL = 0,
+       STEDMA40_MODE_PHYSICAL,
+       STEDMA40_MODE_OPERATION,
+};
 
-/* Priority */
-#define STEDMA40_INFO_PRIO_TYPE_POS 2
-#define STEDMA40_HIGH_PRIORITY_CHANNEL (0x1 << STEDMA40_INFO_PRIO_TYPE_POS)
-#define STEDMA40_LOW_PRIORITY_CHANNEL (0x2 << STEDMA40_INFO_PRIO_TYPE_POS)
-
-/* Mode  */
-#define STEDMA40_INFO_CH_MODE_TYPE_POS 6
-#define STEDMA40_CHANNEL_IN_PHY_MODE (0x1 << STEDMA40_INFO_CH_MODE_TYPE_POS)
-#define STEDMA40_CHANNEL_IN_LOG_MODE (0x2 << STEDMA40_INFO_CH_MODE_TYPE_POS)
-#define STEDMA40_CHANNEL_IN_OPER_MODE (0x3 << STEDMA40_INFO_CH_MODE_TYPE_POS)
-
-/* Mode options */
-#define STEDMA40_INFO_CH_MODE_OPT_POS 8
-#define STEDMA40_PCHAN_BASIC_MODE (0x1 << STEDMA40_INFO_CH_MODE_OPT_POS)
-#define STEDMA40_PCHAN_MODULO_MODE (0x2 << STEDMA40_INFO_CH_MODE_OPT_POS)
-#define STEDMA40_PCHAN_DOUBLE_DST_MODE (0x3 << STEDMA40_INFO_CH_MODE_OPT_POS)
-#define STEDMA40_LCHAN_SRC_PHY_DST_LOG (0x1 << STEDMA40_INFO_CH_MODE_OPT_POS)
-#define STEDMA40_LCHAN_SRC_LOG_DST_PHS (0x2 << STEDMA40_INFO_CH_MODE_OPT_POS)
-#define STEDMA40_LCHAN_SRC_LOG_DST_LOG (0x3 << STEDMA40_INFO_CH_MODE_OPT_POS)
-
-/* Interrupt */
-#define STEDMA40_INFO_TIM_POS 10
-#define STEDMA40_NO_TIM_FOR_LINK (0x0 << STEDMA40_INFO_TIM_POS)
-#define STEDMA40_TIM_FOR_LINK (0x1 << STEDMA40_INFO_TIM_POS)
-
-/* End of channel_type configuration */
+enum stedma40_mode_opt {
+       STEDMA40_PCHAN_BASIC_MODE = 0,
+       STEDMA40_LCHAN_SRC_LOG_DST_LOG = 0,
+       STEDMA40_PCHAN_MODULO_MODE,
+       STEDMA40_PCHAN_DOUBLE_DST_MODE,
+       STEDMA40_LCHAN_SRC_PHY_DST_LOG,
+       STEDMA40_LCHAN_SRC_LOG_DST_PHY,
+};
 
 #define STEDMA40_ESIZE_8_BIT  0x0
 #define STEDMA40_ESIZE_16_BIT 0x1
 #define STEDMA40_PSIZE_LOG_8  STEDMA40_PSIZE_PHY_8
 #define STEDMA40_PSIZE_LOG_16 STEDMA40_PSIZE_PHY_16
 
+/* Maximum number of possible physical channels */
+#define STEDMA40_MAX_PHYS 32
+
 enum stedma40_flow_ctrl {
        STEDMA40_NO_FLOW_CTRL,
        STEDMA40_FLOW_CTRL,
 };
 
-enum stedma40_endianess {
-       STEDMA40_LITTLE_ENDIAN,
-       STEDMA40_BIG_ENDIAN
-};
-
 enum stedma40_periph_data_width {
        STEDMA40_BYTE_WIDTH = STEDMA40_ESIZE_8_BIT,
        STEDMA40_HALFWORD_WIDTH = STEDMA40_ESIZE_16_BIT,
@@ -90,34 +68,40 @@ enum stedma40_periph_data_width {
        STEDMA40_DOUBLEWORD_WIDTH = STEDMA40_ESIZE_64_BIT
 };
 
-struct stedma40_half_channel_info {
-       enum stedma40_endianess endianess;
-       enum stedma40_periph_data_width data_width;
-       int psize;
-       enum stedma40_flow_ctrl flow_ctrl;
-};
-
 enum stedma40_xfer_dir {
-       STEDMA40_MEM_TO_MEM,
+       STEDMA40_MEM_TO_MEM = 1,
        STEDMA40_MEM_TO_PERIPH,
        STEDMA40_PERIPH_TO_MEM,
        STEDMA40_PERIPH_TO_PERIPH
 };
 
 
+/**
+ * struct stedma40_half_channel_info - dst/src channel configuration
+ *
+ * @big_endian: true if the src/dst should be read as big endian
+ * @data_width: Data width of the src/dst hardware
+ * @psize: Burst size
+ * @flow_ctrl: Flow control on/off.
+ */
+struct stedma40_half_channel_info {
+       bool big_endian;
+       enum stedma40_periph_data_width data_width;
+       int psize;
+       enum stedma40_flow_ctrl flow_ctrl;
+};
+
 /**
  * struct stedma40_chan_cfg - Structure to be filled by client drivers.
  *
  * @dir: MEM 2 MEM, PERIPH 2 MEM , MEM 2 PERIPH, PERIPH 2 PERIPH
- * @channel_type: priority, mode, mode options and interrupt configuration.
+ * @high_priority: true if high-priority
+ * @mode: channel mode: physical, logical, or operation
+ * @mode_opt: options for the chosen channel mode
  * @src_dev_type: Src device type
  * @dst_dev_type: Dst device type
  * @src_info: Parameters for src half channel
  * @dst_info: Parameters for dst half channel
- * @pre_transfer_data: Data to be passed on to the pre_transfer() function.
- * @pre_transfer: Callback used if needed before preparation of transfer.
- * Only called if device is set. size of bytes to transfer
- * (in case of multiple element transfer size is size of the first element).
  *
  *
  * This structure has to be filled by the client drivers.
@@ -126,15 +110,13 @@ enum stedma40_xfer_dir {
  */
 struct stedma40_chan_cfg {
        enum stedma40_xfer_dir                   dir;
-       unsigned int                             channel_type;
+       bool                                     high_priority;
+       enum stedma40_mode                       mode;
+       enum stedma40_mode_opt                   mode_opt;
        int                                      src_dev_type;
        int                                      dst_dev_type;
        struct stedma40_half_channel_info        src_info;
        struct stedma40_half_channel_info        dst_info;
-       void                                    *pre_transfer_data;
-       int (*pre_transfer)                     (struct dma_chan *chan,
-                                                void *data,
-                                                int size);
 };
 
 /**
@@ -147,7 +129,6 @@ struct stedma40_chan_cfg {
  * @memcpy_len: length of memcpy
  * @memcpy_conf_phy: default configuration of physical channel memcpy
  * @memcpy_conf_log: default configuration of logical channel memcpy
- * @llis_per_log: number of max linked list items per logical channel
  * @disabled_channels: A vector, ending with -1, that marks physical channels
  * that are for different reasons not available for the driver.
  */
@@ -159,23 +140,10 @@ struct stedma40_platform_data {
        u32                              memcpy_len;
        struct stedma40_chan_cfg        *memcpy_conf_phy;
        struct stedma40_chan_cfg        *memcpy_conf_log;
-       unsigned int                     llis_per_log;
-       int                              disabled_channels[8];
+       int                              disabled_channels[STEDMA40_MAX_PHYS];
 };
 
-/**
- * setdma40_set_psize() - Used for changing the package size of an
- * already configured dma channel.
- *
- * @chan: dmaengine handle
- * @src_psize: new package side for src. (STEDMA40_PSIZE*)
- * @src_psize: new package side for dst. (STEDMA40_PSIZE*)
- *
- * returns 0 on ok, otherwise negative error number.
- */
-int stedma40_set_psize(struct dma_chan *chan,
-                      int src_psize,
-                      int dst_psize);
+#ifdef CONFIG_STE_DMA40
 
 /**
  * stedma40_filter() - Provides stedma40_chan_cfg to the
@@ -238,4 +206,21 @@ dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
                                                  direction, flags);
 }
 
+#else
+static inline bool stedma40_filter(struct dma_chan *chan, void *data)
+{
+       return false;
+}
+
+static inline struct
+dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
+                                           dma_addr_t addr,
+                                           unsigned int size,
+                                           enum dma_data_direction direction,
+                                           unsigned long flags)
+{
+       return NULL;
+}
+#endif
+
 #endif
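A client-driver sketch using the reworked configuration: fill struct
stedma40_chan_cfg with the new high_priority/mode/mode_opt fields and hand
it to stedma40_filter() through dma_request_channel(). Device types and
data widths are illustrative:

#include <linux/dmaengine.h>
#include <plat/ste_dma40.h>

static struct stedma40_chan_cfg rx_cfg = {
	.dir		= STEDMA40_PERIPH_TO_MEM,
	.high_priority	= true,
	.mode		= STEDMA40_MODE_LOGICAL,
	.mode_opt	= STEDMA40_LCHAN_SRC_LOG_DST_LOG,
	.src_dev_type	= 0,	/* platform-specific event line */
	.dst_dev_type	= STEDMA40_DEV_DST_MEMORY,
	.src_info = {
		.data_width	= STEDMA40_BYTE_WIDTH,
		.psize		= STEDMA40_PSIZE_LOG_1,
		.flow_ctrl	= STEDMA40_NO_FLOW_CTRL,
	},
	.dst_info = {
		.data_width	= STEDMA40_BYTE_WIDTH,
		.psize		= STEDMA40_PSIZE_LOG_1,
		.flow_ctrl	= STEDMA40_NO_FLOW_CTRL,
	},
};

static struct dma_chan *request_rx_chan(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	/* stedma40_filter() matches a free channel against the cfg */
	return dma_request_channel(mask, stedma40_filter, &rx_cfg);
}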
index 5e73c25f8f85b2165652b30555c07813accfb0d1..4aedcab7cd4b9257343e8f0be68edea2b48cd77e 100644 (file)
@@ -146,9 +146,11 @@ static int ptrace_setregs(struct task_struct *tsk, const void __user *uregs)
        return ret;
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        int ret;
+       void __user *datap = (void __user *) data;
 
        switch (request) {
        /* Read the word at location addr in the child process */
@@ -158,8 +160,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                break;
 
        case PTRACE_PEEKUSR:
-               ret = ptrace_read_user(child, addr,
-                                      (unsigned long __user *)data);
+               ret = ptrace_read_user(child, addr, datap);
                break;
 
        /* Write the word in data at location addr */
@@ -173,11 +174,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                break;
 
        case PTRACE_GETREGS:
-               ret = ptrace_getregs(child, (void __user *)data);
+               ret = ptrace_getregs(child, datap);
                break;
 
        case PTRACE_SETREGS:
-               ret = ptrace_setregs(child, (const void __user *)data);
+               ret = ptrace_setregs(child, datap);
                break;
 
        default:
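
This hunk belongs to a tree-wide change that gives arch_ptrace() unsigned addr/data parameters and casts data to a __user pointer once per function instead of at every call site. The tracer-visible ABI is unchanged; a hedged userspace sketch of the PTRACE_PEEKUSR convention the later hunks validate (a word-aligned byte offset into the USER area, with regno derived as addr >> 2 on several of the arches below):

	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <errno.h>
	#include <stdio.h>

	/* PTRACE_PEEKUSR takes a byte offset; the kernels in these hunks
	 * reject unaligned offsets (addr & 3) with -EIO. */
	static long peek_user_word(pid_t pid, unsigned long byte_offset)
	{
		long val;

		errno = 0;
		val = ptrace(PTRACE_PEEKUSR, pid, (void *)byte_offset, NULL);
		if (val == -1 && errno)
			perror("PTRACE_PEEKUSR");
		return val;
	}
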
index b35839354130d902453b0cdffad4ed9de4a583f7..75089f80855d0ca590dc6b9c7e9a2d30011c82ad 100644 (file)
  * Get contents of register REGNO in task TASK.
  */
 static inline long
-get_reg(struct task_struct *task, long regno, unsigned long __user *datap)
+get_reg(struct task_struct *task, unsigned long regno,
+       unsigned long __user *datap)
 {
        long tmp;
        struct pt_regs *regs = task_pt_regs(task);
 
-       if (regno & 3 || regno > PT_LAST_PSEUDO || regno < 0)
+       if (regno & 3 || regno > PT_LAST_PSEUDO)
                return -EIO;
 
        switch (regno) {
@@ -74,11 +75,11 @@ get_reg(struct task_struct *task, long regno, unsigned long __user *datap)
  * Write contents of register REGNO in task TASK.
  */
 static inline int
-put_reg(struct task_struct *task, long regno, unsigned long data)
+put_reg(struct task_struct *task, unsigned long regno, unsigned long data)
 {
        struct pt_regs *regs = task_pt_regs(task);
 
-       if (regno & 3 || regno > PT_LAST_PSEUDO || regno < 0)
+       if (regno & 3 || regno > PT_LAST_PSEUDO)
                return -EIO;
 
        switch (regno) {
@@ -240,7 +241,8 @@ void user_disable_single_step(struct task_struct *child)
        clear_tsk_thread_flag(child, TIF_SINGLESTEP);
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        int ret;
        unsigned long __user *datap = (unsigned long __user *)data;
@@ -368,14 +370,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                return copy_regset_to_user(child, &user_bfin_native_view,
                                           REGSET_GENERAL,
                                           0, sizeof(struct pt_regs),
-                                          (void __user *)data);
+                                          datap);
 
        case PTRACE_SETREGS:
                pr_debug("ptrace: PTRACE_SETREGS\n");
                return copy_regset_from_user(child, &user_bfin_native_view,
                                             REGSET_GENERAL,
                                             0, sizeof(struct pt_regs),
-                                            (const void __user *)data);
+                                            datap);
 
        case_default:
        default:
index e70c804e9377d987e67e0851f8b29ac35ce24e14..320065f3cbe5d63ededd695fc2a23412cf033a07 100644 (file)
@@ -76,9 +76,11 @@ ptrace_disable(struct task_struct *child)
  * (in user space) where the result of the ptrace call is written (instead of
  * being returned).
  */
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        int ret;
+       unsigned int regno = addr >> 2;
        unsigned long __user *datap = (unsigned long __user *)data;
 
        switch (request) {
@@ -93,10 +95,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                        unsigned long tmp;
 
                        ret = -EIO;
-                       if ((addr & 3) || addr < 0 || addr > PT_MAX << 2)
+                       if ((addr & 3) || regno > PT_MAX)
                                break;
 
-                       tmp = get_reg(child, addr >> 2);
+                       tmp = get_reg(child, regno);
                        ret = put_user(tmp, datap);
                        break;
                }
@@ -110,19 +112,17 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                /* Write the word at location address in the USER area. */
                case PTRACE_POKEUSR:
                        ret = -EIO;
-                       if ((addr & 3) || addr < 0 || addr > PT_MAX << 2)
+                       if ((addr & 3) || regno > PT_MAX)
                                break;
 
-                       addr >>= 2;
-
-                       if (addr == PT_DCCR) {
+                       if (regno == PT_DCCR) {
                                /* don't allow the tracing process to change stuff like
                                 * interrupt enable, kernel/user bit, dma enables etc.
                                 */
                                data &= DCCR_MASK;
                                data |= get_reg(child, PT_DCCR) & ~DCCR_MASK;
                        }
-                       if (put_reg(child, addr, data))
+                       if (put_reg(child, regno, data))
                                break;
                        ret = 0;
                        break;
@@ -141,7 +141,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                                        break;
                                }
                                
-                               data += sizeof(long);
+                               datap++;
                        }
 
                        break;
@@ -165,7 +165,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                                }
                                
                                put_reg(child, i, tmp);
-                               data += sizeof(long);
+                               datap++;
                        }
                        
                        break;
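
The DCCR special case above survives the cleanup: only bits inside DCCR_MASK may be supplied by the tracer, and everything else is re-read from the child so privileged state cannot be forged. A self-contained illustration of that guard (sanitize_reg is a made-up name for this sketch):

	#include <assert.h>

	/* Keep tracer-controlled bits inside `mask`; preserve the child's
	 * privileged bits (interrupt enable, kernel/user, DMA enables). */
	static unsigned long sanitize_reg(unsigned long data,
					  unsigned long current_val,
					  unsigned long mask)
	{
		return (data & mask) | (current_val & ~mask);
	}

	int main(void)
	{
		/* The tracer tries to set every bit; only the low byte
		 * (standing in for DCCR_MASK) actually changes. */
		assert(sanitize_reg(~0UL, 0x100UL, 0xffUL) == 0x1ffUL);
		return 0;
	}
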
index f4ebd1e7d0f598e369d1abd8a620e05fc00076c1..511ece94a574a7b6ae2538774923e3d8e760140f 100644 (file)
@@ -126,9 +126,11 @@ ptrace_disable(struct task_struct *child)
 }
 
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        int ret;
+       unsigned int regno = addr >> 2;
        unsigned long __user *datap = (unsigned long __user *)data;
 
        switch (request) {
@@ -163,10 +165,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                        unsigned long tmp;
 
                        ret = -EIO;
-                       if ((addr & 3) || addr < 0 || addr > PT_MAX << 2)
+                       if ((addr & 3) || regno > PT_MAX)
                                break;
 
-                       tmp = get_reg(child, addr >> 2);
+                       tmp = get_reg(child, regno);
                        ret = put_user(tmp, datap);
                        break;
                }
@@ -180,19 +182,17 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                /* Write the word at location address in the USER area. */
                case PTRACE_POKEUSR:
                        ret = -EIO;
-                       if ((addr & 3) || addr < 0 || addr > PT_MAX << 2)
+                       if ((addr & 3) || regno > PT_MAX)
                                break;
 
-                       addr >>= 2;
-
-                       if (addr == PT_CCS) {
+                       if (regno == PT_CCS) {
                                /* don't allow the tracing process to change stuff like
                                 * interrupt enable, kernel/user bit, dma enables etc.
                                 */
                                data &= CCS_MASK;
                                data |= get_reg(child, PT_CCS) & ~CCS_MASK;
                        }
-                       if (put_reg(child, addr, data))
+                       if (put_reg(child, regno, data))
                                break;
                        ret = 0;
                        break;
index fac028936a041aab80f80506901e643787caaf3b..9d68f7fac73041c6902c1e98b90580a59fe0f82f 100644 (file)
@@ -254,23 +254,26 @@ void ptrace_disable(struct task_struct *child)
        user_disable_single_step(child);
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        unsigned long tmp;
        int ret;
+       int regno = addr >> 2;
+       unsigned long __user *datap = (unsigned long __user *) data;
 
        switch (request) {
                /* read the word at location addr in the USER area. */
        case PTRACE_PEEKUSR: {
                tmp = 0;
                ret = -EIO;
-               if ((addr & 3) || addr < 0)
+               if (addr & 3)
                        break;
 
                ret = 0;
-               switch (addr >> 2) {
+               switch (regno) {
                case 0 ... PT__END - 1:
-                       tmp = get_reg(child, addr >> 2);
+                       tmp = get_reg(child, regno);
                        break;
 
                case PT__END + 0:
@@ -299,23 +302,18 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                }
 
                if (ret == 0)
-                       ret = put_user(tmp, (unsigned long *) data);
+                       ret = put_user(tmp, datap);
                break;
        }
 
        case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
                ret = -EIO;
-               if ((addr & 3) || addr < 0)
+               if (addr & 3)
                        break;
 
-               ret = 0;
-               switch (addr >> 2) {
+               switch (regno) {
                case 0 ... PT__END - 1:
-                       ret = put_reg(child, addr >> 2, data);
-                       break;
-
-               default:
-                       ret = -EIO;
+                       ret = put_reg(child, regno, data);
                        break;
                }
                break;
@@ -324,25 +322,25 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                return copy_regset_to_user(child, &user_frv_native_view,
                                           REGSET_GENERAL,
                                           0, sizeof(child->thread.user->i),
-                                          (void __user *)data);
+                                          datap);
 
        case PTRACE_SETREGS:    /* Set all integer regs in the child. */
                return copy_regset_from_user(child, &user_frv_native_view,
                                             REGSET_GENERAL,
                                             0, sizeof(child->thread.user->i),
-                                            (const void __user *)data);
+                                            datap);
 
        case PTRACE_GETFPREGS:  /* Get the child FP/Media state. */
                return copy_regset_to_user(child, &user_frv_native_view,
                                           REGSET_FPMEDIA,
                                           0, sizeof(child->thread.user->f),
-                                          (void __user *)data);
+                                          datap);
 
        case PTRACE_SETFPREGS:  /* Set the child FP/Media state. */
                return copy_regset_from_user(child, &user_frv_native_view,
                                             REGSET_FPMEDIA,
                                             0, sizeof(child->thread.user->f),
-                                            (const void __user *)data);
+                                            datap);
 
        default:
                ret = ptrace_request(child, request, addr, data);
index 61088dcc159432ad6063d60aee452d7866a2ef4f..fd7fcd4c2e3399d844f50fed780093880639cb10 100644 (file)
@@ -68,7 +68,7 @@ EXPORT_SYMBOL(__kmap_atomic);
 
 void __kunmap_atomic(void *kvaddr)
 {
-       int type = kmap_atomic_idx_pop();
+       int type = kmap_atomic_idx();
        switch (type) {
        case 0:         __kunmap_atomic_primary(4, 6);  break;
        case 1:         __kunmap_atomic_primary(5, 7);  break;
@@ -83,6 +83,7 @@ void __kunmap_atomic(void *kvaddr)
        default:
                BUG();
        }
+       kmap_atomic_idx_pop();
        pagefault_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
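
Note the reordering in this hunk: the kmap stack index is now popped only after the slot has been torn down. Presumably this closes a window in which an interrupt arriving between the pop and the unmap could kmap_atomic() the just-released index while the old mapping was still live. A schematic of the corrected ordering (a sketch against kernel internals, not compilable standalone; unmap_slot is a hypothetical stand-in for the per-arch teardown):

	void example_kunmap_atomic(void *kvaddr)
	{
		int type = kmap_atomic_idx();	/* peek at the live index */

		unmap_slot(type, kvaddr);	/* hypothetical per-arch teardown */
		kmap_atomic_idx_pop();		/* only now release the slot */
		pagefault_enable();
	}
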
index df114122ebdf6b42659e42c86df594ba84bc544b..497fa89b5df40815ca3b20b4c252a8d9e09c3d1a 100644 (file)
@@ -50,27 +50,29 @@ void ptrace_disable(struct task_struct *child)
        user_disable_single_step(child);
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        int ret;
+       int regno = addr >> 2;
+       unsigned long __user *datap = (unsigned long __user *) data;
 
        switch (request) {
        /* read the word at location addr in the USER area. */
                case PTRACE_PEEKUSR: {
                        unsigned long tmp = 0;
                        
-                       if ((addr & 3) || addr < 0 || addr >= sizeof(struct user)) {
+                       if ((addr & 3) || addr >= sizeof(struct user)) {
                                ret = -EIO;
                                break ;
                        }
                        
                        ret = 0;  /* Default return condition */
-                       addr = addr >> 2; /* temporary hack. */
 
-                       if (addr < H8300_REGS_NO)
-                               tmp = h8300_get_reg(child, addr);
+                       if (regno < H8300_REGS_NO)
+                               tmp = h8300_get_reg(child, regno);
                        else {
-                               switch(addr) {
+                               switch (regno) {
                                case 49:
                                        tmp = child->mm->start_code;
                                        break ;
@@ -88,24 +90,23 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                                }
                        }
                        if (!ret)
-                               ret = put_user(tmp,(unsigned long *) data);
+                               ret = put_user(tmp, datap);
                        break ;
                }
 
       /* when I and D space are separate, this will have to be fixed. */
                case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
-                       if ((addr & 3) || addr < 0 || addr >= sizeof(struct user)) {
+                       if ((addr & 3) || addr >= sizeof(struct user)) {
                                ret = -EIO;
                                break ;
                        }
-                       addr = addr >> 2; /* temporary hack. */
                            
-                       if (addr == PT_ORIG_ER0) {
+                       if (regno == PT_ORIG_ER0) {
                                ret = -EIO;
                                break ;
                        }
-                       if (addr < H8300_REGS_NO) {
-                               ret = h8300_put_reg(child, addr, data);
+                       if (regno < H8300_REGS_NO) {
+                               ret = h8300_put_reg(child, regno, data);
                                break ;
                        }
                        ret = -EIO;
@@ -116,11 +117,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                        unsigned long tmp;
                        for (i = 0; i < H8300_REGS_NO; i++) {
                            tmp = h8300_get_reg(child, i);
-                           if (put_user(tmp, (unsigned long *) data)) {
+                           if (put_user(tmp, datap)) {
                                ret = -EFAULT;
                                break;
                            }
-                           data += sizeof(long);
+                           datap++;
                        }
                        ret = 0;
                        break;
@@ -130,12 +131,12 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                        int i;
                        unsigned long tmp;
                        for (i = 0; i < H8300_REGS_NO; i++) {
-                           if (get_user(tmp, (unsigned long *) data)) {
+                           if (get_user(tmp, datap)) {
                                ret = -EFAULT;
                                break;
                            }
                            h8300_put_reg(child, i, tmp);
-                           data += sizeof(long);
+                           datap++;
                        }
                        ret = 0;
                        break;
index 7fa8a859466017410a58e2f483721623cf1d6694..6073b187528a26a8ac16826aa98cd7986c5724ed 100644 (file)
@@ -56,10 +56,10 @@ typedef u64 cputime64_t;
 #define jiffies64_to_cputime64(__jif)  ((__jif) * (NSEC_PER_SEC / HZ))
 
 /*
- * Convert cputime <-> milliseconds
+ * Convert cputime <-> microseconds
  */
-#define cputime_to_msecs(__ct)         ((__ct) / NSEC_PER_MSEC)
-#define msecs_to_cputime(__msecs)      ((__msecs) * NSEC_PER_MSEC)
+#define cputime_to_usecs(__ct)         ((__ct) / NSEC_PER_USEC)
+#define usecs_to_cputime(__usecs)      ((__usecs) * NSEC_PER_USEC)
 
 /*
  * Convert cputime <-> seconds
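
On ia64, cputime_t counts nanoseconds, so the new conversions are plain divisions and multiplications by NSEC_PER_USEC. A standalone arithmetic check of the macros (re-declared locally for illustration):

	#include <assert.h>

	#define NSEC_PER_USEC		1000L
	#define cputime_to_usecs(ct)	((ct) / NSEC_PER_USEC)
	#define usecs_to_cputime(us)	((us) * NSEC_PER_USEC)

	int main(void)
	{
		assert(cputime_to_usecs(1500000L) == 1500L);	/* 1.5 ms */
		assert(usecs_to_cputime(1500L) == 1500000L);
		return 0;
	}
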
index 7c7909f9bc938b1bda4f1ece171ebd962aec2cfd..8848f43d819e55ba91bf07fc6ae8756f88e7ad36 100644 (file)
@@ -1177,7 +1177,8 @@ ptrace_disable (struct task_struct *child)
 }
 
 long
-arch_ptrace (struct task_struct *child, long request, long addr, long data)
+arch_ptrace (struct task_struct *child, long request,
+            unsigned long addr, unsigned long data)
 {
        switch (request) {
        case PTRACE_PEEKTEXT:
index 0021ade4cba8c86bf1d2fd348b283d8cac591955..20743754f2b218fad41b16898beab57a687e1f90 100644 (file)
@@ -622,9 +622,11 @@ void ptrace_disable(struct task_struct *child)
 }
 
 long
-arch_ptrace(struct task_struct *child, long request, long addr, long data)
+arch_ptrace(struct task_struct *child, long request,
+           unsigned long addr, unsigned long data)
 {
        int ret;
+       unsigned long __user *datap = (unsigned long __user *) data;
 
        switch (request) {
        /*
@@ -639,8 +641,7 @@ arch_ptrace(struct task_struct *child, long request, long addr, long data)
         * read the word at location addr in the USER area.
         */
        case PTRACE_PEEKUSR:
-               ret = ptrace_read_user(child, addr,
-                                      (unsigned long __user *)data);
+               ret = ptrace_read_user(child, addr, datap);
                break;
 
        /*
@@ -661,11 +662,11 @@ arch_ptrace(struct task_struct *child, long request, long addr, long data)
                break;
 
        case PTRACE_GETREGS:
-               ret = ptrace_getregs(child, (void __user *)data);
+               ret = ptrace_getregs(child, datap);
                break;
 
        case PTRACE_SETREGS:
-               ret = ptrace_setregs(child, (void __user *)data);
+               ret = ptrace_setregs(child, datap);
                break;
 
        default:
index 616e59752c29bc15f6c0e64c219c626caf755dca..0b252683cefb21a3d057987833a22ce1ea8b17be 100644 (file)
@@ -156,55 +156,57 @@ void user_disable_single_step(struct task_struct *child)
        singlestep_disable(child);
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        unsigned long tmp;
        int i, ret = 0;
+       int regno = addr >> 2; /* temporary hack. */
+       unsigned long __user *datap = (unsigned long __user *) data;
 
        switch (request) {
        /* read the word at location addr in the USER area. */
        case PTRACE_PEEKUSR:
                if (addr & 3)
                        goto out_eio;
-               addr >>= 2;     /* temporary hack. */
 
-               if (addr >= 0 && addr < 19) {
-                       tmp = get_reg(child, addr);
-               } else if (addr >= 21 && addr < 49) {
-                       tmp = child->thread.fp[addr - 21];
+               if (regno >= 0 && regno < 19) {
+                       tmp = get_reg(child, regno);
+               } else if (regno >= 21 && regno < 49) {
+                       tmp = child->thread.fp[regno - 21];
                        /* Convert internal fpu reg representation
                         * into long double format
                         */
-                       if (FPU_IS_EMU && (addr < 45) && !(addr % 3))
+                       if (FPU_IS_EMU && (regno < 45) && !(regno % 3))
                                tmp = ((tmp & 0xffff0000) << 15) |
                                      ((tmp & 0x0000ffff) << 16);
                } else
                        goto out_eio;
-               ret = put_user(tmp, (unsigned long *)data);
+               ret = put_user(tmp, datap);
                break;
 
-       case PTRACE_POKEUSR:    /* write the word at location addr in the USER area */
+       case PTRACE_POKEUSR:
+       /* write the word at location addr in the USER area */
                if (addr & 3)
                        goto out_eio;
-               addr >>= 2;     /* temporary hack. */
 
-               if (addr == PT_SR) {
+               if (regno == PT_SR) {
                        data &= SR_MASK;
                        data |= get_reg(child, PT_SR) & ~SR_MASK;
                }
-               if (addr >= 0 && addr < 19) {
-                       if (put_reg(child, addr, data))
+               if (regno >= 0 && regno < 19) {
+                       if (put_reg(child, regno, data))
                                goto out_eio;
-               } else if (addr >= 21 && addr < 48) {
+               } else if (regno >= 21 && regno < 48) {
                        /* Convert long double format
                         * into internal fpu reg representation
                         */
-                       if (FPU_IS_EMU && (addr < 45) && !(addr % 3)) {
-                               data = (unsigned long)data << 15;
+                       if (FPU_IS_EMU && (regno < 45) && !(regno % 3)) {
+                               data <<= 15;
                                data = (data & 0xffff0000) |
                                       ((data & 0x0000ffff) >> 1);
                        }
-                       child->thread.fp[addr - 21] = data;
+                       child->thread.fp[regno - 21] = data;
                } else
                        goto out_eio;
                break;
@@ -212,16 +214,16 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
        case PTRACE_GETREGS:    /* Get all gp regs from the child. */
                for (i = 0; i < 19; i++) {
                        tmp = get_reg(child, i);
-                       ret = put_user(tmp, (unsigned long *)data);
+                       ret = put_user(tmp, datap);
                        if (ret)
                                break;
-                       data += sizeof(long);
+                       datap++;
                }
                break;
 
        case PTRACE_SETREGS:    /* Set all gp regs in the child. */
                for (i = 0; i < 19; i++) {
-                       ret = get_user(tmp, (unsigned long *)data);
+                       ret = get_user(tmp, datap);
                        if (ret)
                                break;
                        if (i == PT_SR) {
@@ -229,25 +231,24 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                                tmp |= get_reg(child, PT_SR) & ~SR_MASK;
                        }
                        put_reg(child, i, tmp);
-                       data += sizeof(long);
+                       datap++;
                }
                break;
 
        case PTRACE_GETFPREGS:  /* Get the child FPU state. */
-               if (copy_to_user((void *)data, &child->thread.fp,
+               if (copy_to_user(datap, &child->thread.fp,
                                 sizeof(struct user_m68kfp_struct)))
                        ret = -EFAULT;
                break;
 
        case PTRACE_SETFPREGS:  /* Set the child FPU state. */
-               if (copy_from_user(&child->thread.fp, (void *)data,
+               if (copy_from_user(&child->thread.fp, datap,
                                   sizeof(struct user_m68kfp_struct)))
                        ret = -EFAULT;
                break;
 
        case PTRACE_GET_THREAD_AREA:
-               ret = put_user(task_thread_info(child)->tp_value,
-                              (unsigned long __user *)data);
+               ret = put_user(task_thread_info(child)->tp_value, datap);
                break;
 
        default:
index 6fe7c38cd5569de6c7eb8750f1f69be59422888f..6709fb70733559596d0cfcf0685288cd29741991 100644 (file)
@@ -112,9 +112,12 @@ void ptrace_disable(struct task_struct *child)
        user_disable_single_step(child);
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        int ret;
+       int regno = addr >> 2;
+       unsigned long __user *datap = (unsigned long __user *) data;
 
        switch (request) {
                /* read the word at location addr in the USER area. */
@@ -122,53 +125,48 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                        unsigned long tmp;
                        
                        ret = -EIO;
-                       if ((addr & 3) || addr < 0 ||
-                           addr > sizeof(struct user) - 3)
+                       if ((addr & 3) || addr > sizeof(struct user) - 3)
                                break;
                        
                        tmp = 0;  /* Default return condition */
-                       addr = addr >> 2; /* temporary hack. */
                        ret = -EIO;
-                       if (addr < 19) {
-                               tmp = get_reg(child, addr);
-                               if (addr == PT_SR)
+                       if (regno < 19) {
+                               tmp = get_reg(child, regno);
+                               if (regno == PT_SR)
                                        tmp >>= 16;
-                       } else if (addr >= 21 && addr < 49) {
-                               tmp = child->thread.fp[addr - 21];
-                       } else if (addr == 49) {
+                       } else if (regno >= 21 && regno < 49) {
+                               tmp = child->thread.fp[regno - 21];
+                       } else if (regno == 49) {
                                tmp = child->mm->start_code;
-                       } else if (addr == 50) {
+                       } else if (regno == 50) {
                                tmp = child->mm->start_data;
-                       } else if (addr == 51) {
+                       } else if (regno == 51) {
                                tmp = child->mm->end_code;
                        } else
                                break;
-                       ret = put_user(tmp,(unsigned long *) data);
+                       ret = put_user(tmp, datap);
                        break;
                }
 
                case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
                        ret = -EIO;
-                       if ((addr & 3) || addr < 0 ||
-                           addr > sizeof(struct user) - 3)
+                       if ((addr & 3) || addr > sizeof(struct user) - 3)
                                break;
 
-                       addr = addr >> 2; /* temporary hack. */
-                           
-                       if (addr == PT_SR) {
+                       if (regno == PT_SR) {
                                data &= SR_MASK;
                                data <<= 16;
                                data |= get_reg(child, PT_SR) & ~(SR_MASK << 16);
                        }
-                       if (addr < 19) {
-                               if (put_reg(child, addr, data))
+                       if (regno < 19) {
+                               if (put_reg(child, regno, data))
                                        break;
                                ret = 0;
                                break;
                        }
-                       if (addr >= 21 && addr < 48)
+                       if (regno >= 21 && regno < 48)
                        {
-                               child->thread.fp[addr - 21] = data;
+                               child->thread.fp[regno - 21] = data;
                                ret = 0;
                        }
                        break;
@@ -180,11 +178,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                            tmp = get_reg(child, i);
                            if (i == PT_SR)
                                tmp >>= 16;
-                           if (put_user(tmp, (unsigned long *) data)) {
+                           if (put_user(tmp, datap)) {
                                ret = -EFAULT;
                                break;
                            }
-                           data += sizeof(long);
+                           datap++;
                        }
                        ret = 0;
                        break;
@@ -194,7 +192,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                        int i;
                        unsigned long tmp;
                        for (i = 0; i < 19; i++) {
-                           if (get_user(tmp, (unsigned long *) data)) {
+                           if (get_user(tmp, datap)) {
                                ret = -EFAULT;
                                break;
                            }
@@ -204,7 +202,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                                tmp |= get_reg(child, PT_SR) & ~(SR_MASK << 16);
                            }
                            put_reg(child, i, tmp);
-                           data += sizeof(long);
+                           datap++;
                        }
                        ret = 0;
                        break;
@@ -213,7 +211,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 #ifdef PTRACE_GETFPREGS
                case PTRACE_GETFPREGS: { /* Get the child FPU state. */
                        ret = 0;
-                       if (copy_to_user((void *)data, &child->thread.fp,
+                       if (copy_to_user(datap, &child->thread.fp,
                                         sizeof(struct user_m68kfp_struct)))
                                ret = -EFAULT;
                        break;
@@ -223,7 +221,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 #ifdef PTRACE_SETFPREGS
                case PTRACE_SETFPREGS: { /* Set the child FPU state. */
                        ret = 0;
-                       if (copy_from_user(&child->thread.fp, (void *)data,
+                       if (copy_from_user(&child->thread.fp, datap,
                                           sizeof(struct user_m68kfp_struct)))
                                ret = -EFAULT;
                        break;
@@ -231,8 +229,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 #endif
 
        case PTRACE_GET_THREAD_AREA:
-               ret = put_user(task_thread_info(child)->tp_value,
-                              (unsigned long __user *)data);
+               ret = put_user(task_thread_info(child)->tp_value, datap);
                break;
 
                default:
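
The recurring conversion of `data += sizeof(long)` to `datap++` in these register loops is behaviour-preserving: pointer arithmetic on an unsigned long __user pointer already advances in sizeof(long) steps. Demonstrated standalone:

	#include <assert.h>

	int main(void)
	{
		unsigned long buf[2];
		unsigned long *datap = buf;
		unsigned long data = (unsigned long)buf;

		datap++;			/* the new idiom */
		data += sizeof(long);		/* the old idiom */
		assert((unsigned long)datap == data);
		return 0;
	}
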
index dc03ffc8174a9ee632c892105a1331c93c02dcd8..05ac8cc975d53192e80557b15b9ea0c3013a02f8 100644 (file)
@@ -73,7 +73,8 @@ static microblaze_reg_t *reg_save_addr(unsigned reg_offs,
        return (microblaze_reg_t *)((char *)regs + reg_offs);
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        int rval;
        unsigned long val = 0;
@@ -99,7 +100,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                        } else {
                                rval = -EIO;
                        }
-               } else if (addr >= 0 && addr < PT_SIZE && (addr & 0x3) == 0) {
+               } else if (addr < PT_SIZE && (addr & 0x3) == 0) {
                        microblaze_reg_t *reg_addr = reg_save_addr(addr, child);
                        if (request == PTRACE_PEEKUSR)
                                val = *reg_addr;
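
With addr now unsigned long, the `addr >= 0` half of the old check is tautological, which is why these hunks drop it; a value that used to be negative wraps to a huge offset that the remaining upper-bound test still rejects. A quick demonstration:

	#include <assert.h>

	#define PT_SIZE 1024UL	/* stand-in bound for this demonstration */

	int main(void)
	{
		unsigned long addr = (unsigned long)-4L;   /* "-4" from a tracer */

		assert(addr >= PT_SIZE);   /* caught by the surviving bound check */
		return 0;
	}
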
index 5f4b9d4e4114febd5679202355afe7a35ba4670b..f1f508e4f97117743e392ee8a59fe47238989f9f 100644 (file)
@@ -839,7 +839,7 @@ struct bridge_controller {
        nasid_t                 nasid;
        unsigned int            widget_id;
        unsigned int            irq_cpu;
-       dma64_addr_t            baddr;
+       u64                     baddr;
        unsigned int            pci_int[8];
 };
 
index c8777333e19833667fe882110fe40d954fee5eeb..d21c388c0116f06abfee3ebef3d9764245d818ed 100644 (file)
@@ -255,9 +255,13 @@ int ptrace_set_watch_regs(struct task_struct *child,
        return 0;
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        int ret;
+       void __user *addrp = (void __user *) addr;
+       void __user *datavp = (void __user *) data;
+       unsigned long __user *datalp = (void __user *) data;
 
        switch (request) {
        /* when I and D space are separate, these will need to be fixed. */
@@ -386,7 +390,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                        ret = -EIO;
                        goto out;
                }
-               ret = put_user(tmp, (unsigned long __user *) data);
+               ret = put_user(tmp, datalp);
                break;
        }
 
@@ -478,34 +482,31 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                }
 
        case PTRACE_GETREGS:
-               ret = ptrace_getregs(child, (__s64 __user *) data);
+               ret = ptrace_getregs(child, datavp);
                break;
 
        case PTRACE_SETREGS:
-               ret = ptrace_setregs(child, (__s64 __user *) data);
+               ret = ptrace_setregs(child, datavp);
                break;
 
        case PTRACE_GETFPREGS:
-               ret = ptrace_getfpregs(child, (__u32 __user *) data);
+               ret = ptrace_getfpregs(child, datavp);
                break;
 
        case PTRACE_SETFPREGS:
-               ret = ptrace_setfpregs(child, (__u32 __user *) data);
+               ret = ptrace_setfpregs(child, datavp);
                break;
 
        case PTRACE_GET_THREAD_AREA:
-               ret = put_user(task_thread_info(child)->tp_value,
-                               (unsigned long __user *) data);
+               ret = put_user(task_thread_info(child)->tp_value, datalp);
                break;
 
        case PTRACE_GET_WATCH_REGS:
-               ret = ptrace_get_watch_regs(child,
-                                       (struct pt_watch_regs __user *) addr);
+               ret = ptrace_get_watch_regs(child, addrp);
                break;
 
        case PTRACE_SET_WATCH_REGS:
-               ret = ptrace_set_watch_regs(child,
-                                       (struct pt_watch_regs __user *) addr);
+               ret = ptrace_set_watch_regs(child, addrp);
                break;
 
        default:
index 1e69b1fb4b85c16e1a1c5f07bc6f0e054e5196cc..3634c7ea06ac194de365370829080fa3ddc31858 100644 (file)
@@ -74,7 +74,7 @@ void __kunmap_atomic(void *kvaddr)
                return;
        }
 
-       type = kmap_atomic_idx_pop();
+       type = kmap_atomic_idx();
 #ifdef CONFIG_DEBUG_HIGHMEM
        {
                int idx = type + KM_TYPE_NR * smp_processor_id();
@@ -89,6 +89,7 @@ void __kunmap_atomic(void *kvaddr)
                local_flush_tlb_one(vaddr);
        }
 #endif
+       kmap_atomic_idx_pop();
        pagefault_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
index 7c2a2f7f8dc143889b74605d2741f5707ad330e4..365766a3d536025ad0692643edbad8ffb6fee0d5 100644 (file)
@@ -9,8 +9,19 @@ config MN10300
        def_bool y
        select HAVE_OPROFILE
 
-config AM33
-       def_bool y
+config AM33_2
+       def_bool n
+
+config AM33_3
+       def_bool n
+
+config AM34_2
+       def_bool n
+       select MN10300_HAS_ATOMIC_OPS_UNIT
+       select MN10300_HAS_CACHE_SNOOP
+
+config ERRATUM_NEED_TO_RELOAD_MMUCTR
+       def_bool y if AM33_3 || AM34_2
 
 config MMU
        def_bool y
@@ -37,7 +48,7 @@ config GENERIC_CALIBRATE_DELAY
        def_bool y
 
 config GENERIC_CMOS_UPDATE
-        def_bool y
+        def_bool n
 
 config GENERIC_FIND_NEXT_BIT
        def_bool y
@@ -45,6 +56,27 @@ config GENERIC_FIND_NEXT_BIT
 config GENERIC_HWEIGHT
        def_bool y
 
+config GENERIC_TIME
+       def_bool y
+
+config GENERIC_CLOCKEVENTS
+       def_bool y
+
+config GENERIC_CLOCKEVENTS_BUILD
+       def_bool y
+       depends on GENERIC_CLOCKEVENTS
+
+config GENERIC_CLOCKEVENTS_BROADCAST
+       bool
+
+config CEVT_MN10300
+       def_bool y
+       depends on GENERIC_CLOCKEVENTS
+
+config CSRC_MN10300
+       def_bool y
+       depends on GENERIC_TIME
+
 config GENERIC_BUG
        def_bool y
 
@@ -61,18 +93,14 @@ config GENERIC_HARDIRQS
 config HOTPLUG_CPU
        def_bool n
 
-config HZ
-       int
-       default 1000
-
-mainmenu "Matsushita MN10300/AM33 Kernel Configuration"
+mainmenu "Panasonic MN10300/AM33 Kernel Configuration"
 
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
 
 
-menu "Matsushita MN10300 system setup"
+menu "Panasonic MN10300 system setup"
 
 choice
        prompt "Unit type"
@@ -87,6 +115,10 @@ config MN10300_UNIT_ASB2303
 config MN10300_UNIT_ASB2305
        bool "ASB2305"
 
+config MN10300_UNIT_ASB2364
+       bool "ASB2364"
+       select SMSC911X_ARCH_HOOKS if SMSC911X
+
 endchoice
 
 choice
@@ -99,57 +131,51 @@ choice
 config MN10300_PROC_MN103E010
        bool "MN103E010"
        depends on MN10300_UNIT_ASB2303 || MN10300_UNIT_ASB2305
+       select AM33_2
+       select MN10300_PROC_HAS_TTYSM0
+       select MN10300_PROC_HAS_TTYSM1
+       select MN10300_PROC_HAS_TTYSM2
+
+config MN10300_PROC_MN2WS0050
+       bool "MN2WS0050"
+       depends on MN10300_UNIT_ASB2364
+       select AM34_2
        select MN10300_PROC_HAS_TTYSM0
        select MN10300_PROC_HAS_TTYSM1
        select MN10300_PROC_HAS_TTYSM2
 
 endchoice
 
-choice
-       prompt "Processor core support"
-       default MN10300_CPU_AM33V2
+config MN10300_HAS_ATOMIC_OPS_UNIT
+       def_bool n
        help
-         This option specifies the processor core for which the kernel will be
-         compiled. It affects the instruction set used.
-
-config MN10300_CPU_AM33V2
-       bool "AM33v2"
-
-endchoice
+         This should be enabled if the processor has an atomic ops unit
+         capable of doing LL/SC equivalent operations.
 
 config FPU
        bool "FPU present"
        default y
-       depends on MN10300_PROC_MN103E010
+       depends on MN10300_PROC_MN103E010 || MN10300_PROC_MN2WS0050
 
-choice
-       prompt "CPU Caching mode"
-       default MN10300_CACHE_WBACK
+config LAZY_SAVE_FPU
+       bool "Save FPU state lazily"
+       default y
+       depends on FPU && !SMP
        help
-         This option determines the caching mode for the kernel.
-
-         Write-Back caching mode involves the all reads and writes causing
-         the affected cacheline to be read into the cache first before being
-         operated upon. Memory is not then updated by a write until the cache
-         is filled and a cacheline needs to be displaced from the cache to
-         make room. Only at that point is it written back.
-
-         Write-Through caching only fetches cachelines from memory on a
-         read. Writes always get written directly to memory. If the affected
-         cacheline is also in cache, it will be updated too.
-
-         The final option is to turn of caching entirely.
+         Enable this to be lazy in the saving of the FPU state to the owning
+         task's thread struct.  This is useful if most tasks on the system
+         don't use the FPU as only those tasks that use it will pass it
+         between them, and the state needn't be saved for a task that isn't
+         using it.
 
-config MN10300_CACHE_WBACK
-       bool "Write-Back"
+         This can't be so easily used on SMP as the process that owns the FPU
+         state on a CPU may be currently running on another CPU, so for the
+         moment, it is disabled.
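+
The scheme this help text describes can be sketched in C as follows (all names here are illustrative, not this port's API): the context switch only disables the FPU for the incoming task, and the old owner's state is written back lazily on the next FPU trap.

	static struct task_struct *fpu_owner;	/* last task to touch the FPU */

	static void sketch_switch_to(struct task_struct *next)
	{
		/* Don't save FPU state here; arrange a trap on next use. */
		fpu_disable();			/* hypothetical helper */
	}

	static void sketch_fpu_trap(struct task_struct *curr)
	{
		if (fpu_owner && fpu_owner != curr)
			save_fpu_state(fpu_owner);	/* deferred save */
		if (fpu_owner != curr)
			load_fpu_state(curr);
		fpu_owner = curr;
		fpu_enable();
	}
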
 
-config MN10300_CACHE_WTHRU
-       bool "Write-Through"
+source "arch/mn10300/mm/Kconfig.cache"
 
-config MN10300_CACHE_DISABLED
-       bool "Disabled"
-
-endchoice
+config MN10300_TLB_USE_PIDR
+       def_bool y
 
 menu "Memory layout options"
 
@@ -170,24 +196,55 @@ config KERNEL_TEXT_ADDRESS
 
 config KERNEL_ZIMAGE_BASE_ADDRESS
        hex "Base address of compressed vmlinux image"
-       default "0x90700000"
+       default "0x50700000"
 
+config BOOT_STACK_OFFSET
+       hex
+       default "0xF00" if SMP
+       default "0xFF0" if !SMP
+
+config BOOT_STACK_SIZE
+       hex
+       depends on SMP
+       default "0x100"
 endmenu
 
-config PREEMPT
-       bool "Preemptible Kernel"
-       help
-         This option reduces the latency of the kernel when reacting to
-         real-time or interactive events by allowing a low priority process to
-         be preempted even if it is in kernel mode executing a system call.
-         This allows applications to run more reliably even when the system is
-         under load.
+config SMP
+       bool "Symmetric multi-processing support"
+       default y
+       depends on MN10300_PROC_MN2WS0038 || MN10300_PROC_MN2WS0050
+       ---help---
+         This enables support for systems with more than one CPU. If you have
+         a system with only one CPU, like most personal computers, say N. If
+         you have a system with more than one CPU, say Y.
+
+         If you say N here, the kernel will run on single and multiprocessor
+         machines, but will use only one CPU of a multiprocessor machine. If
+         you say Y here, the kernel will run on many, but not all,
+         singleprocessor machines. On a singleprocessor machine, the kernel
+         will run faster if you say N here.
 
-         Say Y here if you are building a kernel for a desktop, embedded
-         or real-time system.  Say N if you are unsure.
+         See also <file:Documentation/i386/IO-APIC.txt>,
+         <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at
+         <http://www.tldp.org/docs.html#howto>.
+
+         If you don't know what to do here, say N.
+
+config NR_CPUS
+       int
+       depends on SMP
+       default "2"
+
+config USE_GENERIC_SMP_HELPERS
+       bool
+       depends on SMP
+       default y
+
+source "kernel/Kconfig.preempt"
 
 config MN10300_CURRENT_IN_E2
        bool "Hold current task address in E2 register"
+       depends on !SMP
        default y
        help
          This option removes the E2/R2 register from the set available to gcc
@@ -209,12 +266,15 @@ config MN10300_USING_JTAG
          suppresses the use of certain hardware debugging features, such as
          single-stepping, which are taken over completely by the JTAG unit.
 
+source "kernel/Kconfig.hz"
+source "kernel/time/Kconfig"
+
 config MN10300_RTC
        bool "Using MN10300 RTC"
-       depends on MN10300_PROC_MN103E010
+       depends on MN10300_PROC_MN103E010 || MN10300_PROC_MN2WS0050
+       select GENERIC_CMOS_UPDATE
        default n
        help
-
          This option enables support for the RTC, thus enabling time to be
          tracked, even when system is powered down. This is available on-chip
          on the MN103E010.
@@ -306,14 +366,23 @@ config MN10300_TTYSM1
 
 choice
        prompt "Select the timer to supply the clock for SIF1"
-       default MN10300_TTYSM0_TIMER9
+       default MN10300_TTYSM1_TIMER12 \
+               if !(AM33_2 || AM33_3)
+       default MN10300_TTYSM1_TIMER9 \
+               if AM33_2 || AM33_3
        depends on MN10300_TTYSM1
 
+config MN10300_TTYSM1_TIMER12
+       bool "Use timer 12 (16-bit)"
+       depends on !(AM33_2 || AM33_3)
+
 config MN10300_TTYSM1_TIMER9
        bool "Use timer 9 (16-bit)"
+       depends on AM33_2 || AM33_3
 
 config MN10300_TTYSM1_TIMER3
        bool "Use timer 3 (8-bit)"
+       depends on AM33_2 || AM33_3
 
 endchoice
 
@@ -328,17 +397,107 @@ config MN10300_TTYSM2
 
 choice
        prompt "Select the timer to supply the clock for SIF2"
-       default MN10300_TTYSM0_TIMER10
+       default MN10300_TTYSM2_TIMER3 \
+               if !(AM33_2 || AM33_3)
+       default MN10300_TTYSM2_TIMER10 \
+               if AM33_2 || AM33_3
        depends on MN10300_TTYSM2
 
+config MN10300_TTYSM2_TIMER9
+       bool "Use timer 9 (16-bit)"
+       depends on !(AM33_2 || AM33_3)
+
+config MN10300_TTYSM2_TIMER1
+       bool "Use timer 1 (8-bit)"
+       depends on !(AM33_2 || AM33_3)
+
+config MN10300_TTYSM2_TIMER3
+       bool "Use timer 3 (8-bit)"
+       depends on !(AM33_2 || AM33_3)
+
 config MN10300_TTYSM2_TIMER10
        bool "Use timer 10 (16-bit)"
+       depends on AM33_2 || AM33_3
 
 endchoice
 
 config MN10300_TTYSM2_CTS
        bool "Enable the use of the CTS line /dev/ttySM2"
-       depends on MN10300_TTYSM2
+       depends on MN10300_TTYSM2 && AM33_2
+
+endmenu
+
+menu "Interrupt request priority options"
+
+comment "[!] NOTE: A lower number/level indicates a higher priority (0 is highest, 6 is lowest)"
+
+comment "____Non-maskable interrupt levels____"
+comment "The following must be set to a higher priority than local_irq_disable() and on-chip serial"
+
+config GDBSTUB_IRQ_LEVEL
+       int "GDBSTUB interrupt priority"
+       depends on GDBSTUB
+       range 0 1 if LINUX_CLI_LEVEL = 2
+       range 0 2 if LINUX_CLI_LEVEL = 3
+       range 0 3 if LINUX_CLI_LEVEL = 4
+       range 0 4 if LINUX_CLI_LEVEL = 5
+       range 0 5 if LINUX_CLI_LEVEL = 6
+       default 0
+
+comment "The following must be set to a higher priority than local_irq_disable()"
+
+config MN10300_SERIAL_IRQ_LEVEL
+       int "MN10300 on-chip serial interrupt priority"
+       depends on MN10300_TTYSM
+       range 1 1 if LINUX_CLI_LEVEL = 2
+       range 1 2 if LINUX_CLI_LEVEL = 3
+       range 1 3 if LINUX_CLI_LEVEL = 4
+       range 1 4 if LINUX_CLI_LEVEL = 5
+       range 1 5 if LINUX_CLI_LEVEL = 6
+       default 1
+
+comment "-"
+comment "____Maskable interrupt levels____"
+
+config LINUX_CLI_LEVEL
+       int "The highest interrupt priority excluded by local_irq_disable() (2-6)"
+       range 2 6
+       default 2
+       help
+         local_irq_disable() doesn't actually disable maskable interrupts -
+         what it does is restrict the levels of interrupt which are permitted
+         (a lower level indicates a higher priority) by lowering the value in
+         EPSW.IM from 7.  Any interrupt is permitted for which the level is
+         lower than EPSW.IM.
+
+         Certain interrupts, such as GDBSTUB and virtual MN10300 on-chip
+         serial DMA interrupts are allowed to interrupt normal disabled
+         sections.
+
+comment "The following must be set to a equal to or lower priority than LINUX_CLI_LEVEL"
+
+config TIMER_IRQ_LEVEL
+       int "Kernel timer interrupt priority"
+       range LINUX_CLI_LEVEL 6
+       default 4
+
+config PCI_IRQ_LEVEL
+       int "PCI interrupt priority"
+       depends on PCI
+       range LINUX_CLI_LEVEL 6
+       default 5
+
+config ETHERNET_IRQ_LEVEL
+       int "Ethernet interrupt priority"
+       depends on SMC91X || SMC911X || SMSC911X
+       range LINUX_CLI_LEVEL 6
+       default 6
+
+config EXT_SERIAL_IRQ_LEVEL
+       int "External serial port interrupt priority"
+       depends on SERIAL_8250
+       range LINUX_CLI_LEVEL 6
+       default 6
 
 endmenu
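
To make the priority scheme above concrete: a hedged sketch of what lowering the interrupt mask amounts to, assuming EPSW.IM occupies bits 8-10 (the field position is an assumption here, not taken from this patch):

	#define EPSW_IM_SHIFT	8			/* assumed field position */
	#define EPSW_IM		(7UL << EPSW_IM_SHIFT)

	/* local_irq_disable() clamps IM to LINUX_CLI_LEVEL rather than zero,
	 * so levels below it (GDBSTUB at 0, on-chip serial at 1) still fire. */
	static inline unsigned long sketch_clamp_im(unsigned long epsw,
						    unsigned long cli_level)
	{
		return (epsw & ~EPSW_IM) | (cli_level << EPSW_IM_SHIFT);
	}
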
 
index ac5c6bdb2f05c6535ab802cf34000f206995badb..7120282bf0d89c3240886cbad32b973bda1bfc6f 100644 (file)
@@ -36,6 +36,9 @@ endif
 ifeq ($(CONFIG_MN10300_PROC_MN103E010),y)
 PROCESSOR      := mn103e010
 endif
+ifeq ($(CONFIG_MN10300_PROC_MN2WS0050),y)
+PROCESSOR      := mn2ws0050
+endif
 
 ifeq ($(CONFIG_MN10300_UNIT_ASB2303),y)
 UNIT           := asb2303
@@ -43,6 +46,9 @@ endif
 ifeq ($(CONFIG_MN10300_UNIT_ASB2305),y)
 UNIT           := asb2305
 endif
+ifeq ($(CONFIG_MN10300_UNIT_ASB2364),y)
+UNIT           := asb2364
+endif
 
 
 head-y         := arch/mn10300/kernel/head.o arch/mn10300/kernel/init_task.o
index 502e1eb56709685ce7e698e6a93bfb9ddb50e795..7b50345b9e840dccc0a299efed88c402324a9da2 100644 (file)
 
 #include <linux/linkage.h>
 #include <asm/cpu-regs.h>
+#include <asm/cache.h>
+#ifdef CONFIG_SMP
+#include <proc/smp-regs.h>
+#endif
 
        .globl startup_32
 startup_32:
-       # first save off parameters from bootloader
+#ifdef CONFIG_SMP
+       #
+       # Secondary CPUs jump directly to the kernel entry point
+       #
+       # Must save primary CPU's D0-D2 registers as they hold boot parameters
+       #
+       mov     (CPUID), d3
+       and     CPUID_MASK,d3
+       beq     startup_primary
+       mov     CONFIG_KERNEL_TEXT_ADDRESS,a0
+       jmp     (a0)
+
+startup_primary:
+#endif /* CONFIG_SMP */
+
+       # first save parameters from bootloader
        mov     param_save_area,a0
        mov     d0,(a0)
        mov     d1,(4,a0)
@@ -37,8 +56,15 @@ startup_32:
        mov     (a0),d0
        btst    CHCTR_ICBUSY|CHCTR_DCBUSY,d0            # wait till not busy
        lne
-       mov     CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD,d0   # writethru dcache
+
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+#ifdef CONFIG_MN10300_CACHE_WBACK
+       mov     CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK,d0
+#else
+       mov     CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRTHROUGH,d0
+#endif /* WBACK */
        movhu   d0,(a0)                                 # enable
+#endif /* !ENABLED */
 
        # clear the BSS area
        mov     __bss_start,a0
@@ -54,6 +80,9 @@ bssclear_end:
 
        # decompress the kernel
        call    decompress_kernel[],0
+#ifdef CONFIG_MN10300_CACHE_WBACK
+       call    mn10300_dcache_flush_inv[],0
+#endif
 
        # disable caches again
        mov     CHCTR,a0
@@ -69,10 +98,46 @@ bssclear_end:
        mov     (4,a0),d1
        mov     (8,a0),d2
 
+       # jump to the kernel proper entry point
        mov     a3,sp
        mov     CONFIG_KERNEL_TEXT_ADDRESS,a0
        jmp     (a0)
 
+
+###############################################################################
+#
+# Cache flush routines
+#
+###############################################################################
+#ifdef CONFIG_MN10300_CACHE_WBACK
+mn10300_dcache_flush_inv:
+       movhu   (CHCTR),d0
+       btst    CHCTR_DCEN,d0
+       beq     mn10300_dcache_flush_inv_end
+
+       mov     L1_CACHE_NENTRIES,d1
+       clr     a1
+
+mn10300_dcache_flush_inv_loop:
+       mov     (DCACHE_PURGE_WAY0(0),a1),d0    # unconditional purge
+       mov     (DCACHE_PURGE_WAY1(0),a1),d0    # unconditional purge
+       mov     (DCACHE_PURGE_WAY2(0),a1),d0    # unconditional purge
+       mov     (DCACHE_PURGE_WAY3(0),a1),d0    # unconditional purge
+
+       add     L1_CACHE_BYTES,a1
+       add     -1,d1
+       bne     mn10300_dcache_flush_inv_loop
+
+mn10300_dcache_flush_inv_end:
+       ret     [],0
+#endif /* CONFIG_MN10300_CACHE_WBACK */
+
+
+###############################################################################
+#
+# Data areas
+#
+###############################################################################
        .data
        .align          4
 param_save_area:
index d80dfcb2c902302ce7e3b28004dd74b121df7bda..3f749b69ca71b732081ff166a35c1ec8cca43fc3 100644 (file)
@@ -12,6 +12,8 @@ CONFIG_SLAB=y
 CONFIG_PROFILING=y
 # CONFIG_BLOCK is not set
 CONFIG_PREEMPT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
 CONFIG_MN10300_RTC=y
 CONFIG_MN10300_TTYSM_CONSOLE=y
 CONFIG_MN10300_TTYSM0=y
diff --git a/arch/mn10300/configs/asb2364_defconfig b/arch/mn10300/configs/asb2364_defconfig
new file mode 100644 (file)
index 0000000..83ce2f2
--- /dev/null
@@ -0,0 +1,98 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_NS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_RELAY=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_EMBEDDED=y
+# CONFIG_KALLSYMS is not set
+# CONFIG_VM_EVENT_COUNTERS is not set
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLOCK is not set
+CONFIG_MN10300_UNIT_ASB2364=y
+CONFIG_PREEMPT=y
+# CONFIG_MN10300_USING_JTAG is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_MN10300_TTYSM_CONSOLE=y
+CONFIG_MN10300_TTYSM0=y
+CONFIG_MN10300_TTYSM0_TIMER2=y
+CONFIG_MN10300_TTYSM1=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+CONFIG_IPV6=y
+# CONFIG_INET6_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET6_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET6_XFRM_MODE_BEET is not set
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_CONNECTOR=y
+CONFIG_MTD=y
+CONFIG_MTD_DEBUG=y
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_REDBOOT_PARTS=y
+CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_GEOMETRY=y
+CONFIG_MTD_CFI_I4=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_NETDEVICES=y
+CONFIG_NET_ETHERNET=y
+CONFIG_SMSC911X=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_PROC_KCORE=y
+# CONFIG_PROC_PAGE_MONITOR is not set
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_JFFS2_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_ROOT_NFS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
index f0cc1f84a72f180c5492987327b855b36bf46561..92d2f9298e3832155b14bf21d1eb964e015b87f5 100644 (file)
@@ -1 +1,351 @@
+/* MN10300 Atomic counter operations
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef _ASM_ATOMIC_H
+#define _ASM_ATOMIC_H
+
+#include <asm/irqflags.h>
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_SMP
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+static inline
+unsigned long __xchg(volatile unsigned long *m, unsigned long val)
+{
+       unsigned long status;
+       unsigned long oldval;
+
+       asm volatile(
+               "1:     mov     %4,(_AAR,%3)    \n"
+               "       mov     (_ADR,%3),%1    \n"
+               "       mov     %5,(_ADR,%3)    \n"
+               "       mov     (_ADR,%3),%0    \n"     /* flush */
+               "       mov     (_ASR,%3),%0    \n"
+               "       or      %0,%0           \n"
+               "       bne     1b              \n"
+               : "=&r"(status), "=&r"(oldval), "=m"(*m)
+               : "a"(ATOMIC_OPS_BASE_ADDR), "r"(m), "r"(val)
+               : "memory", "cc");
+
+       return oldval;
+}
+
+static inline unsigned long __cmpxchg(volatile unsigned long *m,
+                                     unsigned long old, unsigned long new)
+{
+       unsigned long status;
+       unsigned long oldval;
+
+       asm volatile(
+               "1:     mov     %4,(_AAR,%3)    \n"
+               "       mov     (_ADR,%3),%1    \n"
+               "       cmp     %5,%1           \n"
+               "       bne     2f              \n"
+               "       mov     %6,(_ADR,%3)    \n"
+               "2:     mov     (_ADR,%3),%0    \n"     /* flush */
+               "       mov     (_ASR,%3),%0    \n"
+               "       or      %0,%0           \n"
+               "       bne     1b              \n"
+               : "=&r"(status), "=&r"(oldval), "=m"(*m)
+               : "a"(ATOMIC_OPS_BASE_ADDR), "r"(m),
+                 "r"(old), "r"(new)
+               : "memory", "cc");
+
+       return oldval;
+}
+#else  /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
+#error "No SMP atomic operation support!"
+#endif /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
+
+#else  /* CONFIG_SMP */
+
+/*
+ * Emulate xchg for non-SMP MN10300
+ */
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((struct __xchg_dummy *)(x))
+
+static inline
+unsigned long __xchg(volatile unsigned long *m, unsigned long val)
+{
+       unsigned long oldval;
+       unsigned long flags;
+
+       flags = arch_local_cli_save();
+       oldval = *m;
+       *m = val;
+       arch_local_irq_restore(flags);
+       return oldval;
+}
+
+/*
+ * Emulate cmpxchg for non-SMP MN10300
+ */
+static inline unsigned long __cmpxchg(volatile unsigned long *m,
+                                     unsigned long old, unsigned long new)
+{
+       unsigned long oldval;
+       unsigned long flags;
+
+       flags = arch_local_cli_save();
+       oldval = *m;
+       if (oldval == old)
+               *m = new;
+       arch_local_irq_restore(flags);
+       return oldval;
+}
+
+#endif /* CONFIG_SMP */
+
+#define xchg(ptr, v)                                           \
+       ((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr),    \
+                                    (unsigned long)(v)))
+
+#define cmpxchg(ptr, o, n)                                     \
+       ((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \
+                                       (unsigned long)(o),     \
+                                       (unsigned long)(n)))
+
+#define atomic_xchg(ptr, v)            (xchg(&(ptr)->counter, (v)))
+#define atomic_cmpxchg(v, old, new)    (cmpxchg(&((v)->counter), (old), (new)))
+
+#endif /* !__ASSEMBLY__ */
+
+#ifndef CONFIG_SMP
 #include <asm-generic/atomic.h>
+#else
+
+/*
+ * Atomic operations that C can't guarantee us.  Useful for
+ * resource counting etc..
+ */
+
+#define ATOMIC_INIT(i) { (i) }
+
+#ifdef __KERNEL__
+
+/**
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v.  Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+#define atomic_read(v) ((v)->counter)
+
+/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i.  Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+#define atomic_set(v, i) (((v)->counter) = (i))
+
+/**
+ * atomic_add_return - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns the result
+ * Note that the guaranteed useful range of an atomic_t is only 24 bits.
+ */
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+       int retval;
+#ifdef CONFIG_SMP
+       int status;
+
+       asm volatile(
+               "1:     mov     %4,(_AAR,%3)    \n"
+               "       mov     (_ADR,%3),%1    \n"
+               "       add     %5,%1           \n"
+               "       mov     %1,(_ADR,%3)    \n"
+               "       mov     (_ADR,%3),%0    \n"     /* flush */
+               "       mov     (_ASR,%3),%0    \n"
+               "       or      %0,%0           \n"
+               "       bne     1b              \n"
+               : "=&r"(status), "=&r"(retval), "=m"(v->counter)
+               : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
+               : "memory", "cc");
+
+#else
+       unsigned long flags;
+
+       flags = arch_local_cli_save();
+       retval = v->counter;
+       retval += i;
+       v->counter = retval;
+       arch_local_irq_restore(flags);
+#endif
+       return retval;
+}
+
+/**
+ * atomic_sub_return - subtract integer from atomic variable
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns the result
+ * Note that the guaranteed useful range of an atomic_t is only 24 bits.
+ */
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+       int retval;
+#ifdef CONFIG_SMP
+       int status;
+
+       asm volatile(
+               "1:     mov     %4,(_AAR,%3)    \n"
+               "       mov     (_ADR,%3),%1    \n"
+               "       sub     %5,%1           \n"
+               "       mov     %1,(_ADR,%3)    \n"
+               "       mov     (_ADR,%3),%0    \n"     /* flush */
+               "       mov     (_ASR,%3),%0    \n"
+               "       or      %0,%0           \n"
+               "       bne     1b              \n"
+               : "=&r"(status), "=&r"(retval), "=m"(v->counter)
+               : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
+               : "memory", "cc");
+
+#else
+       unsigned long flags;
+       flags = arch_local_cli_save();
+       retval = v->counter;
+       retval -= i;
+       v->counter = retval;
+       arch_local_irq_restore(flags);
+#endif
+       return retval;
+}
+
+static inline int atomic_add_negative(int i, atomic_t *v)
+{
+       return atomic_add_return(i, v) < 0;
+}
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+       atomic_add_return(i, v);
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+       atomic_sub_return(i, v);
+}
+
+static inline void atomic_inc(atomic_t *v)
+{
+       atomic_add_return(1, v);
+}
+
+static inline void atomic_dec(atomic_t *v)
+{
+       atomic_sub_return(1, v);
+}
+
+#define atomic_dec_return(v)           atomic_sub_return(1, (v))
+#define atomic_inc_return(v)           atomic_add_return(1, (v))
+
+#define atomic_sub_and_test(i, v)      (atomic_sub_return((i), (v)) == 0)
+#define atomic_dec_and_test(v)         (atomic_sub_return(1, (v)) == 0)
+#define atomic_inc_and_test(v)         (atomic_add_return(1, (v)) == 0)
+
+#define atomic_add_unless(v, a, u)                             \
+({                                                             \
+       int c, old;                                             \
+       c = atomic_read(v);                                     \
+       while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+               c = old;                                        \
+       c != (u);                                               \
+})
+
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
+/**
+ * atomic_clear_mask - Atomically clear bits in memory
+ * @mask: Mask of the bits to be cleared
+ * @addr: pointer to word in memory
+ *
+ * Atomically clears the bits set in mask from the memory word specified.
+ */
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+#ifdef CONFIG_SMP
+       int status;
+
+       asm volatile(
+               "1:     mov     %3,(_AAR,%2)    \n"
+               "       mov     (_ADR,%2),%0    \n"
+               "       and     %4,%0           \n"
+               "       mov     %0,(_ADR,%2)    \n"
+               "       mov     (_ADR,%2),%0    \n"     /* flush */
+               "       mov     (_ASR,%2),%0    \n"
+               "       or      %0,%0           \n"
+               "       bne     1b              \n"
+               : "=&r"(status), "=m"(*addr)
+               : "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(~mask)
+               : "memory", "cc");
+#else
+       unsigned long flags;
+
+       mask = ~mask;
+       flags = arch_local_cli_save();
+       *addr &= mask;
+       arch_local_irq_restore(flags);
+#endif
+}
+
+/**
+ * atomic_set_mask - Atomically set bits in memory
+ * @mask: Mask of the bits to be set
+ * @addr: pointer to word in memory
+ *
+ * Atomically sets the bits set in mask in the memory word specified.
+ */
+static inline void atomic_set_mask(unsigned long mask, unsigned long *addr)
+{
+#ifdef CONFIG_SMP
+       int status;
+
+       asm volatile(
+               "1:     mov     %3,(_AAR,%2)    \n"
+               "       mov     (_ADR,%2),%0    \n"
+               "       or      %4,%0           \n"
+               "       mov     %0,(_ADR,%2)    \n"
+               "       mov     (_ADR,%2),%0    \n"     /* flush */
+               "       mov     (_ASR,%2),%0    \n"
+               "       or      %0,%0           \n"
+               "       bne     1b              \n"
+               : "=&r"(status), "=m"(*addr)
+               : "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(mask)
+               : "memory", "cc");
+#else
+       unsigned long flags;
+
+       flags = arch_local_cli_save();
+       *addr |= mask;
+       arch_local_irq_restore(flags);
+#endif
+}
+
+/* Atomic operations are already serializing on MN10300??? */
+#define smp_mb__before_atomic_dec()    barrier()
+#define smp_mb__after_atomic_dec()     barrier()
+#define smp_mb__before_atomic_inc()    barrier()
+#define smp_mb__after_atomic_inc()     barrier()
+
+#include <asm-generic/atomic-long.h>
+
+#endif /* __KERNEL__ */
+#endif /* CONFIG_SMP */
+#endif /* _ASM_ATOMIC_H */
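The atomic_add_unless() macro above is the classic compare-and-swap retry loop: reread the counter, bail out if it holds the forbidden value, otherwise try to publish the new value and retry on failure. A minimal user-space sketch of the same pattern, using C11 atomics rather than the kernel API (all names below are illustrative):

#include <stdatomic.h>
#include <stdio.h>

/* add @a to @v unless the current value is @u; returns non-zero if
 * the add happened -- the same contract as atomic_add_unless() above */
static int add_unless(atomic_int *v, int a, int u)
{
        int c = atomic_load(v);

        /* a failed compare-exchange reloads c with the current value */
        while (c != u && !atomic_compare_exchange_weak(v, &c, c + a))
                ;
        return c != u;
}

int main(void)
{
        atomic_int refcount = 1;

        printf("%d\n", add_unless(&refcount, 1, 0));    /* 1: ref taken */
        printf("%d\n", atomic_load(&refcount));         /* 2 */
        return 0;
}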
diff --git a/arch/mn10300/include/asm/bitops.h b/arch/mn10300/include/asm/bitops.h
index 3f50e966107641f21f346a38e50cca97d2eda24b..3b8a868188f59e88f2926d48298b54c188f318f2 100644
--- a/arch/mn10300/include/asm/bitops.h
+++ b/arch/mn10300/include/asm/bitops.h
@@ -57,7 +57,7 @@
 #define clear_bit(nr, addr) ___clear_bit((nr), (addr))
 
 
-static inline void __clear_bit(int nr, volatile void *addr)
+static inline void __clear_bit(unsigned long nr, volatile void *addr)
 {
        unsigned int *a = (unsigned int *) addr;
        int mask;
@@ -70,15 +70,15 @@ static inline void __clear_bit(int nr, volatile void *addr)
 /*
  * test bit
  */
-static inline int test_bit(int nr, const volatile void *addr)
+static inline int test_bit(unsigned long nr, const volatile void *addr)
 {
-       return 1UL & (((const unsigned int *) addr)[nr >> 5] >> (nr & 31));
+       return 1UL & (((const volatile unsigned int *) addr)[nr >> 5] >> (nr & 31));
 }
 
 /*
  * change bit
  */
-static inline void __change_bit(int nr, volatile void *addr)
+static inline void __change_bit(unsigned long nr, volatile void *addr)
 {
        int     mask;
        unsigned int *a = (unsigned int *) addr;
@@ -88,7 +88,7 @@ static inline void __change_bit(int nr, volatile void *addr)
        *a ^= mask;
 }
 
-extern void change_bit(int nr, volatile void *addr);
+extern void change_bit(unsigned long nr, volatile void *addr);
 
 /*
  * test and set bit
@@ -135,7 +135,7 @@ extern void change_bit(int nr, volatile void *addr);
 /*
  * test and change bit
  */
-static inline int __test_and_change_bit(int nr, volatile void *addr)
+static inline int __test_and_change_bit(unsigned long nr, volatile void *addr)
 {
        int     mask, retval;
        unsigned int *a = (unsigned int *)addr;
@@ -148,7 +148,7 @@ static inline int __test_and_change_bit(int nr, volatile void *addr)
        return retval;
 }
 
-extern int test_and_change_bit(int nr, volatile void *addr);
+extern int test_and_change_bit(unsigned long nr, volatile void *addr);
 
 #include <asm-generic/bitops/lock.h>
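Widening the bit number from int to unsigned long matches the generic bitops prototypes and keeps the word-index arithmetic well defined for any bit number. A stand-alone user-space transcription of the test_bit() arithmetic above, for illustration only:

#include <stdio.h>

static int test_bit(unsigned long nr, const volatile void *addr)
{
        return 1UL & (((const volatile unsigned int *) addr)[nr >> 5] >> (nr & 31));
}

int main(void)
{
        unsigned int words[2] = { 0x00000001, 0x80000000 };

        /* bit 0 is the bottom of words[0]; bit 63 is the top of words[1] */
        printf("%d %d\n", test_bit(0, words), test_bit(63, words));    /* 1 1 */
        return 0;
}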
 
diff --git a/arch/mn10300/include/asm/cache.h b/arch/mn10300/include/asm/cache.h
index 781bf613366d747dae1a5cbb9924970f65bbe4c9..f29cde2cfc91af51b21b1644c92eaa974266eb65 100644
--- a/arch/mn10300/include/asm/cache.h
+++ b/arch/mn10300/include/asm/cache.h
 
 /* instruction cache access registers */
 #define ICACHE_DATA(WAY, ENTRY, OFF) \
-       __SYSREG(0xc8000000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10 + (OFF) * 4, u32)
+       __SYSREG(0xc8000000 + (WAY) * L1_CACHE_WAYDISP + \
+               (ENTRY) * L1_CACHE_BYTES + (OFF) * 4, u32)
 #define ICACHE_TAG(WAY, ENTRY)  \
-       __SYSREG(0xc8100000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10, u32)
+       __SYSREG(0xc8100000 + (WAY) * L1_CACHE_WAYDISP + \
+               (ENTRY) * L1_CACHE_BYTES, u32)
 
-/* instruction cache access registers */
+/* data cache access registers */
 #define DCACHE_DATA(WAY, ENTRY, OFF) \
-       __SYSREG(0xc8200000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10 + (OFF) * 4, u32)
+       __SYSREG(0xc8200000 + (WAY) * L1_CACHE_WAYDISP + \
+               (ENTRY) * L1_CACHE_BYTES + (OFF) * 4, u32)
 #define DCACHE_TAG(WAY, ENTRY)  \
-       __SYSREG(0xc8300000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10, u32)
+       __SYSREG(0xc8300000 + (WAY) * L1_CACHE_WAYDISP + \
+               (ENTRY) * L1_CACHE_BYTES, u32)
 
 #endif /* _ASM_CACHE_H */
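The change above replaces the hard-coded 0x10 entry stride with L1_CACHE_BYTES so the macros track the configured line size. A stand-alone sketch of the resulting address arithmetic; the 16-byte line size and 0x1000 way displacement below are assumptions for illustration, not values taken from this commit:

#include <stdio.h>

#define L1_CACHE_BYTES          16              /* assumed line size */
#define L1_CACHE_WAYDISP        0x1000UL        /* assumed way displacement */

static unsigned long icache_data_addr(unsigned int way, unsigned int entry,
                                      unsigned int off)
{
        return 0xc8000000UL + way * L1_CACHE_WAYDISP +
                entry * L1_CACHE_BYTES + off * 4;
}

int main(void)
{
        printf("%#lx\n", icache_data_addr(1, 2, 3));    /* 0xc800102c */
        return 0;
}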
diff --git a/arch/mn10300/include/asm/cacheflush.h b/arch/mn10300/include/asm/cacheflush.h
index 29e692f7f030423076d2dd7d09ad765b8c350beb..faed90240dedd1f3f88ba34105a268efa1dd6fd2 100644
--- a/arch/mn10300/include/asm/cacheflush.h
+++ b/arch/mn10300/include/asm/cacheflush.h
 #include <linux/mm.h>
 
 /*
- * virtually-indexed cache management (our cache is physically indexed)
+ * Primitive routines
  */
-#define flush_cache_all()                      do {} while (0)
-#define flush_cache_mm(mm)                     do {} while (0)
-#define flush_cache_dup_mm(mm)                 do {} while (0)
-#define flush_cache_range(mm, start, end)      do {} while (0)
-#define flush_cache_page(vma, vmaddr, pfn)     do {} while (0)
-#define flush_cache_vmap(start, end)           do {} while (0)
-#define flush_cache_vunmap(start, end)         do {} while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page)                        do {} while (0)
-#define flush_dcache_mmap_lock(mapping)                do {} while (0)
-#define flush_dcache_mmap_unlock(mapping)      do {} while (0)
-
-/*
- * physically-indexed cache management
- */
-#ifndef CONFIG_MN10300_CACHE_DISABLED
-
-extern void flush_icache_range(unsigned long start, unsigned long end);
-extern void flush_icache_page(struct vm_area_struct *vma, struct page *pg);
-
-#else
-
-#define flush_icache_range(start, end)         do {} while (0)
-#define flush_icache_page(vma, pg)             do {} while (0)
-
-#endif
-
-#define flush_icache_user_range(vma, pg, adr, len) \
-       flush_icache_range(adr, adr + len)
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-       do {                                    \
-               memcpy(dst, src, len);          \
-               flush_icache_page(vma, page);   \
-       } while (0)
-
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-       memcpy(dst, src, len)
-
-/*
- * primitive routines
- */
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+extern void mn10300_local_icache_inv(void);
+extern void mn10300_local_icache_inv_page(unsigned long start);
+extern void mn10300_local_icache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_local_icache_inv_range2(unsigned long start, unsigned long size);
+extern void mn10300_local_dcache_inv(void);
+extern void mn10300_local_dcache_inv_page(unsigned long start);
+extern void mn10300_local_dcache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_local_dcache_inv_range2(unsigned long start, unsigned long size);
 extern void mn10300_icache_inv(void);
+extern void mn10300_icache_inv_page(unsigned long start);
+extern void mn10300_icache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_icache_inv_range2(unsigned long start, unsigned long size);
 extern void mn10300_dcache_inv(void);
-extern void mn10300_dcache_inv_page(unsigned start);
-extern void mn10300_dcache_inv_range(unsigned start, unsigned end);
-extern void mn10300_dcache_inv_range2(unsigned start, unsigned size);
+extern void mn10300_dcache_inv_page(unsigned long start);
+extern void mn10300_dcache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_dcache_inv_range2(unsigned long start, unsigned long size);
 #ifdef CONFIG_MN10300_CACHE_WBACK
+extern void mn10300_local_dcache_flush(void);
+extern void mn10300_local_dcache_flush_page(unsigned long start);
+extern void mn10300_local_dcache_flush_range(unsigned long start, unsigned long end);
+extern void mn10300_local_dcache_flush_range2(unsigned long start, unsigned long size);
+extern void mn10300_local_dcache_flush_inv(void);
+extern void mn10300_local_dcache_flush_inv_page(unsigned long start);
+extern void mn10300_local_dcache_flush_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_local_dcache_flush_inv_range2(unsigned long start, unsigned long size);
 extern void mn10300_dcache_flush(void);
-extern void mn10300_dcache_flush_page(unsigned start);
-extern void mn10300_dcache_flush_range(unsigned start, unsigned end);
-extern void mn10300_dcache_flush_range2(unsigned start, unsigned size);
+extern void mn10300_dcache_flush_page(unsigned long start);
+extern void mn10300_dcache_flush_range(unsigned long start, unsigned long end);
+extern void mn10300_dcache_flush_range2(unsigned long start, unsigned long size);
 extern void mn10300_dcache_flush_inv(void);
-extern void mn10300_dcache_flush_inv_page(unsigned start);
-extern void mn10300_dcache_flush_inv_range(unsigned start, unsigned end);
-extern void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size);
+extern void mn10300_dcache_flush_inv_page(unsigned long start);
+extern void mn10300_dcache_flush_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_dcache_flush_inv_range2(unsigned long start, unsigned long size);
 #else
+#define mn10300_local_dcache_flush()                   do {} while (0)
+#define mn10300_local_dcache_flush_page(start)         do {} while (0)
+#define mn10300_local_dcache_flush_range(start, end)   do {} while (0)
+#define mn10300_local_dcache_flush_range2(start, size) do {} while (0)
+#define mn10300_local_dcache_flush_inv() \
+               mn10300_local_dcache_inv()
+#define mn10300_local_dcache_flush_inv_page(start) \
+               mn10300_local_dcache_inv_page(start)
+#define mn10300_local_dcache_flush_inv_range(start, end) \
+               mn10300_local_dcache_inv_range(start, end)
+#define mn10300_local_dcache_flush_inv_range2(start, size) \
+               mn10300_local_dcache_inv_range2(start, size)
 #define mn10300_dcache_flush()                         do {} while (0)
 #define mn10300_dcache_flush_page(start)               do {} while (0)
 #define mn10300_dcache_flush_range(start, end)         do {} while (0)
@@ -90,7 +79,26 @@ extern void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size);
        mn10300_dcache_inv_range2((start), (size))
 #endif /* CONFIG_MN10300_CACHE_WBACK */
 #else
+#define mn10300_local_icache_inv()                     do {} while (0)
+#define mn10300_local_icache_inv_page(start)           do {} while (0)
+#define mn10300_local_icache_inv_range(start, end)     do {} while (0)
+#define mn10300_local_icache_inv_range2(start, size)   do {} while (0)
+#define mn10300_local_dcache_inv()                     do {} while (0)
+#define mn10300_local_dcache_inv_page(start)           do {} while (0)
+#define mn10300_local_dcache_inv_range(start, end)     do {} while (0)
+#define mn10300_local_dcache_inv_range2(start, size)   do {} while (0)
+#define mn10300_local_dcache_flush()                   do {} while (0)
+#define mn10300_local_dcache_flush_inv_page(start)     do {} while (0)
+#define mn10300_local_dcache_flush_inv()               do {} while (0)
+#define mn10300_local_dcache_flush_inv_range(start, end) do {} while (0)
+#define mn10300_local_dcache_flush_inv_range2(start, size) do {} while (0)
+#define mn10300_local_dcache_flush_page(start)         do {} while (0)
+#define mn10300_local_dcache_flush_range(start, end)   do {} while (0)
+#define mn10300_local_dcache_flush_range2(start, size) do {} while (0)
 #define mn10300_icache_inv()                           do {} while (0)
+#define mn10300_icache_inv_page(start)                 do {} while (0)
+#define mn10300_icache_inv_range(start, end)           do {} while (0)
+#define mn10300_icache_inv_range2(start, size)         do {} while (0)
 #define mn10300_dcache_inv()                           do {} while (0)
 #define mn10300_dcache_inv_page(start)                 do {} while (0)
 #define mn10300_dcache_inv_range(start, end)           do {} while (0)
@@ -103,10 +111,56 @@ extern void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size);
 #define mn10300_dcache_flush_page(start)               do {} while (0)
 #define mn10300_dcache_flush_range(start, end)         do {} while (0)
 #define mn10300_dcache_flush_range2(start, size)       do {} while (0)
-#endif /* CONFIG_MN10300_CACHE_DISABLED */
+#endif /* CONFIG_MN10300_CACHE_ENABLED */
+
+/*
+ * Virtually-indexed cache management (our cache is physically indexed)
+ */
+#define flush_cache_all()                      do {} while (0)
+#define flush_cache_mm(mm)                     do {} while (0)
+#define flush_cache_dup_mm(mm)                 do {} while (0)
+#define flush_cache_range(mm, start, end)      do {} while (0)
+#define flush_cache_page(vma, vmaddr, pfn)     do {} while (0)
+#define flush_cache_vmap(start, end)           do {} while (0)
+#define flush_cache_vunmap(start, end)         do {} while (0)
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+#define flush_dcache_page(page)                        do {} while (0)
+#define flush_dcache_mmap_lock(mapping)                do {} while (0)
+#define flush_dcache_mmap_unlock(mapping)      do {} while (0)
+
+/*
+ * Physically-indexed cache management
+ */
+#if defined(CONFIG_MN10300_CACHE_FLUSH_ICACHE)
+extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
+extern void flush_icache_range(unsigned long start, unsigned long end);
+#elif defined(CONFIG_MN10300_CACHE_INV_ICACHE)
+static inline void flush_icache_page(struct vm_area_struct *vma,
+                                    struct page *page)
+{
+       mn10300_icache_inv_page(page_to_phys(page));
+}
+extern void flush_icache_range(unsigned long start, unsigned long end);
+#else
+#define flush_icache_range(start, end)         do {} while (0)
+#define flush_icache_page(vma, pg)             do {} while (0)
+#endif
+
+
+#define flush_icache_user_range(vma, pg, adr, len) \
+       flush_icache_range(adr, adr + len)
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+       do {                                    \
+               memcpy(dst, src, len);          \
+               flush_icache_page(vma, page);   \
+       } while (0)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+       memcpy(dst, src, len)
 
 /*
- * internal debugging function
+ * Internal debugging function
  */
 #ifdef CONFIG_DEBUG_PAGEALLOC
 extern void kernel_map_pages(struct page *page, int numpages, int enable);
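The usual consumer of flush_icache_range() is code patching: write the new instructions, then invalidate the icache over that range so the CPU refetches them rather than executing stale lines. A kernel-context sketch, not buildable stand-alone, with a hypothetical function name:

#include <linux/string.h>

static void install_patched_code(void *dst, const void *src, size_t len)
{
        memcpy(dst, src, len);          /* store the new instructions */
        flush_icache_range((unsigned long) dst,
                           (unsigned long) dst + len);
}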
diff --git a/arch/mn10300/include/asm/cpu-regs.h b/arch/mn10300/include/asm/cpu-regs.h
index 757e9b5388ea5bec4230fad2d44f78e812e68ff6..90ed4a365c97cb57c0a2d071ecd1357ca453b48b 100644
--- a/arch/mn10300/include/asm/cpu-regs.h
+++ b/arch/mn10300/include/asm/cpu-regs.h
@@ -15,7 +15,6 @@
 #include <linux/types.h>
 #endif
 
-#ifdef CONFIG_MN10300_CPU_AM33V2
 /* we tell the compiler to pretend to be AM33 so that it doesn't try and use
  * the FP regs, but tell the assembler that we're actually allowed AM33v2
  * instructions */
@@ -24,7 +23,6 @@ asm(" .am33_2\n");
 #else
 .am33_2
 #endif
-#endif
 
 #ifdef __KERNEL__
 
@@ -58,6 +56,9 @@ asm(" .am33_2\n");
 #define EPSW_nAR               0x00040000      /* register bank control */
 #define EPSW_ML                        0x00080000      /* monitor level */
 #define EPSW_FE                        0x00100000      /* FPU enable */
+#define EPSW_IM_SHIFT          8               /* EPSW_IM_SHIFT determines the interrupt mode */
+
+#define NUM2EPSW_IM(num)       ((num) << EPSW_IM_SHIFT)
 
 /* FPU registers */
 #define FPCR_EF_I              0x00000001      /* inexact result FPU exception flag */
@@ -99,9 +100,11 @@ asm(" .am33_2\n");
 #define CPUREV                 __SYSREGC(0xc0000050, u32)      /* CPU revision register */
 #define CPUREV_TYPE            0x0000000f      /* CPU type */
 #define CPUREV_TYPE_S          0
-#define CPUREV_TYPE_AM33V1     0x00000000      /* - AM33 V1 core, AM33/1.00 arch */
-#define CPUREV_TYPE_AM33V2     0x00000001      /* - AM33 V2 core, AM33/2.00 arch */
-#define CPUREV_TYPE_AM34V1     0x00000002      /* - AM34 V1 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM33_1     0x00000000      /* - AM33-1 core, AM33/1.00 arch */
+#define CPUREV_TYPE_AM33_2     0x00000001      /* - AM33-2 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM34_1     0x00000002      /* - AM34-1 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM33_3     0x00000003      /* - AM33-3 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM34_2     0x00000004      /* - AM34-2 core, AM33/3.00 arch */
 #define CPUREV_REVISION                0x000000f0      /* CPU revision */
 #define CPUREV_REVISION_S      4
 #define CPUREV_ICWAY           0x00000f00      /* number of instruction cache ways */
@@ -180,6 +183,21 @@ asm(" .am33_2\n");
 #define CHCTR_ICWMD            0x0f00          /* instruction cache way mode */
 #define CHCTR_DCWMD            0xf000          /* data cache way mode */
 
+#ifdef CONFIG_AM34_2
+#define ICIVCR                 __SYSREG(0xc0000c00, u32)       /* icache area invalidate control */
+#define ICIVCR_ICIVBSY         0x00000008                      /* icache area invalidate busy */
+#define ICIVCR_ICI             0x00000001                      /* icache area invalidate */
+
+#define ICIVMR                 __SYSREG(0xc0000c04, u32)       /* icache area invalidate mask */
+
+#define        DCPGCR                  __SYSREG(0xc0000c10, u32)       /* data cache area purge control */
+#define        DCPGCR_DCPGBSY          0x00000008                      /* data cache area purge busy */
+#define        DCPGCR_DCP              0x00000002                      /* data cache area purge */
+#define        DCPGCR_DCI              0x00000001                      /* data cache area invalidate */
+
+#define        DCPGMR                  __SYSREG(0xc0000c14, u32)       /* data cache area purge mask */
+#endif /* CONFIG_AM34_2 */
+
 /* MMU control registers */
 #define MMUCTR                 __SYSREG(0xc0000090, u32)       /* MMU control register */
 #define MMUCTR_IRP             0x0000003f      /* instruction TLB replace pointer */
@@ -203,6 +221,9 @@ asm(" .am33_2\n");
 #define MMUCTR_DTL_LOCK0_3     0x03000000      /* - entry 0-3 locked */
 #define MMUCTR_DTL_LOCK0_7     0x04000000      /* - entry 0-7 locked */
 #define MMUCTR_DTL_LOCK0_15    0x05000000      /* - entry 0-15 locked */
+#ifdef CONFIG_AM34_2
+#define MMUCTR_WTE             0x80000000      /* write-through cache TLB entry bit enable */
+#endif
 
 #define PIDR                   __SYSREG(0xc0000094, u16)       /* PID register */
 #define PIDR_PID               0x00ff          /* process identifier */
@@ -231,14 +252,6 @@ asm(" .am33_2\n");
 #define xPTEL_PS_4Mb           0x00000c00      /* - 4Mb page */
 #define xPTEL_PPN              0xfffff006      /* physical page number */
 
-#define xPTEL_V_BIT            0       /* bit numbers corresponding to above masks */
-#define xPTEL_UNUSED1_BIT      1
-#define xPTEL_UNUSED2_BIT      2
-#define xPTEL_C_BIT            3
-#define xPTEL_PV_BIT           4
-#define xPTEL_D_BIT            5
-#define xPTEL_G_BIT            9
-
 #define IPTEU                  __SYSREG(0xc00000a4, u32)       /* instruction TLB virtual addr */
 #define DPTEU                  __SYSREG(0xc00000b4, u32)       /* data TLB virtual addr */
 #define xPTEU_VPN              0xfffffc00      /* virtual page number */
@@ -262,7 +275,16 @@ asm(" .am33_2\n");
 #define xPTEL2_PS_128Kb                0x00000100      /* - 128Kb page */
 #define xPTEL2_PS_1Kb          0x00000200      /* - 1Kb page */
 #define xPTEL2_PS_4Mb          0x00000300      /* - 4Mb page */
-#define xPTEL2_PPN             0xfffffc00      /* physical page number */
+#define xPTEL2_CWT             0x00000400      /* cacheable write-through */
+#define xPTEL2_UNUSED1         0x00000800      /* unused bit (broadcast mask) */
+#define xPTEL2_PPN             0xfffff000      /* physical page number */
+
+#define xPTEL2_V_BIT           0       /* bit numbers corresponding to above masks */
+#define xPTEL2_C_BIT           1
+#define xPTEL2_PV_BIT          2
+#define xPTEL2_D_BIT           3
+#define xPTEL2_G_BIT           7
+#define xPTEL2_UNUSED1_BIT     11
 
 #define MMUFCR                 __SYSREGC(0xc000009c, u32)      /* MMU exception cause */
 #define MMUFCR_IFC             __SYSREGC(0xc000009c, u16)      /* MMU instruction excep cause */
@@ -285,6 +307,47 @@ asm(" .am33_2\n");
 #define MMUFCR_xFC_PR_RWK_RWU  0x01c0          /* - R/W kernel and R/W user */
 #define MMUFCR_xFC_ILLADDR     0x0200          /* illegal address excep flag */
 
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+/* atomic operation registers */
+#define AAR            __SYSREG(0xc0000a00, u32)       /* cacheable address */
+#define AAR2           __SYSREG(0xc0000a04, u32)       /* uncacheable address */
+#define ADR            __SYSREG(0xc0000a08, u32)       /* data */
+#define ASR            __SYSREG(0xc0000a0c, u32)       /* status */
+#define AARU           __SYSREG(0xd400aa00, u32)       /* user address */
+#define ADRU           __SYSREG(0xd400aa08, u32)       /* user data */
+#define ASRU           __SYSREG(0xd400aa0c, u32)       /* user status */
+
+#define ASR_RW         0x00000008      /* read */
+#define ASR_BW         0x00000004      /* bus error */
+#define ASR_IW         0x00000002      /* interrupt */
+#define ASR_LW         0x00000001      /* bus lock */
+
+#define ASRU_RW                ASR_RW          /* read */
+#define ASRU_BW                ASR_BW          /* bus error */
+#define ASRU_IW                ASR_IW          /* interrupt */
+#define ASRU_LW                ASR_LW          /* bus lock */
+
+/* in inline ASM, we stick the base pointer into a reg and use offsets from
+ * it */
+#define ATOMIC_OPS_BASE_ADDR 0xc0000a00
+#ifndef __ASSEMBLY__
+asm(
+       "_AAR   = 0\n"
+       "_AAR2  = 4\n"
+       "_ADR   = 8\n"
+       "_ASR   = 12\n");
+#else
+#define _AAR           0
+#define _AAR2          4
+#define _ADR           8
+#define _ASR           12
+#endif
+
+/* physical page address for userspace atomic operations registers */
+#define USER_ATOMIC_OPS_PAGE_ADDR  0xd400a000
+
+#endif /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
+
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_CPU_REGS_H */
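The _AAR/_AAR2/_ADR/_ASR offsets are what the inline asm in atomic.h adds to ATOMIC_OPS_BASE_ADDR, so they must land on the absolute register addresses defined above. A stand-alone sanity check, with the values copied from this header:

#include <assert.h>

#define ATOMIC_OPS_BASE_ADDR    0xc0000a00UL
enum { _AAR = 0, _AAR2 = 4, _ADR = 8, _ASR = 12 };

int main(void)
{
        assert(ATOMIC_OPS_BASE_ADDR + _AAR  == 0xc0000a00UL);   /* AAR  */
        assert(ATOMIC_OPS_BASE_ADDR + _AAR2 == 0xc0000a04UL);   /* AAR2 */
        assert(ATOMIC_OPS_BASE_ADDR + _ADR  == 0xc0000a08UL);   /* ADR  */
        assert(ATOMIC_OPS_BASE_ADDR + _ASR  == 0xc0000a0cUL);   /* ASR  */
        return 0;
}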
diff --git a/arch/mn10300/include/asm/dmactl-regs.h b/arch/mn10300/include/asm/dmactl-regs.h
index 58a199da0f4a801f8f4418a01e964a647fa153f2..80337b339c903fecf8544498b154157babeae2b8 100644
--- a/arch/mn10300/include/asm/dmactl-regs.h
+++ b/arch/mn10300/include/asm/dmactl-regs.h
 #ifndef _ASM_DMACTL_REGS_H
 #define _ASM_DMACTL_REGS_H
 
-#include <asm/cpu-regs.h>
-
-#ifdef __KERNEL__
-
-/* DMA registers */
-#define        DMxCTR(N)               __SYSREG(0xd2000000 + ((N) * 0x100), u32)       /* control reg */
-#define        DMxCTR_BG               0x0000001f      /* transfer request source */
-#define        DMxCTR_BG_SOFT          0x00000000      /* - software source */
-#define        DMxCTR_BG_SC0TX         0x00000002      /* - serial port 0 transmission */
-#define        DMxCTR_BG_SC0RX         0x00000003      /* - serial port 0 reception */
-#define        DMxCTR_BG_SC1TX         0x00000004      /* - serial port 1 transmission */
-#define        DMxCTR_BG_SC1RX         0x00000005      /* - serial port 1 reception */
-#define        DMxCTR_BG_SC2TX         0x00000006      /* - serial port 2 transmission */
-#define        DMxCTR_BG_SC2RX         0x00000007      /* - serial port 2 reception */
-#define        DMxCTR_BG_TM0UFLOW      0x00000008      /* - timer 0 underflow */
-#define        DMxCTR_BG_TM1UFLOW      0x00000009      /* - timer 1 underflow */
-#define        DMxCTR_BG_TM2UFLOW      0x0000000a      /* - timer 2 underflow */
-#define        DMxCTR_BG_TM3UFLOW      0x0000000b      /* - timer 3 underflow */
-#define        DMxCTR_BG_TM6ACMPCAP    0x0000000c      /* - timer 6A compare/capture */
-#define        DMxCTR_BG_AFE           0x0000000d      /* - analogue front-end interrupt source */
-#define        DMxCTR_BG_ADC           0x0000000e      /* - A/D conversion end interrupt source */
-#define        DMxCTR_BG_IRDA          0x0000000f      /* - IrDA interrupt source */
-#define        DMxCTR_BG_RTC           0x00000010      /* - RTC interrupt source */
-#define        DMxCTR_BG_XIRQ0         0x00000011      /* - XIRQ0 pin interrupt source */
-#define        DMxCTR_BG_XIRQ1         0x00000012      /* - XIRQ1 pin interrupt source */
-#define        DMxCTR_BG_XDMR0         0x00000013      /* - external request 0 source (XDMR0 pin) */
-#define        DMxCTR_BG_XDMR1         0x00000014      /* - external request 1 source (XDMR1 pin) */
-#define        DMxCTR_SAM              0x000000e0      /* DMA transfer src addr mode */
-#define        DMxCTR_SAM_INCR         0x00000000      /* - increment */
-#define        DMxCTR_SAM_DECR         0x00000020      /* - decrement */
-#define        DMxCTR_SAM_FIXED        0x00000040      /* - fixed */
-#define        DMxCTR_DAM              0x00000000      /* DMA transfer dest addr mode */
-#define        DMxCTR_DAM_INCR         0x00000000      /* - increment */
-#define        DMxCTR_DAM_DECR         0x00000100      /* - decrement */
-#define        DMxCTR_DAM_FIXED        0x00000200      /* - fixed */
-#define        DMxCTR_TM               0x00001800      /* DMA transfer mode */
-#define        DMxCTR_TM_BATCH         0x00000000      /* - batch transfer */
-#define        DMxCTR_TM_INTERM        0x00001000      /* - intermittent transfer */
-#define        DMxCTR_UT               0x00006000      /* DMA transfer unit */
-#define        DMxCTR_UT_1             0x00000000      /* - 1 byte */
-#define        DMxCTR_UT_2             0x00002000      /* - 2 byte */
-#define        DMxCTR_UT_4             0x00004000      /* - 4 byte */
-#define        DMxCTR_UT_16            0x00006000      /* - 16 byte */
-#define        DMxCTR_TEN              0x00010000      /* DMA channel transfer enable */
-#define        DMxCTR_RQM              0x00060000      /* external request input source mode */
-#define        DMxCTR_RQM_FALLEDGE     0x00000000      /* - falling edge */
-#define        DMxCTR_RQM_RISEEDGE     0x00020000      /* - rising edge */
-#define        DMxCTR_RQM_LOLEVEL      0x00040000      /* - low level */
-#define        DMxCTR_RQM_HILEVEL      0x00060000      /* - high level */
-#define        DMxCTR_RQF              0x01000000      /* DMA transfer request flag */
-#define        DMxCTR_XEND             0x80000000      /* DMA transfer end flag */
-
-#define        DMxSRC(N)               __SYSREG(0xd2000004 + ((N) * 0x100), u32)       /* control reg */
-
-#define        DMxDST(N)               __SYSREG(0xd2000008 + ((N) * 0x100), u32)       /* src addr reg */
-
-#define        DMxSIZ(N)               __SYSREG(0xd200000c + ((N) * 0x100), u32)       /* dest addr reg */
-#define DMxSIZ_CT              0x000fffff      /* number of bytes to transfer */
-
-#define        DMxCYC(N)               __SYSREG(0xd2000010 + ((N) * 0x100), u32)       /* intermittent
-                                                                                * size reg */
-#define DMxCYC_CYC             0x000000ff      /* number of interrmittent transfers -1 */
-
-#define DM0IRQ                 16              /* DMA channel 0 complete IRQ */
-#define DM1IRQ                 17              /* DMA channel 1 complete IRQ */
-#define DM2IRQ                 18              /* DMA channel 2 complete IRQ */
-#define DM3IRQ                 19              /* DMA channel 3 complete IRQ */
-
-#define        DM0ICR                  GxICR(DM0IRQ)   /* DMA channel 0 complete intr ctrl reg */
-#define        DM1ICR                  GxICR(DM0IR1)   /* DMA channel 1 complete intr ctrl reg */
-#define        DM2ICR                  GxICR(DM0IR2)   /* DMA channel 2 complete intr ctrl reg */
-#define        DM3ICR                  GxICR(DM0IR3)   /* DMA channel 3 complete intr ctrl reg */
-
-#ifndef __ASSEMBLY__
-
-struct mn10300_dmactl_regs {
-       u32             ctr;
-       const void      *src;
-       void            *dst;
-       u32             siz;
-       u32             cyc;
-} __attribute__((aligned(0x100)));
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* __KERNEL__ */
+#include <proc/dmactl-regs.h>
 
 #endif /* _ASM_DMACTL_REGS_H */
diff --git a/arch/mn10300/include/asm/elf.h b/arch/mn10300/include/asm/elf.h
index e5fa97cd9a147222c567b39f1e8bd01cec18ff4a..8157c9267f426ac7dabdd6f0dcef9a67bd2ff5a3 100644
--- a/arch/mn10300/include/asm/elf.h
+++ b/arch/mn10300/include/asm/elf.h
 #define R_MN10300_SYM_DIFF     33      /* Adjustment when relaxing. */
 #define R_MN10300_ALIGN        34      /* Alignment requirement. */
 
+/*
+ * AM33/AM34 HW Capabilities
+ */
+#define HWCAP_MN10300_ATOMIC_OP_UNIT   1       /* Has AM34 Atomic Operations */
+
+
 /*
  * ELF register definitions..
  */
@@ -47,8 +53,6 @@ typedef struct {
        u_int32_t       fpcr;
 } elf_fpregset_t;
 
-extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
-
 /*
  * This is used to ensure we don't load something for the wrong architecture
  */
@@ -130,7 +134,11 @@ do {                                               \
  * instruction set this CPU supports.  This could be done in user space,
  * but it's not easy, and we've already done it here.
  */
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+#define ELF_HWCAP      (HWCAP_MN10300_ATOMIC_OP_UNIT)
+#else
 #define ELF_HWCAP      (0)
+#endif
 
 /*
  * This yields a string that ld.so will use to load implementation
diff --git a/arch/mn10300/include/asm/exceptions.h b/arch/mn10300/include/asm/exceptions.h
index fa16466ef3f93cb38a4b325e08c46a2d4d7f1146..ca3e20508c77556a77492b85da8697d8eba4ced6 100644
--- a/arch/mn10300/include/asm/exceptions.h
+++ b/arch/mn10300/include/asm/exceptions.h
@@ -15,8 +15,8 @@
 
 /*
  * define the breakpoint instruction opcode to use
- * - note that the JTAG unit steals 0xFF, so we want to avoid that if we can
- *   (can use 0xF7)
+ * - note that the JTAG unit steals 0xFF, so you can't use JTAG and GDBSTUB at
+ *   the same time.
  */
 #define GDBSTUB_BKPT           0xFF
 
@@ -90,7 +90,6 @@ enum exception_code {
 
 extern void __set_intr_stub(enum exception_code code, void *handler);
 extern void set_intr_stub(enum exception_code code, void *handler);
-extern void set_jtag_stub(enum exception_code code, void *handler);
 
 struct pt_regs;
 
@@ -102,7 +101,6 @@ extern asmlinkage void dtlb_aerror(void);
 extern asmlinkage void raw_bus_error(void);
 extern asmlinkage void double_fault(void);
 extern asmlinkage int  system_call(struct pt_regs *);
-extern asmlinkage void fpu_exception(struct pt_regs *, enum exception_code);
 extern asmlinkage void nmi(struct pt_regs *, enum exception_code);
 extern asmlinkage void uninitialised_exception(struct pt_regs *,
                                               enum exception_code);
@@ -116,6 +114,8 @@ extern void die(const char *, struct pt_regs *, enum exception_code)
 
 extern int die_if_no_fixup(const char *, struct pt_regs *, enum exception_code);
 
+#define NUM2EXCEP_IRQ_LEVEL(num)       (EXCEP_IRQ_LEVEL0 + (num) * 8)
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_EXCEPTIONS_H */
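NUM2EXCEP_IRQ_LEVEL() encodes the fact that the per-level interrupt vectors sit 8 bytes apart. A stand-alone check of the arithmetic; EXCEP_IRQ_LEVEL0 is not visible in this hunk, so the 0x0280 below is an assumption for illustration:

#include <stdio.h>

#define EXCEP_IRQ_LEVEL0        0x0280          /* assumed level-0 vector */
#define NUM2EXCEP_IRQ_LEVEL(num)        (EXCEP_IRQ_LEVEL0 + (num) * 8)

int main(void)
{
        printf("%#x\n", NUM2EXCEP_IRQ_LEVEL(6));        /* 0x2b0 */
        return 0;
}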
diff --git a/arch/mn10300/include/asm/fpu.h b/arch/mn10300/include/asm/fpu.h
index 64a2b83a7a6aefb09490f4e68a8a3d8f67fa7686..b7625de8eade6754edf14f5de0340d9a0aa76e57 100644
--- a/arch/mn10300/include/asm/fpu.h
+++ b/arch/mn10300/include/asm/fpu.h
 #ifndef _ASM_FPU_H
 #define _ASM_FPU_H
 
-#include <asm/processor.h>
+#ifndef __ASSEMBLY__
+
+#include <linux/sched.h>
+#include <asm/exceptions.h>
 #include <asm/sigcontext.h>
-#include <asm/user.h>
 
 #ifdef __KERNEL__
 
-/* the task that owns the FPU state */
+extern asmlinkage void fpu_disabled(void);
+
+#ifdef CONFIG_FPU
+
+#ifdef CONFIG_LAZY_SAVE_FPU
+/* the task that currently owns the FPU state */
 extern struct task_struct *fpu_state_owner;
+#endif
 
-#define set_using_fpu(tsk)                             \
-do {                                                   \
-       (tsk)->thread.fpu_flags |= THREAD_USING_FPU;    \
-} while (0)
+#if (THREAD_USING_FPU & ~0xff)
+#error THREAD_USING_FPU must be smaller than 0x100.
+#endif
 
-#define clear_using_fpu(tsk)                           \
-do {                                                   \
-       (tsk)->thread.fpu_flags &= ~THREAD_USING_FPU;   \
-} while (0)
+static inline void set_using_fpu(struct task_struct *tsk)
+{
+       asm volatile(
+               "bset %0,(0,%1)"
+               :
+               : "i"(THREAD_USING_FPU), "a"(&tsk->thread.fpu_flags)
+               : "memory", "cc");
+}
 
-#define is_using_fpu(tsk) ((tsk)->thread.fpu_flags & THREAD_USING_FPU)
+static inline void clear_using_fpu(struct task_struct *tsk)
+{
+       asm volatile(
+               "bclr %0,(0,%1)"
+               :
+               : "i"(THREAD_USING_FPU), "a"(&tsk->thread.fpu_flags)
+               : "memory", "cc");
+}
 
-#define unlazy_fpu(tsk)                                        \
-do {                                                   \
-       preempt_disable();                              \
-       if (fpu_state_owner == (tsk))                   \
-               fpu_save(&tsk->thread.fpu_state);       \
-       preempt_enable();                               \
-} while (0)
-
-#define exit_fpu()                             \
-do {                                           \
-       struct task_struct *__tsk = current;    \
-       preempt_disable();                      \
-       if (fpu_state_owner == __tsk)           \
-               fpu_state_owner = NULL;         \
-       preempt_enable();                       \
-} while (0)
-
-#define flush_fpu()                                    \
-do {                                                   \
-       struct task_struct *__tsk = current;            \
-       preempt_disable();                              \
-       if (fpu_state_owner == __tsk) {                 \
-               fpu_state_owner = NULL;                 \
-               __tsk->thread.uregs->epsw &= ~EPSW_FE;  \
-       }                                               \
-       preempt_enable();                               \
-       clear_using_fpu(__tsk);                         \
-} while (0)
+#define is_using_fpu(tsk) ((tsk)->thread.fpu_flags & THREAD_USING_FPU)
 
-extern asmlinkage void fpu_init_state(void);
 extern asmlinkage void fpu_kill_state(struct task_struct *);
-extern asmlinkage void fpu_disabled(struct pt_regs *, enum exception_code);
 extern asmlinkage void fpu_exception(struct pt_regs *, enum exception_code);
-
-#ifdef CONFIG_FPU
+extern asmlinkage void fpu_invalid_op(struct pt_regs *, enum exception_code);
+extern asmlinkage void fpu_init_state(void);
 extern asmlinkage void fpu_save(struct fpu_state_struct *);
-extern asmlinkage void fpu_restore(struct fpu_state_struct *);
-#else
-#define fpu_save(a)
-#define fpu_restore(a)
-#endif /* CONFIG_FPU  */
-
-/*
- * signal frame handlers
- */
 extern int fpu_setup_sigcontext(struct fpucontext *buf);
 extern int fpu_restore_sigcontext(struct fpucontext *buf);
 
+static inline void unlazy_fpu(struct task_struct *tsk)
+{
+       preempt_disable();
+#ifndef CONFIG_LAZY_SAVE_FPU
+       if (tsk->thread.fpu_flags & THREAD_HAS_FPU) {
+               fpu_save(&tsk->thread.fpu_state);
+               tsk->thread.fpu_flags &= ~THREAD_HAS_FPU;
+               tsk->thread.uregs->epsw &= ~EPSW_FE;
+       }
+#else
+       if (fpu_state_owner == tsk)
+               fpu_save(&tsk->thread.fpu_state);
+#endif
+       preempt_enable();
+}
+
+static inline void exit_fpu(void)
+{
+#ifdef CONFIG_LAZY_SAVE_FPU
+       struct task_struct *tsk = current;
+
+       preempt_disable();
+       if (fpu_state_owner == tsk)
+               fpu_state_owner = NULL;
+       preempt_enable();
+#endif
+}
+
+static inline void flush_fpu(void)
+{
+       struct task_struct *tsk = current;
+
+       preempt_disable();
+#ifndef CONFIG_LAZY_SAVE_FPU
+       if (tsk->thread.fpu_flags & THREAD_HAS_FPU) {
+               tsk->thread.fpu_flags &= ~THREAD_HAS_FPU;
+               tsk->thread.uregs->epsw &= ~EPSW_FE;
+       }
+#else
+       if (fpu_state_owner == tsk) {
+               fpu_state_owner = NULL;
+               tsk->thread.uregs->epsw &= ~EPSW_FE;
+       }
+#endif
+       preempt_enable();
+       clear_using_fpu(tsk);
+}
+
+#else /* CONFIG_FPU */
+
+extern asmlinkage
+void unexpected_fpu_exception(struct pt_regs *, enum exception_code);
+#define fpu_invalid_op unexpected_fpu_exception
+#define fpu_exception unexpected_fpu_exception
+
+struct task_struct;
+struct fpu_state_struct;
+static inline bool is_using_fpu(struct task_struct *tsk) { return false; }
+static inline void set_using_fpu(struct task_struct *tsk) {}
+static inline void clear_using_fpu(struct task_struct *tsk) {}
+static inline void fpu_init_state(void) {}
+static inline void fpu_save(struct fpu_state_struct *s) {}
+static inline void fpu_kill_state(struct task_struct *tsk) {}
+static inline void unlazy_fpu(struct task_struct *tsk) {}
+static inline void exit_fpu(void) {}
+static inline void flush_fpu(void) {}
+static inline int fpu_setup_sigcontext(struct fpucontext *buf) { return 0; }
+static inline int fpu_restore_sigcontext(struct fpucontext *buf) { return 0; }
+#endif /* CONFIG_FPU  */
+
 #endif /* __KERNEL__ */
+#endif /* !__ASSEMBLY__ */
 #endif /* _ASM_FPU_H */
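In both configurations the caller-visible contract of unlazy_fpu() is the same: afterwards, the task's FPU state is valid in tsk->thread.fpu_state. A kernel-context sketch of a caller that needs that guarantee (the function itself is hypothetical, e.g. a coredump or ptrace path):

static void fetch_fpu_state(struct task_struct *tsk,
                            struct fpu_state_struct *out)
{
        unlazy_fpu(tsk);        /* flush any live FPU contents to memory */
        *out = tsk->thread.fpu_state;
}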
diff --git a/arch/mn10300/include/asm/frame.inc b/arch/mn10300/include/asm/frame.inc
index 5b1949bdf039a85854594e728dfe100e009b7ee3..2ee58e3eb6b355b75166bf57334c01498a66b8fd 100644
--- a/arch/mn10300/include/asm/frame.inc
+++ b/arch/mn10300/include/asm/frame.inc
@@ -18,6 +18,7 @@
 #ifndef __ASM_OFFSETS_H__
 #include <asm/asm-offsets.h>
 #endif
+#include <asm/thread_info.h>
 
 #define pi break
 
        movm    [d2,d3,a2,a3,exreg0,exreg1,exother],(sp)
        mov     sp,fp                           # FRAME pointer in A3
        add     -12,sp                          # allow for calls to be made
-       mov     (__frame),a1
-       mov     a1,(REG_NEXT,fp)
-       mov     fp,(__frame)
 
-       and     ~EPSW_FE,epsw                   # disable the FPU inside the kernel
+       # push the exception frame onto the front of the list
+       GET_THREAD_INFO a1
+       mov     (TI_frame,a1),a0
+       mov     a0,(REG_NEXT,fp)
+       mov     fp,(TI_frame,a1)
+
+       # disable the FPU inside the kernel
+       and     ~EPSW_FE,epsw
 
        # we may be holding current in E2
 #ifdef CONFIG_MN10300_CURRENT_IN_E2
 .macro RESTORE_ALL
        # peel back the stack to the calling frame
        # - this permits execve() to discard extra frames due to kernel syscalls
-       mov     (__frame),fp
+       GET_THREAD_INFO a0
+       mov     (TI_frame,a0),fp
        mov     fp,sp
-       mov     (REG_NEXT,fp),d0                # userspace has regs->next == 0
-       mov     d0,(__frame)
+       mov     (REG_NEXT,fp),d0
+       mov     d0,(TI_frame,a0)                # userspace has regs->next == 0
 
 #ifndef CONFIG_MN10300_USING_JTAG
        mov     (REG_EPSW,fp),d0
diff --git a/arch/mn10300/include/asm/gdb-stub.h b/arch/mn10300/include/asm/gdb-stub.h
index 41ed267639649b8e91fae62b31a80f3ae9d55763..f5495ad82b77bc753b218b3a3498ea089e511134 100644
--- a/arch/mn10300/include/asm/gdb-stub.h
+++ b/arch/mn10300/include/asm/gdb-stub.h
@@ -110,7 +110,7 @@ extern asmlinkage void gdbstub_exception(struct pt_regs *, enum exception_code);
 extern asmlinkage void __gdbstub_bug_trap(void);
 extern asmlinkage void __gdbstub_pause(void);
 
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
 extern asmlinkage void gdbstub_purge_cache(void);
 #else
 #define gdbstub_purge_cache()  do {} while (0)
diff --git a/arch/mn10300/include/asm/hardirq.h b/arch/mn10300/include/asm/hardirq.h
index 54d950117674f60a9d7b920fba81c9c6fd8d65ee..0000d650b55f130892d59face4f3ff4fd1365d47 100644
--- a/arch/mn10300/include/asm/hardirq.h
+++ b/arch/mn10300/include/asm/hardirq.h
 /* assembly code in softirq.h is sensitive to the offsets of these fields */
 typedef struct {
        unsigned int    __softirq_pending;
-       unsigned long   idle_timestamp;
+#ifdef CONFIG_MN10300_WD_TIMER
        unsigned int    __nmi_count;    /* arch dependent */
        unsigned int    __irq_count;    /* arch dependent */
+#endif
 } ____cacheline_aligned irq_cpustat_t;
 
 #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
diff --git a/arch/mn10300/include/asm/highmem.h b/arch/mn10300/include/asm/highmem.h
index f577ba2268caadc554d2cb34c5163394a83f901f..bfe2d88604d9041821f33c11019477ff47bda07d 100644
--- a/arch/mn10300/include/asm/highmem.h
+++ b/arch/mn10300/include/asm/highmem.h
@@ -87,7 +87,7 @@ static inline unsigned long __kmap_atomic(struct page *page)
                BUG();
 #endif
        set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
-       __flush_tlb_one(vaddr);
+       local_flush_tlb_one(vaddr);
 
        return vaddr;
 }
@@ -101,7 +101,7 @@ static inline void __kunmap_atomic(unsigned long vaddr)
                return;
        }
 
-       type = kmap_atomic_idx_pop();
+       type = kmap_atomic_idx();
 
 #if HIGHMEM_DEBUG
        {
@@ -116,9 +116,11 @@ static inline void __kunmap_atomic(unsigned long vaddr)
                 * this pte without first remap it
                 */
                pte_clear(kmap_pte - idx);
-               __flush_tlb_one(vaddr);
+               local_flush_tlb_one(vaddr);
        }
 #endif
+
+       kmap_atomic_idx_pop();
        pagefault_enable();
 }
 #endif /* __KERNEL__ */
diff --git a/arch/mn10300/include/asm/intctl-regs.h b/arch/mn10300/include/asm/intctl-regs.h
index ba544c796c5a4786c5dce521b1201ec79f5d62ff..585b708c2bc0620fb37b56df22a33df91e8ad5a1 100644
--- a/arch/mn10300/include/asm/intctl-regs.h
+++ b/arch/mn10300/include/asm/intctl-regs.h
 
 #ifdef __KERNEL__
 
-/* interrupt controller registers */
-#define GxICR(X)               __SYSREG(0xd4000000 + (X) * 4, u16)     /* group irq ctrl regs */
-
-#define IAGR                   __SYSREG(0xd4000100, u16)       /* intr acceptance group reg */
-#define IAGR_GN                        0x00fc          /* group number register
-                                                * (documentation _has_ to be wrong)
-                                                */
+/*
+ * Interrupt controller registers
+ * - Registers 64-191 are at addresses offset from the main array
+ */
+#define GxICR(X)                                               \
+       __SYSREG(0xd4000000 + (X) * 4 +                         \
+                (((X) >= 64) && ((X) < 192)) * 0xf00, u16)
 
-#define EXTMD                  __SYSREG(0xd4000200, u16)       /* external pin intr spec reg */
-#define GET_XIRQ_TRIGGER(X) ((EXTMD >> ((X) * 2)) & 3)
+#define GxICR_u8(X)                                                    \
+       __SYSREG(0xd4000000 + (X) * 4 +                                 \
+                (((X) >= 64) && ((X) < 192)) * 0xf00, u8)
 
-#define SET_XIRQ_TRIGGER(X,Y)                  \
-do {                                           \
-       u16 x = EXTMD;                          \
-       x &= ~(3 << ((X) * 2));                 \
-       x |= ((Y) & 3) << ((X) * 2);            \
-       EXTMD = x;                              \
-} while (0)
+#include <proc/intctl-regs.h>
 
 #define XIRQ_TRIGGER_LOWLEVEL  0
 #define XIRQ_TRIGGER_HILEVEL   1
@@ -59,10 +54,18 @@ do {                                                \
 #define GxICR_LEVEL_5          0x5000          /* - level 5 */
 #define GxICR_LEVEL_6          0x6000          /* - level 6 */
 #define GxICR_LEVEL_SHIFT      12
+#define GxICR_NMI              0x8000          /* nmi request flag */
+
+#define NUM2GxICR_LEVEL(num)   ((num) << GxICR_LEVEL_SHIFT)
 
 #ifndef __ASSEMBLY__
 extern void set_intr_level(int irq, u16 level);
-extern void set_intr_postackable(int irq);
+extern void mn10300_intc_set_level(unsigned int irq, unsigned int level);
+extern void mn10300_intc_clear(unsigned int irq);
+extern void mn10300_intc_set(unsigned int irq);
+extern void mn10300_intc_enable(unsigned int irq);
+extern void mn10300_intc_disable(unsigned int irq);
+extern void mn10300_set_lateack_irq_type(int irq);
 #endif
 
 /* external interrupts */
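The extra 0xf00 term moves IRQ groups 64-191 into a second register bank. A stand-alone transcription of the GxICR() address arithmetic above (register width ignored):

#include <stdio.h>

static unsigned long gxicr_addr(unsigned int x)
{
        return 0xd4000000UL + x * 4 + ((x >= 64 && x < 192) ? 0xf00 : 0);
}

int main(void)
{
        /* IRQ 63 ends the first bank; IRQ 64 jumps to the second */
        printf("%#lx %#lx\n", gxicr_addr(63), gxicr_addr(64));
        /* prints 0xd40000fc 0xd4001000 */
        return 0;
}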
diff --git a/arch/mn10300/include/asm/io.h b/arch/mn10300/include/asm/io.h
index c1a4119e6497ee2b23a4460d3c2ef05294cec128..787255da744e2647a592b1aea12e20f3e76f83f9 100644
--- a/arch/mn10300/include/asm/io.h
+++ b/arch/mn10300/include/asm/io.h
@@ -206,6 +206,19 @@ static inline void outsl(unsigned long addr, const void *buffer, int count)
 #define iowrite32_rep(p, src, count) \
        outsl((unsigned long) (p), (src), (count))
 
+#define readsb(p, dst, count) \
+       insb((unsigned long) (p), (dst), (count))
+#define readsw(p, dst, count) \
+       insw((unsigned long) (p), (dst), (count))
+#define readsl(p, dst, count) \
+       insl((unsigned long) (p), (dst), (count))
+
+#define writesb(p, src, count) \
+       outsb((unsigned long) (p), (src), (count))
+#define writesw(p, src, count) \
+       outsw((unsigned long) (p), (src), (count))
+#define writesl(p, src, count) \
+       outsl((unsigned long) (p), (src), (count))
 
 #define IO_SPACE_LIMIT 0xffffffff
 
diff --git a/arch/mn10300/include/asm/irq.h b/arch/mn10300/include/asm/irq.h
index 25c045d16d1c49fd2d85be264429c3f2549ccfd9..1a73fb3f60c6607734a57470b391c9f5b4085ebc 100644
--- a/arch/mn10300/include/asm/irq.h
+++ b/arch/mn10300/include/asm/irq.h
 /* this number is used when no interrupt has been assigned */
 #define NO_IRQ         INT_MAX
 
-/* hardware irq numbers */
-#define NR_IRQS                GxICR_NUM_IRQS
+/*
+ * hardware irq numbers
+ * - the ASB2364 has an FPGA with an IRQ multiplexer on it
+ */
+#ifdef CONFIG_MN10300_UNIT_ASB2364
+#include <unit/irq.h>
+#else
+#define NR_CPU_IRQS    GxICR_NUM_IRQS
+#define NR_IRQS                NR_CPU_IRQS
+#endif
 
 /* external hardware irq numbers */
 #define NR_XIRQS       GxICR_NUM_XIRQS
diff --git a/arch/mn10300/include/asm/irq_regs.h b/arch/mn10300/include/asm/irq_regs.h
index a848cd232eb4a1c14b30d1bec0c5c87998bfd450..97d0cb5af80784c632ebc8442ecb1343931ff865 100644
--- a/arch/mn10300/include/asm/irq_regs.h
+++ b/arch/mn10300/include/asm/irq_regs.h
 #define ARCH_HAS_OWN_IRQ_REGS
 
 #ifndef __ASSEMBLY__
-#define get_irq_regs() (__frame)
+static inline __attribute__((const))
+struct pt_regs *get_irq_regs(void)
+{
+       return current_frame();
+}
 #endif
 
 #endif /* _ASM_IRQ_REGS_H */
diff --git a/arch/mn10300/include/asm/irqflags.h b/arch/mn10300/include/asm/irqflags.h
index 5e529a117cb29915276dd30a17e41a02d406bfd0..7a7ae12c7119e42f9c83943dec0ad14ab9dfa7a7 100644
--- a/arch/mn10300/include/asm/irqflags.h
+++ b/arch/mn10300/include/asm/irqflags.h
@@ -13,6 +13,9 @@
 #define _ASM_IRQFLAGS_H
 
 #include <asm/cpu-regs.h>
+#ifndef __ASSEMBLY__
+#include <linux/smp.h>
+#endif
 
 /*
  * interrupt control
  *   - level 6 - timer interrupt
  * - "enabled":  run in IM7
  */
-#ifdef CONFIG_MN10300_TTYSM
-#define MN10300_CLI_LEVEL      EPSW_IM_2
-#else
-#define MN10300_CLI_LEVEL      EPSW_IM_1
-#endif
+#define MN10300_CLI_LEVEL      (CONFIG_LINUX_CLI_LEVEL << EPSW_IM_SHIFT)
 
 #ifndef __ASSEMBLY__
 
@@ -64,11 +63,12 @@ static inline unsigned long arch_local_irq_save(void)
 /*
  * we make sure arch_irq_enable() doesn't cause priority inversion
  */
-extern unsigned long __mn10300_irq_enabled_epsw;
+extern unsigned long __mn10300_irq_enabled_epsw[];
 
 static inline void arch_local_irq_enable(void)
 {
        unsigned long tmp;
+       int cpu = raw_smp_processor_id();
 
        asm volatile(
                "       mov     epsw,%0         \n"
@@ -76,8 +76,8 @@ static inline void arch_local_irq_enable(void)
                "       or      %2,%0           \n"
                "       mov     %0,epsw         \n"
                : "=&d"(tmp)
-               : "i"(~EPSW_IM), "r"(__mn10300_irq_enabled_epsw)
-               : "memory");
+               : "i"(~EPSW_IM), "r"(__mn10300_irq_enabled_epsw[cpu])
+               : "memory", "cc");
 }
 
 static inline void arch_local_irq_restore(unsigned long flags)
@@ -94,7 +94,7 @@ static inline void arch_local_irq_restore(unsigned long flags)
 
 static inline bool arch_irqs_disabled_flags(unsigned long flags)
 {
-       return (flags & EPSW_IM) <= MN10300_CLI_LEVEL;
+       return (flags & (EPSW_IE | EPSW_IM)) != (EPSW_IE | EPSW_IM_7);
 }
 
 static inline bool arch_irqs_disabled(void)
@@ -109,6 +109,9 @@ static inline bool arch_irqs_disabled(void)
  */
 static inline void arch_safe_halt(void)
 {
+#ifdef CONFIG_SMP
+       arch_local_irq_enable();
+#else
        asm volatile(
                "       or      %0,epsw \n"
                "       nop             \n"
@@ -117,7 +120,97 @@ static inline void arch_safe_halt(void)
                :
                : "i"(EPSW_IE|EPSW_IM), "n"(&CPUM), "i"(CPUM_SLEEP)
                : "cc");
+#endif
 }
 
+#define __sleep_cpu()                          \
+do {                                           \
+       asm volatile(                           \
+               "       bset    %1,(%0)\n"      \
+               "1:     btst    %1,(%0)\n"      \
+               "       bne     1b\n"           \
+               :                               \
+               : "i"(&CPUM), "i"(CPUM_SLEEP)   \
+               : "cc"                          \
+               );                              \
+} while (0)
+
+static inline void arch_local_cli(void)
+{
+       asm volatile(
+               "       and     %0,epsw         \n"
+               "       nop                     \n"
+               "       nop                     \n"
+               "       nop                     \n"
+               :
+               : "i"(~EPSW_IE)
+               : "memory"
+               );
+}
+
+static inline unsigned long arch_local_cli_save(void)
+{
+       unsigned long flags = arch_local_save_flags();
+       arch_local_cli();
+       return flags;
+}
+
+static inline void arch_local_sti(void)
+{
+       asm volatile(
+               "       or      %0,epsw         \n"
+               :
+               : "i"(EPSW_IE)
+               : "memory");
+}
+
+static inline void arch_local_change_intr_mask_level(unsigned long level)
+{
+       asm volatile(
+               "       and     %0,epsw         \n"
+               "       or      %1,epsw         \n"
+               :
+               : "i"(~EPSW_IM), "i"(EPSW_IE | level)
+               : "cc", "memory");
+}
+
+#else /* !__ASSEMBLY__ */
+
+#define LOCAL_SAVE_FLAGS(reg)                  \
+       mov     epsw,reg
+
+#define LOCAL_IRQ_DISABLE                              \
+       and     ~EPSW_IM,epsw;                          \
+       or      EPSW_IE|MN10300_CLI_LEVEL,epsw;         \
+       nop;                                            \
+       nop;                                            \
+       nop
+
+#define LOCAL_IRQ_ENABLE               \
+       or      EPSW_IE|EPSW_IM_7,epsw
+
+#define LOCAL_IRQ_RESTORE(reg) \
+       mov     reg,epsw
+
+#define LOCAL_CLI_SAVE(reg)    \
+       mov     epsw,reg;       \
+       and     ~EPSW_IE,epsw;  \
+       nop;                    \
+       nop;                    \
+       nop
+
+#define LOCAL_CLI              \
+       and     ~EPSW_IE,epsw;  \
+       nop;                    \
+       nop;                    \
+       nop
+
+#define LOCAL_STI              \
+       or      EPSW_IE,epsw
+
+#define LOCAL_CHANGE_INTR_MASK_LEVEL(level)    \
+       and     ~EPSW_IM,epsw;                  \
+       or      EPSW_IE|(level),epsw
+
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_IRQFLAGS_H */
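
The arch_irqs_disabled_flags() change above tightens the test: interrupts now count as enabled only when EPSW has IE set and the interrupt mask level at 7, instead of comparing the level against MN10300_CLI_LEVEL. A minimal userspace sketch of the new predicate; the EPSW bit values here are illustrative assumptions (IE at bit 11, the three-bit IM field at bits 8-10), not taken from cpu-regs.h:

#include <stdio.h>
#include <stdbool.h>

/* assumed EPSW layout, for illustration only */
#define EPSW_IE   0x00000800UL
#define EPSW_IM   0x00000700UL
#define EPSW_IM_7 0x00000700UL

static bool irqs_disabled_flags(unsigned long flags)
{
        /* mirrors the new test: anything short of IE set with mask
         * level 7 is treated as "interrupts disabled" */
        return (flags & (EPSW_IE | EPSW_IM)) != (EPSW_IE | EPSW_IM_7);
}

int main(void)
{
        printf("%d\n", irqs_disabled_flags(EPSW_IE | EPSW_IM_7));    /* 0: fully enabled */
        printf("%d\n", irqs_disabled_flags(EPSW_IE | 0x00000600UL)); /* 1: level 6 */
        printf("%d\n", irqs_disabled_flags(EPSW_IM_7));              /* 1: IE clear */
        return 0;
}
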
index cb294c244de3395e94792912a186e0e36063635d..c8f6c82672adb46ceefb31f719c72d80bb53d3f3 100644 (file)
 #include <asm/tlbflush.h>
 #include <asm-generic/mm_hooks.h>
 
+#define MMU_CONTEXT_TLBPID_NR          256
 #define MMU_CONTEXT_TLBPID_MASK                0x000000ffUL
 #define MMU_CONTEXT_VERSION_MASK       0xffffff00UL
 #define MMU_CONTEXT_FIRST_VERSION      0x00000100UL
 #define MMU_NO_CONTEXT                 0x00000000UL
-
-extern unsigned long mmu_context_cache[NR_CPUS];
-#define mm_context(mm) (mm->context.tlbpid[smp_processor_id()])
+#define MMU_CONTEXT_TLBPID_LOCK_NR     0
 
 #define enter_lazy_tlb(mm, tsk)        do {} while (0)
 
+static inline void cpu_ran_vm(int cpu, struct mm_struct *mm)
+{
 #ifdef CONFIG_SMP
-#define cpu_ran_vm(cpu, mm) \
-       cpumask_set_cpu((cpu), mm_cpumask(mm))
-#define cpu_maybe_ran_vm(cpu, mm) \
-       cpumask_test_and_set_cpu((cpu), mm_cpumask(mm))
+       cpumask_set_cpu(cpu, mm_cpumask(mm));
+#endif
+}
+
+static inline bool cpu_maybe_ran_vm(int cpu, struct mm_struct *mm)
+{
+#ifdef CONFIG_SMP
+       return cpumask_test_and_set_cpu(cpu, mm_cpumask(mm));
 #else
-#define cpu_ran_vm(cpu, mm)            do {} while (0)
-#define cpu_maybe_ran_vm(cpu, mm)      true
-#endif /* CONFIG_SMP */
+       return true;
+#endif
+}
 
-/*
- * allocate an MMU context
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+extern unsigned long mmu_context_cache[NR_CPUS];
+#define mm_context(mm) (mm->context.tlbpid[smp_processor_id()])
+
+/**
+ * allocate_mmu_context - Allocate storage for the arch-specific MMU data
+ * @mm: The userspace VM context being set up
  */
 static inline unsigned long allocate_mmu_context(struct mm_struct *mm)
 {
@@ -58,7 +68,7 @@ static inline unsigned long allocate_mmu_context(struct mm_struct *mm)
        if (!(mc & MMU_CONTEXT_TLBPID_MASK)) {
                /* we exhausted the TLB PIDs of this version on this CPU, so we
                 * flush this CPU's TLB in its entirety and start new cycle */
-               flush_tlb_all();
+               local_flush_tlb_all();
 
                /* fix the TLB version if needed (we avoid version #0 so as to
                 * distinguish MMU_NO_CONTEXT) */
@@ -100,23 +110,35 @@ static inline int init_new_context(struct task_struct *tsk,
        return 0;
 }
 
-/*
- * destroy context related info for an mm_struct that is about to be put to
- * rest
- */
-#define destroy_context(mm)    do { } while (0)
-
 /*
  * after we have set current->mm to a new value, this activates the context for
  * the new mm so we see the new mappings.
  */
-static inline void activate_context(struct mm_struct *mm, int cpu)
+static inline void activate_context(struct mm_struct *mm)
 {
        PIDR = get_mmu_context(mm) & MMU_CONTEXT_TLBPID_MASK;
 }
+#else  /* CONFIG_MN10300_TLB_USE_PIDR */
 
-/*
- * change between virtual memory sets
+#define init_new_context(tsk, mm)      (0)
+#define activate_context(mm)           local_flush_tlb()
+
+#endif /* CONFIG_MN10300_TLB_USE_PIDR */
+
+/**
+ * destroy_context - Destroy mm context information
+ * @mm: The MM being destroyed.
+ *
+ * Destroy context related info for an mm_struct that is about to be put to
+ * rest
+ */
+#define destroy_context(mm)    do {} while (0)
+
+/**
+ * switch_mm - Change between userspace virtual memory contexts
+ * @prev: The outgoing MM context.
+ * @next: The incoming MM context.
+ * @tsk: The incoming task.
  */
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
@@ -124,11 +146,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
        int cpu = smp_processor_id();
 
        if (prev != next) {
+#ifdef CONFIG_SMP
+               per_cpu(cpu_tlbstate, cpu).active_mm = next;
+#endif
                cpu_ran_vm(cpu, next);
-               activate_context(next, cpu);
                PTBR = (unsigned long) next->pgd;
-       } else if (!cpu_maybe_ran_vm(cpu, next)) {
-               activate_context(next, cpu);
+               activate_context(next);
        }
 }
 
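
The allocate_mmu_context() hunk above works on a combined counter: the low 8 bits are the TLB PID programmed into PIDR, the upper 24 bits a generation number, and exhausting the PID space triggers a local TLB flush plus a version bump that skips version 0 so MMU_NO_CONTEXT stays distinguishable. A single-CPU userspace sketch of that arithmetic, using the masks from the header with the flush stubbed out:

#include <stdio.h>

#define MMU_CONTEXT_TLBPID_MASK    0x000000ffUL
#define MMU_CONTEXT_VERSION_MASK   0xffffff00UL
#define MMU_CONTEXT_FIRST_VERSION  0x00000100UL

static unsigned long mmu_context_cache = MMU_CONTEXT_FIRST_VERSION;

static void local_flush_tlb_all(void)
{
        /* stub: the real function invalidates this CPU's entire TLB */
}

static unsigned long allocate_mmu_context(void)
{
        unsigned long mc = ++mmu_context_cache;

        if (!(mc & MMU_CONTEXT_TLBPID_MASK)) {
                /* all 256 PIDs of this version used: flush and start over */
                local_flush_tlb_all();

                /* avoid version #0 so MMU_NO_CONTEXT stays distinguishable */
                if (!(mc & MMU_CONTEXT_VERSION_MASK))
                        mc = MMU_CONTEXT_FIRST_VERSION;
                mmu_context_cache = mc;
        }
        return mc;
}

int main(void)
{
        for (int i = 0; i < 258; i++) {
                unsigned long c = allocate_mmu_context();
                if (i < 2 || i >= 254)
                        printf("alloc %3d: version=%#10lx pid=%#4lx\n", i,
                               c & MMU_CONTEXT_VERSION_MASK,
                               c & MMU_CONTEXT_TLBPID_MASK);
        }
        return 0;
}
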
index a19f11327cd87ce9dd2c578c1ecf25bf655d083f..146bacf193eac3f6c27536bd4b4b04b73942b60b 100644 (file)
@@ -11,7 +11,6 @@
 #ifndef _ASM_PGALLOC_H
 #define _ASM_PGALLOC_H
 
-#include <asm/processor.h>
 #include <asm/page.h>
 #include <linux/threads.h>
 #include <linux/mm.h>          /* for struct page */
index b049a8bd157774fd9c3f20d208913d04a8761c70..a1e894b5f65b9bab8e51d99010e80172c7696dd3 100644 (file)
@@ -90,46 +90,58 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
  * The vmalloc() routines also leaves a hole of 4kB between each vmalloced
  * area to catch addressing errors.
  */
+#ifndef __ASSEMBLY__
+#define VMALLOC_OFFSET (8UL * 1024 * 1024)
+#define VMALLOC_START  (0x70000000UL)
+#define VMALLOC_END    (0x7C000000UL)
+#else
 #define VMALLOC_OFFSET (8 * 1024 * 1024)
 #define VMALLOC_START  (0x70000000)
 #define VMALLOC_END    (0x7C000000)
+#endif
 
 #ifndef __ASSEMBLY__
 extern pte_t kernel_vmalloc_ptes[(VMALLOC_END - VMALLOC_START) / PAGE_SIZE];
 #endif
 
-/* IPTEL/DPTEL bit assignments */
-#define _PAGE_BIT_VALID                xPTEL_V_BIT
-#define _PAGE_BIT_ACCESSED     xPTEL_UNUSED1_BIT       /* mustn't be loaded into IPTEL/DPTEL */
-#define _PAGE_BIT_NX           xPTEL_UNUSED2_BIT       /* mustn't be loaded into IPTEL/DPTEL */
-#define _PAGE_BIT_CACHE                xPTEL_C_BIT
-#define _PAGE_BIT_PRESENT      xPTEL_PV_BIT
-#define _PAGE_BIT_DIRTY                xPTEL_D_BIT
-#define _PAGE_BIT_GLOBAL       xPTEL_G_BIT
-
-#define _PAGE_VALID            xPTEL_V
-#define _PAGE_ACCESSED         xPTEL_UNUSED1
-#define _PAGE_NX               xPTEL_UNUSED2           /* no-execute bit */
-#define _PAGE_CACHE            xPTEL_C
-#define _PAGE_PRESENT          xPTEL_PV
-#define _PAGE_DIRTY            xPTEL_D
-#define _PAGE_PROT             xPTEL_PR
-#define _PAGE_PROT_RKNU                xPTEL_PR_ROK
-#define _PAGE_PROT_WKNU                xPTEL_PR_RWK
-#define _PAGE_PROT_RKRU                xPTEL_PR_ROK_ROU
-#define _PAGE_PROT_WKRU                xPTEL_PR_RWK_ROU
-#define _PAGE_PROT_WKWU                xPTEL_PR_RWK_RWU
-#define _PAGE_GLOBAL           xPTEL_G
-#define _PAGE_PSE              xPTEL_PS_4Mb            /* 4MB page */
-
-#define _PAGE_FILE             xPTEL_UNUSED1_BIT       /* set:pagecache unset:swap */
-
-#define __PAGE_PROT_UWAUX      0x040
-#define __PAGE_PROT_USER       0x080
-#define __PAGE_PROT_WRITE      0x100
+/* IPTEL2/DPTEL2 bit assignments */
+#define _PAGE_BIT_VALID                xPTEL2_V_BIT
+#define _PAGE_BIT_CACHE                xPTEL2_C_BIT
+#define _PAGE_BIT_PRESENT      xPTEL2_PV_BIT
+#define _PAGE_BIT_DIRTY                xPTEL2_D_BIT
+#define _PAGE_BIT_GLOBAL       xPTEL2_G_BIT
+#define _PAGE_BIT_ACCESSED     xPTEL2_UNUSED1_BIT      /* mustn't be loaded into IPTEL2/DPTEL2 */
+
+#define _PAGE_VALID            xPTEL2_V
+#define _PAGE_CACHE            xPTEL2_C
+#define _PAGE_PRESENT          xPTEL2_PV
+#define _PAGE_DIRTY            xPTEL2_D
+#define _PAGE_PROT             xPTEL2_PR
+#define _PAGE_PROT_RKNU                xPTEL2_PR_ROK
+#define _PAGE_PROT_WKNU                xPTEL2_PR_RWK
+#define _PAGE_PROT_RKRU                xPTEL2_PR_ROK_ROU
+#define _PAGE_PROT_WKRU                xPTEL2_PR_RWK_ROU
+#define _PAGE_PROT_WKWU                xPTEL2_PR_RWK_RWU
+#define _PAGE_GLOBAL           xPTEL2_G
+#define _PAGE_PS_MASK          xPTEL2_PS
+#define _PAGE_PS_4Kb           xPTEL2_PS_4Kb
+#define _PAGE_PS_128Kb         xPTEL2_PS_128Kb
+#define _PAGE_PS_1Kb           xPTEL2_PS_1Kb
+#define _PAGE_PS_4Mb           xPTEL2_PS_4Mb
+#define _PAGE_PSE              xPTEL2_PS_4Mb           /* 4MB page */
+#define _PAGE_CACHE_WT         xPTEL2_CWT
+#define _PAGE_ACCESSED         xPTEL2_UNUSED1
+#define _PAGE_NX               0                       /* no-execute bit */
+
+/* If _PAGE_VALID is clear, we use these: */
+#define _PAGE_FILE             xPTEL2_C        /* set:pagecache unset:swap */
+#define _PAGE_PROTNONE         0x000           /* If not present */
+
+#define __PAGE_PROT_UWAUX      0x010
+#define __PAGE_PROT_USER       0x020
+#define __PAGE_PROT_WRITE      0x040
 
 #define _PAGE_PRESENTV         (_PAGE_PRESENT|_PAGE_VALID)
-#define _PAGE_PROTNONE         0x000   /* If not present */
 
 #ifndef __ASSEMBLY__
 
@@ -170,6 +182,9 @@ extern pte_t kernel_vmalloc_ptes[(VMALLOC_END - VMALLOC_START) / PAGE_SIZE];
 #define PAGE_KERNEL_LARGE      __pgprot(__PAGE_KERNEL_LARGE)
 #define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
 
+#define __PAGE_USERIO          (__PAGE_KERNEL_BASE | _PAGE_PROT_WKWU | _PAGE_NX)
+#define PAGE_USERIO            __pgprot(__PAGE_USERIO)
+
 /*
  * Whilst the MN10300 can do page protection for execute (given separate data
  * and insn TLBs), we are not supporting it at the moment. Write permission,
@@ -323,11 +338,7 @@ static inline int pte_exec_kernel(pte_t pte)
        return 1;
 }
 
-/*
- * Bits 0 and 1 are taken, split up the 29 bits of offset
- * into this range:
- */
-#define PTE_FILE_MAX_BITS      29
+#define PTE_FILE_MAX_BITS      30
 
 #define pte_to_pgoff(pte)      (pte_val(pte) >> 2)
 #define pgoff_to_pte(off)      __pte((off) << 2 | _PAGE_FILE)
@@ -373,8 +384,13 @@ static inline void ptep_mkdirty(pte_t *ptep)
  * Macro to mark a page protection value as "uncacheable".  On processors which
  * do not support it, this is a no-op.
  */
-#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) _PAGE_CACHE)
+#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHE)
 
+/*
+ * Macro to mark a page protection value as "Write-Through".
+ * On processors which do not support it, this is a no-op.
+ */
+#define pgprot_through(prot)   __pgprot(pgprot_val(prot) | _PAGE_CACHE_WT)
 
 /*
  * Conversion functions: convert a page and protection to a page entry,
index f7d4b0d285e8dddb835781ac071898ff5d34dfe9..4c1b5cc14c190edc529c05844360f9fd52b1bb96 100644 (file)
 #ifndef _ASM_PROCESSOR_H
 #define _ASM_PROCESSOR_H
 
+#include <linux/threads.h>
+#include <linux/thread_info.h>
 #include <asm/page.h>
 #include <asm/ptrace.h>
 #include <asm/cpu-regs.h>
-#include <linux/threads.h>
+#include <asm/uaccess.h>
+#include <asm/current.h>
 
 /* Forward declaration, a strange C thing */
 struct task_struct;
@@ -33,6 +36,8 @@ struct mm_struct;
        __pc;                                   \
 })
 
+extern void get_mem_info(unsigned long *mem_base, unsigned long *mem_size);
+
 extern void show_registers(struct pt_regs *regs);
 
 /*
@@ -43,17 +48,22 @@ extern void show_registers(struct pt_regs *regs);
 
 struct mn10300_cpuinfo {
        int             type;
-       unsigned long   loops_per_sec;
+       unsigned long   loops_per_jiffy;
        char            hard_math;
-       unsigned long   *pgd_quick;
-       unsigned long   *pte_quick;
-       unsigned long   pgtable_cache_sz;
 };
 
 extern struct mn10300_cpuinfo boot_cpu_data;
 
+#ifdef CONFIG_SMP
+#if CONFIG_NR_CPUS < 2 || CONFIG_NR_CPUS > 8
+# error Sorry, NR_CPUS should be 2 to 8
+#endif
+extern struct mn10300_cpuinfo cpu_data[];
+#define current_cpu_data cpu_data[smp_processor_id()]
+#else  /* CONFIG_SMP */
 #define cpu_data &boot_cpu_data
 #define current_cpu_data boot_cpu_data
+#endif /* CONFIG_SMP */
 
 extern void identify_cpu(struct mn10300_cpuinfo *);
 extern void print_cpu_info(struct mn10300_cpuinfo *);
@@ -76,10 +86,6 @@ extern void dodgy_tsc(void);
  */
 #define TASK_UNMAPPED_BASE     0x30000000
 
-typedef struct {
-       unsigned long   seg;
-} mm_segment_t;
-
 struct fpu_state_struct {
        unsigned long   fs[32];         /* fpu registers */
        unsigned long   fpcr;           /* fpu control register */
@@ -92,20 +98,19 @@ struct thread_struct {
        unsigned long           a3;             /* kernel FP */
        unsigned long           wchan;
        unsigned long           usp;
-       struct pt_regs          *__frame;
        unsigned long           fpu_flags;
 #define THREAD_USING_FPU       0x00000001      /* T if this task is using the FPU */
+#define THREAD_HAS_FPU         0x00000002      /* T if this task owns the FPU right now */
        struct fpu_state_struct fpu_state;
 };
 
-#define INIT_THREAD                            \
-{                                              \
-       .uregs          = init_uregs,           \
-       .pc             = 0,                    \
-       .sp             = 0,                    \
-       .a3             = 0,                    \
-       .wchan          = 0,                    \
-       .__frame        = NULL,                 \
+#define INIT_THREAD            \
+{                              \
+       .uregs  = init_uregs,   \
+       .pc     = 0,            \
+       .sp     = 0,            \
+       .a3     = 0,            \
+       .wchan  = 0,            \
 }
 
 #define INIT_MMAP \
@@ -117,13 +122,20 @@ struct thread_struct {
  * - need to discard the frame stacked by the kernel thread invoking the execve
  *   syscall (see RESTORE_ALL macro)
  */
-#define start_thread(regs, new_pc, new_sp) do {                \
-       set_fs(USER_DS);                                \
-       __frame = current->thread.uregs;                \
-       __frame->epsw = EPSW_nSL | EPSW_IE | EPSW_IM;   \
-       __frame->pc = new_pc;                           \
-       __frame->sp = new_sp;                           \
-} while (0)
+static inline void start_thread(struct pt_regs *regs,
+                               unsigned long new_pc, unsigned long new_sp)
+{
+       struct thread_info *ti = current_thread_info();
+       struct pt_regs *frame0;
+       set_fs(USER_DS);
+
+       frame0 = thread_info_to_uregs(ti);
+       frame0->epsw = EPSW_nSL | EPSW_IE | EPSW_IM;
+       frame0->pc = new_pc;
+       frame0->sp = new_sp;
+       ti->frame = frame0;
+}
+
 
 /* Free all resources held by a thread. */
 extern void release_thread(struct task_struct *);
@@ -157,7 +169,7 @@ unsigned long get_wchan(struct task_struct *p);
 
 static inline void prefetch(const void *x)
 {
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
 #ifdef CONFIG_MN10300_PROC_MN103E010
        asm volatile ("nop; nop; dcpf (%0)" : : "r"(x));
 #else
@@ -168,7 +180,7 @@ static inline void prefetch(const void *x)
 
 static inline void prefetchw(const void *x)
 {
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
 #ifdef CONFIG_MN10300_PROC_MN103E010
        asm volatile ("nop; nop; dcpf (%0)" : : "r"(x));
 #else
index 7c2e911052b64699a55d8d23517557db5eb5ae41..b6961811d4458d05a689908ac90686f13420c270 100644 (file)
@@ -40,7 +40,6 @@
 #define        PT_PC           26
 #define NR_PTREGS      27
 
-#ifndef __ASSEMBLY__
 /*
  * This defines the way registers are stored in the event of an exception
  * - the strange order is due to the MOVM instruction
@@ -75,7 +74,6 @@ struct pt_regs {
        unsigned long           epsw;
        unsigned long           pc;
 };
-#endif
 
 /* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
 #define PTRACE_GETREGS            12
@@ -86,12 +84,7 @@ struct pt_regs {
 /* options set using PTRACE_SETOPTIONS */
 #define PTRACE_O_TRACESYSGOOD     0x00000001
 
-#if defined(__KERNEL__)
-
-extern struct pt_regs *__frame;                /* current frame pointer */
-
-#if !defined(__ASSEMBLY__)
-struct task_struct;
+#ifdef __KERNEL__
 
 #define user_mode(regs)                        (((regs)->epsw & EPSW_nSL) == EPSW_nSL)
 #define instruction_pointer(regs)      ((regs)->pc)
@@ -100,9 +93,7 @@ extern void show_regs(struct pt_regs *);
 
 #define arch_has_single_step() (1)
 
-#endif  /*  !__ASSEMBLY  */
-
 #define profile_pc(regs) ((regs)->pc)
 
-#endif  /*  __KERNEL__  */
+#endif /* __KERNEL__  */
 #endif /* _ASM_PTRACE_H */
index 174523d501323e313a5c7d1e2a5f684e0ddc0bab..10c7502a113fbce1cfeb07b56eec061caa1523c8 100644 (file)
@@ -50,7 +50,7 @@ static inline void mn10300_proc_hard_reset(void)
        RSTCTR |= RSTCTR_CHIPRST;
 }
 
-extern unsigned int watchdog_alert_counter;
+extern unsigned int watchdog_alert_counter[];
 
 extern void watchdog_go(void);
 extern asmlinkage void watchdog_handler(void);
index c295194cc70330b3da8a34fe0db05e2f34a76044..6c14bb1d0d9b8c20f45fff9279052eb43ab113e2 100644 (file)
 
 #include <linux/init.h>
 
-extern void check_rtc_time(void);
 extern void __init calibrate_clock(void);
-extern unsigned long __init get_initial_rtc_time(void);
 
 #else /* !CONFIG_MN10300_RTC */
 
-static inline void check_rtc_time(void)
-{
-}
-
 static inline void calibrate_clock(void)
 {
 }
 
-static inline unsigned long get_initial_rtc_time(void)
-{
-       return 0;
-}
-
 #endif /* !CONFIG_MN10300_RTC */
 
 #include <asm-generic/rtc.h>
diff --git a/arch/mn10300/include/asm/rwlock.h b/arch/mn10300/include/asm/rwlock.h
new file mode 100644 (file)
index 0000000..6d594d4
--- /dev/null
@@ -0,0 +1,125 @@
+/*
+ * Helpers used by both rw spinlocks and rw semaphores.
+ *
+ * Based in part on code from semaphore.h and
+ * spinlock.h Copyright 1996 Linus Torvalds.
+ *
+ * Copyright 1999 Red Hat, Inc.
+ *
+ * Written by Benjamin LaHaise.
+ *
+ * Modified by Matsushita Electric Industrial Co., Ltd.
+ * Modifications:
+ * 13-Nov-2006 MEI Temporarily delete lock functions for SMP support.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+#ifndef _ASM_RWLOCK_H
+#define _ASM_RWLOCK_H
+
+#define RW_LOCK_BIAS            0x01000000
+
+#ifndef CONFIG_SMP
+
+typedef struct { unsigned long a[100]; } __dummy_lock_t;
+#define __dummy_lock(lock) (*(__dummy_lock_t *)(lock))
+
+#define RW_LOCK_BIAS_STR       "0x01000000"
+
+#define __build_read_lock_ptr(rw, helper)                              \
+       do {                                                            \
+               asm volatile(                                           \
+                       "       mov     (%0),d3                 \n"     \
+                       "       sub     1,d3                    \n"     \
+                       "       mov     d3,(%0)                 \n"     \
+                       "       blt     1f                      \n"     \
+                       "       bra     2f                      \n"     \
+                       "1:     jmp     3f                      \n"     \
+                       "2:                                     \n"     \
+                       "       .section .text.lock,\"ax\"      \n"     \
+                       "3:     call    "helper"[],0            \n"     \
+                       "       jmp     2b                      \n"     \
+                       "       .previous"                              \
+                       :                                               \
+                       : "d" (rw)                                      \
+                       : "memory", "d3", "cc");                        \
+       } while (0)
+
+#define __build_read_lock_const(rw, helper)                            \
+       do {                                                            \
+               asm volatile(                                           \
+                       "       mov     (%0),d3                 \n"     \
+                       "       sub     1,d3                    \n"     \
+                       "       mov     d3,(%0)                 \n"     \
+                       "       blt     1f                      \n"     \
+                       "       bra     2f                      \n"     \
+                       "1:     jmp     3f                      \n"     \
+                       "2:                                     \n"     \
+                       "       .section .text.lock,\"ax\"      \n"     \
+                       "3:     call    "helper"[],0            \n"     \
+                       "       jmp     2b                      \n"     \
+                       "       .previous"                              \
+                       :                                               \
+                       : "d" (rw)                                      \
+                       : "memory", "d3", "cc");                        \
+       } while (0)
+
+#define __build_read_lock(rw, helper) \
+       do {                                                            \
+               if (__builtin_constant_p(rw))                           \
+                       __build_read_lock_const(rw, helper);            \
+               else                                                    \
+                       __build_read_lock_ptr(rw, helper);              \
+       } while (0)
+
+#define __build_write_lock_ptr(rw, helper)                             \
+       do {                                                            \
+               asm volatile(                                           \
+                       "       mov     (%0),d3                 \n"     \
+                       "       sub     1,d3                    \n"     \
+                       "       mov     d3,(%0)                 \n"     \
+                       "       blt     1f                      \n"     \
+                       "       bra     2f                      \n"     \
+                       "1:     jmp     3f                      \n"     \
+                       "2:                                     \n"     \
+                       "       .section .text.lock,\"ax\"      \n"     \
+                       "3:     call    "helper"[],0            \n"     \
+                       "       jmp     2b                      \n"     \
+                       "       .previous"                              \
+                       :                                               \
+                       : "d" (rw)                                      \
+                       : "memory", "d3", "cc");                        \
+       } while (0)
+
+#define __build_write_lock_const(rw, helper)                           \
+       do {                                                            \
+               asm volatile(                                           \
+                       "       mov     (%0),d3                 \n"     \
+                       "       sub     1,d3                    \n"     \
+                       "       mov     d3,(%0)                 \n"     \
+                       "       blt     1f                      \n"     \
+                       "       bra     2f                      \n"     \
+                       "1:     jmp     3f                      \n"     \
+                       "2:                                     \n"     \
+                       "       .section .text.lock,\"ax\"      \n"     \
+                       "3:     call    "helper"[],0            \n"     \
+                       "       jmp     2b                      \n"     \
+                       "       .previous"                              \
+                       :                                               \
+                       : "d" (rw)                                      \
+                       : "memory", "d3", "cc");                        \
+       } while (0)
+
+#define __build_write_lock(rw, helper)                                 \
+       do {                                                            \
+               if (__builtin_constant_p(rw))                           \
+                       __build_write_lock_const(rw, helper);           \
+               else                                                    \
+                       __build_write_lock_ptr(rw, helper);             \
+       } while (0)
+
+#endif /* CONFIG_SMP */
+#endif /* _ASM_RWLOCK_H */
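
These helpers all revolve around RW_LOCK_BIAS: the lock word starts at the bias, readers subtract one, writers subtract the entire bias, so the count reads RW_LOCK_BIAS when free, positive but smaller while read-held, and zero or negative under a writer. A single-threaded, non-atomic sketch of that accounting (the real acquire/release paths appear in the spinlock.h diff below):

#include <stdio.h>
#include <stdbool.h>

#define RW_LOCK_BIAS 0x01000000

/* single-threaded model: the kernel performs these updates atomically */
static long count = RW_LOCK_BIAS;

static bool read_trylock(void)
{
        if (--count >= 0)          /* still non-negative: no writer */
                return true;
        count++;                   /* undo: a writer holds or wants it */
        return false;
}

static void read_unlock(void)  { count++; }

static bool write_trylock(void)
{
        if ((count -= RW_LOCK_BIAS) == 0)  /* exactly zero: it was free */
                return true;
        count += RW_LOCK_BIAS;             /* undo: readers or writer present */
        return false;
}

static void write_unlock(void) { count += RW_LOCK_BIAS; }

int main(void)
{
        read_trylock();
        printf("write vs reader: %d\n", write_trylock()); /* 0 */
        read_unlock();
        printf("write when free: %d\n", write_trylock()); /* 1 */
        printf("read vs writer:  %d\n", read_trylock());  /* 0 */
        write_unlock();
        return 0;
}
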
index 6498469e93ac33390d7b2031c4a6781e9eb1c6cd..8320cda32f5aa04ed1cdad0a70dc182cc417c841 100644 (file)
 /* serial port 0 */
 #define        SC0CTR                  __SYSREG(0xd4002000, u16)       /* control reg */
 #define        SC01CTR_CK              0x0007  /* clock source select */
-#define        SC0CTR_CK_TM8UFLOW_8    0x0000  /* - 1/8 timer 8 underflow (serial port 0 only) */
-#define        SC1CTR_CK_TM9UFLOW_8    0x0000  /* - 1/8 timer 9 underflow (serial port 1 only) */
 #define        SC01CTR_CK_IOCLK_8      0x0001  /* - 1/8 IOCLK */
 #define        SC01CTR_CK_IOCLK_32     0x0002  /* - 1/32 IOCLK */
+#define        SC01CTR_CK_EXTERN_8     0x0006  /* - 1/8 external clock */
+#define        SC01CTR_CK_EXTERN       0x0007  /* - external clock */
+#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
+#define        SC0CTR_CK_TM8UFLOW_8    0x0000  /* - 1/8 timer 8 underflow (serial port 0 only) */
 #define        SC0CTR_CK_TM2UFLOW_2    0x0003  /* - 1/2 timer 2 underflow (serial port 0 only) */
-#define        SC1CTR_CK_TM3UFLOW_2    0x0003  /* - 1/2 timer 3 underflow (serial port 1 only) */
-#define        SC0CTR_CK_TM0UFLOW_8    0x0004  /* - 1/8 timer 1 underflow (serial port 0 only) */
-#define        SC1CTR_CK_TM1UFLOW_8    0x0004  /* - 1/8 timer 2 underflow (serial port 1 only) */
+#define        SC0CTR_CK_TM0UFLOW_8    0x0004  /* - 1/8 timer 0 underflow (serial port 0 only) */
 #define        SC0CTR_CK_TM2UFLOW_8    0x0005  /* - 1/8 timer 2 underflow (serial port 0 only) */
+#define        SC1CTR_CK_TM9UFLOW_8    0x0000  /* - 1/8 timer 9 underflow (serial port 1 only) */
+#define        SC1CTR_CK_TM3UFLOW_2    0x0003  /* - 1/2 timer 3 underflow (serial port 1 only) */
+#define        SC1CTR_CK_TM1UFLOW_8    0x0004  /* - 1/8 timer 1 underflow (serial port 1 only) */
 #define        SC1CTR_CK_TM3UFLOW_8    0x0005  /* - 1/8 timer 3 underflow (serial port 1 only) */
-#define        SC01CTR_CK_EXTERN_8     0x0006  /* - 1/8 external closk */
-#define        SC01CTR_CK_EXTERN       0x0007  /* - external closk */
+#else  /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+#define        SC0CTR_CK_TM8UFLOW_8    0x0000  /* - 1/8 timer 8 underflow (serial port 0 only) */
+#define        SC0CTR_CK_TM0UFLOW_8    0x0004  /* - 1/8 timer 0 underflow (serial port 0 only) */
+#define        SC0CTR_CK_TM2UFLOW_8    0x0005  /* - 1/8 timer 2 underflow (serial port 0 only) */
+#define        SC1CTR_CK_TM12UFLOW_8   0x0000  /* - 1/8 timer 12 underflow (serial port 1 only) */
+#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
 #define        SC01CTR_STB             0x0008  /* stop bit select */
 #define        SC01CTR_STB_1BIT        0x0000  /* - 1 stop bit */
 #define        SC01CTR_STB_2BIT        0x0008  /* - 2 stop bits */
 
 /* serial port 2 */
 #define        SC2CTR                  __SYSREG(0xd4002020, u16)       /* control reg */
+#ifdef CONFIG_AM33_2
 #define        SC2CTR_CK               0x0003  /* clock source select */
 #define        SC2CTR_CK_TM10UFLOW     0x0000  /* - timer 10 underflow */
 #define        SC2CTR_CK_TM2UFLOW      0x0001  /* - timer 2 underflow */
 #define        SC2CTR_CK_EXTERN        0x0002  /* - external clock */
 #define        SC2CTR_CK_TM3UFLOW      0x0003  /* - timer 3 underflow */
+#else  /* CONFIG_AM33_2 */
+#define        SC2CTR_CK               0x0007  /* clock source select */
+#define        SC2CTR_CK_TM9UFLOW_8    0x0000  /* - 1/8 timer 9 underflow */
+#define        SC2CTR_CK_IOCLK_8       0x0001  /* - 1/8 IOCLK */
+#define        SC2CTR_CK_IOCLK_32      0x0002  /* - 1/32 IOCLK */
+#define        SC2CTR_CK_TM3UFLOW_2    0x0003  /* - 1/2 timer 3 underflow */
+#define        SC2CTR_CK_TM1UFLOW_8    0x0004  /* - 1/8 timer 1 underflow */
+#define        SC2CTR_CK_TM3UFLOW_8    0x0005  /* - 1/8 timer 3 underflow */
+#define        SC2CTR_CK_EXTERN_8      0x0006  /* - 1/8 external clock */
+#define        SC2CTR_CK_EXTERN        0x0007  /* - external clock */
+#endif /* CONFIG_AM33_2 */
 #define        SC2CTR_STB              0x0008  /* stop bit select */
 #define        SC2CTR_STB_1BIT         0x0000  /* - 1 stop bit */
 #define        SC2CTR_STB_2BIT         0x0008  /* - 2 stop bits */
 #define SC2ICR_RES             0x04    /* receive error select */
 #define SC2ICR_RI              0x01    /* receive interrupt cause */
 
-#define        SC2TXB                  __SYSREG(0xd4002018, u8)        /* transmit buffer reg */
-#define        SC2RXB                  __SYSREG(0xd4002019, u8)        /* receive buffer reg */
-#define        SC2STR                  __SYSREG(0xd400201c, u8)        /* status reg */
+#define        SC2TXB                  __SYSREG(0xd4002028, u8)        /* transmit buffer reg */
+#define        SC2RXB                  __SYSREG(0xd4002029, u8)        /* receive buffer reg */
+
+#ifdef CONFIG_AM33_2
+#define        SC2STR                  __SYSREG(0xd400202c, u8)        /* status reg */
+#else  /* CONFIG_AM33_2 */
+#define        SC2STR                  __SYSREG(0xd400202c, u16)       /* status reg */
+#endif /* CONFIG_AM33_2 */
 #define SC2STR_OEF             0x0001  /* overrun error found */
 #define SC2STR_PEF             0x0002  /* parity error found */
 #define SC2STR_FEF             0x0004  /* framing error found */
 #define SC2STR_RXF             0x0040  /* receive status */
 #define SC2STR_TXF             0x0080  /* transmit status */
 
+#ifdef CONFIG_AM33_2
 #define        SC2TIM                  __SYSREG(0xd400202d, u8)        /* status reg */
+#endif
 
+#ifdef CONFIG_AM33_2
 #define SC2RXIRQ               24      /* serial 2 Receive IRQ */
 #define SC2TXIRQ               25      /* serial 2 Transmit IRQ */
+#else  /* CONFIG_AM33_2 */
+#define SC2RXIRQ               68      /* serial 2 Receive IRQ */
+#define SC2TXIRQ               69      /* serial 2 Transmit IRQ */
+#endif /* CONFIG_AM33_2 */
 
 #define        SC2RXICR                GxICR(SC2RXIRQ) /* serial 2 receive intr ctrl reg */
 #define        SC2TXICR                GxICR(SC2TXIRQ) /* serial 2 transmit intr ctrl reg */
index a29445cddd6fc559f631ba9a571aa4fcbbd0dde6..23a79929359943aef854b7ec332dec679efb18e6 100644 (file)
@@ -9,10 +9,8 @@
  * 2 of the Licence, or (at your option) any later version.
  */
 
-/*
- * The ASB2305 has an 18.432 MHz clock the UART
- */
-#define BASE_BAUD      (18432000 / 16)
+#ifndef _ASM_SERIAL_H
+#define _ASM_SERIAL_H
 
 /* Standard COM flags (except for COM4, because of the 8514 problem) */
 #ifdef CONFIG_SERIAL_DETECT_IRQ
@@ -34,3 +32,5 @@
 #endif
 
 #include <unit/serial.h>
+
+#endif /* _ASM_SERIAL_H */
index 4eb8c61b7dab42eef2c62a091de76b7edd72cdc6..a3930e43a958d1e4077c028afd94b122963032cb 100644 (file)
@@ -3,6 +3,16 @@
  * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
  *
+ * Modified by Matsushita Electric Industrial Co., Ltd.
+ * Modifications:
+ *  13-Nov-2006 MEI Define IPI-IRQ number and add inline/macro function
+ *                  for SMP support.
+ *  22-Jan-2007 MEI Add the define related to SMP_BOOT_IRQ.
+ *  23-Feb-2007 MEI Add the define related to SMP icahce invalidate.
+ *  23-Jun-2008 MEI Delete INTC_IPI.
+ *  22-Jul-2008 MEI Add smp_nmi_call_function and related defines.
+ *  04-Aug-2008 MEI Delete USE_DOIRQ_CACHE_IPI.
+ *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public Licence
  * as published by the Free Software Foundation; either version
 #ifndef _ASM_SMP_H
 #define _ASM_SMP_H
 
-#ifdef CONFIG_SMP
-#error SMP not yet supported for MN10300
+#ifndef __ASSEMBLY__
+#include <linux/threads.h>
+#include <linux/cpumask.h>
 #endif
 
+#ifdef CONFIG_SMP
+#include <proc/smp-regs.h>
+
+#define RESCHEDULE_IPI         63
+#define CALL_FUNC_SINGLE_IPI   192
+#define LOCAL_TIMER_IPI                193
+#define FLUSH_CACHE_IPI                194
+#define CALL_FUNCTION_NMI_IPI  195
+#define GDB_NMI_IPI            196
+
+#define SMP_BOOT_IRQ           195
+
+#define RESCHEDULE_GxICR_LV    GxICR_LEVEL_6
+#define CALL_FUNCTION_GxICR_LV GxICR_LEVEL_4
+#define LOCAL_TIMER_GxICR_LV   GxICR_LEVEL_4
+#define FLUSH_CACHE_GxICR_LV   GxICR_LEVEL_0
+#define SMP_BOOT_GxICR_LV      GxICR_LEVEL_0
+
+#define TIME_OUT_COUNT_BOOT_IPI        100
+#define DELAY_TIME_BOOT_IPI    75000
+
+
+#ifndef __ASSEMBLY__
+
+/**
+ * raw_smp_processor_id - Determine the raw CPU ID of the CPU running it
+ *
+ * What we really want to do is to use the CPUID hardware CPU register to get
+ * this information, but accesses to that aren't cached, and run at system bus
+ * speed, not CPU speed.  A copy of this value is, however, stored in the
+ * thread_info struct, and that can be cached.
+ *
+ * An alternate way of dealing with this could be to use the EPSW.S bits to
+ * cache this information for systems with up to four CPUs.
+ */
+#if 0
+#define raw_smp_processor_id() (CPUID)
+#else
+#define raw_smp_processor_id() (current_thread_info()->cpu)
 #endif
+
+static inline int cpu_logical_map(int cpu)
+{
+       return cpu;
+}
+
+static inline int cpu_number_map(int cpu)
+{
+       return cpu;
+}
+
+
+extern cpumask_t cpu_boot_map;
+
+extern void smp_init_cpus(void);
+extern void smp_cache_interrupt(void);
+extern void send_IPI_allbutself(int irq);
+extern int smp_nmi_call_function(smp_call_func_t func, void *info, int wait);
+
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+
+#ifdef CONFIG_HOTPLUG_CPU
+extern int __cpu_disable(void);
+extern void __cpu_die(unsigned int cpu);
+#endif /* CONFIG_HOTPLUG_CPU */
+
+#endif /* __ASSEMBLY__ */
+#else /* CONFIG_SMP */
+#ifndef __ASSEMBLY__
+
+static inline void smp_init_cpus(void) {}
+
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_SMP */
+
+#endif /* _ASM_SMP_H */
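
The raw_smp_processor_id() comment above spells out the trade-off: the hardware CPUID register is authoritative but uncached and bus-speed slow, so the kernel instead reads a copy cached in thread_info. A trivial userspace sketch of that caching pattern (the struct layout and "current" lookup are illustrative assumptions):

#include <stdio.h>

/* illustrative stand-in for the kernel's per-task thread_info */
struct thread_info {
        int cpu;        /* copy of the CPU ID, written once at task setup */
};

static struct thread_info *current_ti;    /* assumed "current" lookup */

/* cheap: one cacheable memory read, as in the real macro */
#define raw_smp_processor_id() (current_ti->cpu)

int main(void)
{
        struct thread_info ti = { .cpu = 1 }; /* would come from CPUID once */
        current_ti = &ti;
        printf("running on cpu %d\n", raw_smp_processor_id());
        return 0;
}
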
diff --git a/arch/mn10300/include/asm/smsc911x.h b/arch/mn10300/include/asm/smsc911x.h
new file mode 100644 (file)
index 0000000..2fcd108
--- /dev/null
@@ -0,0 +1 @@
+#include <unit/smsc911x.h>
index 4bf9c8b169e082dfe220e316bd70a7e40591a3f6..93429154e898e72c1f3e39a4e6ac71f8b7cc1ed5 100644 (file)
 #ifndef _ASM_SPINLOCK_H
 #define _ASM_SPINLOCK_H
 
-#error SMP spinlocks not implemented for MN10300
+#include <asm/atomic.h>
+#include <asm/rwlock.h>
+#include <asm/page.h>
 
+/*
+ * Simple spin lock operations.  There are two variants, one clears IRQ's
+ * on the local processor, one does not.
+ *
+ * We make no fairness assumptions. They have a cost.
+ */
+
+#define arch_spin_is_locked(x) (*(volatile signed char *)(&(x)->slock) != 0)
+#define arch_spin_unlock_wait(x) do { barrier(); } while (arch_spin_is_locked(x))
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+       asm volatile(
+               "       bclr    1,(0,%0)        \n"
+               :
+               : "a"(&lock->slock)
+               : "memory", "cc");
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+       int ret;
+
+       asm volatile(
+               "       mov     1,%0            \n"
+               "       bset    %0,(%1)         \n"
+               "       bne     1f              \n"
+               "       clr     %0              \n"
+               "1:     xor     1,%0            \n"
+               : "=d"(ret)
+               : "a"(&lock->slock)
+               : "memory", "cc");
+
+       return ret;
+}
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+       asm volatile(
+               "1:     bset    1,(0,%0)        \n"
+               "       bne     1b              \n"
+               :
+               : "a"(&lock->slock)
+               : "memory", "cc");
+}
+
+static inline void arch_spin_lock_flags(arch_spinlock_t *lock,
+                                        unsigned long flags)
+{
+       int temp;
+
+       asm volatile(
+               "1:     bset    1,(0,%2)        \n"
+               "       beq     3f              \n"
+               "       mov     %1,epsw         \n"
+               "2:     mov     (0,%2),%0       \n"
+               "       or      %0,%0           \n"
+               "       bne     2b              \n"
+               "       mov     %3,%0           \n"
+               "       mov     %0,epsw         \n"
+               "       nop                     \n"
+               "       nop                     \n"
+               "       bra     1b\n"
+               "3:                             \n"
+               : "=&d" (temp)
+               : "d" (flags), "a"(&lock->slock), "i"(EPSW_IE | MN10300_CLI_LEVEL)
+               : "memory", "cc");
+}
+
+#ifdef __KERNEL__
+
+/*
+ * Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * NOTE! it is quite common to have readers in interrupts
+ * but no interrupt writers. For those circumstances we
+ * can "mix" irq-safe locks - any writer needs to get a
+ * irq-safe write-lock, but readers can get non-irqsafe
+ * read-locks.
+ */
+
+/**
+ * read_can_lock - would read_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define arch_read_can_lock(x) ((int)(x)->lock > 0)
+
+/**
+ * write_can_lock - would write_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
+
+/*
+ * On mn10300, we implement read-write locks as a 32-bit counter
+ * with the high bit (sign) being the "contended" bit.
+ */
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+       __build_read_lock(rw, "__read_lock_failed");
+#else
+       {
+               atomic_t *count = (atomic_t *)rw;
+               while (atomic_dec_return(count) < 0)
+                       atomic_inc(count);
+       }
+#endif
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+       __build_write_lock(rw, "__write_lock_failed");
+#else
+       {
+               atomic_t *count = (atomic_t *)rw;
+               while (!atomic_sub_and_test(RW_LOCK_BIAS, count))
+                       atomic_add(RW_LOCK_BIAS, count);
+       }
+#endif
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+       __build_read_unlock(rw);
+#else
+       {
+               atomic_t *count = (atomic_t *)rw;
+               atomic_inc(count);
+       }
+#endif
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+       __build_write_unlock(rw);
+#else
+       {
+               atomic_t *count = (atomic_t *)rw;
+               atomic_add(RW_LOCK_BIAS, count);
+       }
+#endif
+}
+
+static inline int arch_read_trylock(arch_rwlock_t *lock)
+{
+       atomic_t *count = (atomic_t *)lock;
+       atomic_dec(count);
+       if (atomic_read(count) >= 0)
+               return 1;
+       atomic_inc(count);
+       return 0;
+}
+
+static inline int arch_write_trylock(arch_rwlock_t *lock)
+{
+       atomic_t *count = (atomic_t *)lock;
+       if (atomic_sub_and_test(RW_LOCK_BIAS, count))
+               return 1;
+       atomic_add(RW_LOCK_BIAS, count);
+       return 0;
+}
+
+#define arch_read_lock_flags(lock, flags)  arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
+#define _raw_spin_relax(lock)  cpu_relax()
+#define _raw_read_relax(lock)  cpu_relax()
+#define _raw_write_relax(lock) cpu_relax()
+
+#endif /* __KERNEL__ */
 #endif /* _ASM_SPINLOCK_H */
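
arch_spin_trylock() above leans on MN10300's bset, which sets the lock bit and reports through the condition flags whether it was already set; the trailing xor flips that into the conventional 1-on-success return. The same control flow in plain C, non-atomic and purely illustrative:

#include <stdio.h>

static int slock;  /* 0 = free; lowest bit set = held, as in the asm */

static int spin_trylock_model(void)
{
        int was_set = slock & 1;  /* what bset reports via the flags */
        slock |= 1;               /* bset: set the bit regardless */
        return was_set ^ 1;       /* xor 1: 1 = acquired, 0 = already held */
}

static void spin_unlock_model(void)
{
        slock &= ~1;              /* bclr, as in arch_spin_unlock() */
}

int main(void)
{
        printf("first try:    %d\n", spin_trylock_model()); /* 1 */
        printf("second try:   %d\n", spin_trylock_model()); /* 0 */
        spin_unlock_model();
        printf("after unlock: %d\n", spin_trylock_model()); /* 1 */
        return 0;
}
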
diff --git a/arch/mn10300/include/asm/spinlock_types.h b/arch/mn10300/include/asm/spinlock_types.h
new file mode 100644 (file)
index 0000000..653dc51
--- /dev/null
@@ -0,0 +1,20 @@
+#ifndef _ASM_SPINLOCK_TYPES_H
+#define _ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct arch_spinlock {
+       unsigned int slock;
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED      { 0 }
+
+typedef struct {
+       unsigned int lock;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED                { RW_LOCK_BIAS }
+
+#endif /* _ASM_SPINLOCK_TYPES_H */
index 9f7c7e17c01ee7d8560a568dd4f2d98636c7f130..8ff3e5aaca4124cba71e8baade685253e551c94c 100644 (file)
 #define _ASM_SYSTEM_H
 
 #include <asm/cpu-regs.h>
+#include <asm/intctl-regs.h>
 
 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
 
 #include <linux/kernel.h>
 #include <linux/irqflags.h>
+#include <asm/atomic.h>
+
+#if !defined(CONFIG_LAZY_SAVE_FPU)
+struct fpu_state_struct;
+extern asmlinkage void fpu_save(struct fpu_state_struct *);
+#define switch_fpu(prev, next)                                         \
+       do {                                                            \
+               if ((prev)->thread.fpu_flags & THREAD_HAS_FPU) {        \
+                       (prev)->thread.fpu_flags &= ~THREAD_HAS_FPU;    \
+                       (prev)->thread.uregs->epsw &= ~EPSW_FE;         \
+                       fpu_save(&(prev)->thread.fpu_state);            \
+               }                                                       \
+       } while (0)
+#else
+#define switch_fpu(prev, next) do {} while (0)
+#endif
 
 struct task_struct;
 struct thread_struct;
@@ -30,6 +47,7 @@ struct task_struct *__switch_to(struct thread_struct *prev,
 /* context switching is now performed out-of-line in switch_to.S */
 #define switch_to(prev, next, last)                                    \
 do {                                                                   \
+       switch_fpu(prev, next);                                         \
        current->thread.wchan = (u_long) __builtin_return_address(0);   \
        (last) = __switch_to(&(prev)->thread, &(next)->thread, (prev)); \
        mb();                                                           \
@@ -40,8 +58,6 @@ do {                                                                  \
 
 #define nop() asm volatile ("nop")
 
-#endif /* !__ASSEMBLY__ */
-
 /*
  * Force strict CPU ordering.
  * And yes, this is required on UP too when we're talking
@@ -68,64 +84,19 @@ do {                                                                        \
 #define smp_mb()       mb()
 #define smp_rmb()      rmb()
 #define smp_wmb()      wmb()
-#else
+#define set_mb(var, value)  do { xchg(&var, value); } while (0)
+#else  /* CONFIG_SMP */
 #define smp_mb()       barrier()
 #define smp_rmb()      barrier()
 #define smp_wmb()      barrier()
-#endif
-
 #define set_mb(var, value)  do { var = value;  mb(); } while (0)
+#endif /* CONFIG_SMP */
+
 #define set_wmb(var, value) do { var = value; wmb(); } while (0)
 
 #define read_barrier_depends()         do {} while (0)
 #define smp_read_barrier_depends()     do {} while (0)
 
-/*****************************************************************************/
-/*
- * MN10300 doesn't actually have an exchange instruction
- */
-#ifndef __ASSEMBLY__
-
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((struct __xchg_dummy *)(x))
-
-static inline
-unsigned long __xchg(volatile unsigned long *m, unsigned long val)
-{
-       unsigned long retval;
-       unsigned long flags;
-
-       local_irq_save(flags);
-       retval = *m;
-       *m = val;
-       local_irq_restore(flags);
-       return retval;
-}
-
-#define xchg(ptr, v)                                           \
-       ((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr),    \
-                                    (unsigned long)(v)))
-
-static inline unsigned long __cmpxchg(volatile unsigned long *m,
-                                     unsigned long old, unsigned long new)
-{
-       unsigned long retval;
-       unsigned long flags;
-
-       local_irq_save(flags);
-       retval = *m;
-       if (retval == old)
-               *m = new;
-       local_irq_restore(flags);
-       return retval;
-}
-
-#define cmpxchg(ptr, o, n)                                     \
-       ((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \
-                                       (unsigned long)(o),     \
-                                       (unsigned long)(n)))
-
 #endif /* !__ASSEMBLY__ */
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_SYSTEM_H */
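
The __xchg/__cmpxchg deletions above remove the old UP-only strategy: lacking a hardware exchange instruction, the MN10300 got atomicity by masking interrupts around a plain read-modify-write (SMP-capable replacements presumably live in asm/atomic.h, which the header now pulls in). A userspace model of the cmpxchg semantics, with the IRQ masking stubbed out under clearly marked stand-in names:

#include <stdio.h>

/* stubs: the real code masks interrupts here, which suffices on UP */
static unsigned long irq_save_stub(void)           { return 0; }
static void irq_restore_stub(unsigned long flags)  { (void)flags; }

static unsigned long cmpxchg_model(volatile unsigned long *m,
                                   unsigned long old, unsigned long new)
{
        unsigned long retval, flags;

        flags = irq_save_stub();  /* nothing can preempt the RMW on UP */
        retval = *m;
        if (retval == old)
                *m = new;
        irq_restore_stub(flags);
        return retval;            /* success iff retval == old */
}

int main(void)
{
        unsigned long v = 5;
        printf("%lu\n", cmpxchg_model(&v, 5, 9)); /* 5: swapped, v now 9 */
        printf("%lu\n", cmpxchg_model(&v, 5, 7)); /* 9: mismatch, v stays 9 */
        return 0;
}
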
index 2001cb657a95029ddeb19f40f2706064876e1b55..aa07a4a5d7949406550ba001f7a8faaa631633ec 100644 (file)
 
 #include <asm/page.h>
 
-#ifndef __ASSEMBLY__
-#include <asm/processor.h>
-#endif
-
 #define PREEMPT_ACTIVE         0x10000000
 
 #ifdef CONFIG_4KSTACKS
  *   must also be changed
  */
 #ifndef __ASSEMBLY__
+typedef struct {
+       unsigned long   seg;
+} mm_segment_t;
 
 struct thread_info {
        struct task_struct      *task;          /* main task structure */
        struct exec_domain      *exec_domain;   /* execution domain */
+       struct pt_regs          *frame;         /* current exception frame */
        unsigned long           flags;          /* low level flags */
        __u32                   cpu;            /* current CPU */
        __s32                   preempt_count;  /* 0 => preemptable, <0 => BUG */
@@ -55,6 +55,10 @@ struct thread_info {
        __u8                    supervisor_stack[0];
 };
 
+#define thread_info_to_uregs(ti)                                       \
+       ((struct pt_regs *)                                             \
+        ((unsigned long)ti + THREAD_SIZE - sizeof(struct pt_regs)))
+
 #else /* !__ASSEMBLY__ */
 
 #ifndef __ASM_OFFSETS_H__
@@ -102,6 +106,12 @@ struct thread_info *current_thread_info(void)
        return ti;
 }
 
+static inline __attribute__((const))
+struct pt_regs *current_frame(void)
+{
+       return current_thread_info()->frame;
+}
+
 /* how to get the current stack pointer from C */
 static inline unsigned long current_stack_pointer(void)
 {
index 1d883b7f94ab9b63776255c681d9001d690d28ad..c634977caf66d1338e52fae61f1dbe401fb249fe 100644 (file)
 
 #ifdef __KERNEL__
 
-/* timer prescalar control */
+/*
+ * Timer prescaler control
+ */
 #define        TMPSCNT                 __SYSREG(0xd4003071, u8) /* timer prescaler control */
 #define        TMPSCNT_ENABLE          0x80    /* timer prescaler enable */
 #define        TMPSCNT_DISABLE         0x00    /* timer prescaler disable */
 
-/* 8 bit timers */
+/*
+ * 8-bit timers
+ */
 #define        TM0MD                   __SYSREG(0xd4003000, u8) /* timer 0 mode register */
 #define        TM0MD_SRC               0x07    /* timer source */
 #define        TM0MD_SRC_IOCLK         0x00    /* - IOCLK */
 #define        TM0MD_SRC_IOCLK_8       0x01    /* - 1/8 IOCLK */
 #define        TM0MD_SRC_IOCLK_32      0x02    /* - 1/32 IOCLK */
-#define        TM0MD_SRC_TM2IO         0x03    /* - TM2IO pin input */
 #define        TM0MD_SRC_TM1UFLOW      0x05    /* - timer 1 underflow */
 #define        TM0MD_SRC_TM2UFLOW      0x06    /* - timer 2 underflow */
+#if    defined(CONFIG_AM33_2)
+#define        TM0MD_SRC_TM2IO         0x03    /* - TM2IO pin input */
 #define        TM0MD_SRC_TM0IO         0x07    /* - TM0IO pin input */
+#endif /* CONFIG_AM33_2 */
 #define        TM0MD_INIT_COUNTER      0x40    /* initialize TMnBC = TMnBR */
 #define        TM0MD_COUNT_ENABLE      0x80    /* timer count enable */
 
@@ -43,7 +49,9 @@
 #define        TM1MD_SRC_TM0CASCADE    0x03    /* - cascade with timer 0 */
 #define        TM1MD_SRC_TM0UFLOW      0x04    /* - timer 0 underflow */
 #define        TM1MD_SRC_TM2UFLOW      0x06    /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
 #define        TM1MD_SRC_TM1IO         0x07    /* - TM1IO pin input */
+#endif /* CONFIG_AM33_2 */
 #define        TM1MD_INIT_COUNTER      0x40    /* initialize TMnBC = TMnBR */
 #define        TM1MD_COUNT_ENABLE      0x80    /* timer count enable */
 
@@ -55,7 +63,9 @@
 #define        TM2MD_SRC_TM1CASCADE    0x03    /* - cascade with timer 1 */
 #define        TM2MD_SRC_TM0UFLOW      0x04    /* - timer 0 underflow */
 #define        TM2MD_SRC_TM1UFLOW      0x05    /* - timer 1 underflow */
+#if defined(CONFIG_AM33_2)
 #define        TM2MD_SRC_TM2IO         0x07    /* - TM2IO pin input */
+#endif /* CONFIG_AM33_2 */
 #define        TM2MD_INIT_COUNTER      0x40    /* initialize TMnBC = TMnBR */
 #define        TM2MD_COUNT_ENABLE      0x80    /* timer count enable */
 
 #define        TM3MD_SRC_IOCLK         0x00    /* - IOCLK */
 #define        TM3MD_SRC_IOCLK_8       0x01    /* - 1/8 IOCLK */
 #define        TM3MD_SRC_IOCLK_32      0x02    /* - 1/32 IOCLK */
-#define        TM3MD_SRC_TM1CASCADE    0x03    /* - cascade with timer 2 */
+#define        TM3MD_SRC_TM2CASCADE    0x03    /* - cascade with timer 2 */
 #define        TM3MD_SRC_TM0UFLOW      0x04    /* - timer 0 underflow */
 #define        TM3MD_SRC_TM1UFLOW      0x05    /* - timer 1 underflow */
 #define        TM3MD_SRC_TM2UFLOW      0x06    /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
 #define        TM3MD_SRC_TM3IO         0x07    /* - TM3IO pin input */
+#endif /* CONFIG_AM33_2 */
 #define        TM3MD_INIT_COUNTER      0x40    /* initialize TMnBC = TMnBR */
 #define        TM3MD_COUNT_ENABLE      0x80    /* timer count enable */
 
 #define        TM2ICR                  GxICR(TM2IRQ)   /* timer 2 uflow intr ctrl reg */
 #define        TM3ICR                  GxICR(TM3IRQ)   /* timer 3 uflow intr ctrl reg */
 
-/* 16-bit timers 4,5 & 7-11 */
+/*
+ * 16-bit timers 4,5 & 7-15
+ */
 #define        TM4MD                   __SYSREG(0xd4003080, u8)   /* timer 4 mode register */
 #define        TM4MD_SRC               0x07    /* timer source */
 #define        TM4MD_SRC_IOCLK         0x00    /* - IOCLK */
 #define        TM4MD_SRC_TM0UFLOW      0x04    /* - timer 0 underflow */
 #define        TM4MD_SRC_TM1UFLOW      0x05    /* - timer 1 underflow */
 #define        TM4MD_SRC_TM2UFLOW      0x06    /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
 #define        TM4MD_SRC_TM4IO         0x07    /* - TM4IO pin input */
+#endif /* CONFIG_AM33_2 */
 #define        TM4MD_INIT_COUNTER      0x40    /* initialize TMnBC = TMnBR */
 #define        TM4MD_COUNT_ENABLE      0x80    /* timer count enable */
 
 #define        TM5MD_SRC_TM0UFLOW      0x04    /* - timer 0 underflow */
 #define        TM5MD_SRC_TM1UFLOW      0x05    /* - timer 1 underflow */
 #define        TM5MD_SRC_TM2UFLOW      0x06    /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
 #define        TM5MD_SRC_TM5IO         0x07    /* - TM5IO pin input */
+#else  /* !CONFIG_AM33_2 */
+#define        TM5MD_SRC_TM7UFLOW      0x07    /* - timer 7 underflow */
+#endif /* CONFIG_AM33_2 */
 #define        TM5MD_INIT_COUNTER      0x40    /* initialize TMnBC = TMnBR */
 #define        TM5MD_COUNT_ENABLE      0x80    /* timer count enable */
 
 #define        TM7MD_SRC_TM0UFLOW      0x04    /* - timer 0 underflow */
 #define        TM7MD_SRC_TM1UFLOW      0x05    /* - timer 1 underflow */
 #define        TM7MD_SRC_TM2UFLOW      0x06    /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
 #define        TM7MD_SRC_TM7IO         0x07    /* - TM7IO pin input */
+#endif /* CONFIG_AM33_2 */
 #define        TM7MD_INIT_COUNTER      0x40    /* initialize TMnBC = TMnBR */
 #define        TM7MD_COUNT_ENABLE      0x80    /* timer count enable */
 
 #define        TM8MD_SRC_TM0UFLOW      0x04    /* - timer 0 underflow */
 #define        TM8MD_SRC_TM1UFLOW      0x05    /* - timer 1 underflow */
 #define        TM8MD_SRC_TM2UFLOW      0x06    /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
 #define        TM8MD_SRC_TM8IO         0x07    /* - TM8IO pin input */
+#else  /* !CONFIG_AM33_2 */
+#define        TM8MD_SRC_TM7UFLOW      0x07    /* - timer 7 underflow */
+#endif /* CONFIG_AM33_2 */
 #define        TM8MD_INIT_COUNTER      0x40    /* initialize TMnBC = TMnBR */
 #define        TM8MD_COUNT_ENABLE      0x80    /* timer count enable */
 
 #define        TM9MD_SRC_TM0UFLOW      0x04    /* - timer 0 underflow */
 #define        TM9MD_SRC_TM1UFLOW      0x05    /* - timer 1 underflow */
 #define        TM9MD_SRC_TM2UFLOW      0x06    /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
 #define        TM9MD_SRC_TM9IO         0x07    /* - TM9IO pin input */
+#else  /* !CONFIG_AM33_2 */
+#define        TM9MD_SRC_TM7UFLOW      0x07    /* - timer 7 underflow */
+#endif /* CONFIG_AM33_2 */
 #define        TM9MD_INIT_COUNTER      0x40    /* initialize TMnBC = TMnBR */
 #define        TM9MD_COUNT_ENABLE      0x80    /* timer count enable */
 
 #define        TM10MD_SRC_TM0UFLOW     0x04    /* - timer 0 underflow */
 #define        TM10MD_SRC_TM1UFLOW     0x05    /* - timer 1 underflow */
 #define        TM10MD_SRC_TM2UFLOW     0x06    /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
 #define        TM10MD_SRC_TM10IO       0x07    /* - TM10IO pin input */
+#else  /* !CONFIG_AM33_2 */
+#define        TM10MD_SRC_TM7UFLOW     0x07    /* - timer 7 underflow */
+#endif /* CONFIG_AM33_2 */
 #define        TM10MD_INIT_COUNTER     0x40    /* initialize TMnBC = TMnBR */
 #define        TM10MD_COUNT_ENABLE     0x80    /* timer count enable */
 
 #define        TM11MD_SRC_IOCLK        0x00    /* - IOCLK */
 #define        TM11MD_SRC_IOCLK_8      0x01    /* - 1/8 IOCLK */
 #define        TM11MD_SRC_IOCLK_32     0x02    /* - 1/32 IOCLK */
-#define        TM11MD_SRC_TM7CASCADE   0x03    /* - cascade with timer 7 */
 #define        TM11MD_SRC_TM0UFLOW     0x04    /* - timer 0 underflow */
 #define        TM11MD_SRC_TM1UFLOW     0x05    /* - timer 1 underflow */
 #define        TM11MD_SRC_TM2UFLOW     0x06    /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
 #define        TM11MD_SRC_TM11IO       0x07    /* - TM11IO pin input */
+#else  /* !CONFIG_AM33_2 */
+#define        TM11MD_SRC_TM7UFLOW     0x07    /* - timer 7 underflow */
+#endif /* CONFIG_AM33_2 */
 #define        TM11MD_INIT_COUNTER     0x40    /* initialize TMnBC = TMnBR */
 #define        TM11MD_COUNT_ENABLE     0x80    /* timer count enable */
 
+#if defined(CONFIG_AM34_2)
+#define        TM12MD                  __SYSREG(0xd4003180, u8)   /* timer 12 mode register */
+#define        TM12MD_SRC              0x07    /* timer source */
+#define        TM12MD_SRC_IOCLK        0x00    /* - IOCLK */
+#define        TM12MD_SRC_IOCLK_8      0x01    /* - 1/8 IOCLK */
+#define        TM12MD_SRC_IOCLK_32     0x02    /* - 1/32 IOCLK */
+#define        TM12MD_SRC_TM0UFLOW     0x04    /* - timer 0 underflow */
+#define        TM12MD_SRC_TM1UFLOW     0x05    /* - timer 1 underflow */
+#define        TM12MD_SRC_TM2UFLOW     0x06    /* - timer 2 underflow */
+#define        TM12MD_SRC_TM7UFLOW     0x07    /* - timer 7 underflow */
+#define        TM12MD_INIT_COUNTER     0x40    /* initialize TMnBC = TMnBR */
+#define        TM12MD_COUNT_ENABLE     0x80    /* timer count enable */
+
+#define        TM13MD                  __SYSREG(0xd4003182, u8)   /* timer 13 mode register */
+#define        TM13MD_SRC              0x07    /* timer source */
+#define        TM13MD_SRC_IOCLK        0x00    /* - IOCLK */
+#define        TM13MD_SRC_IOCLK_8      0x01    /* - 1/8 IOCLK */
+#define        TM13MD_SRC_IOCLK_32     0x02    /* - 1/32 IOCLK */
+#define        TM13MD_SRC_TM12CASCADE  0x03    /* - cascade with timer 12 */
+#define        TM13MD_SRC_TM0UFLOW     0x04    /* - timer 0 underflow */
+#define        TM13MD_SRC_TM1UFLOW     0x05    /* - timer 1 underflow */
+#define        TM13MD_SRC_TM2UFLOW     0x06    /* - timer 2 underflow */
+#define        TM13MD_SRC_TM7UFLOW     0x07    /* - timer 7 underflow */
+#define        TM13MD_INIT_COUNTER     0x40    /* initialize TMnBC = TMnBR */
+#define        TM13MD_COUNT_ENABLE     0x80    /* timer count enable */
+
+#define        TM14MD                  __SYSREG(0xd4003184, u8)   /* timer 14 mode register */
+#define        TM14MD_SRC              0x07    /* timer source */
+#define        TM14MD_SRC_IOCLK        0x00    /* - IOCLK */
+#define        TM14MD_SRC_IOCLK_8      0x01    /* - 1/8 IOCLK */
+#define        TM14MD_SRC_IOCLK_32     0x02    /* - 1/32 IOCLK */
+#define        TM14MD_SRC_TM13CASCADE  0x03    /* - cascade with timer 13 */
+#define        TM14MD_SRC_TM0UFLOW     0x04    /* - timer 0 underflow */
+#define        TM14MD_SRC_TM1UFLOW     0x05    /* - timer 1 underflow */
+#define        TM14MD_SRC_TM2UFLOW     0x06    /* - timer 2 underflow */
+#define        TM14MD_SRC_TM7UFLOW     0x07    /* - timer 7 underflow */
+#define        TM14MD_INIT_COUNTER     0x40    /* initialize TMnBC = TMnBR */
+#define        TM14MD_COUNT_ENABLE     0x80    /* timer count enable */
+
+#define        TM15MD                  __SYSREG(0xd4003186, u8)   /* timer 15 mode register */
+#define        TM15MD_SRC              0x07    /* timer source */
+#define        TM15MD_SRC_IOCLK        0x00    /* - IOCLK */
+#define        TM15MD_SRC_IOCLK_8      0x01    /* - 1/8 IOCLK */
+#define        TM15MD_SRC_IOCLK_32     0x02    /* - 1/32 IOCLK */
+#define        TM15MD_SRC_TM0UFLOW     0x04    /* - timer 0 underflow */
+#define        TM15MD_SRC_TM1UFLOW     0x05    /* - timer 1 underflow */
+#define        TM15MD_SRC_TM2UFLOW     0x06    /* - timer 2 underflow */
+#define        TM15MD_SRC_TM7UFLOW     0x07    /* - timer 7 underflow */
+#define        TM15MD_INIT_COUNTER     0x40    /* initialize TMnBC = TMnBR */
+#define        TM15MD_COUNT_ENABLE     0x80    /* timer count enable */
+#endif /* CONFIG_AM34_2 */
+
 #define        TM4BR                   __SYSREG(0xd4003090, u16)  /* timer 4 base register */
 #define        TM5BR                   __SYSREG(0xd4003092, u16)  /* timer 5 base register */
+#define        TM45BR                  __SYSREG(0xd4003090, u32)  /* timer 4:5 base register */
 #define        TM7BR                   __SYSREG(0xd4003096, u16)  /* timer 7 base register */
 #define        TM8BR                   __SYSREG(0xd4003098, u16)  /* timer 8 base register */
 #define        TM9BR                   __SYSREG(0xd400309a, u16)  /* timer 9 base register */
+#define        TM89BR                  __SYSREG(0xd4003098, u32)  /* timer 8:9 base register */
 #define        TM10BR                  __SYSREG(0xd400309c, u16)  /* timer 10 base register */
 #define        TM11BR                  __SYSREG(0xd400309e, u16)  /* timer 11 base register */
-#define        TM45BR                  __SYSREG(0xd4003090, u32)  /* timer 4:5 base register */
+#if defined(CONFIG_AM34_2)
+#define        TM12BR                  __SYSREG(0xd4003190, u16)  /* timer 12 base register */
+#define        TM13BR                  __SYSREG(0xd4003192, u16)  /* timer 13 base register */
+#define        TM14BR                  __SYSREG(0xd4003194, u16)  /* timer 14 base register */
+#define        TM15BR                  __SYSREG(0xd4003196, u16)  /* timer 15 base register */
+#endif /* CONFIG_AM34_2 */
 
 #define        TM4BC                   __SYSREG(0xd40030a0, u16)  /* timer 4 binary counter */
 #define        TM5BC                   __SYSREG(0xd40030a2, u16)  /* timer 5 binary counter */
 #define        TM45BC                  __SYSREG(0xd40030a0, u32)  /* timer 4:5 binary counter */
-
 #define        TM7BC                   __SYSREG(0xd40030a6, u16)  /* timer 7 binary counter */
 #define        TM8BC                   __SYSREG(0xd40030a8, u16)  /* timer 8 binary counter */
 #define        TM9BC                   __SYSREG(0xd40030aa, u16)  /* timer 9 binary counter */
+#define        TM89BC                  __SYSREG(0xd40030a8, u32)  /* timer 8:9 binary counter */
 #define        TM10BC                  __SYSREG(0xd40030ac, u16)  /* timer 10 binary counter */
 #define        TM11BC                  __SYSREG(0xd40030ae, u16)  /* timer 11 binary counter */
+#if defined(CONFIG_AM34_2)
+#define        TM12BC                  __SYSREG(0xd40031a0, u16)  /* timer 12 binary counter */
+#define        TM13BC                  __SYSREG(0xd40031a2, u16)  /* timer 13 binary counter */
+#define        TM14BC                  __SYSREG(0xd40031a4, u16)  /* timer 14 binary counter */
+#define        TM15BC                  __SYSREG(0xd40031a6, u16)  /* timer 15 binary counter */
+#endif /* CONFIG_AM34_2 */
 
 #define TM4IRQ                 6       /* timer 4 IRQ */
 #define TM5IRQ                 7       /* timer 5 IRQ */
 #define TM9IRQ                 13      /* timer 9 IRQ */
 #define TM10IRQ                        14      /* timer 10 IRQ */
 #define TM11IRQ                        15      /* timer 11 IRQ */
+#if defined(CONFIG_AM34_2)
+#define TM12IRQ                        64      /* timer 12 IRQ */
+#define TM13IRQ                        65      /* timer 13 IRQ */
+#define TM14IRQ                        66      /* timer 14 IRQ */
+#define TM15IRQ                        67      /* timer 15 IRQ */
+#endif /* CONFIG_AM34_2 */
 
 #define        TM4ICR                  GxICR(TM4IRQ)   /* timer 4 uflow intr ctrl reg */
 #define        TM5ICR                  GxICR(TM5IRQ)   /* timer 5 uflow intr ctrl reg */
 #define        TM9ICR                  GxICR(TM9IRQ)   /* timer 9 uflow intr ctrl reg */
 #define        TM10ICR                 GxICR(TM10IRQ)  /* timer 10 uflow intr ctrl reg */
 #define        TM11ICR                 GxICR(TM11IRQ)  /* timer 11 uflow intr ctrl reg */
-
-/* 16-bit timer 6 */
+#if defined(CONFIG_AM34_2)
+#define        TM12ICR                 GxICR(TM12IRQ)  /* timer 12 uflow intr ctrl reg */
+#define        TM13ICR                 GxICR(TM13IRQ)  /* timer 13 uflow intr ctrl reg */
+#define        TM14ICR                 GxICR(TM14IRQ)  /* timer 14 uflow intr ctrl reg */
+#define        TM15ICR                 GxICR(TM15IRQ)  /* timer 15 uflow intr ctrl reg */
+#endif /* CONFIG_AM34_2 */
+
+/*
+ * 16-bit timer 6
+ */
 #define        TM6MD                   __SYSREG(0xd4003084, u16)  /* timer6 mode register */
 #define        TM6MD_SRC               0x0007  /* timer source */
 #define        TM6MD_SRC_IOCLK         0x0000  /* - IOCLK */
 #define        TM6MD_SRC_IOCLK_32      0x0002  /* - 1/32 IOCLK */
 #define        TM6MD_SRC_TM0UFLOW      0x0004  /* - timer 0 underflow */
 #define        TM6MD_SRC_TM1UFLOW      0x0005  /* - timer 1 underflow */
-#define        TM6MD_SRC_TM6IOB_BOTH   0x0006  /* - TM6IOB pin input (both edges) */
+#define        TM6MD_SRC_TM2UFLOW      0x0006  /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
+/* #define     TM6MD_SRC_TM6IOB_BOTH   0x0006 */       /* - TM6IOB pin input (both edges) */
 #define        TM6MD_SRC_TM6IOB_SINGLE 0x0007  /* - TM6IOB pin input (single edge) */
-#define        TM6MD_CLR_ENABLE        0x0010  /* clear count enable */
+#endif /* CONFIG_AM33_2 */
 #define        TM6MD_ONESHOT_ENABLE    0x0040  /* oneshot count */
+#define        TM6MD_CLR_ENABLE        0x0010  /* clear count enable */
+#if    defined(CONFIG_AM33_2)
 #define        TM6MD_TRIG_ENABLE       0x0080  /* TM6IOB pin trigger enable */
 #define TM6MD_PWM              0x3800  /* PWM output mode */
 #define TM6MD_PWM_DIS          0x0000  /* - disabled */
 #define        TM6MD_PWM_11BIT         0x1800  /* - 11 bits mode */
 #define        TM6MD_PWM_12BIT         0x3000  /* - 12 bits mode */
 #define        TM6MD_PWM_14BIT         0x3800  /* - 14 bits mode */
+#endif /* CONFIG_AM33_2 */
+
 #define        TM6MD_INIT_COUNTER      0x4000  /* initialize TMnBC to zero */
 #define        TM6MD_COUNT_ENABLE      0x8000  /* timer count enable */
 
 #define        TM6MDA                  __SYSREG(0xd40030b4, u8)   /* timer6 cmp/cap A mode reg */
+#define        TM6MDA_MODE_CMP_SINGLE  0x00    /* - compare, single buffer mode */
+#define        TM6MDA_MODE_CMP_DOUBLE  0x40    /* - compare, double buffer mode */
+#if    defined(CONFIG_AM33_2)
 #define TM6MDA_OUT             0x07    /* output select */
 #define        TM6MDA_OUT_SETA_RESETB  0x00    /* - set at match A, reset at match B */
 #define        TM6MDA_OUT_SETA_RESETOV 0x01    /* - set at match A, reset at overflow */
 #define        TM6MDA_OUT_RESETA       0x03    /* - reset at match A */
 #define        TM6MDA_OUT_TOGGLE       0x04    /* - toggle on match A */
 #define TM6MDA_MODE            0xc0    /* compare A register mode */
-#define        TM6MDA_MODE_CMP_SINGLE  0x00    /* - compare, single buffer mode */
-#define        TM6MDA_MODE_CMP_DOUBLE  0x40    /* - compare, double buffer mode */
 #define        TM6MDA_MODE_CAP_S_EDGE  0x80    /* - capture, single edge mode */
 #define        TM6MDA_MODE_CAP_D_EDGE  0xc0    /* - capture, double edge mode */
 #define TM6MDA_EDGE            0x20    /* compare A edge select */
 #define        TM6MDA_EDGE_FALLING     0x00    /* capture on falling edge */
 #define        TM6MDA_EDGE_RISING      0x20    /* capture on rising edge */
 #define        TM6MDA_CAPTURE_ENABLE   0x10    /* capture enable */
+#else  /* !CONFIG_AM33_2 */
+#define        TM6MDA_MODE             0x40    /* compare A register mode */
+#endif /* CONFIG_AM33_2 */
 
 #define        TM6MDB                  __SYSREG(0xd40030b5, u8)   /* timer6 cmp/cap B mode reg */
+#define        TM6MDB_MODE_CMP_SINGLE  0x00    /* - compare, single buffer mode */
+#define        TM6MDB_MODE_CMP_DOUBLE  0x40    /* - compare, double buffer mode */
+#if defined(CONFIG_AM33_2)
 #define TM6MDB_OUT             0x07    /* output select */
 #define        TM6MDB_OUT_SETB_RESETA  0x00    /* - set at match B, reset at match A */
 #define        TM6MDB_OUT_SETB_RESETOV 0x01    /* - set at match B, reset at overflow */
 #define        TM6MDB_OUT_RESETB       0x03    /* - reset at match B */
 #define        TM6MDB_OUT_TOGGLE       0x04    /* - toggle on match B */
 #define TM6MDB_MODE            0xc0    /* compare B register mode */
-#define        TM6MDB_MODE_CMP_SINGLE  0x00    /* - compare, single buffer mode */
-#define        TM6MDB_MODE_CMP_DOUBLE  0x40    /* - compare, double buffer mode */
 #define        TM6MDB_MODE_CAP_S_EDGE  0x80    /* - capture, single edge mode */
 #define        TM6MDB_MODE_CAP_D_EDGE  0xc0    /* - capture, double edge mode */
 #define TM6MDB_EDGE            0x20    /* compare B edge select */
 #define        TM6MDB_EDGE_FALLING     0x00    /* capture on falling edge */
 #define        TM6MDB_EDGE_RISING      0x20    /* capture on rising edge */
 #define        TM6MDB_CAPTURE_ENABLE   0x10    /* capture enable */
+#else  /* !CONFIG_AM33_2 */
+#define        TM6MDB_MODE             0x40    /* compare B register mode */
+#endif /* CONFIG_AM33_2 */
 
 #define        TM6CA                   __SYSREG(0xd40030c4, u16)   /* timer6 cmp/capture reg A */
 #define        TM6CB                   __SYSREG(0xd40030d4, u16)   /* timer6 cmp/capture reg B */
 #define        TM6AICR                 GxICR(TM6AIRQ)  /* timer 6A intr control reg */
 #define        TM6BICR                 GxICR(TM6BIRQ)  /* timer 6B intr control reg */
 
+#if defined(CONFIG_AM34_2)
+/*
+ * MTM: OS Tick-Timer
+ */
+#define        TMTMD                   __SYSREG(0xd4004100, u8)        /* Tick Timer mode register */
+#define        TMTMD_TMTLDE            0x40    /* initialize TMTBC = TMTBR */
+#define        TMTMD_TMTCNE            0x80    /* timer count enable       */
+
+#define        TMTBR                   __SYSREG(0xd4004110, u32)       /* Tick Timer base register */
+#define        TMTBC                   __SYSREG(0xd4004120, u32)       /* Tick Timer binary counter */
+
+/*
+ * MTM: OS Timestamp-Timer
+ */
+#define        TMSMD                   __SYSREG(0xd4004140, u8)        /* Timestamp Timer mode register */
+#define        TMSMD_TMSLDE            0x40            /* initialize TMSBC = TMSBR */
+#define        TMSMD_TMSCNE            0x80            /* timer count enable       */
+
+#define        TMSBR                   __SYSREG(0xd4004150, u32)       /* Timestamp Timer base register */
+#define        TMSBC                   __SYSREG(0xd4004160, u32)       /* Timestamp Timer binary counter */
+
+#define TMTIRQ                 119             /* OS Tick timer   IRQ */
+#define TMSIRQ                 120             /* Timestamp timer IRQ */
+
+#define        TMTICR                  GxICR(TMTIRQ)   /* OS Tick timer   uflow intr ctrl reg */
+#define        TMSICR                  GxICR(TMSIRQ)   /* Timestamp timer uflow intr ctrl reg */
+#endif /* CONFIG_AM34_2 */
+
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_TIMER_REGS_H */
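
The AM34 additions above follow the existing cascade pattern: the SRC field
(low three bits) selects the clock, TMnBR holds the reload value, and the
INIT_COUNTER/COUNT_ENABLE bits latch and start the count.  As a rough
illustration, a hedged sketch of chaining timers 12 and 13 into one 32-bit
down-counter using only the macros above; the helper name, reload split and
enable ordering are assumptions, not taken from this patch or the databook:

	/* hypothetical illustration -- not part of timer-regs.h */
	static inline void tm12_13_start(u16 lo, u16 hi)
	{
		TM12BR = lo;				/* low 16 bits of reload */
		TM13BR = hi;				/* high 16 bits of reload */
		TM13MD = TM13MD_SRC_TM12CASCADE;	/* slave timer 13 to timer 12 */
		TM12MD = TM12MD_SRC_IOCLK;		/* clock timer 12 from IOCLK */
		TM13MD |= TM13MD_INIT_COUNTER;		/* copy TMnBR into TMnBC */
		TM12MD |= TM12MD_INIT_COUNTER;
		TM13MD |= TM13MD_COUNT_ENABLE;		/* enable the high half first */
		TM12MD |= TM12MD_COUNT_ENABLE;		/* then start the low half */
	}
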
index 8d031f9e117d7db554bf3802cd357d36bbbb110b..bd4e90dfe6c26d37c366abec8275a1d0d1e7b774 100644 (file)
 
 #define TICK_SIZE (tick_nsec / 1000)
 
-#define CLOCK_TICK_RATE 1193180 /* Underlying HZ - this should probably be set
-                                * to something appropriate, but what? */
-
-extern cycles_t cacheflush_time;
+#define CLOCK_TICK_RATE MN10300_JCCLK /* Underlying HZ */
 
 #ifdef __KERNEL__
 
+extern cycles_t cacheflush_time;
+
 static inline cycles_t get_cycles(void)
 {
        return read_timestamp_counter();
 }
 
+extern int init_clockevents(void);
+extern int init_clocksource(void);
+
+static inline void setup_jiffies_interrupt(int irq,
+                                          struct irqaction *action)
+{
+       u16 tmp;
+       setup_irq(irq, action);
+       set_intr_level(irq, NUM2GxICR_LEVEL(CONFIG_TIMER_IRQ_LEVEL));
+       GxICR(irq) |= GxICR_ENABLE | GxICR_DETECT | GxICR_REQUEST;
+       tmp = GxICR(irq);       /* flush the ICR write by reading it back */
+}
+
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_TIMEX_H */
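
With get_cycles() now backed by the free-running timestamp counter, simple
cycle measurements follow directly.  A minimal sketch, assuming kernel
context; time_fn() is a hypothetical helper, not part of this patch:

	/* hypothetical illustration -- not part of timex.h */
	static cycles_t time_fn(void (*fn)(void))
	{
		cycles_t start = get_cycles();	/* read the timestamp counter */
		fn();
		return get_cycles() - start;	/* elapsed cycles */
	}
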
index 1a7e29281c5d0642ca0e66b91ca054f8353989b1..efddd6e1adeadef731997d6b78214e23e962a20c 100644 (file)
 #ifndef _ASM_TLBFLUSH_H
 #define _ASM_TLBFLUSH_H
 
+#include <linux/mm.h>
 #include <asm/processor.h>
 
-#define __flush_tlb()                                          \
-do {                                                           \
-       int w;                                                  \
-       __asm__ __volatile__                                    \
-               ("      mov %1,%0               \n"             \
-                "      or %2,%0                \n"             \
-                "      mov %0,%1               \n"             \
-                : "=d"(w)                                      \
-                : "m"(MMUCTR), "i"(MMUCTR_IIV|MMUCTR_DIV)      \
-                : "cc", "memory"                               \
-                );                                             \
-} while (0)
+struct tlb_state {
+       struct mm_struct        *active_mm;
+       int                     state;
+};
+DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
 
-#define __flush_tlb_all() __flush_tlb()
-#define __flush_tlb_one(addr) __flush_tlb()
+/**
+ * local_flush_tlb - Flush the current MM's entries from the local CPU's TLBs
+ */
+static inline void local_flush_tlb(void)
+{
+       int w;
+       asm volatile(
+               "       mov     %1,%0           \n"
+               "       or      %2,%0           \n"
+               "       mov     %0,%1           \n"
+               : "=d"(w)
+               : "m"(MMUCTR), "i"(MMUCTR_IIV|MMUCTR_DIV)
+               : "cc", "memory");
+}
+
+/**
+ * local_flush_tlb_all - Flush all entries from the local CPU's TLBs
+ */
+static inline void local_flush_tlb_all(void)
+{
+       local_flush_tlb();
+}
 
+/**
+ * local_flush_tlb_one - Flush one entry from the local CPU's TLBs
+ */
+static inline void local_flush_tlb_one(unsigned long addr)
+{
+       local_flush_tlb();
+}
+
+/**
+ * local_flush_tlb_page - Flush a page's entry from the local CPU's TLBs
+ * @mm: The MM to flush for
+ * @addr: The address of the target page in RAM (not its page struct)
+ */
+static inline
+void local_flush_tlb_page(struct mm_struct *mm, unsigned long addr)
+{
+       unsigned long pteu, flags, cnx;
+
+       addr &= PAGE_MASK;
+
+       local_irq_save(flags);
+
+       cnx = 1;
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+       cnx = mm->context.tlbpid[smp_processor_id()];
+#endif
+       if (cnx) {
+               pteu = addr;
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+               pteu |= cnx & xPTEU_PID;
+#endif
+               IPTEU = pteu;           /* probe the instruction TLB */
+               DPTEU = pteu;           /* probe the data TLB */
+               if (IPTEL & xPTEL_V)
+                       IPTEL = 0;      /* invalidate on a valid hit */
+               if (DPTEL & xPTEL_V)
+                       DPTEL = 0;      /* likewise for the data side */
+       }
+       local_irq_restore(flags);
+}
 
 /*
  * TLB flushing:
@@ -40,41 +94,61 @@ do {                                                                \
  *  - flush_tlb_range(mm, start, end) flushes a range of pages
  *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
  */
-#define flush_tlb_all()                                \
-do {                                           \
-       preempt_disable();                      \
-       __flush_tlb_all();                      \
-       preempt_enable();                       \
-} while (0)
-
-#define flush_tlb_mm(mm)                       \
-do {                                           \
-       preempt_disable();                      \
-       __flush_tlb_all();                      \
-       preempt_enable();                       \
-} while (0)
-
-#define flush_tlb_range(vma, start, end)                       \
-do {                                                           \
-       unsigned long __s __attribute__((unused)) = (start);    \
-       unsigned long __e __attribute__((unused)) = (end);      \
-       preempt_disable();                                      \
-       __flush_tlb_all();                                      \
-       preempt_enable();                                       \
-} while (0)
-
-
-#define __flush_tlb_global()                   flush_tlb_all()
-#define flush_tlb()                            flush_tlb_all()
-#define flush_tlb_kernel_range(start, end)                     \
-do {                                                           \
-       unsigned long __s __attribute__((unused)) = (start);    \
-       unsigned long __e __attribute__((unused)) = (end);      \
-       flush_tlb_all();                                        \
-} while (0)
-
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
-
-#define flush_tlb_pgtables(mm, start, end)     do {} while (0)
+#ifdef CONFIG_SMP
+
+#include <asm/smp.h>
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_current_task(void);
+extern void flush_tlb_mm(struct mm_struct *);
+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+
+#define flush_tlb()            flush_tlb_current_task()
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+                                  unsigned long start, unsigned long end)
+{
+       flush_tlb_mm(vma->vm_mm);
+}
+
+#else   /* CONFIG_SMP */
+
+static inline void flush_tlb_all(void)
+{
+       preempt_disable();
+       local_flush_tlb_all();
+       preempt_enable();
+}
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+       preempt_disable();
+       local_flush_tlb_all();
+       preempt_enable();
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+                                  unsigned long start, unsigned long end)
+{
+       preempt_disable();
+       local_flush_tlb_all();
+       preempt_enable();
+}
+
+#define flush_tlb_page(vma, addr)      local_flush_tlb_page((vma)->vm_mm, addr)
+#define flush_tlb()                    flush_tlb_all()
+
+#endif /* CONFIG_SMP */
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+                                         unsigned long end)
+{
+       flush_tlb_all();
+}
+
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
+                                     unsigned long start, unsigned long end)
+{
+}
 
 #endif /* _ASM_TLBFLUSH_H */
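
The effect of the split above is that callers keep using the generic
flush_tlb_*() entry points, which now resolve to IPI-backed functions on
SMP and to the local_*() inlines on UP.  A hedged sketch of the usual
caller pattern after a PTE update; update_one_pte() is hypothetical, while
set_pte_at() and flush_tlb_page() are the standard interfaces:

	/* hypothetical illustration -- not part of tlbflush.h */
	static void update_one_pte(struct vm_area_struct *vma,
				   unsigned long addr, pte_t *ptep, pte_t pte)
	{
		set_pte_at(vma->vm_mm, addr, ptep, pte);  /* install the PTE */
		flush_tlb_page(vma, addr);	/* drop any stale TLB entry */
	}
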
index 197a7af3dd8aeaa7014df5856877de3f0924ee71..679dee0bbd089dddabdbe230abf3874cebb174aa 100644 (file)
@@ -14,9 +14,8 @@
 /*
  * User space memory access functions
  */
-#include <linux/sched.h>
+#include <linux/thread_info.h>
 #include <asm/page.h>
-#include <asm/pgtable.h>
 #include <asm/errno.h>
 
 #define VERIFY_READ 0
@@ -29,7 +28,6 @@
  *
  * For historical reasons, these macros are grossly misnamed.
  */
-
 #define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
 
 #define KERNEL_XDS     MAKE_MM_SEG(0xBFFFFFFF)
@@ -377,7 +375,7 @@ unsigned long __generic_copy_to_user_nocheck(void *to, const void *from,
 
 
 #if 0
-#error don't use - these macros don't increment to & from pointers
+#error "don't use - these macros don't increment to & from pointers"
 /* Optimize just a little bit when we know the size of the move. */
 #define __constant_copy_user(to, from, size)   \
 do {                                           \
index 23f2ab67574c171b72bcecfc1199572c1604ebcd..8f5f1e81baf5c1527a2a82d1380f88918dd38d07 100644 (file)
@@ -3,13 +3,16 @@
 #
 extra-y := head.o init_task.o vmlinux.lds
 
-obj-y   := process.o signal.o entry.o fpu.o traps.o irq.o \
+fpu-obj-y := fpu-nofpu.o fpu-nofpu-low.o
+fpu-obj-$(CONFIG_FPU) := fpu.o fpu-low.o
+
+obj-y   := process.o signal.o entry.o traps.o irq.o \
           ptrace.o setup.o time.o sys_mn10300.o io.o kthread.o \
-          switch_to.o mn10300_ksyms.o kernel_execve.o
+          switch_to.o mn10300_ksyms.o kernel_execve.o $(fpu-obj-y)
 
-obj-$(CONFIG_MN10300_WD_TIMER) += mn10300-watchdog.o mn10300-watchdog-low.o
+obj-$(CONFIG_SMP) += smp.o smp-low.o
 
-obj-$(CONFIG_FPU) += fpu-low.o
+obj-$(CONFIG_MN10300_WD_TIMER) += mn10300-watchdog.o mn10300-watchdog-low.o
 
 obj-$(CONFIG_MN10300_TTYSM) += mn10300-serial.o mn10300-serial-low.o \
                               mn10300-debug.o
@@ -17,7 +20,7 @@ obj-$(CONFIG_GDBSTUB) += gdb-stub.o gdb-low.o
 obj-$(CONFIG_GDBSTUB_ON_TTYSx) += gdb-io-serial.o gdb-io-serial-low.o
 obj-$(CONFIG_GDBSTUB_ON_TTYSMx) += gdb-io-ttysm.o gdb-io-ttysm-low.o
 
-ifneq ($(CONFIG_MN10300_CACHE_DISABLED),y)
+ifeq ($(CONFIG_MN10300_CACHE_ENABLED),y)
 obj-$(CONFIG_GDBSTUB) += gdb-cache.o
 endif
 
@@ -25,3 +28,5 @@ obj-$(CONFIG_MN10300_RTC) += rtc.o
 obj-$(CONFIG_PROFILE) += profile.o profile-low.o
 obj-$(CONFIG_MODULES) += module.o
 obj-$(CONFIG_KPROBES) += kprobes.o
+obj-$(CONFIG_CSRC_MN10300) += csrc-mn10300.o
+obj-$(CONFIG_CEVT_MN10300) += cevt-mn10300.o
index 02dc7e461fef68e8c505cd4e8eea2ae12bbe2b01..96f24fab7de6ee0e559369e195a2fa7b5120a8f2 100644 (file)
@@ -23,6 +23,7 @@ void foo(void)
 
        OFFSET(TI_task,                 thread_info, task);
        OFFSET(TI_exec_domain,          thread_info, exec_domain);
+       OFFSET(TI_frame,                thread_info, frame);
        OFFSET(TI_flags,                thread_info, flags);
        OFFSET(TI_cpu,                  thread_info, cpu);
        OFFSET(TI_preempt_count,        thread_info, preempt_count);
@@ -66,7 +67,15 @@ void foo(void)
        OFFSET(THREAD_SP,               thread_struct, sp);
        OFFSET(THREAD_A3,               thread_struct, a3);
        OFFSET(THREAD_USP,              thread_struct, usp);
-       OFFSET(THREAD_FRAME,            thread_struct, __frame);
+#ifdef CONFIG_FPU
+       OFFSET(THREAD_FPU_FLAGS,        thread_struct, fpu_flags);
+       OFFSET(THREAD_FPU_STATE,        thread_struct, fpu_state);
+       DEFINE(__THREAD_USING_FPU,      THREAD_USING_FPU);
+       DEFINE(__THREAD_HAS_FPU,        THREAD_HAS_FPU);
+#endif /* CONFIG_FPU */
+       BLANK();
+
+       OFFSET(TASK_THREAD,             task_struct, thread);
        BLANK();
 
        DEFINE(CLONE_VM_asm,            CLONE_VM);
diff --git a/arch/mn10300/kernel/cevt-mn10300.c b/arch/mn10300/kernel/cevt-mn10300.c
new file mode 100644 (file)
index 0000000..d4cb535
--- /dev/null
@@ -0,0 +1,131 @@
+/* MN10300 clockevents
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by Mark Salter (msalter@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+#include <asm/timex.h>
+#include "internal.h"
+
+#ifdef CONFIG_SMP
+#if (CONFIG_NR_CPUS > 2) && !defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+#error "This doesn't scale well! Need per-core local timers."
+#endif
+#else /* CONFIG_SMP */
+#define stop_jiffies_counter1()
+#define reload_jiffies_counter1(x)
+#define TMJC1IRQ TMJCIRQ
+#endif
+
+
+static int next_event(unsigned long delta,
+                     struct clock_event_device *evt)
+{
+       unsigned int cpu = smp_processor_id();
+
+       if (cpu == 0) {
+               stop_jiffies_counter();
+               reload_jiffies_counter(delta - 1);
+       } else {
+               stop_jiffies_counter1();
+               reload_jiffies_counter1(delta - 1);
+       }
+       return 0;
+}
+
+static void set_clock_mode(enum clock_event_mode mode,
+                          struct clock_event_device *evt)
+{
+       /* Nothing to do ...  */
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, mn10300_clockevent_device);
+static DEFINE_PER_CPU(struct irqaction, timer_irq);
+
+static irqreturn_t timer_interrupt(int irq, void *dev_id)
+{
+       struct clock_event_device *cd;
+       unsigned int cpu = smp_processor_id();
+
+       if (cpu == 0)
+               stop_jiffies_counter();
+       else
+               stop_jiffies_counter1();
+
+       cd = &per_cpu(mn10300_clockevent_device, cpu);
+       cd->event_handler(cd);
+
+       return IRQ_HANDLED;
+}
+
+static void event_handler(struct clock_event_device *dev)
+{
+}
+
+int __init init_clockevents(void)
+{
+       struct clock_event_device *cd;
+       struct irqaction *iact;
+       unsigned int cpu = smp_processor_id();
+
+       cd = &per_cpu(mn10300_clockevent_device, cpu);
+
+       if (cpu == 0) {
+               stop_jiffies_counter();
+               cd->irq = TMJCIRQ;
+       } else {
+               stop_jiffies_counter1();
+               cd->irq = TMJC1IRQ;
+       }
+
+       cd->name                = "Timestamp";
+       cd->features            = CLOCK_EVT_FEAT_ONESHOT;
+
+       /* Calculate the min / max delta */
+       clockevent_set_clock(cd, MN10300_JCCLK);
+
+       cd->max_delta_ns        = clockevent_delta2ns(TMJCBR_MAX, cd);
+       cd->min_delta_ns        = clockevent_delta2ns(100, cd);
+
+       cd->rating              = 200;
+       cd->cpumask             = cpumask_of(cpu);
+       cd->set_mode            = set_clock_mode;
+       cd->event_handler       = event_handler;
+       cd->set_next_event      = next_event;
+
+       iact = &per_cpu(timer_irq, cpu);
+       iact->flags = IRQF_DISABLED | IRQF_SHARED | IRQF_TIMER;
+       iact->handler = timer_interrupt;
+
+       clockevents_register_device(cd);
+
+#if defined(CONFIG_SMP) && !defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+       /* setup timer irq affinity so it only runs on this cpu */
+       {
+               struct irq_desc *desc;
+               desc = irq_to_desc(cd->irq);
+               cpumask_copy(desc->affinity, cpumask_of(cpu));
+               iact->flags |= IRQF_NOBALANCING;
+       }
+#endif
+
+       if (cpu == 0) {
+               reload_jiffies_counter(MN10300_JC_PER_HZ - 1);
+               iact->name = "CPU0 Timer";
+       } else {
+               reload_jiffies_counter1(MN10300_JC_PER_HZ - 1);
+               iact->name = "CPU1 Timer";
+       }
+
+       setup_jiffies_interrupt(cd->irq, iact);
+
+       return 0;
+}
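
For scale, the min/max deltas computed in init_clockevents() come from the
core's cycles-to-nanoseconds conversion: clockevent_delta2ns() is roughly
(cycles << shift) / mult for a device whose mult/shift pair was derived
from MN10300_JCCLK.  A hedged sketch of that arithmetic (the core version
handles rounding and clamping more carefully):

	/* hypothetical illustration of what clockevent_delta2ns() computes */
	static u64 approx_delta2ns(unsigned long cycles,
				   struct clock_event_device *cd)
	{
		u64 ns = (u64)cycles << cd->shift;
		do_div(ns, cd->mult);	/* 64/32 divide, as the core does */
		return ns;
	}
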
diff --git a/arch/mn10300/kernel/csrc-mn10300.c b/arch/mn10300/kernel/csrc-mn10300.c
new file mode 100644 (file)
index 0000000..ba2f0c4
--- /dev/null
@@ -0,0 +1,35 @@
+/* MN10300 clocksource
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by Mark Salter (msalter@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/clocksource.h>
+#include <linux/init.h>
+#include <asm/timex.h>
+#include "internal.h"
+
+static cycle_t mn10300_read(struct clocksource *cs)
+{
+       return read_timestamp_counter();
+}
+
+static struct clocksource clocksource_mn10300 = {
+       .name   = "TSC",
+       .rating = 200,
+       .read   = mn10300_read,
+       .mask   = CLOCKSOURCE_MASK(32),
+       .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+int __init init_clocksource(void)
+{
+       startup_timestamp_counter();
+       clocksource_set_clock(&clocksource_mn10300, MN10300_TSCCLK);
+       clocksource_register(&clocksource_mn10300);
+       return 0;
+}
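
Once registered, the timekeeping core scales raw counter deltas by the
mult/shift pair that clocksource_set_clock() derives from MN10300_TSCCLK.
A hedged sketch using the standard clocksource_cyc2ns() helper;
tsc_delta_ns() is a hypothetical wrapper, not part of this file:

	/* hypothetical illustration -- not part of csrc-mn10300.c */
	static s64 tsc_delta_ns(cycle_t start, cycle_t end)
	{
		return clocksource_cyc2ns(end - start,
					  clocksource_mn10300.mult,
					  clocksource_mn10300.shift);
	}
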
index 3d394b4eefba1e5a90f14c22997b1ccbbcb3fc5e..f00b9bafcd3ebb7aa8d53e0e8db61aedf15652e2 100644 (file)
 #include <asm/asm-offsets.h>
 #include <asm/frame.inc>
 
+#if defined(CONFIG_SMP) && defined(CONFIG_GDBSTUB)
+#include <asm/gdb-stub.h>
+#endif /* CONFIG_SMP && CONFIG_GDBSTUB */
+
 #ifdef CONFIG_PREEMPT
-#define preempt_stop           __cli
+#define preempt_stop           LOCAL_IRQ_DISABLE
 #else
 #define preempt_stop
 #define resume_kernel          restore_all
 #endif
 
-       .macro __cli
-       and     ~EPSW_IM,epsw
-       or      EPSW_IE|MN10300_CLI_LEVEL,epsw
-       nop
-       nop
-       nop
-       .endm
-       .macro __sti
-       or      EPSW_IE|EPSW_IM_7,epsw
-       .endm
-
-
        .am33_2
 
 ###############################################################################
@@ -88,7 +80,7 @@ syscall_call:
 syscall_exit:
        # make sure we don't miss an interrupt setting need_resched or
        # sigpending between sampling and the rti
-       __cli
+       LOCAL_IRQ_DISABLE
        mov     (TI_flags,a2),d2
        btst    _TIF_ALLWORK_MASK,d2
        bne     syscall_exit_work
@@ -105,7 +97,7 @@ restore_all:
 syscall_exit_work:
        btst    _TIF_SYSCALL_TRACE,d2
        beq     work_pending
-       __sti                           # could let syscall_trace_exit() call
+       LOCAL_IRQ_ENABLE                # could let syscall_trace_exit() call
                                        # schedule() instead
        mov     fp,d0
        call    syscall_trace_exit[],0  # syscall_trace_exit(regs)
@@ -121,7 +113,7 @@ work_resched:
 
        # make sure we don't miss an interrupt setting need_resched or
        # sigpending between sampling and the rti
-       __cli
+       LOCAL_IRQ_DISABLE
 
        # is there any work to be done other than syscall tracing?
        mov     (TI_flags,a2),d2
@@ -168,7 +160,7 @@ ret_from_intr:
 ENTRY(resume_userspace)
        # make sure we don't miss an interrupt setting need_resched or
        # sigpending between sampling and the rti
-       __cli
+       LOCAL_IRQ_DISABLE
 
        # is there any work to be done on int/exception return?
        mov     (TI_flags,a2),d2
@@ -178,7 +170,7 @@ ENTRY(resume_userspace)
 
 #ifdef CONFIG_PREEMPT
 ENTRY(resume_kernel)
-       __cli
+       LOCAL_IRQ_DISABLE
        mov     (TI_preempt_count,a2),d0        # non-zero preempt_count ?
        cmp     0,d0
        bne     restore_all
@@ -214,31 +206,6 @@ ENTRY(irq_handler)
 
        jmp     ret_from_intr
 
-###############################################################################
-#
-# Monitor Signal handler entry point
-#
-###############################################################################
-ENTRY(monitor_signal)
-       movbu   (0xae000001),d1
-       cmp     1,d1
-       beq     monsignal
-       ret     [],0
-
-monsignal:
-       or      EPSW_NMID,epsw
-       mov     d0,a0
-       mov     a0,sp
-       mov     (REG_EPSW,fp),d1
-       and     ~EPSW_nSL,d1
-       mov     d1,(REG_EPSW,fp)
-       movm    (sp),[d2,d3,a2,a3,exreg0,exreg1,exother]
-       mov     (sp),a1
-       mov     a1,usp
-       movm    (sp),[other]
-       add     4,sp
-here:  jmp     0x8e000008-here+0x8e000008
-
 ###############################################################################
 #
 # Double Fault handler entry point
@@ -276,6 +243,10 @@ double_fault_loop:
 ENTRY(raw_bus_error)
        add     -4,sp
        mov     d0,(sp)
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+       mov     (MMUCTR),d0
+       mov     d0,(MMUCTR)
+#endif
        mov     (BCBERR),d0             # what
        btst    BCBERR_BEMR_DMA,d0      # see if it was an external bus error
        beq     __common_exception_aux  # it wasn't
@@ -302,11 +273,88 @@ ENTRY(nmi_handler)
        add     -4,sp
        mov     d0,(sp)
        mov     (TBR),d0
+
+#ifdef CONFIG_SMP
+       add     -4,sp
+       mov     d0,(sp)                 # save d0(TBR)
+       movhu   (NMIAGR),d0
+       and     NMIAGR_GN,d0
+       lsr     0x2,d0
+       cmp     CALL_FUNCTION_NMI_IPI,d0
+       bne     5f                      # if not call function, jump
+
+       # function call nmi ipi
+       add     4,sp                    # no need to store TBR
+       mov     GxICR_DETECT,d0         # clear NMI request
+       movbu   d0,(GxICR(CALL_FUNCTION_NMI_IPI))
+       movhu   (GxICR(CALL_FUNCTION_NMI_IPI)),d0
+       and     ~EPSW_NMID,epsw         # enable NMI
+
+       mov     (sp),d0                 # restore d0
+       SAVE_ALL
+       call    smp_nmi_call_function_interrupt[],0
+       RESTORE_ALL
+
+5:
+#ifdef CONFIG_GDBSTUB
+       cmp     GDB_NMI_IPI,d0
+       bne     3f                      # if not gdb nmi ipi, jump
+
+       # gdb nmi ipi
+       add     4,sp                    # no need to store TBR
+       mov     GxICR_DETECT,d0         # clear NMI
+       movbu   d0,(GxICR(GDB_NMI_IPI))
+       movhu   (GxICR(GDB_NMI_IPI)),d0
+       and     ~EPSW_NMID,epsw         # enable NMI
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+       mov     (gdbstub_nmi_opr_type),d0
+       cmp     GDBSTUB_NMI_CACHE_PURGE,d0
+       bne     4f                      # if not gdb cache purge, jump
+
+       # gdb cache purge nmi ipi
+       add     -20,sp
+       mov     d1,(4,sp)
+       mov     a0,(8,sp)
+       mov     a1,(12,sp)
+       mov     mdr,d0
+       mov     d0,(16,sp)
+       call    gdbstub_local_purge_cache[],0
+       mov     0x1,d0
+       mov     (CPUID),d1
+       asl     d1,d0
+       mov     gdbstub_nmi_cpumask,a0
+       bclr    d0,(a0)
+       mov     (4,sp),d1
+       mov     (8,sp),a0
+       mov     (12,sp),a1
+       mov     (16,sp),d0
+       mov     d0,mdr
+       add     20,sp
+       mov     (sp),d0
+       add     4,sp
+       rti
+4:
+#endif /* CONFIG_MN10300_CACHE_ENABLED */
+       # gdb wait nmi ipi
+       mov     (sp),d0
+       SAVE_ALL
+       call    gdbstub_nmi_wait[],0
+       RESTORE_ALL
+3:
+#endif /* CONFIG_GDBSTUB */
+       mov     (sp),d0                 # restore TBR to d0
+       add     4,sp
+#endif /* CONFIG_SMP */
+
        bra     __common_exception_nonmi
 
 ENTRY(__common_exception)
        add     -4,sp
        mov     d0,(sp)
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+       mov     (MMUCTR),d0
+       mov     d0,(MMUCTR)
+#endif
 
 __common_exception_aux:
        mov     (TBR),d0
@@ -331,15 +379,21 @@ __common_exception_nonmi:
        mov     d0,(REG_ORIG_D0,fp)
 
 #ifdef CONFIG_GDBSTUB
+#ifdef CONFIG_SMP
+       call    gdbstub_busy_check[],0
+       and     d0,d0                   # check return value
+       beq     2f
+#else  /* CONFIG_SMP */
        btst    0x01,(gdbstub_busy)
        beq     2f
+#endif /* CONFIG_SMP */
        and     ~EPSW_IE,epsw
        mov     fp,d0
        mov     a2,d1
        call    gdbstub_exception[],0   # gdbstub itself caused an exception
        bra     restore_all
 2:
-#endif
+#endif /* CONFIG_GDBSTUB */
 
        mov     fp,d0                   # arg 0: stacked register file
        mov     a2,d1                   # arg 1: exception number
@@ -374,11 +428,7 @@ ENTRY(set_excp_vector)
        add     exception_table,d0
        mov     d1,(d0)
        mov     4,d1
-#if defined(CONFIG_MN10300_CACHE_WBACK)
-       jmp     mn10300_dcache_flush_inv_range2
-#else
        ret     [],0
-#endif
 
 ###############################################################################
 #
index 96cfd47e68d50354a4d279845b4f7f7cea40703a..78df25cfae2936f8071ab5d7f312dc363d58c2d6 100644 (file)
@@ -8,25 +8,14 @@
  * as published by the Free Software Foundation; either version
  * 2 of the Licence, or (at your option) any later version.
  */
+#include <linux/linkage.h>
 #include <asm/cpu-regs.h>
+#include <asm/smp.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+#include <asm/frame.inc>
 
-###############################################################################
-#
-# void fpu_init_state(void)
-# - initialise the FPU
-#
-###############################################################################
-       .globl  fpu_init_state
-       .type   fpu_init_state,@function
-fpu_init_state:
-       mov     epsw,d0
-       or      EPSW_FE,epsw
-
-#ifdef CONFIG_MN10300_PROC_MN103E010
-       nop
-       nop
-       nop
-#endif
+.macro FPU_INIT_STATE_ALL
        fmov    0,fs0
        fmov    fs0,fs1
        fmov    fs0,fs2
@@ -60,7 +49,100 @@ fpu_init_state:
        fmov    fs0,fs30
        fmov    fs0,fs31
        fmov    FPCR_INIT,fpcr
+.endm
+
+.macro FPU_SAVE_ALL areg,dreg
+       fmov    fs0,(\areg+)
+       fmov    fs1,(\areg+)
+       fmov    fs2,(\areg+)
+       fmov    fs3,(\areg+)
+       fmov    fs4,(\areg+)
+       fmov    fs5,(\areg+)
+       fmov    fs6,(\areg+)
+       fmov    fs7,(\areg+)
+       fmov    fs8,(\areg+)
+       fmov    fs9,(\areg+)
+       fmov    fs10,(\areg+)
+       fmov    fs11,(\areg+)
+       fmov    fs12,(\areg+)
+       fmov    fs13,(\areg+)
+       fmov    fs14,(\areg+)
+       fmov    fs15,(\areg+)
+       fmov    fs16,(\areg+)
+       fmov    fs17,(\areg+)
+       fmov    fs18,(\areg+)
+       fmov    fs19,(\areg+)
+       fmov    fs20,(\areg+)
+       fmov    fs21,(\areg+)
+       fmov    fs22,(\areg+)
+       fmov    fs23,(\areg+)
+       fmov    fs24,(\areg+)
+       fmov    fs25,(\areg+)
+       fmov    fs26,(\areg+)
+       fmov    fs27,(\areg+)
+       fmov    fs28,(\areg+)
+       fmov    fs29,(\areg+)
+       fmov    fs30,(\areg+)
+       fmov    fs31,(\areg+)
+       fmov    fpcr,\dreg
+       mov     \dreg,(\areg)
+.endm
+
+.macro FPU_RESTORE_ALL areg,dreg
+       fmov    (\areg+),fs0
+       fmov    (\areg+),fs1
+       fmov    (\areg+),fs2
+       fmov    (\areg+),fs3
+       fmov    (\areg+),fs4
+       fmov    (\areg+),fs5
+       fmov    (\areg+),fs6
+       fmov    (\areg+),fs7
+       fmov    (\areg+),fs8
+       fmov    (\areg+),fs9
+       fmov    (\areg+),fs10
+       fmov    (\areg+),fs11
+       fmov    (\areg+),fs12
+       fmov    (\areg+),fs13
+       fmov    (\areg+),fs14
+       fmov    (\areg+),fs15
+       fmov    (\areg+),fs16
+       fmov    (\areg+),fs17
+       fmov    (\areg+),fs18
+       fmov    (\areg+),fs19
+       fmov    (\areg+),fs20
+       fmov    (\areg+),fs21
+       fmov    (\areg+),fs22
+       fmov    (\areg+),fs23
+       fmov    (\areg+),fs24
+       fmov    (\areg+),fs25
+       fmov    (\areg+),fs26
+       fmov    (\areg+),fs27
+       fmov    (\areg+),fs28
+       fmov    (\areg+),fs29
+       fmov    (\areg+),fs30
+       fmov    (\areg+),fs31
+       mov     (\areg),\dreg
+       fmov    \dreg,fpcr
+.endm
 
+###############################################################################
+#
+# void fpu_init_state(void)
+# - initialise the FPU
+#
+###############################################################################
+       .globl  fpu_init_state
+       .type   fpu_init_state,@function
+fpu_init_state:
+       mov     epsw,d0
+       or      EPSW_FE,epsw
+
+#ifdef CONFIG_MN10300_PROC_MN103E010
+       nop
+       nop
+       nop
+#endif
+       FPU_INIT_STATE_ALL
 #ifdef CONFIG_MN10300_PROC_MN103E010
        nop
        nop
@@ -89,40 +171,7 @@ fpu_save:
        nop
 #endif
        mov     d0,a0
-       fmov    fs0,(a0+)
-       fmov    fs1,(a0+)
-       fmov    fs2,(a0+)
-       fmov    fs3,(a0+)
-       fmov    fs4,(a0+)
-       fmov    fs5,(a0+)
-       fmov    fs6,(a0+)
-       fmov    fs7,(a0+)
-       fmov    fs8,(a0+)
-       fmov    fs9,(a0+)
-       fmov    fs10,(a0+)
-       fmov    fs11,(a0+)
-       fmov    fs12,(a0+)
-       fmov    fs13,(a0+)
-       fmov    fs14,(a0+)
-       fmov    fs15,(a0+)
-       fmov    fs16,(a0+)
-       fmov    fs17,(a0+)
-       fmov    fs18,(a0+)
-       fmov    fs19,(a0+)
-       fmov    fs20,(a0+)
-       fmov    fs21,(a0+)
-       fmov    fs22,(a0+)
-       fmov    fs23,(a0+)
-       fmov    fs24,(a0+)
-       fmov    fs25,(a0+)
-       fmov    fs26,(a0+)
-       fmov    fs27,(a0+)
-       fmov    fs28,(a0+)
-       fmov    fs29,(a0+)
-       fmov    fs30,(a0+)
-       fmov    fs31,(a0+)
-       fmov    fpcr,d0
-       mov     d0,(a0)
+       FPU_SAVE_ALL    a0,d0
 #ifdef CONFIG_MN10300_PROC_MN103E010
        nop
        nop
@@ -135,63 +184,75 @@ fpu_save:
 
 ###############################################################################
 #
-# void fpu_restore(struct fpu_state_struct *)
-# - restore the fpu state
-# - note that an FPU Operational exception might occur during this process
+# void fpu_disabled(void)
+# - handle an exception due to the FPU being disabled
+#   when CONFIG_FPU is enabled
 #
 ###############################################################################
-       .globl  fpu_restore
-       .type   fpu_restore,@function
-fpu_restore:
-       mov     epsw,d1
-       or      EPSW_FE,epsw            /* enable the FPU so we can access it */
-
-#ifdef CONFIG_MN10300_PROC_MN103E010
+       .type   fpu_disabled,@function
+       .globl  fpu_disabled
+fpu_disabled:
+       or      EPSW_nAR|EPSW_FE,epsw
        nop
        nop
-#endif
-       mov     d0,a0
-       fmov    (a0+),fs0
-       fmov    (a0+),fs1
-       fmov    (a0+),fs2
-       fmov    (a0+),fs3
-       fmov    (a0+),fs4
-       fmov    (a0+),fs5
-       fmov    (a0+),fs6
-       fmov    (a0+),fs7
-       fmov    (a0+),fs8
-       fmov    (a0+),fs9
-       fmov    (a0+),fs10
-       fmov    (a0+),fs11
-       fmov    (a0+),fs12
-       fmov    (a0+),fs13
-       fmov    (a0+),fs14
-       fmov    (a0+),fs15
-       fmov    (a0+),fs16
-       fmov    (a0+),fs17
-       fmov    (a0+),fs18
-       fmov    (a0+),fs19
-       fmov    (a0+),fs20
-       fmov    (a0+),fs21
-       fmov    (a0+),fs22
-       fmov    (a0+),fs23
-       fmov    (a0+),fs24
-       fmov    (a0+),fs25
-       fmov    (a0+),fs26
-       fmov    (a0+),fs27
-       fmov    (a0+),fs28
-       fmov    (a0+),fs29
-       fmov    (a0+),fs30
-       fmov    (a0+),fs31
-       mov     (a0),d0
-       fmov    d0,fpcr
-#ifdef CONFIG_MN10300_PROC_MN103E010
        nop
+
+       mov     sp,a1
+       mov     (a1),d1                 /* get epsw of user context */
+       and     ~(THREAD_SIZE-1),a1     /* a1: (thread_info *ti) */
+       mov     (TI_task,a1),a2         /* a2: (task_struct *tsk) */
+       btst    EPSW_nSL,d1
+       beq     fpu_used_in_kernel
+
+       or      EPSW_FE,d1
+       mov     d1,(sp)
+       mov     (TASK_THREAD+THREAD_FPU_FLAGS,a2),d1
+#ifndef CONFIG_LAZY_SAVE_FPU
+       or      __THREAD_HAS_FPU,d1
+       mov     d1,(TASK_THREAD+THREAD_FPU_FLAGS,a2)
+#else  /* !CONFIG_LAZY_SAVE_FPU */
+       mov     (fpu_state_owner),a0
+       cmp     0,a0
+       beq     fpu_regs_save_end
+
+       mov     (TASK_THREAD+THREAD_UREGS,a0),a1
+       add     TASK_THREAD+THREAD_FPU_STATE,a0
+       FPU_SAVE_ALL a0,d0
+
+       mov     (REG_EPSW,a1),d0
+       and     ~EPSW_FE,d0
+       mov     d0,(REG_EPSW,a1)
+
+fpu_regs_save_end:
+       mov     a2,(fpu_state_owner)
+#endif /* !CONFIG_LAZY_SAVE_FPU */
+
+       btst    __THREAD_USING_FPU,d1
+       beq     fpu_regs_init
+       add     TASK_THREAD+THREAD_FPU_STATE,a2
+       FPU_RESTORE_ALL a2,d0
+       rti
+
+fpu_regs_init:
+       FPU_INIT_STATE_ALL
+       add     TASK_THREAD+THREAD_FPU_FLAGS,a2
+       bset    __THREAD_USING_FPU,(0,a2)
+       rti
+
+fpu_used_in_kernel:
+       and     ~(EPSW_nAR|EPSW_FE),epsw
        nop
        nop
-#endif
 
-       mov     d1,epsw
-       ret     [],0
+       add     -4,sp
+       SAVE_ALL
+       mov     -1,d0
+       mov     d0,(REG_ORIG_D0,fp)
+
+       and     ~EPSW_NMID,epsw
+
+       mov     fp,d0
+       call    fpu_disabled_in_kernel[],0
+       jmp     ret_from_exception
 
-       .size   fpu_restore,.-fpu_restore
+       .size   fpu_disabled,.-fpu_disabled
diff --git a/arch/mn10300/kernel/fpu-nofpu-low.S b/arch/mn10300/kernel/fpu-nofpu-low.S
new file mode 100644 (file)
index 0000000..7ea087a
--- /dev/null
@@ -0,0 +1,39 @@
+/* MN10300 Low level FPU management operations
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/linkage.h>
+#include <asm/cpu-regs.h>
+#include <asm/smp.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+#include <asm/frame.inc>
+
+###############################################################################
+#
+# void fpu_disabled(void)
+# - handle an exception due to the FPU being disabled
+#   when CONFIG_FPU is disabled
+#
+###############################################################################
+       .type   fpu_disabled,@function
+       .globl  fpu_disabled
+fpu_disabled:
+       add     -4,sp
+       SAVE_ALL
+       mov     -1,d0
+       mov     d0,(REG_ORIG_D0,fp)
+
+       and     ~EPSW_NMID,epsw
+
+       mov     fp,d0
+       call    unexpected_fpu_exception[],0
+       jmp     ret_from_exception
+
+       .size   fpu_disabled,.-fpu_disabled
diff --git a/arch/mn10300/kernel/fpu-nofpu.c b/arch/mn10300/kernel/fpu-nofpu.c
new file mode 100644 (file)
index 0000000..31c765b
--- /dev/null
@@ -0,0 +1,30 @@
+/* MN10300 FPU management
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <asm/fpu.h>
+
+/*
+ * handle an FPU operational exception
+ * - there's a possibility that if the FPU is asynchronous, the signal might
+ *   be meant for a process other than the current one
+ */
+asmlinkage
+void unexpected_fpu_exception(struct pt_regs *regs, enum exception_code code)
+{
+       panic("An FPU exception was received, but there's no FPU enabled.");
+}
+
+/*
+ * fill in the FPU structure for a core dump
+ */
+int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpreg)
+{
+       return 0; /* not valid */
+}
index e705f25ad5ff7e5e44b8da939d989b778ca77f6e..5f9c3fa19a85fb4459987789229387352cf1a809 100644 (file)
 #include <asm/fpu.h>
 #include <asm/elf.h>
 #include <asm/exceptions.h>
+#include <asm/system.h>
 
+#ifdef CONFIG_LAZY_SAVE_FPU
 struct task_struct *fpu_state_owner;
+#endif
 
 /*
- * handle an exception due to the FPU being disabled
+ * error functions in FPU disabled exception
  */
-asmlinkage void fpu_disabled(struct pt_regs *regs, enum exception_code code)
+asmlinkage void fpu_disabled_in_kernel(struct pt_regs *regs)
 {
-       struct task_struct *tsk = current;
-
-       if (!user_mode(regs))
-               die_if_no_fixup("An FPU Disabled exception happened in"
-                               " kernel space\n",
-                               regs, code);
-
-#ifdef CONFIG_FPU
-       preempt_disable();
-
-       /* transfer the last process's FPU state to memory */
-       if (fpu_state_owner) {
-               fpu_save(&fpu_state_owner->thread.fpu_state);
-               fpu_state_owner->thread.uregs->epsw &= ~EPSW_FE;
-       }
-
-       /* the current process now owns the FPU state */
-       fpu_state_owner = tsk;
-       regs->epsw |= EPSW_FE;
-
-       /* load the FPU with the current process's FPU state or invent a new
-        * clean one if the process doesn't have one */
-       if (is_using_fpu(tsk)) {
-               fpu_restore(&tsk->thread.fpu_state);
-       } else {
-               fpu_init_state();
-               set_using_fpu(tsk);
-       }
-
-       preempt_enable();
-#else
-       {
-               siginfo_t info;
-
-               info.si_signo = SIGFPE;
-               info.si_errno = 0;
-               info.si_addr = (void *) tsk->thread.uregs->pc;
-               info.si_code = FPE_FLTINV;
-
-               force_sig_info(SIGFPE, &info, tsk);
-       }
-#endif  /* CONFIG_FPU */
+       die_if_no_fixup("An FPU Disabled exception happened in kernel space\n",
+                       regs, EXCEP_FPU_DISABLED);
 }
 
 /*
@@ -71,15 +34,16 @@ asmlinkage void fpu_disabled(struct pt_regs *regs, enum exception_code code)
  */
 asmlinkage void fpu_exception(struct pt_regs *regs, enum exception_code code)
 {
-       struct task_struct *tsk = fpu_state_owner;
+       struct task_struct *tsk = current;
        siginfo_t info;
+       u32 fpcr;
 
        if (!user_mode(regs))
                die_if_no_fixup("An FPU Operation exception happened in"
                                " kernel space\n",
                                regs, code);
 
-       if (!tsk)
+       if (!is_using_fpu(tsk))
                die_if_no_fixup("An FPU Operation exception happened,"
                                " but the FPU is not in use",
                                regs, code);
@@ -89,48 +53,45 @@ asmlinkage void fpu_exception(struct pt_regs *regs, enum exception_code code)
        info.si_addr = (void *) tsk->thread.uregs->pc;
        info.si_code = FPE_FLTINV;
 
-#ifdef CONFIG_FPU
-       {
-               u32 fpcr;
+       unlazy_fpu(tsk);
 
-               /* get FPCR (we need to enable the FPU whilst we do this) */
-               asm volatile("  or      %1,epsw         \n"
-#ifdef CONFIG_MN10300_PROC_MN103E010
-                            "  nop                     \n"
-                            "  nop                     \n"
-                            "  nop                     \n"
-#endif
-                            "  fmov    fpcr,%0         \n"
-#ifdef CONFIG_MN10300_PROC_MN103E010
-                            "  nop                     \n"
-                            "  nop                     \n"
-                            "  nop                     \n"
-#endif
-                            "  and     %2,epsw         \n"
-                            : "=&d"(fpcr)
-                            : "i"(EPSW_FE), "i"(~EPSW_FE)
-                            );
-
-               if (fpcr & FPCR_EC_Z)
-                       info.si_code = FPE_FLTDIV;
-               else if (fpcr & FPCR_EC_O)
-                       info.si_code = FPE_FLTOVF;
-               else if (fpcr & FPCR_EC_U)
-                       info.si_code = FPE_FLTUND;
-               else if (fpcr & FPCR_EC_I)
-                       info.si_code = FPE_FLTRES;
-       }
-#endif
+       fpcr = tsk->thread.fpu_state.fpcr;
+
+       if (fpcr & FPCR_EC_Z)
+               info.si_code = FPE_FLTDIV;
+       else if (fpcr & FPCR_EC_O)
+               info.si_code = FPE_FLTOVF;
+       else if (fpcr & FPCR_EC_U)
+               info.si_code = FPE_FLTUND;
+       else if (fpcr & FPCR_EC_I)
+               info.si_code = FPE_FLTRES;
 
        force_sig_info(SIGFPE, &info, tsk);
 }
 
+/*
+ * handle an FPU invalid_op exception
+ * - Derived from DO_EINFO() macro in arch/mn10300/kernel/traps.c
+ */
+asmlinkage void fpu_invalid_op(struct pt_regs *regs, enum exception_code code)
+{
+       siginfo_t info;
+
+       if (!user_mode(regs))
+               die_if_no_fixup("FPU invalid opcode", regs, code);
+
+       info.si_signo = SIGILL;
+       info.si_errno = 0;
+       info.si_code = ILL_COPROC;
+       info.si_addr = (void *) regs->pc;
+       force_sig_info(info.si_signo, &info, current);
+}
+
 /*
  * save the FPU state to a signal context
  */
 int fpu_setup_sigcontext(struct fpucontext *fpucontext)
 {
-#ifdef CONFIG_FPU
        struct task_struct *tsk = current;
 
        if (!is_using_fpu(tsk))
@@ -142,11 +103,19 @@ int fpu_setup_sigcontext(struct fpucontext *fpucontext)
         */
        preempt_disable();
 
+#ifndef CONFIG_LAZY_SAVE_FPU
+       if (tsk->thread.fpu_flags & THREAD_HAS_FPU) {
+               fpu_save(&tsk->thread.fpu_state);
+               tsk->thread.uregs->epsw &= ~EPSW_FE;
+               tsk->thread.fpu_flags &= ~THREAD_HAS_FPU;
+       }
+#else /* !CONFIG_LAZY_SAVE_FPU */
        if (fpu_state_owner == tsk) {
                fpu_save(&tsk->thread.fpu_state);
                fpu_state_owner->thread.uregs->epsw &= ~EPSW_FE;
                fpu_state_owner = NULL;
        }
+#endif /* !CONFIG_LAZY_SAVE_FPU */
 
        preempt_enable();
 
@@ -161,9 +130,6 @@ int fpu_setup_sigcontext(struct fpucontext *fpucontext)
                return -1;
 
        return 1;
-#else
-       return 0;
-#endif
 }
 
 /*
@@ -171,17 +137,23 @@ int fpu_setup_sigcontext(struct fpucontext *fpucontext)
  */
 void fpu_kill_state(struct task_struct *tsk)
 {
-#ifdef CONFIG_FPU
        /* disown anything left in the FPU */
        preempt_disable();
 
+#ifndef CONFIG_LAZY_SAVE_FPU
+       if (tsk->thread.fpu_flags & THREAD_HAS_FPU) {
+               tsk->thread.uregs->epsw &= ~EPSW_FE;
+               tsk->thread.fpu_flags &= ~THREAD_HAS_FPU;
+       }
+#else /* !CONFIG_LAZY_SAVE_FPU */
        if (fpu_state_owner == tsk) {
                fpu_state_owner->thread.uregs->epsw &= ~EPSW_FE;
                fpu_state_owner = NULL;
        }
+#endif /* !CONFIG_LAZY_SAVE_FPU */
 
        preempt_enable();
-#endif
+
        /* we no longer have a valid current FPU state */
        clear_using_fpu(tsk);
 }
@@ -195,8 +167,7 @@ int fpu_restore_sigcontext(struct fpucontext *fpucontext)
        int ret;
 
        /* load up the old FPU state */
-       ret = copy_from_user(&tsk->thread.fpu_state,
-                            fpucontext,
+       ret = copy_from_user(&tsk->thread.fpu_state, fpucontext,
                             min(sizeof(struct fpu_state_struct),
                                 sizeof(struct fpucontext)));
        if (!ret)
index 4998b24f5d3a4e2081e3983b3b0e43cdf86bc670..b1d0152e96cb95741b0e31fd2ebf0782bbb0283e 100644 (file)
@@ -18,6 +18,7 @@
 #include <asm/thread_info.h>
 #include <asm/frame.inc>
 #include <asm/intctl-regs.h>
+#include <asm/irqflags.h>
 #include <unit/serial.h>
 
        .text
@@ -69,7 +70,7 @@ gdbstub_io_rx_overflow:
        bra     gdbstub_io_rx_done
 
 gdbstub_io_rx_enter:
-       or      EPSW_IE|EPSW_IM_1,epsw
+       LOCAL_CHANGE_INTR_MASK_LEVEL(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL+1))
        add     -4,sp
        SAVE_ALL
 
@@ -80,7 +81,7 @@ gdbstub_io_rx_enter:
        mov     fp,d0
        call    gdbstub_rx_irq[],0      # gdbstub_rx_irq(regs,excep)
 
-       and     ~EPSW_IE,epsw
+       LOCAL_CLI
        bclr    0x01,(gdbstub_busy)
 
        .globl gdbstub_return
index ae663dc717e94ab807fbeb3169955e8fbaebd04b..0d5d63c91dc3ee74457d183a11d59a149022d0c5 100644 (file)
@@ -23,6 +23,7 @@
 #include <asm/exceptions.h>
 #include <asm/serial-regs.h>
 #include <unit/serial.h>
+#include <asm/smp.h>
 
 /*
  * initialise the GDB stub
@@ -45,22 +46,34 @@ void gdbstub_io_init(void)
        XIRQxICR(GDBPORT_SERIAL_IRQ) = 0;
        tmp = XIRQxICR(GDBPORT_SERIAL_IRQ);
 
+#if   CONFIG_GDBSTUB_IRQ_LEVEL == 0
        IVAR0 = EXCEP_IRQ_LEVEL0;
-       set_intr_stub(EXCEP_IRQ_LEVEL0, gdbstub_io_rx_handler);
+#elif CONFIG_GDBSTUB_IRQ_LEVEL == 1
+       IVAR1 = EXCEP_IRQ_LEVEL1;
+#elif CONFIG_GDBSTUB_IRQ_LEVEL == 2
+       IVAR2 = EXCEP_IRQ_LEVEL2;
+#elif CONFIG_GDBSTUB_IRQ_LEVEL == 3
+       IVAR3 = EXCEP_IRQ_LEVEL3;
+#elif CONFIG_GDBSTUB_IRQ_LEVEL == 4
+       IVAR4 = EXCEP_IRQ_LEVEL4;
+#elif CONFIG_GDBSTUB_IRQ_LEVEL == 5
+       IVAR5 = EXCEP_IRQ_LEVEL5;
+#else
+#error "Unknown irq level for gdbstub."
+#endif
+
+       set_intr_stub(NUM2EXCEP_IRQ_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL),
+               gdbstub_io_rx_handler);
 
        XIRQxICR(GDBPORT_SERIAL_IRQ) &= ~GxICR_REQUEST;
-       XIRQxICR(GDBPORT_SERIAL_IRQ) = GxICR_ENABLE | GxICR_LEVEL_0;
+       XIRQxICR(GDBPORT_SERIAL_IRQ) =
+               GxICR_ENABLE | NUM2GxICR_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL);
        tmp = XIRQxICR(GDBPORT_SERIAL_IRQ);
 
        GDBPORT_SERIAL_IER = UART_IER_RDI | UART_IER_RLSI;
 
-       /* permit level 0 IRQs to take place */
+       /* permit IRQs up to the configured gdbstub level to take place */
-       asm volatile(
-               "       and %0,epsw     \n"
-               "       or %1,epsw      \n"
-               :
-               : "i"(~EPSW_IM), "i"(EPSW_IE | EPSW_IM_1)
-               );
+       local_change_intr_mask_level(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1));
 }
 
 /*
@@ -87,6 +100,9 @@ int gdbstub_io_rx_char(unsigned char *_ch, int nonblock)
 {
        unsigned ix;
        u8 ch, st;
+#if defined(CONFIG_MN10300_WD_TIMER)
+       int cpu;
+#endif
 
        *_ch = 0xff;
 
@@ -104,8 +120,9 @@ int gdbstub_io_rx_char(unsigned char *_ch, int nonblock)
                if (nonblock)
                        return -EAGAIN;
 #ifdef CONFIG_MN10300_WD_TIMER
-               watchdog_alert_counter = 0;
-#endif /* CONFIG_MN10300_WD_TIMER */
+               for (cpu = 0; cpu < NR_CPUS; cpu++)
+                       watchdog_alert_counter[cpu] = 0;
+#endif
                goto try_again;
        }
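Throughout these gdbstub changes, NUM2GxICR_LEVEL(), NUM2EPSW_IM() and NUM2EXCEP_IRQ_LEVEL() turn the integer CONFIG_GDBSTUB_IRQ_LEVEL into the matching register constant at compile time. The usual way to build such helpers is two-level token pasting, sketched here on the assumption that per-level constants such as GxICR_LEVEL_0..5 already exist (the real definitions live in the interrupt-controller headers):

	/* sketch: two-level expansion, so that a macro argument such as
	 * CONFIG_GDBSTUB_IRQ_LEVEL is expanded before it is pasted */
	#define __NUM2GxICR_LEVEL(n)		GxICR_LEVEL_##n
	#define NUM2GxICR_LEVEL(n)		__NUM2GxICR_LEVEL(n)

	#define __NUM2EPSW_IM(n)		EPSW_IM_##n
	#define NUM2EPSW_IM(n)			__NUM2EPSW_IM(n)

	#define __NUM2EXCEP_IRQ_LEVEL(n)	EXCEP_IRQ_LEVEL##n
	#define NUM2EXCEP_IRQ_LEVEL(n)		__NUM2EXCEP_IRQ_LEVEL(n)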
 
index a560bbc3137d3c93fd2a14eddbb79ed5b8abc23f..97dfda23342c8d9fe8aef19d8c9d4dfaeb170a7d 100644 (file)
@@ -58,9 +58,12 @@ void __init gdbstub_io_init(void)
        gdbstub_io_set_baud(115200);
 
        /* we want to get serial receive interrupts */
-       set_intr_level(gdbstub_port->rx_irq, GxICR_LEVEL_0);
-       set_intr_level(gdbstub_port->tx_irq, GxICR_LEVEL_0);
-       set_intr_stub(EXCEP_IRQ_LEVEL0, gdbstub_io_rx_handler);
+       set_intr_level(gdbstub_port->rx_irq,
+               NUM2GxICR_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL));
+       set_intr_level(gdbstub_port->tx_irq,
+               NUM2GxICR_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL));
+       set_intr_stub(NUM2EXCEP_IRQ_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL),
+               gdbstub_io_rx_handler);
 
        *gdbstub_port->rx_icr |= GxICR_ENABLE;
        tmp = *gdbstub_port->rx_icr;
@@ -84,12 +87,7 @@ void __init gdbstub_io_init(void)
        tmp = *gdbstub_port->_control;
 
        /* permit IRQs up to the GDB stub's priority level only */
-       asm volatile(
-               "       and %0,epsw     \n"
-               "       or %1,epsw      \n"
-               :
-               : "i"(~EPSW_IM), "i"(EPSW_IE|EPSW_IM_1)
-               );
+       local_change_intr_mask_level(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1));
 }
 
 /*
@@ -184,6 +182,9 @@ int gdbstub_io_rx_char(unsigned char *_ch, int nonblock)
 {
        unsigned ix;
        u8 ch, st;
+#if defined(CONFIG_MN10300_WD_TIMER)
+       int cpu;
+#endif
 
        *_ch = 0xff;
 
@@ -201,8 +202,9 @@ try_again:
                if (nonblock)
                        return -EAGAIN;
 #ifdef CONFIG_MN10300_WD_TIMER
-               watchdog_alert_counter = 0;
-#endif /* CONFIG_MN10300_WD_TIMER */
+               for (cpu = 0; cpu < NR_CPUS; cpu++)
+                       watchdog_alert_counter[cpu] = 0;
+#endif
                goto try_again;
        }
 
index 41b11706c8ed1ee70f0742bec2d9ac301187aea1..a5fc3f05309b2fc26960a36fe68ef1339683c4fd 100644 (file)
@@ -440,15 +440,11 @@ static const unsigned char gdbstub_insn_sizes[256] =
 
 static int __gdbstub_mark_bp(u8 *addr, int ix)
 {
-       if (addr < (u8 *) 0x70000000UL)
-               return 0;
-       /* 70000000-7fffffff: vmalloc area */
-       if (addr < (u8 *) 0x80000000UL)
+       /* vmalloc area */
+       if (((u8 *) VMALLOC_START <= addr) && (addr < (u8 *) VMALLOC_END))
                goto okay;
-       if (addr < (u8 *) 0x8c000000UL)
-               return 0;
-       /* 8c000000-93ffffff: SRAM, SDRAM */
-       if (addr < (u8 *) 0x94000000UL)
+       /* SRAM, SDRAM */
+       if (((u8 *) 0x80000000UL <= addr) && (addr < (u8 *) 0xa0000000UL))
                goto okay;
        return 0;
 
@@ -1197,9 +1193,8 @@ static int gdbstub(struct pt_regs *regs, enum exception_code excep)
        mn10300_set_gdbleds(1);
 
        asm volatile("mov mdr,%0" : "=d"(mdr));
-       asm volatile("mov epsw,%0" : "=d"(epsw));
-       asm volatile("mov %0,epsw"
-                    :: "d"((epsw & ~EPSW_IM) | EPSW_IE | EPSW_IM_1));
+       local_save_flags(epsw);
+       local_change_intr_mask_level(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1));
 
        gdbstub_store_fpu();
 
index 14f27f3bfaf4f9007b86825cfb4ff5416acb196c..73e00fc78072d69623b5baf2e06f45ac4aa29a0b 100644 (file)
 #include <asm/frame.inc>
 #include <asm/param.h>
 #include <unit/serial.h>
+#ifdef CONFIG_SMP
+#include <asm/smp.h>
+#include <asm/intctl-regs.h>
+#include <asm/cpu-regs.h>
+#include <proc/smp-regs.h>
+#endif /* CONFIG_SMP */
 
        __HEAD
 
        .globl  _start
        .type   _start,@function
 _start:
+#ifdef CONFIG_SMP
+       #
+       # If this is a secondary CPU (AP), then deal with that elsewhere
+       #
+       mov     (CPUID),d3
+       and     CPUID_MASK,d3
+       bne     startup_secondary
+
+       #
+       # We're dealing with the primary CPU (BP) here, then.
+       # Keep BP's D0,D1,D2 register for boot check.
+       #
+
+       # Set up the Boot IPI for each secondary CPU
+       mov     0x1,a0
+loop_set_secondary_icr:
+       mov     a0,a1
+       asl     CROSS_ICR_CPU_SHIFT,a1
+       add     CROSS_GxICR(SMP_BOOT_IRQ,0),a1
+       movhu   (a1),d3
+       or      GxICR_ENABLE|GxICR_LEVEL_0,d3
+       movhu   d3,(a1)
+       movhu   (a1),d3                         # flush
+       inc     a0
+       cmp     NR_CPUS,a0
+       bne     loop_set_secondary_icr
+#endif /* CONFIG_SMP */
+
        # save commandline pointer
        mov     d0,a3
 
        # preload the PGD pointer register
        mov     swapper_pg_dir,d0
        mov     d0,(PTBR)
+       clr     d0
+       movbu   d0,(PIDR)
 
        # turn on the TLBs
        mov     MMUCTR_IIV|MMUCTR_DIV,d0
        mov     d0,(MMUCTR)
+#ifdef CONFIG_AM34_2
+       mov     MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE|MMUCTR_WTE,d0
+#else
        mov     MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE,d0
+#endif
        mov     d0,(MMUCTR)
 
        # turn on AM33v2 exception handling mode and set the trap table base
@@ -51,6 +91,11 @@ _start:
        mov     d0,(TBR)
 
        # invalidate and enable both of the caches
+#ifdef CONFIG_SMP
+       mov     ECHCTR,a0
+       clr     d0
+       mov     d0,(a0)
+#endif
        mov     CHCTR,a0
        clr     d0
        movhu   d0,(a0)                                 # turn off first
@@ -61,18 +106,18 @@ _start:
        btst    CHCTR_ICBUSY|CHCTR_DCBUSY,d0            # wait till not busy
        lne
 
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
 #ifdef CONFIG_MN10300_CACHE_WBACK
 #ifndef CONFIG_MN10300_CACHE_WBACK_NOWRALLOC
        mov     CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK,d0
 #else
        mov     CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK|CHCTR_DCALMD,d0
-#endif /* CACHE_DISABLED */
+#endif /* NOWRALLOC */
 #else
        mov     CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRTHROUGH,d0
 #endif /* WBACK */
        movhu   d0,(a0)                                 # enable
-#endif /* NOWRALLOC */
+#endif /* ENABLED */
 
        # turn on RTS on the debug serial port if applicable
 #ifdef CONFIG_MN10300_UNIT_ASB2305
@@ -206,6 +251,44 @@ __no_parameters:
        call    processor_init[],0
        call    unit_init[],0
 
+#ifdef CONFIG_SMP
+       # mark the primary CPU in cpu_boot_map
+       mov     cpu_boot_map,a0
+       mov     0x1,d0
+       mov     d0,(a0)
+
+       # signal each secondary CPU to begin booting
+       mov     0x1,d2                          # CPU ID
+
+loop_request_boot_secondary:
+       mov     d2,a0
+       # send SMP_BOOT_IPI to secondary CPU
+       asl     CROSS_ICR_CPU_SHIFT,a0
+       add     CROSS_GxICR(SMP_BOOT_IRQ,0),a0
+       movhu   (a0),d0
+       or      GxICR_REQUEST|GxICR_DETECT,d0
+       movhu   d0,(a0)
+       movhu   (a0),d0                         # flush
+
+       # wait up to 100ms for AP's IPI to be received
+       clr     d3
+wait_on_secondary_boot:
+       mov     DELAY_TIME_BOOT_IPI,d0
+       call    __delay[],0
+       inc     d3
+       mov     cpu_boot_map,a0
+       mov     (a0),d0
+       lsr     d2,d0
+       btst    0x1,d0
+       bne     1f
+       cmp     TIME_OUT_COUNT_BOOT_IPI,d3
+       bne     wait_on_secondary_boot
+1:
+       inc     d2
+       cmp     NR_CPUS,d2
+       bne     loop_request_boot_secondary
+#endif /* CONFIG_SMP */
+
 #ifdef CONFIG_GDBSTUB
        call    gdbstub_init[],0
 
@@ -217,7 +300,118 @@ __gdbstub_pause:
 #endif
 
        jmp     start_kernel
-       .size   _start, _start-.
+       .size   _start,.-_start
+
+###############################################################################
+#
+# Secondary CPU boot point
+#
+###############################################################################
+#ifdef CONFIG_SMP
+startup_secondary:
+       # preload the PGD pointer register
+       mov     swapper_pg_dir,d0
+       mov     d0,(PTBR)
+       clr     d0
+       movbu   d0,(PIDR)
+
+       # turn on the TLBs
+       mov     MMUCTR_IIV|MMUCTR_DIV,d0
+       mov     d0,(MMUCTR)
+#ifdef CONFIG_AM34_2
+       mov     MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE|MMUCTR_WTE,d0
+#else
+       mov     MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE,d0
+#endif
+       mov     d0,(MMUCTR)
+
+       # turn on AM33v2 exception handling mode and set the trap table base
+       movhu   (CPUP),d0
+       or      CPUP_EXM_AM33V2,d0
+       movhu   d0,(CPUP)
+
+       # set the interrupt vector table
+       mov     CONFIG_INTERRUPT_VECTOR_BASE,d0
+       mov     d0,(TBR)
+
+       # invalidate and enable both of the caches
+       mov     ECHCTR,a0
+       clr     d0
+       mov     d0,(a0)
+       mov     CHCTR,a0
+       clr     d0
+       movhu   d0,(a0)                                 # turn off first
+       mov     CHCTR_ICINV|CHCTR_DCINV,d0
+       movhu   d0,(a0)
+       setlb
+       mov     (a0),d0
+       btst    CHCTR_ICBUSY|CHCTR_DCBUSY,d0            # wait till not busy (use CPU loop buffer)
+       lne
+
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+#ifdef  CONFIG_MN10300_CACHE_WBACK
+#ifndef CONFIG_MN10300_CACHE_WBACK_NOWRALLOC
+       mov     CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK,d0
+#else
+       mov     CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK|CHCTR_DCALMD,d0
+#endif  /* !NOWRALLOC */
+#else
+       mov     CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRTHROUGH,d0
+#endif  /* WBACK */
+       movhu   d0,(a0)                                 # enable
+#endif  /* ENABLED */
+
+       # Clear the boot IPI interrupt for this CPU
+       movhu   (GxICR(SMP_BOOT_IRQ)),d0
+       and     ~GxICR_REQUEST,d0
+       movhu   d0,(GxICR(SMP_BOOT_IRQ))
+       movhu   (GxICR(SMP_BOOT_IRQ)),d0                # flush
+
+       /* get stack */
+       mov     CONFIG_INTERRUPT_VECTOR_BASE + CONFIG_BOOT_STACK_OFFSET,a0
+       mov     (CPUID),d0
+       and     CPUID_MASK,d0
+       mulu    CONFIG_BOOT_STACK_SIZE,d0
+       sub     d0,a0
+       mov     a0,sp
+
+       # init interrupt for AP
+       call    smp_prepare_cpu_init[],0
+
+       # mark this secondary CPU in cpu_boot_map
+       mov     (CPUID),d0
+       mov     0x1,d1
+       asl     d0,d1
+       mov     cpu_boot_map,a0
+       bset    d1,(a0)
+
+       or      EPSW_IE|EPSW_IM_1,epsw  # permit level 0 interrupts
+       nop
+       nop
+#ifdef  CONFIG_MN10300_CACHE_WBACK
+       # flush the local cache if it's in writeback mode
+       call    mn10300_local_dcache_flush_inv[],0
+       setlb
+       mov     (CHCTR),d0
+       btst    CHCTR_DCBUSY,d0         # wait till not busy (use CPU loop buffer)
+       lne
+#endif
+
+       # now sleep waiting for further instructions
+secondary_sleep:
+       mov     CPUM_SLEEP,d0
+       movhu   d0,(CPUM)
+       nop
+       nop
+       bra     secondary_sleep
+       .size   startup_secondary,.-startup_secondary
+#endif /* CONFIG_SMP */
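Read as C, the BP side of the boot handshake above does roughly the following (a rough model only; the assembly also preserves the boot registers and runs before any C environment exists):

	/* rough C model of the primary CPU's boot-IPI loop */
	int cpu, tries;
	u16 icr;

	for (cpu = 1; cpu < NR_CPUS; cpu++) {
		/* raise the boot IPI on the AP's interrupt controller */
		icr = CROSS_GxICR(SMP_BOOT_IRQ, cpu);
		CROSS_GxICR(SMP_BOOT_IRQ, cpu) = icr | GxICR_REQUEST | GxICR_DETECT;
		icr = CROSS_GxICR(SMP_BOOT_IRQ, cpu);	/* flush the write */

		/* wait up to 100ms for the AP to set its bit in cpu_boot_map */
		for (tries = 0; tries < TIME_OUT_COUNT_BOOT_IPI; tries++) {
			__delay(DELAY_TIME_BOOT_IPI);
			if (cpu_boot_map & (1UL << cpu))
				break;			/* AP checked in */
		}
	}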
+
+###############################################################################
+#
+#
+#
+###############################################################################
 ENTRY(__head_end)
 
 /*
index eee2eee86267bd961854bf6e91daddcb4e5961f3..6a064ab5af0774b15887d9432209a9e2f9abe208 100644 (file)
@@ -9,6 +9,9 @@
  * 2 of the Licence, or (at your option) any later version.
  */
 
+struct clocksource;
+struct clock_event_device;
+
 /*
  * kthread.S
  */
@@ -18,3 +21,22 @@ extern int kernel_thread_helper(int);
  * entry.S
  */
 extern void ret_from_fork(struct task_struct *) __attribute__((noreturn));
+
+/*
+ * smp-low.S
+ */
+#ifdef CONFIG_SMP
+extern void mn10300_low_ipi_handler(void);
+#endif
+
+/*
+ * time.c
+ */
+extern irqreturn_t local_timer_interrupt(void);
+
+#ifdef CONFIG_CEVT_MN10300
+extern void clockevent_set_clock(struct clock_event_device *, unsigned int);
+#endif
+#ifdef CONFIG_CSRC_MN10300
+extern void clocksource_set_clock(struct clocksource *, unsigned int);
+#endif
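clocksource_set_clock() and clockevent_set_clock() derive a mult/shift pair from the input clock rate so that cycle-to-nanosecond conversion becomes a multiply and a shift. The construction mirrors what other ports such as MIPS do; roughly (a sketch, assuming the same contract):

	/* pick the largest shift for which
	 *	mult = (NSEC_PER_SEC << shift) / clock
	 * still fits in 32 bits; then nsec = (cycles * mult) >> shift */
	void clocksource_set_clock(struct clocksource *cs, unsigned int clock)
	{
		u64 temp;
		u32 shift;

		for (shift = 32; shift > 0; shift--) {
			temp = (u64) NSEC_PER_SEC << shift;
			do_div(temp, clock);
			if ((temp >> 32) == 0)
				break;
		}
		cs->shift = shift;
		cs->mult = (u32) temp;
	}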
index e2d5ed891f37b6c7471108f25c2e57dd425ba28f..c2e44597c22b1fc5b69ebf13cca307d43c2d1a52 100644 (file)
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
 #include <linux/seq_file.h>
+#include <linux/cpumask.h>
 #include <asm/setup.h>
+#include <asm/serial-regs.h>
 
-unsigned long __mn10300_irq_enabled_epsw = EPSW_IE | EPSW_IM_7;
+unsigned long __mn10300_irq_enabled_epsw[NR_CPUS] __cacheline_aligned_in_smp = {
+       [0 ... NR_CPUS - 1] = EPSW_IE | EPSW_IM_7
+};
 EXPORT_SYMBOL(__mn10300_irq_enabled_epsw);
 
+#ifdef CONFIG_SMP
+static char irq_affinity_online[NR_IRQS] = {
+       [0 ... NR_IRQS - 1] = 0
+};
+
+#define NR_IRQ_WORDS   ((NR_IRQS + 31) / 32)
+static unsigned long irq_affinity_request[NR_IRQ_WORDS] = {
+       [0 ... NR_IRQ_WORDS - 1] = 0
+};
+#endif  /* CONFIG_SMP */
+
 atomic_t irq_err_count;
 
 /*
@@ -24,30 +39,67 @@ atomic_t irq_err_count;
  */
 static void mn10300_cpupic_ack(unsigned int irq)
 {
+       unsigned long flags;
        u16 tmp;
-       *(volatile u8 *) &GxICR(irq) = GxICR_DETECT;
+
+       flags = arch_local_cli_save();
+       GxICR_u8(irq) = GxICR_DETECT;
        tmp = GxICR(irq);
+       arch_local_irq_restore(flags);
 }
 
-static void mn10300_cpupic_mask(unsigned int irq)
+static void __mask_and_set_icr(unsigned int irq,
+                              unsigned int mask, unsigned int set)
 {
-       u16 tmp = GxICR(irq);
-       GxICR(irq) = (tmp & GxICR_LEVEL);
+       unsigned long flags;
+       u16 tmp;
+
+       flags = arch_local_cli_save();
+       tmp = GxICR(irq);
+       GxICR(irq) = (tmp & mask) | set;
        tmp = GxICR(irq);
+       arch_local_irq_restore(flags);
+}
+
+static void mn10300_cpupic_mask(unsigned int irq)
+{
+       __mask_and_set_icr(irq, GxICR_LEVEL, 0);
 }
 
 static void mn10300_cpupic_mask_ack(unsigned int irq)
 {
-       u16 tmp = GxICR(irq);
-       GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
-       tmp = GxICR(irq);
+#ifdef CONFIG_SMP
+       unsigned long flags;
+       u16 tmp;
+
+       flags = arch_local_cli_save();
+
+       if (!test_and_clear_bit(irq, irq_affinity_request)) {
+               tmp = GxICR(irq);
+               GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
+               tmp = GxICR(irq);
+       } else {
+               u16 tmp2;
+               tmp = GxICR(irq);
+               GxICR(irq) = (tmp & GxICR_LEVEL);
+               tmp2 = GxICR(irq);
+
+               irq_affinity_online[irq] =
+                       any_online_cpu(*irq_desc[irq].affinity);
+               CROSS_GxICR(irq, irq_affinity_online[irq]) =
+                       (tmp & (GxICR_LEVEL | GxICR_ENABLE)) | GxICR_DETECT;
+               tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
+       }
+
+       arch_local_irq_restore(flags);
+#else  /* CONFIG_SMP */
+       __mask_and_set_icr(irq, GxICR_LEVEL, GxICR_DETECT);
+#endif /* CONFIG_SMP */
 }
 
 static void mn10300_cpupic_unmask(unsigned int irq)
 {
-       u16 tmp = GxICR(irq);
-       GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE;
-       tmp = GxICR(irq);
+       __mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE);
 }
 
 static void mn10300_cpupic_unmask_clear(unsigned int irq)
@@ -56,11 +108,89 @@ static void mn10300_cpupic_unmask_clear(unsigned int irq)
         * device has ceased to assert its interrupt line and the interrupt
         * channel has been disabled in the PIC, so for level-triggered
         * interrupts we need to clear the request bit when we re-enable */
-       u16 tmp = GxICR(irq);
-       GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
-       tmp = GxICR(irq);
+#ifdef CONFIG_SMP
+       unsigned long flags;
+       u16 tmp;
+
+       flags = arch_local_cli_save();
+
+       if (!test_and_clear_bit(irq, irq_affinity_request)) {
+               tmp = GxICR(irq);
+               GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
+               tmp = GxICR(irq);
+       } else {
+               tmp = GxICR(irq);
+
+               irq_affinity_online[irq] = any_online_cpu(*irq_desc[irq].affinity);
+               CROSS_GxICR(irq, irq_affinity_online[irq]) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
+               tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
+       }
+
+       arch_local_irq_restore(flags);
+#else  /* CONFIG_SMP */
+       __mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE | GxICR_DETECT);
+#endif /* CONFIG_SMP */
 }
 
+#ifdef CONFIG_SMP
+static int
+mn10300_cpupic_setaffinity(unsigned int irq, const struct cpumask *mask)
+{
+       unsigned long flags;
+       int err;
+
+       flags = arch_local_cli_save();
+
+       /* check irq no */
+       switch (irq) {
+       case TMJCIRQ:
+       case RESCHEDULE_IPI:
+       case CALL_FUNC_SINGLE_IPI:
+       case LOCAL_TIMER_IPI:
+       case FLUSH_CACHE_IPI:
+       case CALL_FUNCTION_NMI_IPI:
+       case GDB_NMI_IPI:
+#ifdef CONFIG_MN10300_TTYSM0
+       case SC0RXIRQ:
+       case SC0TXIRQ:
+#ifdef CONFIG_MN10300_TTYSM0_TIMER8
+       case TM8IRQ:
+#elif defined(CONFIG_MN10300_TTYSM0_TIMER2)
+       case TM2IRQ:
+#endif /* CONFIG_MN10300_TTYSM0_TIMER8 */
+#endif /* CONFIG_MN10300_TTYSM0 */
+
+#ifdef CONFIG_MN10300_TTYSM1
+       case SC1RXIRQ:
+       case SC1TXIRQ:
+#ifdef CONFIG_MN10300_TTYSM1_TIMER12
+       case TM12IRQ:
+#elif defined(CONFIG_MN10300_TTYSM1_TIMER9)
+       case TM9IRQ:
+#elif defined(CONFIG_MN10300_TTYSM1_TIMER3)
+       case TM3IRQ:
+#endif /* CONFIG_MN10300_TTYSM1_TIMER12 */
+#endif /* CONFIG_MN10300_TTYSM1 */
+
+#ifdef CONFIG_MN10300_TTYSM2
+       case SC2RXIRQ:
+       case SC2TXIRQ:
+       case TM10IRQ:
+#endif /* CONFIG_MN10300_TTYSM2 */
+               err = -1;
+               break;
+
+       default:
+               set_bit(irq, irq_affinity_request);
+               err = 0;
+               break;
+       }
+
+       arch_local_irq_restore(flags);
+       return err;
+}
+#endif /* CONFIG_SMP */
+
 /*
  * MN10300 PIC level-triggered IRQ handling.
  *
@@ -79,6 +209,9 @@ static struct irq_chip mn10300_cpu_pic_level = {
        .mask           = mn10300_cpupic_mask,
        .mask_ack       = mn10300_cpupic_mask,
        .unmask         = mn10300_cpupic_unmask_clear,
+#ifdef CONFIG_SMP
+       .set_affinity   = mn10300_cpupic_setaffinity,
+#endif
 };
 
 /*
@@ -94,6 +227,9 @@ static struct irq_chip mn10300_cpu_pic_edge = {
        .mask           = mn10300_cpupic_mask,
        .mask_ack       = mn10300_cpupic_mask_ack,
        .unmask         = mn10300_cpupic_unmask,
+#ifdef CONFIG_SMP
+       .set_affinity   = mn10300_cpupic_setaffinity,
+#endif
 };
 
 /*
@@ -111,14 +247,34 @@ void ack_bad_irq(int irq)
  */
 void set_intr_level(int irq, u16 level)
 {
-       u16 tmp;
+       BUG_ON(in_interrupt());
 
-       if (in_interrupt())
-               BUG();
+       __mask_and_set_icr(irq, GxICR_ENABLE, level);
+}
 
-       tmp = GxICR(irq);
-       GxICR(irq) = (tmp & GxICR_ENABLE) | level;
-       tmp = GxICR(irq);
+void mn10300_intc_set_level(unsigned int irq, unsigned int level)
+{
+       set_intr_level(irq, NUM2GxICR_LEVEL(level) & GxICR_LEVEL);
+}
+
+void mn10300_intc_clear(unsigned int irq)
+{
+       __mask_and_set_icr(irq, GxICR_LEVEL | GxICR_ENABLE, GxICR_DETECT);
+}
+
+void mn10300_intc_set(unsigned int irq)
+{
+       __mask_and_set_icr(irq, 0, GxICR_REQUEST | GxICR_DETECT);
+}
+
+void mn10300_intc_enable(unsigned int irq)
+{
+       mn10300_cpupic_unmask(irq);
+}
+
+void mn10300_intc_disable(unsigned int irq)
+{
+       mn10300_cpupic_mask(irq);
 }
 
 /*
@@ -126,7 +282,7 @@ void set_intr_level(int irq, u16 level)
  * than before
  * - see Documentation/mn10300/features.txt
  */
-void set_intr_postackable(int irq)
+void mn10300_set_lateack_irq_type(int irq)
 {
        set_irq_chip_and_handler(irq, &mn10300_cpu_pic_level,
                                 handle_level_irq);
@@ -147,6 +303,7 @@ void __init init_IRQ(void)
                         * interrupts */
                        set_irq_chip_and_handler(irq, &mn10300_cpu_pic_edge,
                                                 handle_level_irq);
+
        unit_init_IRQ();
 }
 
@@ -156,20 +313,22 @@ void __init init_IRQ(void)
 asmlinkage void do_IRQ(void)
 {
        unsigned long sp, epsw, irq_disabled_epsw, old_irq_enabled_epsw;
+       unsigned int cpu_id = smp_processor_id();
        int irq;
 
        sp = current_stack_pointer();
-       if (sp - (sp & ~(THREAD_SIZE - 1)) < STACK_WARN)
-               BUG();
+       BUG_ON(sp - (sp & ~(THREAD_SIZE - 1)) < STACK_WARN);
 
        /* make sure local_irq_enable() doesn't muck up the interrupt priority
         * setting in EPSW */
-       old_irq_enabled_epsw = __mn10300_irq_enabled_epsw;
+       old_irq_enabled_epsw = __mn10300_irq_enabled_epsw[cpu_id];
        local_save_flags(epsw);
-       __mn10300_irq_enabled_epsw = EPSW_IE | (EPSW_IM & epsw);
+       __mn10300_irq_enabled_epsw[cpu_id] = EPSW_IE | (EPSW_IM & epsw);
        irq_disabled_epsw = EPSW_IE | MN10300_CLI_LEVEL;
 
-       __IRQ_STAT(smp_processor_id(), __irq_count)++;
+#ifdef CONFIG_MN10300_WD_TIMER
+       __IRQ_STAT(cpu_id, __irq_count)++;
+#endif
 
        irq_enter();
 
@@ -189,7 +348,7 @@ asmlinkage void do_IRQ(void)
                local_irq_restore(epsw);
        }
 
-       __mn10300_irq_enabled_epsw = old_irq_enabled_epsw;
+       __mn10300_irq_enabled_epsw[cpu_id] = old_irq_enabled_epsw;
 
        irq_exit();
 }
@@ -222,9 +381,16 @@ int show_interrupts(struct seq_file *p, void *v)
                        seq_printf(p, "%3d: ", i);
                        for_each_present_cpu(cpu)
                                seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
-                       seq_printf(p, " %14s.%u", irq_desc[i].chip->name,
-                                  (GxICR(i) & GxICR_LEVEL) >>
-                                  GxICR_LEVEL_SHIFT);
+
+                       if (i < NR_CPU_IRQS)
+                               seq_printf(p, " %14s.%u",
+                                          irq_desc[i].chip->name,
+                                          (GxICR(i) & GxICR_LEVEL) >>
+                                          GxICR_LEVEL_SHIFT);
+                       else
+                               seq_printf(p, " %14s",
+                                          irq_desc[i].chip->name);
+
                        seq_printf(p, "  %s", action->name);
 
                        for (action = action->next;
@@ -240,11 +406,13 @@ int show_interrupts(struct seq_file *p, void *v)
 
                /* polish off with NMI and error counters */
        case NR_IRQS:
+#ifdef CONFIG_MN10300_WD_TIMER
                seq_printf(p, "NMI: ");
                for (j = 0; j < NR_CPUS; j++)
                        if (cpu_online(j))
                                seq_printf(p, "%10u ", nmi_count(j));
                seq_putc(p, '\n');
+#endif
 
                seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
                break;
@@ -252,3 +420,52 @@
 
        return 0;
 }
+
+#ifdef CONFIG_HOTPLUG_CPU
+void migrate_irqs(void)
+{
+       irq_desc_t *desc;
+       int irq;
+       unsigned int self, new;
+       unsigned long flags;
+
+       self = smp_processor_id();
+       for (irq = 0; irq < NR_IRQS; irq++) {
+               desc = irq_desc + irq;
+
+               if (desc->status & IRQ_PER_CPU)
+                       continue;
+
+               if (cpu_isset(self, irq_desc[irq].affinity) &&
+                   !cpus_intersects(irq_desc[irq].affinity, cpu_online_map)) {
+                       int cpu_id;
+                       cpu_id = first_cpu(cpu_online_map);
+                       cpu_set(cpu_id, irq_desc[irq].affinity);
+               }
+               /* We need to operate irq_affinity_online atomically. */
+               arch_local_cli_save(flags);
+               if (irq_affinity_online[irq] == self) {
+                       u16 x, tmp;
+
+                       x = GxICR(irq);
+                       GxICR(irq) = x & GxICR_LEVEL;
+                       tmp = GxICR(irq);
+
+                       new = any_online_cpu(irq_desc[irq].affinity);
+                       irq_affinity_online[irq] = new;
+
+                       CROSS_GxICR(irq, new) =
+                               (x & GxICR_LEVEL) | GxICR_DETECT;
+                       tmp = CROSS_GxICR(irq, new);
+
+                       x &= GxICR_LEVEL | GxICR_ENABLE;
+                       if (GxICR(irq) & GxICR_REQUEST) {
+                               x |= GxICR_REQUEST | GxICR_DETECT;
+                               CROSS_GxICR(irq, new) = x;
+                               tmp = CROSS_GxICR(irq, new);
+                       }
+               }
+               arch_local_irq_restore(flags);
+       }
+}
+#endif /* CONFIG_HOTPLUG_CPU */
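The affinity code above never moves a live interrupt directly: set_affinity() only records a request bit, and the actual re-pointing of the ICR happens in the mask_ack/unmask_clear paths once the interrupt is quiesced. Stripped of locking and flush reads, the protocol is the following (names as in the patch; irq_is_pinned() is a made-up stand-in for the big switch over IPI, timer and ttySM interrupts):

	static int setaffinity(unsigned int irq, const struct cpumask *mask)
	{
		if (irq_is_pinned(irq))			/* IPIs, timers, ttySM */
			return -1;
		set_bit(irq, irq_affinity_request);	/* defer the move */
		return 0;
	}

	static void mask_ack(unsigned int irq)
	{
		if (test_and_clear_bit(irq, irq_affinity_request)) {
			int cpu = any_online_cpu(*irq_desc[irq].affinity);
			u16 icr = GxICR(irq);

			GxICR(irq) = icr & GxICR_LEVEL;	/* mask locally */
			irq_affinity_online[irq] = cpu;	/* note the new home */
			CROSS_GxICR(irq, cpu) =		/* re-arm on the target */
				(icr & (GxICR_LEVEL | GxICR_ENABLE)) | GxICR_DETECT;
		}
	}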
index 67e6389d625a43a7cf62d4d1f5358233b5fef3d7..0311a7fcea16999c0ad6c6aa6dcfd23720ac953d 100644 (file)
@@ -377,8 +377,10 @@ void __kprobes arch_arm_kprobe(struct kprobe *p)
 
 void __kprobes arch_disarm_kprobe(struct kprobe *p)
 {
+#ifndef CONFIG_MN10300_CACHE_SNOOP
        mn10300_dcache_flush();
        mn10300_icache_inv();
+#endif
 }
 
 void arch_remove_kprobe(struct kprobe *p)
@@ -390,8 +392,10 @@ void __kprobes disarm_kprobe(struct kprobe *p, struct pt_regs *regs)
 {
        *p->addr = p->opcode;
        regs->pc = (unsigned long) p->addr;
+#ifndef CONFIG_MN10300_CACHE_SNOOP
        mn10300_dcache_flush();
        mn10300_icache_inv();
+#endif
 }
 
 static inline
index 66702d256610cd765785b0b0495388dc8ddf0af3..dfc1b6f2fa9a4b70f251d8766c773c39fc7733fe 100644 (file)
@@ -39,7 +39,7 @@
 ###############################################################################
        .balign L1_CACHE_BYTES
 ENTRY(mn10300_serial_vdma_interrupt)
-       or      EPSW_IE,epsw                    # permit overriding by
+#      or      EPSW_IE,epsw                    # permit overriding by
                                                # debugging interrupts
        movm    [d2,d3,a2,a3,exreg0],(sp)
 
@@ -164,7 +164,7 @@ mnsc_vdma_tx_noint:
        rti
 
 mnsc_vdma_tx_empty:
-       mov     +(GxICR_LEVEL_1|GxICR_DETECT),d2
+       mov     +(NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL)|GxICR_DETECT),d2
        movhu   d2,(e3)                 # disable the interrupt
        movhu   (e3),d2                 # flush
 
@@ -175,7 +175,7 @@ mnsc_vdma_tx_break:
        movhu   (SCxCTR,e2),d2          # turn on break mode
        or      SC01CTR_BKE,d2
        movhu   d2,(SCxCTR,e2)
-       mov     +(GxICR_LEVEL_1|GxICR_DETECT),d2
+       mov     +(NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL)|GxICR_DETECT),d2
        movhu   d2,(e3)                 # disable transmit interrupts on this
                                        # channel
        movhu   (e3),d2                 # flush
index db509dd80565b9e91c2b661c08c5fd64bc52e8d4..996384dba45da8dabca666d78328a00811aa4931 100644 (file)
@@ -44,6 +44,11 @@ static const char serial_revdate[] = "2007-11-06";
 #include <unit/timex.h>
 #include "mn10300-serial.h"
 
+#ifdef CONFIG_SMP
+#undef  GxICR
+#define GxICR(X) CROSS_GxICR(X, 0)
+#endif /* CONFIG_SMP */
+
 #define kenter(FMT, ...) \
        printk(KERN_DEBUG "-->%s(" FMT ")\n", __func__, ##__VA_ARGS__)
 #define _enter(FMT, ...) \
@@ -57,6 +62,11 @@ static const char serial_revdate[] = "2007-11-06";
 #define _proto(FMT, ...) \
        no_printk(KERN_DEBUG "### MNSERIAL " FMT " ###\n", ##__VA_ARGS__)
 
+#ifndef CODMSB
+/* c_cflag bit meaning */
+#define CODMSB 004000000000    /* change Transfer bit-order */
+#endif
+
 #define NR_UARTS 3
 
 #ifdef CONFIG_MN10300_TTYSM_CONSOLE
@@ -152,26 +162,35 @@ struct mn10300_serial_port mn10300_serial_port_sif0 = {
        .name           = "ttySM0",
        ._iobase        = &SC0CTR,
        ._control       = &SC0CTR,
-       ._status        = (volatile u8 *) &SC0STR,
+       ._status        = (volatile u8 *)&SC0STR,
        ._intr          = &SC0ICR,
        ._rxb           = &SC0RXB,
        ._txb           = &SC0TXB,
        .rx_name        = "ttySM0:Rx",
        .tx_name        = "ttySM0:Tx",
-#ifdef CONFIG_MN10300_TTYSM0_TIMER8
+#if defined(CONFIG_MN10300_TTYSM0_TIMER8)
        .tm_name        = "ttySM0:Timer8",
        ._tmxmd         = &TM8MD,
        ._tmxbr         = &TM8BR,
        ._tmicr         = &TM8ICR,
        .tm_irq         = TM8IRQ,
        .div_timer      = MNSCx_DIV_TIMER_16BIT,
-#else /* CONFIG_MN10300_TTYSM0_TIMER2 */
+#elif defined(CONFIG_MN10300_TTYSM0_TIMER0)
+       .tm_name        = "ttySM0:Timer0",
+       ._tmxmd         = &TM0MD,
+       ._tmxbr         = (volatile u16 *)&TM0BR,
+       ._tmicr         = &TM0ICR,
+       .tm_irq         = TM0IRQ,
+       .div_timer      = MNSCx_DIV_TIMER_8BIT,
+#elif defined(CONFIG_MN10300_TTYSM0_TIMER2)
        .tm_name        = "ttySM0:Timer2",
        ._tmxmd         = &TM2MD,
-       ._tmxbr         = (volatile u16 *) &TM2BR,
+       ._tmxbr         = (volatile u16 *)&TM2BR,
        ._tmicr         = &TM2ICR,
        .tm_irq         = TM2IRQ,
        .div_timer      = MNSCx_DIV_TIMER_8BIT,
+#else
+#error "Unknown config for ttySM0"
 #endif
        .rx_irq         = SC0RXIRQ,
        .tx_irq         = SC0TXIRQ,
@@ -205,26 +224,35 @@ struct mn10300_serial_port mn10300_serial_port_sif1 = {
        .name           = "ttySM1",
        ._iobase        = &SC1CTR,
        ._control       = &SC1CTR,
-       ._status        = (volatile u8 *) &SC1STR,
+       ._status        = (volatile u8 *)&SC1STR,
        ._intr          = &SC1ICR,
        ._rxb           = &SC1RXB,
        ._txb           = &SC1TXB,
        .rx_name        = "ttySM1:Rx",
        .tx_name        = "ttySM1:Tx",
-#ifdef CONFIG_MN10300_TTYSM1_TIMER9
+#if defined(CONFIG_MN10300_TTYSM1_TIMER9)
        .tm_name        = "ttySM1:Timer9",
        ._tmxmd         = &TM9MD,
        ._tmxbr         = &TM9BR,
        ._tmicr         = &TM9ICR,
        .tm_irq         = TM9IRQ,
        .div_timer      = MNSCx_DIV_TIMER_16BIT,
-#else /* CONFIG_MN10300_TTYSM1_TIMER3 */
+#elif defined(CONFIG_MN10300_TTYSM1_TIMER3)
        .tm_name        = "ttySM1:Timer3",
        ._tmxmd         = &TM3MD,
-       ._tmxbr         = (volatile u16 *) &TM3BR,
+       ._tmxbr         = (volatile u16 *)&TM3BR,
        ._tmicr         = &TM3ICR,
        .tm_irq         = TM3IRQ,
        .div_timer      = MNSCx_DIV_TIMER_8BIT,
+#elif defined(CONFIG_MN10300_TTYSM1_TIMER12)
+       .tm_name        = "ttySM1:Timer12",
+       ._tmxmd         = &TM12MD,
+       ._tmxbr         = &TM12BR,
+       ._tmicr         = &TM12ICR,
+       .tm_irq         = TM12IRQ,
+       .div_timer      = MNSCx_DIV_TIMER_16BIT,
+#else
+#error "Unknown config for ttySM1"
 #endif
        .rx_irq         = SC1RXIRQ,
        .tx_irq         = SC1TXIRQ,
@@ -260,20 +288,45 @@ struct mn10300_serial_port mn10300_serial_port_sif2 = {
        .uart.lock      =
        __SPIN_LOCK_UNLOCKED(mn10300_serial_port_sif2.uart.lock),
        .name           = "ttySM2",
-       .rx_name        = "ttySM2:Rx",
-       .tx_name        = "ttySM2:Tx",
-       .tm_name        = "ttySM2:Timer10",
        ._iobase        = &SC2CTR,
        ._control       = &SC2CTR,
-       ._status        = &SC2STR,
+       ._status        = (volatile u8 *)&SC2STR,
        ._intr          = &SC2ICR,
        ._rxb           = &SC2RXB,
        ._txb           = &SC2TXB,
+       .rx_name        = "ttySM2:Rx",
+       .tx_name        = "ttySM2:Tx",
+#if defined(CONFIG_MN10300_TTYSM2_TIMER10)
+       .tm_name        = "ttySM2:Timer10",
        ._tmxmd         = &TM10MD,
        ._tmxbr         = &TM10BR,
        ._tmicr         = &TM10ICR,
        .tm_irq         = TM10IRQ,
        .div_timer      = MNSCx_DIV_TIMER_16BIT,
+#elif defined(CONFIG_MN10300_TTYSM2_TIMER9)
+       .tm_name        = "ttySM2:Timer9",
+       ._tmxmd         = &TM9MD,
+       ._tmxbr         = &TM9BR,
+       ._tmicr         = &TM9ICR,
+       .tm_irq         = TM9IRQ,
+       .div_timer      = MNSCx_DIV_TIMER_16BIT,
+#elif defined(CONFIG_MN10300_TTYSM2_TIMER1)
+       .tm_name        = "ttySM2:Timer1",
+       ._tmxmd         = &TM1MD,
+       ._tmxbr         = (volatile u16 *)&TM1BR,
+       ._tmicr         = &TM1ICR,
+       .tm_irq         = TM1IRQ,
+       .div_timer      = MNSCx_DIV_TIMER_8BIT,
+#elif defined(CONFIG_MN10300_TTYSM2_TIMER3)
+       .tm_name        = "ttySM2:Timer3",
+       ._tmxmd         = &TM3MD,
+       ._tmxbr         = (volatile u16 *)&TM3BR,
+       ._tmicr         = &TM3ICR,
+       .tm_irq         = TM3IRQ,
+       .div_timer      = MNSCx_DIV_TIMER_8BIT,
+#else
+#error "Unknown config for ttySM2"
+#endif
        .rx_irq         = SC2RXIRQ,
        .tx_irq         = SC2TXIRQ,
        .rx_icr         = &GxICR(SC2RXIRQ),
@@ -322,9 +375,13 @@ struct mn10300_serial_port *mn10300_serial_ports[NR_UARTS + 1] = {
  */
 static void mn10300_serial_mask_ack(unsigned int irq)
 {
+       unsigned long flags;
        u16 tmp;
+
+       flags = arch_local_cli_save();
        GxICR(irq) = GxICR_LEVEL_6;
        tmp = GxICR(irq); /* flush write buffer */
+       arch_local_irq_restore(flags);
 }
 
 static void mn10300_serial_nop(unsigned int irq)
@@ -348,23 +405,36 @@ struct mn10300_serial_int mn10300_serial_int_tbl[NR_IRQS];
 
 static void mn10300_serial_dis_tx_intr(struct mn10300_serial_port *port)
 {
+       unsigned long flags;
        u16 x;
-       *port->tx_icr = GxICR_LEVEL_1 | GxICR_DETECT;
+
+       flags = arch_local_cli_save();
+       *port->tx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
        x = *port->tx_icr;
+       arch_local_irq_restore(flags);
 }
 
 static void mn10300_serial_en_tx_intr(struct mn10300_serial_port *port)
 {
+       unsigned long flags;
        u16 x;
-       *port->tx_icr = GxICR_LEVEL_1 | GxICR_ENABLE;
+
+       flags = arch_local_cli_save();
+       *port->tx_icr =
+               NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL) | GxICR_ENABLE;
        x = *port->tx_icr;
+       arch_local_irq_restore(flags);
 }
 
 static void mn10300_serial_dis_rx_intr(struct mn10300_serial_port *port)
 {
+       unsigned long flags;
        u16 x;
-       *port->rx_icr = GxICR_LEVEL_1 | GxICR_DETECT;
+
+       flags = arch_local_cli_save();
+       *port->rx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
        x = *port->rx_icr;
+       arch_local_irq_restore(flags);
 }
 
 /*
@@ -650,7 +720,7 @@ static unsigned int mn10300_serial_tx_empty(struct uart_port *_port)
 static void mn10300_serial_set_mctrl(struct uart_port *_port,
                                     unsigned int mctrl)
 {
-       struct mn10300_serial_port *port =
+       struct mn10300_serial_port *port __attribute__ ((unused)) =
                container_of(_port, struct mn10300_serial_port, uart);
 
        _enter("%s,%x", port->name, mctrl);
@@ -706,6 +776,7 @@ static void mn10300_serial_start_tx(struct uart_port *_port)
                        UART_XMIT_SIZE));
 
        /* kick the virtual DMA controller */
+       arch_local_cli();
        x = *port->tx_icr;
        x |= GxICR_ENABLE;
 
@@ -716,10 +787,14 @@ static void mn10300_serial_start_tx(struct uart_port *_port)
 
        _debug("CTR=%04hx ICR=%02hx STR=%04x TMD=%02hx TBR=%04hx ICR=%04hx",
               *port->_control, *port->_intr, *port->_status,
-              *port->_tmxmd, *port->_tmxbr, *port->tx_icr);
+              *port->_tmxmd,
+              (port->div_timer == MNSCx_DIV_TIMER_8BIT) ?
+                  *(volatile u8 *)port->_tmxbr : *port->_tmxbr,
+              *port->tx_icr);
 
        *port->tx_icr = x;
        x = *port->tx_icr;
+       arch_local_sti();
 }
 
 /*
@@ -842,8 +917,10 @@ static int mn10300_serial_startup(struct uart_port *_port)
        pint->port = port;
        pint->vdma = mn10300_serial_vdma_tx_handler;
 
-       set_intr_level(port->rx_irq, GxICR_LEVEL_1);
-       set_intr_level(port->tx_irq, GxICR_LEVEL_1);
+       set_intr_level(port->rx_irq,
+               NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL));
+       set_intr_level(port->tx_irq,
+               NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL));
        set_irq_chip(port->tm_irq, &mn10300_serial_pic);
 
        if (request_irq(port->rx_irq, mn10300_serial_interrupt,
@@ -876,6 +953,7 @@ error:
  */
 static void mn10300_serial_shutdown(struct uart_port *_port)
 {
+       u16 x;
        struct mn10300_serial_port *port =
                container_of(_port, struct mn10300_serial_port, uart);
 
@@ -897,8 +975,12 @@ static void mn10300_serial_shutdown(struct uart_port *_port)
        free_irq(port->rx_irq, port);
        free_irq(port->tx_irq, port);
 
-       *port->rx_icr = GxICR_LEVEL_1;
-       *port->tx_icr = GxICR_LEVEL_1;
+       arch_local_cli();
+       *port->rx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
+       x = *port->rx_icr;
+       *port->tx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
+       x = *port->tx_icr;
+       arch_local_sti();
 }
 
 /*
@@ -947,11 +1029,66 @@ static void mn10300_serial_change_speed(struct mn10300_serial_port *port,
        /* Determine divisor based on baud rate */
        battempt = 0;
 
-       if (div_timer == MNSCx_DIV_TIMER_16BIT)
-               scxctr |= SC0CTR_CK_TM8UFLOW_8; /* ( == SC1CTR_CK_TM9UFLOW_8
-                                                *   == SC2CTR_CK_TM10UFLOW) */
-       else if (div_timer == MNSCx_DIV_TIMER_8BIT)
+       switch (port->uart.line) {
+#ifdef CONFIG_MN10300_TTYSM0
+       case 0: /* ttySM0 */
+#if   defined(CONFIG_MN10300_TTYSM0_TIMER8)
+               scxctr |= SC0CTR_CK_TM8UFLOW_8;
+#elif defined(CONFIG_MN10300_TTYSM0_TIMER0)
+               scxctr |= SC0CTR_CK_TM0UFLOW_8;
+#elif defined(CONFIG_MN10300_TTYSM0_TIMER2)
                scxctr |= SC0CTR_CK_TM2UFLOW_8;
+#else
+#error "Unknown config for ttySM0"
+#endif
+               break;
+#endif /* CONFIG_MN10300_TTYSM0 */
+
+#ifdef CONFIG_MN10300_TTYSM1
+       case 1: /* ttySM1 */
+#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
+#if   defined(CONFIG_MN10300_TTYSM1_TIMER9)
+               scxctr |= SC1CTR_CK_TM9UFLOW_8;
+#elif defined(CONFIG_MN10300_TTYSM1_TIMER3)
+               scxctr |= SC1CTR_CK_TM3UFLOW_8;
+#else
+#error "Unknown config for ttySM1"
+#endif
+#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+#if defined(CONFIG_MN10300_TTYSM1_TIMER12)
+               scxctr |= SC1CTR_CK_TM12UFLOW_8;
+#else
+#error "Unknown config for ttySM1"
+#endif
+#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+               break;
+#endif /* CONFIG_MN10300_TTYSM1 */
+
+#ifdef CONFIG_MN10300_TTYSM2
+       case 2: /* ttySM2 */
+#if defined(CONFIG_AM33_2)
+#if   defined(CONFIG_MN10300_TTYSM2_TIMER10)
+               scxctr |= SC2CTR_CK_TM10UFLOW;
+#else
+#error "Unknown config for ttySM2"
+#endif
+#else /* CONFIG_AM33_2 */
+#if   defined(CONFIG_MN10300_TTYSM2_TIMER9)
+               scxctr |= SC2CTR_CK_TM9UFLOW_8;
+#elif defined(CONFIG_MN10300_TTYSM2_TIMER1)
+               scxctr |= SC2CTR_CK_TM1UFLOW_8;
+#elif defined(CONFIG_MN10300_TTYSM2_TIMER3)
+               scxctr |= SC2CTR_CK_TM3UFLOW_8;
+#else
+#error "Unknown config for ttySM2"
+#endif
+#endif /* CONFIG_AM33_2 */
+               break;
+#endif /* CONFIG_MN10300_TTYSM2 */
+
+       default:
+               break;
+       }
 
 try_alternative:
        baud = uart_get_baud_rate(&port->uart, new, old, 0,
@@ -1195,6 +1332,12 @@ static void mn10300_serial_set_termios(struct uart_port *_port,
                ctr &= ~SC2CTR_TWE;
                *port->_control = ctr;
        }
+
+       /* change Transfer bit-order (LSB/MSB) */
+       if (new->c_cflag & CODMSB)
+               *port->_control |= SC01CTR_OD_MSBFIRST; /* MSB MODE */
+       else
+               *port->_control &= ~SC01CTR_OD_MSBFIRST; /* LSB MODE */
 }
 
 /*
@@ -1302,11 +1445,16 @@ static int __init mn10300_serial_init(void)
        printk(KERN_INFO "%s version %s (%s)\n",
               serial_name, serial_version, serial_revdate);
 
-#ifdef CONFIG_MN10300_TTYSM2
-       SC2TIM = 8; /* make the baud base of timer 2 IOCLK/8 */
+#if defined(CONFIG_MN10300_TTYSM2) && defined(CONFIG_AM33_2)
+       {
+               int tmp;
+               SC2TIM = 8; /* make the baud base of timer 2 IOCLK/8 */
+               tmp = SC2TIM;
+       }
 #endif
 
-       set_intr_stub(EXCEP_IRQ_LEVEL1, mn10300_serial_vdma_interrupt);
+       set_intr_stub(NUM2EXCEP_IRQ_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL),
+               mn10300_serial_vdma_interrupt);
 
        ret = uart_register_driver(&mn10300_serial_driver);
        if (!ret) {
@@ -1366,9 +1514,11 @@ static void mn10300_serial_console_write(struct console *co,
        port = mn10300_serial_ports[co->index];
 
        /* firstly hijack the serial port from the "virtual DMA" controller */
+       arch_local_cli();
        txicr = *port->tx_icr;
-       *port->tx_icr = GxICR_LEVEL_1;
+       *port->tx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
        tmp = *port->tx_icr;
+       arch_local_sti();
 
        /* the transmitter may be disabled */
        scxctr = *port->_control;
@@ -1422,8 +1572,10 @@ static void mn10300_serial_console_write(struct console *co,
        if (!(scxctr & SC01CTR_TXE))
                *port->_control = scxctr;
 
+       arch_local_cli();
        *port->tx_icr = txicr;
        tmp = *port->tx_icr;
+       arch_local_sti();
 }
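CODMSB is not a POSIX c_cflag bit, hence the fallback definition added near the top of this file's diff. With the set_termios hook above, an application could request MSB-first transfer order like this (hypothetical user-space sketch; it assumes the CODMSB value is visible to the application):

	#include <termios.h>

	#ifndef CODMSB
	#define CODMSB	004000000000	/* mirrors the driver's fallback */
	#endif

	/* ask a ttySM port to transmit/receive MSB first */
	static int set_msb_first(int fd)
	{
		struct termios t;

		if (tcgetattr(fd, &t) < 0)
			return -1;
		t.c_cflag |= CODMSB;
		return tcsetattr(fd, TCSANOW, &t);
	}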
 
 /*
index 996244745ccae8f8fe990cac2e34fd502eb0545d..f2f5c9cfaabd803e02b95fad30267e1cde8a35c3 100644 (file)
@@ -16,6 +16,7 @@
 #include <asm/intctl-regs.h>
 #include <asm/timer-regs.h>
 #include <asm/frame.inc>
+#include <linux/threads.h>
 
        .text
 
@@ -53,7 +54,13 @@ watchdog_handler:
        .type   touch_nmi_watchdog,@function
 touch_nmi_watchdog:
        clr     d0
-       mov     d0,(watchdog_alert_counter)
+       clr     d1
+       mov     watchdog_alert_counter, a0
+       setlb
+       mov     d0, (a0+)
+       inc     d1
+       cmp     NR_CPUS, d1
+       lne
        ret     [],0
 
        .size   touch_nmi_watchdog,.-touch_nmi_watchdog
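The setlb/lne pair above forms a hardware loop; in C the new touch_nmi_watchdog() is simply the per-CPU clear already used by the gdbstub code in this same series:

	/* C equivalent of the assembly above: reset every CPU's counter,
	 * since with SMP any CPU may be the one being watched */
	void touch_nmi_watchdog(void)
	{
		int cpu;

		for (cpu = 0; cpu < NR_CPUS; cpu++)
			watchdog_alert_counter[cpu] = 0;
	}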
index f362d9d138f1ad90012d5fa5e69f1f8b387dbcba..c5e12bfd9fcdbb2e28d5b42d1916c2b08f2d20c5 100644 (file)
@@ -30,7 +30,7 @@
 static DEFINE_SPINLOCK(watchdog_print_lock);
 static unsigned int watchdog;
 static unsigned int watchdog_hz = 1;
-unsigned int watchdog_alert_counter;
+unsigned int watchdog_alert_counter[NR_CPUS];
 
 EXPORT_SYMBOL(touch_nmi_watchdog);
 
@@ -39,9 +39,6 @@ EXPORT_SYMBOL(touch_nmi_watchdog);
  * is to check that its timer keeps making IRQ counts advance. If
  * they stop changing then that CPU has some problem.
  *
- * as these watchdog NMI IRQs are generated on every CPU, we only
- * have to check the current processor.
- *
  * since NMIs don't listen to _any_ locks, we have to be extremely
  * careful not to rely on unsafe variables. The printk might lock
  * up though, so we have to break up any console locks first ...
@@ -69,8 +66,8 @@ int __init check_watchdog(void)
 
        printk(KERN_INFO "OK.\n");
 
-       /* now that we know it works we can reduce NMI frequency to
-        * something more reasonable; makes a difference in some configs
+       /* now that we know it works we can reduce NMI frequency to something
+        * more reasonable; makes a difference in some configs
         */
        watchdog_hz = 1;
 
@@ -121,15 +118,22 @@ void __init watchdog_go(void)
        }
 }
 
+#ifdef CONFIG_SMP
+static void watchdog_dump_register(void *dummy)
+{
+       printk(KERN_ERR "--- Register Dump (CPU%d) ---\n", CPUID);
+       show_registers(current_frame());
+}
+#endif
+
 asmlinkage
 void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
 {
-
        /*
         * Since current-> is always on the stack, and we always switch
         * the stack NMI-atomically, it's safe to use smp_processor_id().
         */
-       int sum, cpu = smp_processor_id();
+       int sum, cpu;
        int irq = NMIIRQ;
        u8 wdt, tmp;
 
@@ -138,43 +142,61 @@ void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
        tmp = WDCTR;
        NMICR = NMICR_WDIF;
 
-       nmi_count(cpu)++;
+       nmi_count(smp_processor_id())++;
        kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
-       sum = irq_stat[cpu].__irq_count;
-
-       if (last_irq_sums[cpu] == sum) {
-               /*
-                * Ayiee, looks like this CPU is stuck ...
-                * wait a few IRQs (5 seconds) before doing the oops ...
-                */
-               watchdog_alert_counter++;
-               if (watchdog_alert_counter == 5 * watchdog_hz) {
-                       spin_lock(&watchdog_print_lock);
+
+       for_each_online_cpu(cpu) {
+
+               sum = irq_stat[cpu].__irq_count;
+
+               if ((last_irq_sums[cpu] == sum)
+#if defined(CONFIG_GDBSTUB) && defined(CONFIG_SMP)
+                       && !(CHK_GDBSTUB_BUSY()
+                            || atomic_read(&cpu_doing_single_step))
+#endif
+                       ) {
                        /*
-                        * We are in trouble anyway, lets at least try
-                        * to get a message out.
+                        * Ayiee, looks like this CPU is stuck ...
+                        * wait a few IRQs (5 seconds) before doing the oops ...
                         */
-                       bust_spinlocks(1);
-                       printk(KERN_ERR
-                              "NMI Watchdog detected LOCKUP on CPU%d,"
-                              " pc %08lx, registers:\n",
-                              cpu, regs->pc);
-                       show_registers(regs);
-                       printk("console shuts up ...\n");
-                       console_silent();
-                       spin_unlock(&watchdog_print_lock);
-                       bust_spinlocks(0);
+                       watchdog_alert_counter[cpu]++;
+                       if (watchdog_alert_counter[cpu] == 5 * watchdog_hz) {
+                               spin_lock(&watchdog_print_lock);
+                               /*
+                                * We are in trouble anyway, lets at least try
+                                * to get a message out.
+                                */
+                               bust_spinlocks(1);
+                               printk(KERN_ERR
+                                      "NMI Watchdog detected LOCKUP on CPU%d,"
+                                      " pc %08lx, registers:\n",
+                                      cpu, regs->pc);
+#ifdef CONFIG_SMP
+                               printk(KERN_ERR
+                                      "--- Register Dump (CPU%d) ---\n",
+                                      CPUID);
+#endif
+                               show_registers(regs);
+#ifdef CONFIG_SMP
+                               smp_nmi_call_function(watchdog_dump_register,
+                                       NULL, 1);
+#endif
+                               printk(KERN_NOTICE "console shuts up ...\n");
+                               console_silent();
+                               spin_unlock(&watchdog_print_lock);
+                               bust_spinlocks(0);
 #ifdef CONFIG_GDBSTUB
-                       if (gdbstub_busy)
-                               gdbstub_exception(regs, excep);
-                       else
-                               gdbstub_intercept(regs, excep);
+                               if (CHK_GDBSTUB_BUSY_AND_ACTIVE())
+                                       gdbstub_exception(regs, excep);
+                               else
+                                       gdbstub_intercept(regs, excep);
 #endif
-                       do_exit(SIGSEGV);
+                               do_exit(SIGSEGV);
+                       }
+               } else {
+                       last_irq_sums[cpu] = sum;
+                       watchdog_alert_counter[cpu] = 0;
                }
-       } else {
-               last_irq_sums[cpu] = sum;
-               watchdog_alert_counter = 0;
        }
 
        WDCTR = wdt | WDCTR_WDRST;
index f48373e2bc1cffab139be817cf6f540776c51b79..0d0f8049a17b557183be48f844bb4b100f5cc87a 100644 (file)
@@ -57,6 +57,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
 void (*pm_power_off)(void);
 EXPORT_SYMBOL(pm_power_off);
 
+#if !defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
 /*
  * we use this if we don't have any better idle routine
  */
@@ -69,6 +70,35 @@ static void default_idle(void)
                local_irq_enable();
 }
 
+#else /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU  */
+/*
+ * On SMP it's slightly faster (but much more power-consuming!)
+ * to poll the ->work.need_resched flag instead of waiting for the
+ * cross-CPU IPI to arrive. Use this option with caution.
+ */
+static inline void poll_idle(void)
+{
+       int oldval;
+
+       local_irq_enable();
+
+       /*
+        * Deal with another CPU just having chosen a thread to
+        * run here:
+        */
+       oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
+
+       if (!oldval) {
+               set_thread_flag(TIF_POLLING_NRFLAG);
+               while (!need_resched())
+                       cpu_relax();
+               clear_thread_flag(TIF_POLLING_NRFLAG);
+       } else {
+               set_need_resched();
+       }
+}
+#endif /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */
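TIF_POLLING_NRFLAG is what makes polling pay off: a CPU that wants to wake this one can see the flag and skip the reschedule IPI, because the poller will notice need_resched on its own. A sketch of the waker-side logic (simplified from the generic scheduler of this era; not mn10300-specific code):

	static void resched_idle_cpu(struct task_struct *idle, int cpu)
	{
		set_tsk_need_resched(idle);
		smp_mb();
		if (!test_tsk_thread_flag(idle, TIF_POLLING_NRFLAG))
			smp_send_reschedule(cpu);	/* poller needs no IPI */
	}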
+
 /*
  * the idle thread
  * - there's no useful work to be done, so just try to conserve power and have
@@ -77,8 +107,6 @@ static void default_idle(void)
  */
 void cpu_idle(void)
 {
-       int cpu = smp_processor_id();
-
        /* endless idle loop with no priority at all */
        for (;;) {
                while (!need_resched()) {
@@ -86,10 +114,13 @@ void cpu_idle(void)
 
                        smp_rmb();
                        idle = pm_idle;
-                       if (!idle)
+                       if (!idle) {
+#if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
+                               idle = poll_idle;
+#else  /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */
                                idle = default_idle;
-
-                       irq_stat[cpu].idle_timestamp = jiffies;
+#endif /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */
+                       }
                        idle();
                }
 
@@ -197,6 +228,7 @@ int copy_thread(unsigned long clone_flags,
                unsigned long c_usp, unsigned long ustk_size,
                struct task_struct *p, struct pt_regs *kregs)
 {
+       struct thread_info *ti = task_thread_info(p);
        struct pt_regs *c_uregs, *c_kregs, *uregs;
        unsigned long c_ksp;
 
@@ -217,7 +249,7 @@ int copy_thread(unsigned long clone_flags,
 
        /* the new TLS pointer is passed in as arg #5 to sys_clone() */
        if (clone_flags & CLONE_SETTLS)
-               c_uregs->e2 = __frame->d3;
+               c_uregs->e2 = current_frame()->d3;
 
        /* set up the return kernel frame if called from kernel_thread() */
        c_kregs = c_uregs;
@@ -235,7 +267,7 @@ int copy_thread(unsigned long clone_flags,
        }
 
        /* set up things up so the scheduler can start the new task */
-       p->thread.__frame = c_kregs;
+       ti->frame       = c_kregs;
        p->thread.a3    = (unsigned long) c_kregs;
        p->thread.sp    = c_ksp;
        p->thread.pc    = (unsigned long) ret_from_fork;
@@ -247,25 +279,26 @@ int copy_thread(unsigned long clone_flags,
 
 /*
  * clone a process
- * - tlsptr is retrieved by copy_thread() from __frame->d3
+ * - tlsptr is retrieved by copy_thread() from current_frame()->d3
  */
 asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp,
                          int __user *parent_tidptr, int __user *child_tidptr,
                          int __user *tlsptr)
 {
-       return do_fork(clone_flags, newsp ?: __frame->sp, __frame, 0,
-                      parent_tidptr, child_tidptr);
+       return do_fork(clone_flags, newsp ?: current_frame()->sp,
+                      current_frame(), 0, parent_tidptr, child_tidptr);
 }
 
 asmlinkage long sys_fork(void)
 {
-       return do_fork(SIGCHLD, __frame->sp, __frame, 0, NULL, NULL);
+       return do_fork(SIGCHLD, current_frame()->sp,
+                      current_frame(), 0, NULL, NULL);
 }
 
 asmlinkage long sys_vfork(void)
 {
-       return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, __frame->sp, __frame,
-                      0, NULL, NULL);
+       return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, current_frame()->sp,
+                      current_frame(), 0, NULL, NULL);
 }
 
 asmlinkage long sys_execve(const char __user *name,
@@ -279,7 +312,7 @@ asmlinkage long sys_execve(const char __user *name,
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                return error;
-       error = do_execve(filename, argv, envp, __frame);
+       error = do_execve(filename, argv, envp, current_frame());
        putname(filename);
        return error;
 }
index 20d7d0306b1680e023518da900305e79ae3205fe..4f342f75d00cd18c9428e02dbb8a0e849dc73f41 100644 (file)
@@ -41,7 +41,7 @@ static __init int profile_init(void)
        tmp = TM11ICR;
 
        printk(KERN_INFO "Profiling initiated on timer 11, priority 0, %uHz\n",
-              mn10300_ioclk / 8 / (TM11BR + 1));
+              MN10300_IOCLK / 8 / (TM11BR + 1));
        printk(KERN_INFO "Profile histogram stored %p-%p\n",
               prof_buffer, (u8 *)(prof_buffer + prof_len) - 1);
 
index cf847dabc1bd3a7cd810628ea2500bbfd8b922a7..5c0b07e610063cdf34861bc0c2fc63666f696d4d 100644 (file)
@@ -295,31 +295,31 @@ void ptrace_disable(struct task_struct *child)
 /*
  * handle the arch-specific side of process tracing
  */
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        unsigned long tmp;
        int ret;
+       unsigned long __user *datap = (unsigned long __user *) data;
 
        switch (request) {
        /* read the word at location addr in the USER area. */
        case PTRACE_PEEKUSR:
                ret = -EIO;
-               if ((addr & 3) || addr < 0 ||
-                   addr > sizeof(struct user) - 3)
+               if ((addr & 3) || addr > sizeof(struct user) - 3)
                        break;
 
                tmp = 0;  /* Default return condition */
                if (addr < NR_PTREGS << 2)
                        tmp = get_stack_long(child,
                                             ptrace_regid_to_frame[addr]);
-               ret = put_user(tmp, (unsigned long *) data);
+               ret = put_user(tmp, datap);
                break;
 
                /* write the word at location addr in the USER area */
        case PTRACE_POKEUSR:
                ret = -EIO;
-               if ((addr & 3) || addr < 0 ||
-                   addr > sizeof(struct user) - 3)
+               if ((addr & 3) || addr > sizeof(struct user) - 3)
                        break;
 
                ret = 0;
@@ -332,25 +332,25 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                return copy_regset_to_user(child, &user_mn10300_native_view,
                                           REGSET_GENERAL,
                                           0, NR_PTREGS * sizeof(long),
-                                          (void __user *)data);
+                                          datap);
 
        case PTRACE_SETREGS:    /* Set all integer regs in the child. */
                return copy_regset_from_user(child, &user_mn10300_native_view,
                                             REGSET_GENERAL,
                                             0, NR_PTREGS * sizeof(long),
-                                            (const void __user *)data);
+                                            datap);
 
        case PTRACE_GETFPREGS:  /* Get the child FPU state. */
                return copy_regset_to_user(child, &user_mn10300_native_view,
                                           REGSET_FPU,
                                           0, sizeof(struct fpu_state_struct),
-                                          (void __user *)data);
+                                          datap);
 
        case PTRACE_SETFPREGS:  /* Set the child FPU state. */
                return copy_regset_from_user(child, &user_mn10300_native_view,
                                             REGSET_FPU,
                                             0, sizeof(struct fpu_state_struct),
-                                            (const void __user *)data);
+                                            datap);
 
        default:
                ret = ptrace_request(child, request, addr, data);
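Dropping the addr < 0 tests above is safe because addr is now unsigned long: a negative offset passed from user space wraps to a huge positive value, so the upper-bound comparison alone rejects it. A standalone illustration (here struct user from sys/user.h stands in for the kernel's layout):

	#include <assert.h>
	#include <sys/user.h>

	int main(void)
	{
		unsigned long addr = (unsigned long) -4;  /* "negative" offset */

		/* 0xfffffffc on 32-bit: the single range check catches it */
		assert(addr > sizeof(struct user) - 3);
		return 0;
	}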
index 4eef0e7224f63728feb8f99216cbad2b5be9999f..e9e20f9a4dd37de605f93fef669195ecda900296 100644 (file)
 DEFINE_SPINLOCK(rtc_lock);
 EXPORT_SYMBOL(rtc_lock);
 
-/* time for RTC to update itself in ioclks */
-static unsigned long mn10300_rtc_update_period;
-
+/*
+ * Read the current RTC time
+ */
 void read_persistent_clock(struct timespec *ts)
 {
        struct rtc_time tm;
 
        get_rtc_time(&tm);
 
-       ts->tv_sec = mktime(tm.tm_year, tm.tm_mon, tm.tm_mday,
-                     tm.tm_hour, tm.tm_min, tm.tm_sec);
        ts->tv_nsec = 0;
+       ts->tv_sec = mktime(tm.tm_year, tm.tm_mon, tm.tm_mday,
+                           tm.tm_hour, tm.tm_min, tm.tm_sec);
+
+       /* if rtc is way off in the past, set something reasonable */
+       if (ts->tv_sec < 0)
+               ts->tv_sec = mktime(2009, 1, 1, 12, 0, 0);
 }
 
 /*
@@ -115,39 +119,14 @@ int update_persistent_clock(struct timespec now)
  */
 void __init calibrate_clock(void)
 {
-       unsigned long count0, counth, count1;
        unsigned char status;
 
        /* make sure the RTC is running and is set to operate in 24hr mode */
        status = RTSRC;
        RTCRB |= RTCRB_SET;
        RTCRB |= RTCRB_TM_24HR;
+       RTCRB &= ~RTCRB_DM_BINARY;
        RTCRA |= RTCRA_DVR;
        RTCRA &= ~RTCRA_DVR;
        RTCRB &= ~RTCRB_SET;
-
-       /* work out the clock speed by counting clock cycles between ends of
-        * the RTC update cycle - track the RTC through one complete update
-        * cycle (1 second)
-        */
-       startup_timestamp_counter();
-
-       while (!(RTCRA & RTCRA_UIP)) {}
-       while ((RTCRA & RTCRA_UIP)) {}
-
-       count0 = TMTSCBC;
-
-       while (!(RTCRA & RTCRA_UIP)) {}
-
-       counth = TMTSCBC;
-
-       while ((RTCRA & RTCRA_UIP)) {}
-
-       count1 = TMTSCBC;
-
-       shutdown_timestamp_counter();
-
-       MN10300_TSCCLK = count0 - count1; /* the timers count down */
-       mn10300_rtc_update_period = counth - count1;
-       MN10300_TSC_PER_HZ = MN10300_TSCCLK / HZ;
 }
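read_persistent_clock() now guards against an unset RTC by substituting noon on 2009-01-01 whenever the converted time comes out negative. A userspace check of that fallback, using a copy of the kernel's well-known mktime() day-count formula (the in-kernel version lives in kernel/time.c):

#include <stdio.h>

/* Userspace copy of the kernel's mktime() algorithm (kernel/time.c). */
static unsigned long my_mktime(unsigned int year, unsigned int mon,
			       unsigned int day, unsigned int hour,
			       unsigned int min, unsigned int sec)
{
	if (0 >= (int) (mon -= 2)) {	/* puts Feb last since it has leap day */
		mon += 12;
		year -= 1;
	}
	return ((((unsigned long)(year/4 - year/100 + year/400 +
				  367*mon/12 + day) + year*365 - 719499
		  ) * 24 + hour) * 60 + min) * 60 + sec;
}

int main(void)
{
	/* An unset RTC can yield a pre-1970 date; mimic the fallback. */
	long t = -12345;	/* pretend result of converting the RTC */

	if (t < 0)
		t = my_mktime(2009, 1, 1, 12, 0, 0);
	printf("%ld\n", t);	/* 1230811200 */
	return 0;
}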
index d464affcba0e31a0d201ac07f51fc7e4bd55cbf6..9e7a3209a3e1bc7297a9a9c7704cbcd46d6ae45c 100644
@@ -22,6 +22,7 @@
 #include <linux/init.h>
 #include <linux/bootmem.h>
 #include <linux/seq_file.h>
+#include <linux/cpu.h>
 #include <asm/processor.h>
 #include <linux/console.h>
 #include <asm/uaccess.h>
@@ -30,7 +31,6 @@
 #include <asm/io.h>
 #include <asm/smp.h>
 #include <proc/proc.h>
-#include <asm/busctl-regs.h>
 #include <asm/fpu.h>
 #include <asm/sections.h>
 
@@ -64,11 +64,13 @@ unsigned long memory_size;
 struct thread_info *__current_ti = &init_thread_union.thread_info;
 struct task_struct *__current = &init_task;
 
-#define mn10300_known_cpus 3
+#define mn10300_known_cpus 5
 static const char *const mn10300_cputypes[] = {
-       "am33v1",
-       "am33v2",
-       "am34v1",
+       "am33-1",
+       "am33-2",
+       "am34-1",
+       "am33-3",
+       "am34-2",
        "unknown"
 };
 
@@ -123,6 +125,7 @@ void __init setup_arch(char **cmdline_p)
 
        cpu_init();
        unit_setup();
+       smp_init_cpus();
        parse_mem_cmdline(cmdline_p);
 
        init_mm.start_code = (unsigned long)&_text;
@@ -179,57 +182,55 @@ void __init setup_arch(char **cmdline_p)
 void __init cpu_init(void)
 {
        unsigned long cpurev = CPUREV, type;
-       unsigned long base, size;
 
        type = (CPUREV & CPUREV_TYPE) >> CPUREV_TYPE_S;
        if (type > mn10300_known_cpus)
                type = mn10300_known_cpus;
 
-       printk(KERN_INFO "Matsushita %s, rev %ld\n",
+       printk(KERN_INFO "Panasonic %s, rev %ld\n",
               mn10300_cputypes[type],
               (cpurev & CPUREV_REVISION) >> CPUREV_REVISION_S);
 
-       /* determine the memory size and base from the memory controller regs */
-       memory_size = 0;
-
-       base = SDBASE(0);
-       if (base & SDBASE_CE) {
-               size = (base & SDBASE_CBAM) << SDBASE_CBAM_SHIFT;
-               size = ~size + 1;
-               base &= SDBASE_CBA;
+       get_mem_info(&phys_memory_base, &memory_size);
+       phys_memory_end = phys_memory_base + memory_size;
 
-               printk(KERN_INFO "SDRAM[0]: %luMb @%08lx\n", size >> 20, base);
-               memory_size += size;
-               phys_memory_base = base;
-       }
+       fpu_init_state();
+}
 
-       base = SDBASE(1);
-       if (base & SDBASE_CE) {
-               size = (base & SDBASE_CBAM) << SDBASE_CBAM_SHIFT;
-               size = ~size + 1;
-               base &= SDBASE_CBA;
+static struct cpu cpu_devices[NR_CPUS];
 
-               printk(KERN_INFO "SDRAM[1]: %luMb @%08lx\n", size >> 20, base);
-               memory_size += size;
-               if (phys_memory_base == 0)
-                       phys_memory_base = base;
-       }
+static int __init topology_init(void)
+{
+       int i;
 
-       phys_memory_end = phys_memory_base + memory_size;
+       for_each_present_cpu(i)
+               register_cpu(&cpu_devices[i], i);
 
-#ifdef CONFIG_FPU
-       fpu_init_state();
-#endif
+       return 0;
 }
 
+subsys_initcall(topology_init);
+
 /*
  * Get CPU information for use by the procfs.
  */
 static int show_cpuinfo(struct seq_file *m, void *v)
 {
+#ifdef CONFIG_SMP
+       struct mn10300_cpuinfo *c = v;
+       unsigned long cpu_id = c - cpu_data;
+       unsigned long cpurev = c->type, type, icachesz, dcachesz;
+#else  /* CONFIG_SMP */
+       unsigned long cpu_id = 0;
        unsigned long cpurev = CPUREV, type, icachesz, dcachesz;
+#endif /* CONFIG_SMP */
 
-       type = (CPUREV & CPUREV_TYPE) >> CPUREV_TYPE_S;
+#ifdef CONFIG_SMP
+       if (!cpu_online(cpu_id))
+               return 0;
+#endif
+
+       type = (cpurev & CPUREV_TYPE) >> CPUREV_TYPE_S;
        if (type > mn10300_known_cpus)
                type = mn10300_known_cpus;
 
@@ -244,13 +245,14 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                1024;
 
        seq_printf(m,
-                  "processor  : 0\n"
-                  "vendor_id  : Matsushita\n"
+                  "processor  : %ld\n"
+                  "vendor_id  : " PROCESSOR_VENDOR_NAME "\n"
                   "cpu core   : %s\n"
                   "cpu rev    : %lu\n"
                   "model name : " PROCESSOR_MODEL_NAME         "\n"
                   "icache size: %lu\n"
                   "dcache size: %lu\n",
+                  cpu_id,
                   mn10300_cputypes[type],
                   (cpurev & CPUREV_REVISION) >> CPUREV_REVISION_S,
                   icachesz,
@@ -262,8 +264,13 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                   "bogomips   : %lu.%02lu\n\n",
                   MN10300_IOCLK / 1000000,
                   (MN10300_IOCLK / 10000) % 100,
+#ifdef CONFIG_SMP
+                  c->loops_per_jiffy / (500000 / HZ),
+                  (c->loops_per_jiffy / (5000 / HZ)) % 100
+#else  /* CONFIG_SMP */
                   loops_per_jiffy / (500000 / HZ),
                   (loops_per_jiffy / (5000 / HZ)) % 100
+#endif /* CONFIG_SMP */
                   );
 
        return 0;
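The seq_printf() arithmetic converts loops_per_jiffy into the familiar BogoMIPS reading: lpj / (500000 / HZ) is the integer part and (lpj / (5000 / HZ)) % 100 the two fractional digits. A standalone check with invented values (HZ assumed to be 100):

#include <stdio.h>

#define HZ 100	/* assumed; pick whatever the config uses */

int main(void)
{
	/* loops_per_jiffy -> BogoMIPS, exactly as in show_cpuinfo(). */
	unsigned long lpj = 747520;	/* made-up sample value */

	printf("bogomips   : %lu.%02lu\n",
	       lpj / (500000 / HZ),		/* integer part: 149 */
	       (lpj / (5000 / HZ)) % 100);	/* fraction: 50 */
	return 0;
}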
index d4de05ab786464cd585e7f1f0e6ed1652ad3de1c..690f4e9507d77811ef58f75a57960b1b078189a5 100644
@@ -91,7 +91,7 @@ asmlinkage long sys_sigaction(int sig,
  */
 asmlinkage long sys_sigaltstack(const stack_t __user *uss, stack_t *uoss)
 {
-       return do_sigaltstack(uss, uoss, __frame->sp);
+       return do_sigaltstack(uss, uoss, current_frame()->sp);
 }
 
 /*
@@ -156,10 +156,11 @@ badframe:
  */
 asmlinkage long sys_sigreturn(void)
 {
-       struct sigframe __user *frame = (struct sigframe __user *) __frame->sp;
+       struct sigframe __user *frame;
        sigset_t set;
        long d0;
 
+       frame = (struct sigframe __user *) current_frame()->sp;
        if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
                goto badframe;
        if (__get_user(set.sig[0], &frame->sc.oldmask))
@@ -176,7 +177,7 @@ asmlinkage long sys_sigreturn(void)
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
 
-       if (restore_sigcontext(__frame, &frame->sc, &d0))
+       if (restore_sigcontext(current_frame(), &frame->sc, &d0))
                goto badframe;
 
        return d0;
@@ -191,11 +192,11 @@ badframe:
  */
 asmlinkage long sys_rt_sigreturn(void)
 {
-       struct rt_sigframe __user *frame =
-               (struct rt_sigframe __user *) __frame->sp;
+       struct rt_sigframe __user *frame;
        sigset_t set;
-       unsigned long d0;
+       long d0;
 
+       frame = (struct rt_sigframe __user *) current_frame()->sp;
        if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
                goto badframe;
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
@@ -207,10 +208,11 @@ asmlinkage long sys_rt_sigreturn(void)
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
 
-       if (restore_sigcontext(__frame, &frame->uc.uc_mcontext, &d0))
+       if (restore_sigcontext(current_frame(), &frame->uc.uc_mcontext, &d0))
                goto badframe;
 
-       if (do_sigaltstack(&frame->uc.uc_stack, NULL, __frame->sp) == -EFAULT)
+       if (do_sigaltstack(&frame->uc.uc_stack, NULL, current_frame()->sp) ==
+           -EFAULT)
                goto badframe;
 
        return d0;
@@ -572,7 +574,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags)
 
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
-               tracehook_notify_resume(__frame);
+               tracehook_notify_resume(current_frame());
                if (current->replacement_session_keyring)
                        key_replace_session_keyring();
        }
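Replacing the removed __frame global (see the traps.c hunk further down) with current_frame() matters for SMP: each CPU must reach the saved user-register frame through its own current task rather than through one shared pointer. The kernel's actual definition is not shown in this diff; the sketch below is an illustrative userspace mock with invented names and layout:

#include <stdio.h>

/* Illustrative mock only: names and layout are invented, not the
 * kernel's.  The point is that the saved user-register frame is now
 * reached through the current task instead of one global pointer. */
struct pt_regs { unsigned long sp; };
struct thread_info { struct pt_regs *frame; };

static struct pt_regs boot_regs = { .sp = 0xbf000000UL };
static struct thread_info boot_task = { .frame = &boot_regs };

static struct thread_info *current_thread_info(void)
{
	return &boot_task;	/* per-CPU/per-task lookup in the kernel */
}

#define current_frame() (current_thread_info()->frame)

int main(void)
{
	printf("sp = %#lx\n", current_frame()->sp);
	return 0;
}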
diff --git a/arch/mn10300/kernel/smp-low.S b/arch/mn10300/kernel/smp-low.S
new file mode 100644
index 0000000..72938ce
--- /dev/null
@@ -0,0 +1,97 @@
+/* SMP IPI low-level handler
+ *
+ * Copyright (C) 2006-2007 Matsushita Electric Industrial Co., Ltd.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/smp.h>
+#include <asm/system.h>
+#include <asm/thread_info.h>
+#include <asm/cpu-regs.h>
+#include <proc/smp-regs.h>
+#include <asm/asm-offsets.h>
+#include <asm/frame.inc>
+
+       .am33_2
+
+###############################################################################
+#
+# IPI interrupt handler
+#
+###############################################################################
+       .globl mn10300_low_ipi_handler
+mn10300_low_ipi_handler:
+       add     -4,sp
+       mov     d0,(sp)
+       movhu   (IAGR),d0
+       and     IAGR_GN,d0
+       lsr     0x2,d0
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+       cmp     FLUSH_CACHE_IPI,d0
+       beq     mn10300_flush_cache_ipi
+#endif
+       cmp     SMP_BOOT_IRQ,d0
+       beq     mn10300_smp_boot_ipi
+       /* OTHERS */
+       mov     (sp),d0
+       add     4,sp
+#ifdef CONFIG_GDBSTUB
+       jmp     gdbstub_io_rx_handler
+#else
+       jmp     end
+#endif
+
+###############################################################################
+#
+# Cache flush IPI interrupt handler
+#
+###############################################################################
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+mn10300_flush_cache_ipi:
+       mov     (sp),d0
+       add     4,sp
+
+       /* FLUSH_CACHE_IPI */
+       add     -4,sp
+       SAVE_ALL
+       mov     GxICR_DETECT,d2
+       movbu   d2,(GxICR(FLUSH_CACHE_IPI))     # ACK the interrupt
+       movhu   (GxICR(FLUSH_CACHE_IPI)),d2
+       call    smp_cache_interrupt[],0
+       RESTORE_ALL
+       jmp     end
+#endif
+
+###############################################################################
+#
+# SMP boot CPU IPI interrupt handler
+#
+###############################################################################
+mn10300_smp_boot_ipi:
+       /* clear interrupt */
+       movhu   (GxICR(SMP_BOOT_IRQ)),d0
+       and     ~GxICR_REQUEST,d0
+       movhu   d0,(GxICR(SMP_BOOT_IRQ))
+       mov     (sp),d0
+       add     4,sp
+
+       # get stack
+       mov     (CPUID),a0
+       add     -1,a0
+       add     a0,a0
+       add     a0,a0
+       mov     (start_stack,a0),a0
+       mov     a0,sp
+       jmp     initialize_secondary
+
+
+# Jump here after RTI to suppress the icache lookahead
+end:
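In mn10300_smp_boot_ipi above, the (CPUID - 1) value is doubled twice to form a byte offset into start_stack[], an array of 32-bit boot stack pointers with no slot for the boot CPU. A C rendering of that indexing (array contents invented):

#include <stdio.h>

int main(void)
{
	/* One boot stack pointer per secondary CPU; the boot CPU (#0)
	 * has no slot, hence the "- 1".  Values are invented. */
	unsigned long start_stack[3] = {
		0x90100000UL, 0x90200000UL, 0x90300000UL
	};
	unsigned int cpuid = 2;

	/* The asm forms the byte offset (cpuid - 1) * 4 by doubling the
	 * index twice; C array indexing scales the same way. */
	unsigned long sp = start_stack[cpuid - 1];

	printf("CPU#%u boot sp = %#lx\n", cpuid, sp);
	return 0;
}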
diff --git a/arch/mn10300/kernel/smp.c b/arch/mn10300/kernel/smp.c
new file mode 100644
index 0000000..0dcd1c6
--- /dev/null
@@ -0,0 +1,1152 @@
+/* SMP support routines.
+ *
+ * Copyright (C) 2006-2008 Panasonic Corporation
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/cpumask.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/profile.h>
+#include <linux/smp.h>
+#include <asm/tlbflush.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/processor.h>
+#include <asm/bug.h>
+#include <asm/exceptions.h>
+#include <asm/hardirq.h>
+#include <asm/fpu.h>
+#include <asm/mmu_context.h>
+#include <asm/thread_info.h>
+#include <asm/cpu-regs.h>
+#include <asm/intctl-regs.h>
+#include "internal.h"
+
+#ifdef CONFIG_HOTPLUG_CPU
+#include <linux/cpu.h>
+#include <asm/cacheflush.h>
+
+static unsigned long sleep_mode[NR_CPUS];
+
+static void run_sleep_cpu(unsigned int cpu);
+static void run_wakeup_cpu(unsigned int cpu);
+#endif /* CONFIG_HOTPLUG_CPU */
+
+/*
+ * Debug Message function
+ */
+
+#undef DEBUG_SMP
+#ifdef DEBUG_SMP
+#define Dprintk(fmt, ...) printk(KERN_DEBUG fmt, ##__VA_ARGS__)
+#else
+#define Dprintk(fmt, ...) no_printk(KERN_DEBUG fmt, ##__VA_ARGS__)
+#endif
+
+/* Timeout in msec for smp_nmi_call_function(); zero means wait forever. */
+#define        CALL_FUNCTION_NMI_IPI_TIMEOUT   0
+
+/*
+ * Structure and data for smp_nmi_call_function().
+ */
+struct nmi_call_data_struct {
+       smp_call_func_t func;
+       void            *info;
+       cpumask_t       started;
+       cpumask_t       finished;
+       int             wait;
+       char            size_alignment[0]
+       __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
+} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
+
+static DEFINE_SPINLOCK(smp_nmi_call_lock);
+static struct nmi_call_data_struct *nmi_call_data;
+
+/*
+ * Data structures and variables
+ */
+static cpumask_t cpu_callin_map;       /* Bitmask of callin CPUs */
+static cpumask_t cpu_callout_map;      /* Bitmask of callout CPUs */
+cpumask_t cpu_boot_map;                        /* Bitmask of boot APs */
+unsigned long start_stack[NR_CPUS - 1];
+
+/*
+ * Per CPU parameters
+ */
+struct mn10300_cpuinfo cpu_data[NR_CPUS] __cacheline_aligned;
+
+static int cpucount;                   /* The count of boot CPUs */
+static cpumask_t smp_commenced_mask;
+cpumask_t cpu_initialized __initdata = CPU_MASK_NONE;
+
+/*
+ * Function Prototypes
+ */
+static int do_boot_cpu(int);
+static void smp_show_cpu_info(int cpu_id);
+static void smp_callin(void);
+static void smp_online(void);
+static void smp_store_cpu_info(int);
+static void smp_cpu_init(void);
+static void smp_tune_scheduling(void);
+static void send_IPI_mask(const cpumask_t *cpumask, int irq);
+static void init_ipi(void);
+
+/*
+ * IPI Initialization interrupt definitions
+ */
+static void mn10300_ipi_disable(unsigned int irq);
+static void mn10300_ipi_enable(unsigned int irq);
+static void mn10300_ipi_ack(unsigned int irq);
+static void mn10300_ipi_nop(unsigned int irq);
+
+static struct irq_chip mn10300_ipi_type = {
+       .name           = "cpu_ipi",
+       .disable        = mn10300_ipi_disable,
+       .enable         = mn10300_ipi_enable,
+       .ack            = mn10300_ipi_ack,
+       .eoi            = mn10300_ipi_nop
+};
+
+static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id);
+static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id);
+
+static struct irqaction reschedule_ipi = {
+       .handler        = smp_reschedule_interrupt,
+       .name           = "smp reschedule IPI"
+};
+static struct irqaction call_function_ipi = {
+       .handler        = smp_call_function_interrupt,
+       .name           = "smp call function IPI"
+};
+
+#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \
+    defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id);
+static struct irqaction local_timer_ipi = {
+       .handler        = smp_ipi_timer_interrupt,
+       .flags          = IRQF_DISABLED,
+       .name           = "smp local timer IPI"
+};
+#endif
+
+/**
+ * init_ipi - Initialise the IPI mechanism
+ */
+static void init_ipi(void)
+{
+       unsigned long flags;
+       u16 tmp16;
+
+       /* set up the reschedule IPI */
+       set_irq_chip_and_handler(RESCHEDULE_IPI,
+                                &mn10300_ipi_type, handle_percpu_irq);
+       setup_irq(RESCHEDULE_IPI, &reschedule_ipi);
+       set_intr_level(RESCHEDULE_IPI, RESCHEDULE_GxICR_LV);
+       mn10300_ipi_enable(RESCHEDULE_IPI);
+
+       /* set up the call function IPI */
+       set_irq_chip_and_handler(CALL_FUNC_SINGLE_IPI,
+                                &mn10300_ipi_type, handle_percpu_irq);
+       setup_irq(CALL_FUNC_SINGLE_IPI, &call_function_ipi);
+       set_intr_level(CALL_FUNC_SINGLE_IPI, CALL_FUNCTION_GxICR_LV);
+       mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI);
+
+       /* set up the local timer IPI */
+#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \
+    defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+       set_irq_chip_and_handler(LOCAL_TIMER_IPI,
+                                &mn10300_ipi_type, handle_percpu_irq);
+       setup_irq(LOCAL_TIMER_IPI, &local_timer_ipi);
+       set_intr_level(LOCAL_TIMER_IPI, LOCAL_TIMER_GxICR_LV);
+       mn10300_ipi_enable(LOCAL_TIMER_IPI);
+#endif
+
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+       /* set up the cache flush IPI */
+       flags = arch_local_cli_save();
+       __set_intr_stub(NUM2EXCEP_IRQ_LEVEL(FLUSH_CACHE_GxICR_LV),
+                       mn10300_low_ipi_handler);
+       GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT;
+       mn10300_ipi_enable(FLUSH_CACHE_IPI);
+       arch_local_irq_restore(flags);
+#endif
+
+       /* set up the NMI call function IPI */
+       flags = arch_local_cli_save();
+       GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
+       tmp16 = GxICR(CALL_FUNCTION_NMI_IPI);
+       arch_local_irq_restore(flags);
+
+       /* set up the SMP boot IPI */
+       flags = arch_local_cli_save();
+       __set_intr_stub(NUM2EXCEP_IRQ_LEVEL(SMP_BOOT_GxICR_LV),
+                       mn10300_low_ipi_handler);
+       arch_local_irq_restore(flags);
+}
+
+/**
+ * mn10300_ipi_shutdown - Shut down handling of an IPI
+ * @irq: The IPI to be shut down.
+ */
+static void mn10300_ipi_shutdown(unsigned int irq)
+{
+       unsigned long flags;
+       u16 tmp;
+
+       flags = arch_local_cli_save();
+
+       tmp = GxICR(irq);
+       GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
+       tmp = GxICR(irq);
+
+       arch_local_irq_restore(flags);
+}
+
+/**
+ * mn10300_ipi_enable - Enable an IPI
+ * @irq: The IPI to be enabled.
+ */
+static void mn10300_ipi_enable(unsigned int irq)
+{
+       unsigned long flags;
+       u16 tmp;
+
+       flags = arch_local_cli_save();
+
+       tmp = GxICR(irq);
+       GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE;
+       tmp = GxICR(irq);
+
+       arch_local_irq_restore(flags);
+}
+
+/**
+ * mn10300_ipi_disable - Disable an IPI
+ * @irq: The IPI to be disabled.
+ */
+static void mn10300_ipi_disable(unsigned int irq)
+{
+       unsigned long flags;
+       u16 tmp;
+
+       flags = arch_local_cli_save();
+
+       tmp = GxICR(irq);
+       GxICR(irq) = tmp & GxICR_LEVEL;
+       tmp = GxICR(irq);
+
+       arch_local_irq_restore(flags);
+}
+
+/**
+ * mn10300_ipi_ack - Acknowledge an IPI interrupt in the PIC
+ * @irq: The IPI to be acknowledged.
+ *
+ * Clear the interrupt detection flag for the IPI on the appropriate interrupt
+ * channel in the PIC.
+ */
+static void mn10300_ipi_ack(unsigned int irq)
+{
+       unsigned long flags;
+       u16 tmp;
+
+       flags = arch_local_cli_save();
+       GxICR_u8(irq) = GxICR_DETECT;
+       tmp = GxICR(irq);
+       arch_local_irq_restore(flags);
+}
+
+/**
+ * mn10300_ipi_nop - Dummy IPI action
+ * @irq: The IPI to be acted upon.
+ */
+static void mn10300_ipi_nop(unsigned int irq)
+{
+}
+
+/**
+ * send_IPI_mask - Send IPIs to all CPUs in list
+ * @cpumask: The list of CPUs to target.
+ * @irq: The IPI request to be sent.
+ *
+ * Send the specified IPI to all the CPUs in the list, not waiting for them to
+ * finish before returning.  The caller is responsible for synchronisation if
+ * that is needed.
+ */
+static void send_IPI_mask(const cpumask_t *cpumask, int irq)
+{
+       int i;
+       u16 tmp;
+
+       for (i = 0; i < NR_CPUS; i++) {
+               if (cpu_isset(i, *cpumask)) {
+                       /* send IPI */
+                       tmp = CROSS_GxICR(irq, i);
+                       CROSS_GxICR(irq, i) =
+                               tmp | GxICR_REQUEST | GxICR_DETECT;
+                       tmp = CROSS_GxICR(irq, i); /* flush write buffer */
+               }
+       }
+}
+
+/**
+ * send_IPI_self - Send an IPI to this CPU.
+ * @irq: The IPI request to be sent.
+ *
+ * Send the specified IPI to the current CPU.
+ */
+void send_IPI_self(int irq)
+{
+       send_IPI_mask(cpumask_of(smp_processor_id()), irq);
+}
+
+/**
+ * send_IPI_allbutself - Send IPIs to all the other CPUs.
+ * @irq: The IPI request to be sent.
+ *
+ * Send the specified IPI to all CPUs in the system barring the current one,
+ * not waiting for them to finish before returning.  The caller is responsible
+ * for synchronisation if that is needed.
+ */
+void send_IPI_allbutself(int irq)
+{
+       cpumask_t cpumask;
+
+       cpumask = cpu_online_map;
+       cpu_clear(smp_processor_id(), cpumask);
+       send_IPI_mask(&cpumask, irq);
+}
+
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+       BUG();
+       /*send_IPI_mask(mask, CALL_FUNCTION_IPI);*/
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+       send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI);
+}
+
+/**
+ * smp_send_reschedule - Send reschedule IPI to a CPU
+ * @cpu: The CPU to target.
+ */
+void smp_send_reschedule(int cpu)
+{
+       send_IPI_mask(cpumask_of(cpu), RESCHEDULE_IPI);
+}
+
+/**
+ * smp_nmi_call_function - Send a call function NMI IPI to all CPUs
+ * @func: The function to ask to be run.
+ * @info: The context data to pass to that function.
+ * @wait: If true, wait (atomically) until function is run on all CPUs.
+ *
+ * Send a non-maskable request to all CPUs in the system, requesting them to
+ * run the specified function with the given context data, and, potentially, to
+ * wait for completion of that function on all CPUs.
+ *
+ * Returns 0 if successful, or -ETIMEDOUT if we were asked to wait but hit
+ * the timeout.
+ */
+int smp_nmi_call_function(smp_call_func_t func, void *info, int wait)
+{
+       struct nmi_call_data_struct data;
+       unsigned long flags;
+       unsigned int cnt;
+       int cpus, ret = 0;
+
+       cpus = num_online_cpus() - 1;
+       if (cpus < 1)
+               return 0;
+
+       data.func = func;
+       data.info = info;
+       data.started = cpu_online_map;
+       cpu_clear(smp_processor_id(), data.started);
+       data.wait = wait;
+       if (wait)
+               data.finished = data.started;
+
+       spin_lock_irqsave(&smp_nmi_call_lock, flags);
+       nmi_call_data = &data;
+       smp_mb();
+
+       /* Send a message to all other CPUs and wait for them to respond */
+       send_IPI_allbutself(CALL_FUNCTION_NMI_IPI);
+
+       /* Wait for response */
+       if (CALL_FUNCTION_NMI_IPI_TIMEOUT > 0) {
+               for (cnt = 0;
+                    cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
+                            !cpus_empty(data.started);
+                    cnt++)
+                       mdelay(1);
+
+               if (wait && cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT) {
+                       for (cnt = 0;
+                            cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
+                                    !cpus_empty(data.finished);
+                            cnt++)
+                               mdelay(1);
+               }
+
+               if (cnt >= CALL_FUNCTION_NMI_IPI_TIMEOUT)
+                       ret = -ETIMEDOUT;
+
+       } else {
+               /* If timeout value is zero, wait until cpumask has been
+                * cleared */
+               while (!cpus_empty(data.started))
+                       barrier();
+               if (wait)
+                       while (!cpus_empty(data.finished))
+                               barrier();
+       }
+
+       spin_unlock_irqrestore(&smp_nmi_call_lock, flags);
+       return ret;
+}
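The started/finished masks above implement a two-phase handshake: each responder clears its bit in started once it has grabbed the call data, and in finished once the function has run. A single-threaded miniature of that shape (real responders run concurrently on other CPUs):

#include <stdio.h>

/* Single-threaded miniature of the handshake in smp_nmi_call_function():
 * the initiator waits for each responder to clear its bit after running
 * the function.  Here the "responders" simply run inline. */
static unsigned int started, finished;

static void responder(int cpu, void (*func)(void *), void *info)
{
	started &= ~(1u << cpu);	/* "I've grabbed the data" */
	func(info);
	finished &= ~(1u << cpu);	/* "and I've finished" */
}

static void say_hello(void *info)
{
	printf("hello from a responder (%s)\n", (const char *)info);
}

int main(void)
{
	int cpu;

	started = finished = 0x6;	/* CPUs 1 and 2 pending */
	for (cpu = 1; cpu <= 2; cpu++)
		responder(cpu, say_hello, "nmi call");

	printf("started=%#x finished=%#x\n", started, finished); /* 0 0 */
	return 0;
}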
+
+/**
+ * stop_this_cpu - Callback to stop a CPU.
+ * @unused: Callback context (ignored).
+ */
+void stop_this_cpu(void *unused)
+{
+       static volatile int stopflag;
+       unsigned long flags;
+
+#ifdef CONFIG_GDBSTUB
+       /* If another CPU is single-stepping through smp_send_stop(),
+        * clear procindebug to avoid a deadlock.
+        */
+       atomic_set(&procindebug[smp_processor_id()], 0);
+#endif /* CONFIG_GDBSTUB */
+
+       flags = arch_local_cli_save();
+       cpu_clear(smp_processor_id(), cpu_online_map);
+
+       while (!stopflag)
+               cpu_relax();
+
+       cpu_set(smp_processor_id(), cpu_online_map);
+       arch_local_irq_restore(flags);
+}
+
+/**
+ * smp_send_stop - Send a stop request to all CPUs.
+ */
+void smp_send_stop(void)
+{
+       smp_nmi_call_function(stop_this_cpu, NULL, 0);
+}
+
+/**
+ * smp_reschedule_interrupt - Reschedule IPI handler
+ * @irq: The interrupt number.
+ * @dev_id: The device ID.
+ *
+ * We need do nothing here, since the scheduling will be effected on our way
+ * back through entry.S.
+ *
+ * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
+ */
+static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id)
+{
+       /* do nothing */
+       return IRQ_HANDLED;
+}
+
+/**
+ * smp_call_function_interrupt - Call function IPI handler
+ * @irq: The interrupt number.
+ * @dev_id: The device ID.
+ *
+ * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
+ */
+static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id)
+{
+       /* generic_smp_call_function_interrupt(); */
+       generic_smp_call_function_single_interrupt();
+       return IRQ_HANDLED;
+}
+
+/**
+ * smp_nmi_call_function_interrupt - Non-maskable call function IPI handler
+ */
+void smp_nmi_call_function_interrupt(void)
+{
+       smp_call_func_t func = nmi_call_data->func;
+       void *info = nmi_call_data->info;
+       int wait = nmi_call_data->wait;
+
+       /* Notify the initiating CPU that I've grabbed the data and am about to
+        * execute the function
+        */
+       smp_mb();
+       cpu_clear(smp_processor_id(), nmi_call_data->started);
+       (*func)(info);
+
+       if (wait) {
+               smp_mb();
+               cpu_clear(smp_processor_id(), nmi_call_data->finished);
+       }
+}
+
+#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \
+    defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+/**
+ * smp_ipi_timer_interrupt - Local timer IPI handler
+ * @irq: The interrupt number.
+ * @dev_id: The device ID.
+ *
+ * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
+ */
+static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id)
+{
+       return local_timer_interrupt();
+}
+#endif
+
+void __init smp_init_cpus(void)
+{
+       int i;
+       for (i = 0; i < NR_CPUS; i++) {
+               set_cpu_possible(i, true);
+               set_cpu_present(i, true);
+       }
+}
+
+/**
+ * smp_cpu_init - Initialise AP in start_secondary.
+ *
+ * For this Application Processor, set up init_mm, initialise the FPU and
+ * configure interrupt levels 0-6.
+ */
+static void __init smp_cpu_init(void)
+{
+       unsigned long flags;
+       int cpu_id = smp_processor_id();
+       u16 tmp16;
+
+       if (test_and_set_bit(cpu_id, &cpu_initialized)) {
+               printk(KERN_WARNING "CPU#%d already initialized!\n", cpu_id);
+               for (;;)
+                       local_irq_enable();
+       }
+       printk(KERN_INFO "Initializing CPU#%d\n", cpu_id);
+
+       atomic_inc(&init_mm.mm_count);
+       current->active_mm = &init_mm;
+       BUG_ON(current->mm);
+
+       enter_lazy_tlb(&init_mm, current);
+
+       /* Force FPU initialization */
+       clear_using_fpu(current);
+
+       GxICR(CALL_FUNC_SINGLE_IPI) = CALL_FUNCTION_GxICR_LV | GxICR_DETECT;
+       mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI);
+
+       GxICR(LOCAL_TIMER_IPI) = LOCAL_TIMER_GxICR_LV | GxICR_DETECT;
+       mn10300_ipi_enable(LOCAL_TIMER_IPI);
+
+       GxICR(RESCHEDULE_IPI) = RESCHEDULE_GxICR_LV | GxICR_DETECT;
+       mn10300_ipi_enable(RESCHEDULE_IPI);
+
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+       GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT;
+       mn10300_ipi_enable(FLUSH_CACHE_IPI);
+#endif
+
+       mn10300_ipi_shutdown(SMP_BOOT_IRQ);
+
+       /* Set up the non-maskable call function IPI */
+       flags = arch_local_cli_save();
+       GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
+       tmp16 = GxICR(CALL_FUNCTION_NMI_IPI);
+       arch_local_irq_restore(flags);
+}
+
+/**
+ * smp_prepare_cpu_init - Initialise CPU in startup_secondary
+ *
+ * Configure interrupt levels 0-6 and initialise the gdbstub ICR.
+ */
+void smp_prepare_cpu_init(void)
+{
+       int loop;
+
+       /* Set the interrupt vector registers */
+       IVAR0 = EXCEP_IRQ_LEVEL0;
+       IVAR1 = EXCEP_IRQ_LEVEL1;
+       IVAR2 = EXCEP_IRQ_LEVEL2;
+       IVAR3 = EXCEP_IRQ_LEVEL3;
+       IVAR4 = EXCEP_IRQ_LEVEL4;
+       IVAR5 = EXCEP_IRQ_LEVEL5;
+       IVAR6 = EXCEP_IRQ_LEVEL6;
+
+       /* Disable all interrupts and set to priority 6 (lowest) */
+       for (loop = 0; loop < GxICR_NUM_IRQS; loop++)
+               GxICR(loop) = GxICR_LEVEL_6 | GxICR_DETECT;
+
+#ifdef CONFIG_GDBSTUB
+       /* initialise GDB-stub */
+       do {
+               unsigned long flags;
+               u16 tmp16;
+
+               flags = arch_local_cli_save();
+               GxICR(GDB_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
+               tmp16 = GxICR(GDB_NMI_IPI);
+               arch_local_irq_restore(flags);
+       } while (0);
+#endif
+}
+
+/**
+ * start_secondary - Activate a secondary CPU (AP)
+ * @unused: Thread parameter (ignored).
+ */
+int __init start_secondary(void *unused)
+{
+       smp_cpu_init();
+       smp_callin();
+       while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
+               cpu_relax();
+
+       local_flush_tlb();
+       preempt_disable();
+       smp_online();
+
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+       init_clockevents();
+#endif
+       cpu_idle();
+       return 0;
+}
+
+/**
+ * smp_prepare_cpus - Boot up secondary CPUs (APs)
+ * @max_cpus: Maximum number of CPUs to boot.
+ *
+ * Call do_boot_cpu, and boot up APs.
+ */
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+       int phy_id;
+
+       /* Setup boot CPU information */
+       smp_store_cpu_info(0);
+       smp_tune_scheduling();
+
+       init_ipi();
+
+       /* If SMP should be disabled, then finish */
+       if (max_cpus == 0) {
+               printk(KERN_INFO "SMP mode deactivated.\n");
+               goto smp_done;
+       }
+
+       /* Boot secondary CPUs (for which phy_id > 0) */
+       for (phy_id = 0; phy_id < NR_CPUS; phy_id++) {
+               /* Don't boot primary CPU */
+               if (max_cpus <= cpucount + 1)
+                       continue;
+               if (phy_id != 0)
+                       do_boot_cpu(phy_id);
+               set_cpu_possible(phy_id, true);
+               smp_show_cpu_info(phy_id);
+       }
+
+smp_done:
+       Dprintk("Boot done.\n");
+}
+
+/**
+ * smp_store_cpu_info - Save a CPU's information
+ * @cpu: The CPU to save for.
+ *
+ * Save boot_cpu_data and loops_per_jiffy for the specified CPU.
+ */
+static void __init smp_store_cpu_info(int cpu)
+{
+       struct mn10300_cpuinfo *ci = &cpu_data[cpu];
+
+       *ci = boot_cpu_data;
+       ci->loops_per_jiffy = loops_per_jiffy;
+       ci->type = CPUREV;
+}
+
+/**
+ * smp_tune_scheduling - Set time slice value
+ *
+ * Nothing to do here.
+ */
+static void __init smp_tune_scheduling(void)
+{
+}
+
+/**
+ * do_boot_cpu: Boot up one CPU
+ * @phy_id: Physical ID of CPU to boot.
+ *
+ * Send an IPI to a secondary CPU to boot it.  Returns 0 on success, 1
+ * otherwise.
+ */
+static int __init do_boot_cpu(int phy_id)
+{
+       struct task_struct *idle;
+       unsigned long send_status, callin_status;
+       int timeout, cpu_id;
+
+       send_status = GxICR_REQUEST;
+       callin_status = 0;
+       timeout = 0;
+       cpu_id = phy_id;
+
+       cpucount++;
+
+       /* Create idle thread for this CPU */
+       idle = fork_idle(cpu_id);
+       if (IS_ERR(idle))
+               panic("Failed fork for CPU#%d.", cpu_id);
+
+       idle->thread.pc = (unsigned long)start_secondary;
+
+       printk(KERN_NOTICE "Booting CPU#%d\n", cpu_id);
+       start_stack[cpu_id - 1] = idle->thread.sp;
+
+       task_thread_info(idle)->cpu = cpu_id;
+
+       /* Send boot IPI to AP */
+       send_IPI_mask(cpumask_of(phy_id), SMP_BOOT_IRQ);
+
+       Dprintk("Waiting for send to finish...\n");
+
+       /* Wait up to 100ms for the AP to receive the boot IPI */
+       do {
+               udelay(1000);
+               send_status =
+                       CROSS_GxICR(SMP_BOOT_IRQ, phy_id) & GxICR_REQUEST;
+       } while (send_status == GxICR_REQUEST && timeout++ < 100);
+
+       Dprintk("Waiting for cpu_callin_map.\n");
+
+       if (send_status == 0) {
+               /* Allow AP to start initializing */
+               cpu_set(cpu_id, cpu_callout_map);
+
+               /* Wait for the AP to set its bit in cpu_callin_map */
+               timeout = 0;
+               do {
+                       udelay(1000);
+                       callin_status = cpu_isset(cpu_id, cpu_callin_map);
+               } while (callin_status == 0 && timeout++ < 5000);
+
+               if (callin_status == 0)
+                       Dprintk("Not responding.\n");
+       } else {
+               printk(KERN_WARNING "IPI not delivered.\n");
+       }
+
+       if (send_status == GxICR_REQUEST || callin_status == 0) {
+               cpu_clear(cpu_id, cpu_callout_map);
+               cpu_clear(cpu_id, cpu_callin_map);
+               cpu_clear(cpu_id, cpu_initialized);
+               cpucount--;
+               return 1;
+       }
+       return 0;
+}
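do_boot_cpu() polls at roughly 1ms granularity: about 100ms for the boot IPI to be consumed, then up to 5s for the AP to appear in cpu_callin_map. The same poll-with-timeout pattern as a standalone sketch (usleep standing in for udelay):

#include <stdio.h>
#include <unistd.h>

/* Poll a condition at 1ms granularity, as do_boot_cpu() does while
 * waiting for the AP; returns 1 if cond() became true in time. */
static int poll_ms(int (*cond)(void), int max_ms)
{
	int t;

	for (t = 0; t < max_ms; t++) {
		if (cond())
			return 1;
		usleep(1000);
	}
	return 0;
}

static int counter;
static int became_ready(void) { return ++counter >= 40; }

int main(void)
{
	printf("ready: %d\n", poll_ms(became_ready, 100));	/* ready: 1 */
	return 0;
}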
+
+/**
+ * smp_show_cpu_info - Show SMP CPU information
+ * @cpu: The CPU of interest.
+ */
+static void __init smp_show_cpu_info(int cpu)
+{
+       struct mn10300_cpuinfo *ci = &cpu_data[cpu];
+
+       printk(KERN_INFO
+              "CPU#%d : ioclk speed: %lu.%02luMHz : bogomips : %lu.%02lu\n",
+              cpu,
+              MN10300_IOCLK / 1000000,
+              (MN10300_IOCLK / 10000) % 100,
+              ci->loops_per_jiffy / (500000 / HZ),
+              (ci->loops_per_jiffy / (5000 / HZ)) % 100);
+}
+
+/**
+ * smp_callin - Set cpu_callin_map of the current CPU ID
+ */
+static void __init smp_callin(void)
+{
+       unsigned long timeout;
+       int cpu;
+
+       cpu = smp_processor_id();
+       timeout = jiffies + (2 * HZ);
+
+       if (cpu_isset(cpu, cpu_callin_map)) {
+               printk(KERN_ERR "CPU#%d already present.\n", cpu);
+               BUG();
+       }
+       Dprintk("CPU#%d waiting for CALLOUT\n", cpu);
+
+       /* Wait up to 2s in total for the callout from the boot CPU */
+       while (time_before(jiffies, timeout)) {
+               if (cpu_isset(cpu, cpu_callout_map))
+                       break;
+               cpu_relax();
+       }
+
+       if (!time_before(jiffies, timeout)) {
+               printk(KERN_ERR
+                      "BUG: CPU#%d started up but did not get a callout!\n",
+                      cpu);
+               BUG();
+       }
+
+#ifdef CONFIG_CALIBRATE_DELAY
+       calibrate_delay();              /* Get our bogomips */
+#endif
+
+       /* Save our processor parameters */
+       smp_store_cpu_info(cpu);
+
+       /* Allow the boot processor to continue */
+       cpu_set(cpu, cpu_callin_map);
+}
+
+/**
+ * smp_online - Set cpu_online_map
+ */
+static void __init smp_online(void)
+{
+       int cpu;
+
+       cpu = smp_processor_id();
+
+       local_irq_enable();
+
+       cpu_set(cpu, cpu_online_map);
+       smp_wmb();
+}
+
+/**
+ * smp_cpus_done - Complete SMP bring-up (nothing more to do here).
+ * @max_cpus: Maximum CPU count.
+ *
+ * Do nothing.
+ */
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+}
+
+/*
+ * smp_prepare_boot_cpu - Set up state for the boot processor.
+ *
+ * Set up the cpu_online_map, cpu_callout_map and cpu_callin_map of the boot
+ * processor (CPU 0).
+ */
+void __devinit smp_prepare_boot_cpu(void)
+{
+       cpu_set(0, cpu_callout_map);
+       cpu_set(0, cpu_callin_map);
+       current_thread_info()->cpu = 0;
+}
+
+/*
+ * initialize_secondary - Initialise a secondary CPU (Application Processor).
+ *
+ * Set SP register and jump to thread's PC address.
+ */
+void initialize_secondary(void)
+{
+       asm volatile (
+               "mov    %0,sp   \n"
+               "jmp    (%1)    \n"
+               :
+               : "a"(current->thread.sp), "a"(current->thread.pc));
+}
+
+/**
+ * __cpu_up - Set smp_commenced_mask for the nominated CPU
+ * @cpu: The target CPU.
+ */
+int __devinit __cpu_up(unsigned int cpu)
+{
+       int timeout;
+
+#ifdef CONFIG_HOTPLUG_CPU
+       if (num_online_cpus() == 1)
+               disable_hlt();
+       if (sleep_mode[cpu])
+               run_wakeup_cpu(cpu);
+#endif /* CONFIG_HOTPLUG_CPU */
+
+       cpu_set(cpu, smp_commenced_mask);
+
+       /* Wait 5s total for a response */
+       for (timeout = 0 ; timeout < 5000 ; timeout++) {
+               if (cpu_isset(cpu, cpu_online_map))
+                       break;
+               udelay(1000);
+       }
+
+       BUG_ON(!cpu_isset(cpu, cpu_online_map));
+       return 0;
+}
+
+/**
+ * setup_profiling_timer - Set up the profiling timer
+ * @multiplier: The frequency multiplier to use
+ *
+ * The frequency of the profiling timer can be changed by writing a multiplier
+ * value into /proc/profile.
+ */
+int setup_profiling_timer(unsigned int multiplier)
+{
+       return -EINVAL;
+}
+
+/*
+ * CPU hotplug routines
+ */
+#ifdef CONFIG_HOTPLUG_CPU
+
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
+
+static int __init topology_init(void)
+{
+       int cpu, ret;
+
+       for_each_present_cpu(cpu) {
+               ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu);
+               if (ret)
+                       printk(KERN_WARNING
+                              "topology_init: register_cpu %d failed (%d)\n",
+                              cpu, ret);
+       }
+       return 0;
+}
+
+subsys_initcall(topology_init);
+
+int __cpu_disable(void)
+{
+       int cpu = smp_processor_id();
+       if (cpu == 0)
+               return -EBUSY;
+
+       migrate_irqs();
+       cpu_clear(cpu, current->active_mm->cpu_vm_mask);
+       return 0;
+}
+
+void __cpu_die(unsigned int cpu)
+{
+       run_sleep_cpu(cpu);
+
+       if (num_online_cpus() == 1)
+               enable_hlt();
+}
+
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+static inline void hotplug_cpu_disable_cache(void)
+{
+       int tmp;
+       asm volatile(
+               "       movhu   (%1),%0 \n"
+               "       and     %2,%0   \n"
+               "       movhu   %0,(%1) \n"
+               "1:     movhu   (%1),%0 \n"
+               "       btst    %3,%0   \n"
+               "       bne     1b      \n"
+               : "=&r"(tmp)
+               : "a"(&CHCTR),
+                 "i"(~(CHCTR_ICEN | CHCTR_DCEN)),
+                 "i"(CHCTR_ICBUSY | CHCTR_DCBUSY)
+               : "memory", "cc");
+}
+
+static inline void hotplug_cpu_enable_cache(void)
+{
+       int tmp;
+       asm volatile(
+               "movhu  (%1),%0 \n"
+               "or     %2,%0   \n"
+               "movhu  %0,(%1) \n"
+               : "=&r"(tmp)
+               : "a"(&CHCTR),
+                 "i"(CHCTR_ICEN | CHCTR_DCEN)
+               : "memory", "cc");
+}
+
+static inline void hotplug_cpu_invalidate_cache(void)
+{
+       int tmp;
+       asm volatile (
+               "movhu  (%1),%0 \n"
+               "or     %2,%0   \n"
+               "movhu  %0,(%1) \n"
+               : "=&r"(tmp)
+               : "a"(&CHCTR),
+                 "i"(CHCTR_ICINV | CHCTR_DCINV)
+               : "cc");
+}
+
+#else /* CONFIG_MN10300_CACHE_ENABLED */
+#define hotplug_cpu_disable_cache()    do {} while (0)
+#define hotplug_cpu_enable_cache()     do {} while (0)
+#define hotplug_cpu_invalidate_cache() do {} while (0)
+#endif /* CONFIG_MN10300_CACHE_ENABLED */
+
+/**
+ * hotplug_cpu_nmi_call_function - Call a function on other CPUs for hotplug
+ * @cpumask: List of target CPUs.
+ * @func: The function to call on those CPUs.
+ * @info: The context data for the function to be called.
+ * @wait: Whether to wait for the calls to complete.
+ *
+ * Non-maskably call a function on another CPU for hotplug purposes.
+ *
+ * This function must be called with maskable interrupts disabled.
+ */
+static int hotplug_cpu_nmi_call_function(cpumask_t cpumask,
+                                        smp_call_func_t func, void *info,
+                                        int wait)
+{
+       /*
+        * The address and the size of nmi_call_func_mask_data
+        * need to be aligned on L1_CACHE_BYTES.
+        */
+       static struct nmi_call_data_struct nmi_call_func_mask_data
+               __cacheline_aligned;
+       unsigned long start, end;
+
+       start = (unsigned long)&nmi_call_func_mask_data;
+       end = start + sizeof(struct nmi_call_data_struct);
+
+       nmi_call_func_mask_data.func = func;
+       nmi_call_func_mask_data.info = info;
+       nmi_call_func_mask_data.started = cpumask;
+       nmi_call_func_mask_data.wait = wait;
+       if (wait)
+               nmi_call_func_mask_data.finished = cpumask;
+
+       spin_lock(&smp_nmi_call_lock);
+       nmi_call_data = &nmi_call_func_mask_data;
+       mn10300_local_dcache_flush_range(start, end);
+       smp_wmb();
+
+       send_IPI_mask(&cpumask, CALL_FUNCTION_NMI_IPI);
+
+       do {
+               mn10300_local_dcache_inv_range(start, end);
+               barrier();
+       } while (!cpus_empty(nmi_call_func_mask_data.started));
+
+       if (wait) {
+               do {
+                       mn10300_local_dcache_inv_range(start, end);
+                       barrier();
+               } while (!cpus_empty(nmi_call_func_mask_data.finished));
+       }
+
+       spin_unlock(&smp_nmi_call_lock);
+       return 0;
+}
+
+static void restart_wakeup_cpu(void)
+{
+       unsigned int cpu = smp_processor_id();
+
+       cpu_set(cpu, cpu_callin_map);
+       local_flush_tlb();
+       cpu_set(cpu, cpu_online_map);
+       smp_wmb();
+}
+
+static void prepare_sleep_cpu(void *unused)
+{
+       sleep_mode[smp_processor_id()] = 1;
+       smp_mb();
+       mn10300_local_dcache_flush_inv();
+       hotplug_cpu_disable_cache();
+       hotplug_cpu_invalidate_cache();
+}
+
+/* When this function is called, IE=0 and NMID=0. */
+static void sleep_cpu(void *unused)
+{
+       unsigned int cpu_id = smp_processor_id();
+       /*
+        * The CALL_FUNCTION_NMI_IPI for wakeup_cpu() must not be requested
+        * before this CPU goes into SLEEP mode.
+        */
+       do {
+               smp_mb();
+               __sleep_cpu();
+       } while (sleep_mode[cpu_id]);
+       restart_wakeup_cpu();
+}
+
+static void run_sleep_cpu(unsigned int cpu)
+{
+       unsigned long flags;
+       cpumask_t cpumask = cpumask_of_cpu(cpu);
+
+       flags = arch_local_cli_save();
+       hotplug_cpu_nmi_call_function(cpumask, prepare_sleep_cpu, NULL, 1);
+       hotplug_cpu_nmi_call_function(cpumask, sleep_cpu, NULL, 0);
+       udelay(1);              /* delay for the cpu to sleep. */
+       arch_local_irq_restore(flags);
+}
+
+static void wakeup_cpu(void)
+{
+       hotplug_cpu_invalidate_cache();
+       hotplug_cpu_enable_cache();
+       smp_mb();
+       sleep_mode[smp_processor_id()] = 0;
+}
+
+static void run_wakeup_cpu(unsigned int cpu)
+{
+       unsigned long flags;
+
+       flags = arch_local_cli_save();
+#if NR_CPUS == 2
+       mn10300_local_dcache_flush_inv();
+#else
+       /*
+        * Before waking up the CPU, all online CPUs should stop and
+        * flush the D-cache for global data.
+        */
+#error NR_CPUS > 2 is not supported when CONFIG_HOTPLUG_CPU=y.
+#endif
+       hotplug_cpu_nmi_call_function(cpumask_of_cpu(cpu), wakeup_cpu, NULL, 1);
+       arch_local_irq_restore(flags);
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
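Throughout this file, CPU membership is tracked with cpu_set()/cpu_clear()/cpu_isset() on cpumask_t bitmaps. A miniature of those operations on a plain word, just to show the semantics (the real cpumask_t wraps an array of unsigned longs):

#include <stdio.h>

/* Miniature of the cpu_set/cpu_clear/cpu_isset operations this file
 * leans on; illustrative stand-ins, not the kernel's definitions. */
#define cpu_set(cpu, mask)	((mask) |= 1u << (cpu))
#define cpu_clear(cpu, mask)	((mask) &= ~(1u << (cpu)))
#define cpu_isset(cpu, mask)	(((mask) >> (cpu)) & 1u)

int main(void)
{
	unsigned int online = 0;

	cpu_set(0, online);		/* boot CPU comes up */
	cpu_set(1, online);		/* AP comes up */
	cpu_clear(1, online);		/* AP hot-unplugged */

	printf("cpu0=%u cpu1=%u\n",
	       cpu_isset(0, online), cpu_isset(1, online)); /* cpu0=1 cpu1=0 */
	return 0;
}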
index 630aad71b9461513aa0db71445d9fc1adc2f4ed6..9074d0fb8788fbc5187d6d77b026a271c91050f5 100644
@@ -15,6 +15,9 @@
 #include <linux/linkage.h>
 #include <asm/thread_info.h>
 #include <asm/cpu-regs.h>
+#ifdef CONFIG_SMP
+#include <proc/smp-regs.h>
+#endif /* CONFIG_SMP */
 
        .text
 
@@ -35,8 +38,6 @@ ENTRY(__switch_to)
        mov     d1,a1
 
        # save prev context
-       mov     (__frame),d0
-       mov     d0,(THREAD_FRAME,a0)
        mov     __switch_back,d0
        mov     d0,(THREAD_PC,a0)
        mov     sp,a2
@@ -58,8 +59,6 @@ ENTRY(__switch_to)
        mov     a2,e2
 #endif
 
-       mov     (THREAD_FRAME,a1),a2
-       mov     a2,(__frame)
        mov     (THREAD_PC,a1),a2
        mov     d2,d0                   # for ret_from_fork
        mov     d0,a0                   # for __switch_to
index 8f7f6d22783d5065a61bb8bb4636c776b5892e5f..f860a340acc920e0565f4d341fa263b5c8ea49a5 100644
 #include <linux/smp.h>
 #include <linux/profile.h>
 #include <linux/cnt32_to_63.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
 #include <asm/irq.h>
 #include <asm/div64.h>
 #include <asm/processor.h>
 #include <asm/intctl-regs.h>
 #include <asm/rtc.h>
-
-#ifdef CONFIG_MN10300_RTC
-unsigned long mn10300_ioclk;           /* system I/O clock frequency */
-unsigned long mn10300_iobclk;          /* system I/O clock frequency */
-unsigned long mn10300_tsc_per_HZ;      /* number of ioclks per jiffy */
-#endif /* CONFIG_MN10300_RTC */
+#include "internal.h"
 
 static unsigned long mn10300_last_tsc; /* time-stamp counter at last time
                                         * interrupt occurred */
 
-static irqreturn_t timer_interrupt(int irq, void *dev_id);
-
-static struct irqaction timer_irq = {
-       .handler        = timer_interrupt,
-       .flags          = IRQF_DISABLED | IRQF_SHARED | IRQF_TIMER,
-       .name           = "timer",
-};
-
 static unsigned long sched_clock_multiplier;
 
 /*
@@ -54,9 +43,12 @@ unsigned long long sched_clock(void)
        unsigned long tsc, tmp;
        unsigned product[3]; /* 96-bit intermediate value */
 
+       /* cnt32_to_63() is not safe with preemption */
+       preempt_disable();
+
        /* read the TSC value
         */
-       tsc = 0 - get_cycles(); /* get_cycles() counts down */
+       tsc = get_cycles();
 
        /* expand to 64-bits.
         * - sched_clock() must be called once a minute or better or the
@@ -64,6 +56,8 @@ unsigned long long sched_clock(void)
         */
        tsc64.ll = cnt32_to_63(tsc) & 0x7fffffffffffffffULL;
 
+       preempt_enable();
+
        /* scale the 64-bit TSC value to a nanosecond value via a 96-bit
         * intermediate
         */
@@ -90,6 +84,20 @@ static void __init mn10300_sched_clock_init(void)
                __muldiv64u(NSEC_PER_SEC, 1 << 16, MN10300_TSCCLK);
 }
 
+/**
+ * local_timer_interrupt - Local timer interrupt handler
+ *
+ * Handle local timer interrupts for this CPU.  They may have been propagated
+ * to this CPU from the CPU that actually gets them by way of an IPI.
+ */
+irqreturn_t local_timer_interrupt(void)
+{
+       profile_tick(CPU_PROFILING);
+       update_process_times(user_mode(get_irq_regs()));
+       return IRQ_HANDLED;
+}
+
+#ifndef CONFIG_GENERIC_TIME
 /*
  * advance the kernel's time keeping clocks (xtime and jiffies)
  * - we use Timer 0 & 1 cascaded as a clock to nudge us the next time
@@ -98,27 +106,73 @@ static void __init mn10300_sched_clock_init(void)
 static irqreturn_t timer_interrupt(int irq, void *dev_id)
 {
        unsigned tsc, elapse;
+       irqreturn_t ret;
 
        write_seqlock(&xtime_lock);
 
        while (tsc = get_cycles(),
-              elapse = mn10300_last_tsc - tsc, /* time elapsed since last
+              elapse = tsc - mn10300_last_tsc, /* time elapsed since last
                                                 * tick */
               elapse > MN10300_TSC_PER_HZ
               ) {
-               mn10300_last_tsc -= MN10300_TSC_PER_HZ;
+               mn10300_last_tsc += MN10300_TSC_PER_HZ;
 
                /* advance the kernel's time tracking system */
-               profile_tick(CPU_PROFILING);
                do_timer(1);
        }
 
        write_sequnlock(&xtime_lock);
 
-       update_process_times(user_mode(get_irq_regs()));
+       ret = local_timer_interrupt();
+#ifdef CONFIG_SMP
+       send_IPI_allbutself(LOCAL_TIMER_IPI);
+#endif
+       return ret;
+}
 
-       return IRQ_HANDLED;
+static struct irqaction timer_irq = {
+       .handler        = timer_interrupt,
+       .flags          = IRQF_DISABLED | IRQF_SHARED | IRQF_TIMER,
+       .name           = "timer",
+};
+#endif /* !CONFIG_GENERIC_TIME */
+
+#ifdef CONFIG_CSRC_MN10300
+void __init clocksource_set_clock(struct clocksource *cs, unsigned int clock)
+{
+       u64 temp;
+       u32 shift;
+
+       /* Find a shift value */
+       for (shift = 32; shift > 0; shift--) {
+               temp = (u64) NSEC_PER_SEC << shift;
+               do_div(temp, clock);
+               if ((temp >> 32) == 0)
+                       break;
+       }
+       cs->shift = shift;
+       cs->mult = (u32) temp;
 }
+#endif
+
+#ifdef CONFIG_CEVT_MN10300
+void __cpuinit clockevent_set_clock(struct clock_event_device *cd,
+                                   unsigned int clock)
+{
+       u64 temp;
+       u32 shift;
+
+       /* Find a shift value */
+       for (shift = 32; shift > 0; shift--) {
+               temp = (u64) clock << shift;
+               do_div(temp, NSEC_PER_SEC);
+               if ((temp >> 32) == 0)
+                       break;
+       }
+       cd->shift = shift;
+       cd->mult = (u32) temp;
+}
+#endif
 
 /*
  * initialise the various timers used by the main part of the kernel
@@ -131,21 +185,25 @@ void __init time_init(void)
         */
        TMPSCNT |= TMPSCNT_ENABLE;
 
+#ifdef CONFIG_GENERIC_TIME
+       init_clocksource();
+#else
        startup_timestamp_counter();
+#endif
 
        printk(KERN_INFO
               "timestamp counter I/O clock running at %lu.%02lu"
               " (calibrated against RTC)\n",
               MN10300_TSCCLK / 1000000, (MN10300_TSCCLK / 10000) % 100);
 
-       mn10300_last_tsc = TMTSCBC;
-
-       /* use timer 0 & 1 cascaded to tick at as close to HZ as possible */
-       setup_irq(TMJCIRQ, &timer_irq);
+       mn10300_last_tsc = read_timestamp_counter();
 
-       set_intr_level(TMJCIRQ, TMJCICR_LEVEL);
-
-       startup_jiffies_counter();
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+       init_clockevents();
+#else
+       reload_jiffies_counter(MN10300_JC_PER_HZ - 1);
+       setup_jiffies_interrupt(TMJCIRQ, &timer_irq, CONFIG_TIMER_IRQ_LEVEL);
+#endif
 
 #ifdef CONFIG_MN10300_WD_TIMER
        /* start the watchdog timer */
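clocksource_set_clock() and clockevent_set_clock() both search for the largest shift whose corresponding mult still fits in 32 bits, so that later cycles-to-ns (or ns-to-cycles) conversions keep maximal precision in a 64-bit multiply. A standalone version of the clocksource variant, with an invented clock rate:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* Same search as clocksource_set_clock(): largest shift for which
 * (NSEC_PER_SEC << shift) / clock still fits in 32 bits. */
static void set_clock(uint32_t *mult, uint32_t *shift, unsigned int clock)
{
	uint64_t temp = 0;
	uint32_t s;

	for (s = 32; s > 0; s--) {
		temp = (NSEC_PER_SEC << s) / clock;
		if ((temp >> 32) == 0)
			break;
	}
	*shift = s;
	*mult = (uint32_t)temp;
}

int main(void)
{
	uint32_t mult, shift;
	unsigned int clock = 33333333;	/* invented 33.33MHz ioclk */
	uint64_t cycles = 33333333;	/* one second's worth of cycles */

	set_clock(&mult, &shift, clock);

	/* cycles -> ns, the way the clocksource core uses mult/shift;
	 * the result lands within rounding error of NSEC_PER_SEC. */
	printf("shift=%u ns=%llu\n", (unsigned)shift,
	       (unsigned long long)((cycles * mult) >> shift));
	return 0;
}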
index 91365adba4f5568925d3398545d3b9bcc9ed9f96..b90c3f160c77b0e598fe90578d9362de54e4a1f1 100644
@@ -45,9 +45,6 @@
 #error "INTERRUPT_VECTOR_BASE not aligned to 16MiB boundary!"
 #endif
 
-struct pt_regs *__frame; /* current frame pointer */
-EXPORT_SYMBOL(__frame);
-
 int kstack_depth_to_print = 24;
 
 spinlock_t die_lock = __SPIN_LOCK_UNLOCKED(die_lock);
@@ -101,7 +98,6 @@ DO_EINFO(SIGILL,  {}, "invalid opcode",              invalid_op,     ILL_ILLOPC);
 DO_EINFO(SIGILL,  {}, "invalid ex opcode",     invalid_exop,   ILL_ILLOPC);
 DO_EINFO(SIGBUS,  {}, "invalid address",       mem_error,      BUS_ADRERR);
 DO_EINFO(SIGBUS,  {}, "bus error",             bus_error,      BUS_ADRERR);
-DO_EINFO(SIGILL,  {}, "FPU invalid opcode",    fpu_invalid_op, ILL_COPROC);
 
 DO_ERROR(SIGTRAP,
 #ifndef CONFIG_MN10300_USING_JTAG
@@ -222,11 +218,14 @@ void show_registers_only(struct pt_regs *regs)
        printk(KERN_EMERG "threadinfo=%p task=%p)\n",
               current_thread_info(), current);
 
-       if ((unsigned long) current >= 0x90000000UL &&
-           (unsigned long) current < 0x94000000UL)
+       if ((unsigned long) current >= PAGE_OFFSET &&
+           (unsigned long) current < (unsigned long)high_memory)
                printk(KERN_EMERG "Process %s (pid: %d)\n",
                       current->comm, current->pid);
 
+#ifdef CONFIG_SMP
+       printk(KERN_EMERG "CPUID:  %08x\n", CPUID);
+#endif
        printk(KERN_EMERG "CPUP:   %04hx\n", CPUP);
        printk(KERN_EMERG "TBR:    %08x\n", TBR);
        printk(KERN_EMERG "DEAR:   %08x\n", DEAR);
@@ -522,8 +521,12 @@ void __init set_intr_stub(enum exception_code code, void *handler)
 {
        unsigned long addr;
        u8 *vector = (u8 *)(CONFIG_INTERRUPT_VECTOR_BASE + code);
+       unsigned long flags;
 
        addr = (unsigned long) handler - (unsigned long) vector;
+
+       flags = arch_local_cli_save();
+
        vector[0] = 0xdc;               /* JMP handler */
        vector[1] = addr;
        vector[2] = addr >> 8;
@@ -533,30 +536,12 @@ void __init set_intr_stub(enum exception_code code, void *handler)
        vector[6] = 0xcb;
        vector[7] = 0xcb;
 
-       mn10300_dcache_flush_inv();
-       mn10300_icache_inv();
-}
-
-/*
- * set an interrupt stub to invoke the JTAG unit and then jump to a handler
- */
-void __init set_jtag_stub(enum exception_code code, void *handler)
-{
-       unsigned long addr;
-       u8 *vector = (u8 *)(CONFIG_INTERRUPT_VECTOR_BASE + code);
-
-       addr = (unsigned long) handler - ((unsigned long) vector + 1);
-       vector[0] = 0xff;               /* PI to jump into JTAG debugger */
-       vector[1] = 0xdc;               /* jmp handler */
-       vector[2] = addr;
-       vector[3] = addr >> 8;
-       vector[4] = addr >> 16;
-       vector[5] = addr >> 24;
-       vector[6] = 0xcb;
-       vector[7] = 0xcb;
+       arch_local_irq_restore(flags);
 
+#ifndef CONFIG_MN10300_CACHE_SNOOP
        mn10300_dcache_flush_inv();
-       flush_icache_range((unsigned long) vector, (unsigned long) vector + 8);
+       mn10300_icache_inv();
+#endif
 }
 
 /*
@@ -581,7 +566,6 @@ void __init trap_init(void)
        set_excp_vector(EXCEP_PRIVINSACC,       insn_acc_error);
        set_excp_vector(EXCEP_PRIVDATACC,       data_acc_error);
        set_excp_vector(EXCEP_DATINSACC,        insn_acc_error);
-       set_excp_vector(EXCEP_FPU_DISABLED,     fpu_disabled);
        set_excp_vector(EXCEP_FPU_UNIMPINS,     fpu_invalid_op);
        set_excp_vector(EXCEP_FPU_OPERATION,    fpu_exception);
 
index 440a7dcbf87b52afb443451b5973d9ad16e48116..a66c6cdaf4424974de52cbc97583b294691e0d12 100644
@@ -15,7 +15,7 @@
 /*
  * try flipping a bit using BSET and BCLR
  */
-void change_bit(int nr, volatile void *addr)
+void change_bit(unsigned long nr, volatile void *addr)
 {
        if (test_bit(nr, addr))
                goto try_clear_bit;
@@ -34,7 +34,7 @@ try_clear_bit:
 /*
  * try flipping a bit using BSET and BCLR and returning the old value
  */
-int test_and_change_bit(int nr, volatile void *addr)
+int test_and_change_bit(unsigned long nr, volatile void *addr)
 {
        if (test_bit(nr, addr))
                goto try_clear_bit;
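These helpers flip a bit by testing it and then issuing BSET or BCLR, retrying if the bit changed underneath. The same test-then-flip-with-retry shape in portable C11 atomics (a sketch of the pattern, not the kernel's implementation):

#include <stdio.h>
#include <stdatomic.h>

/* C11 analogue of the BSET/BCLR retry loop: read the word, try to flip
 * the bit, and retry if another thread changed the word in between. */
static int test_and_change_bit(unsigned long nr, _Atomic unsigned long *addr)
{
	unsigned long old, mask = 1ul << nr;

	old = atomic_load(addr);
	while (!atomic_compare_exchange_weak(addr, &old, old ^ mask))
		;	/* 'old' was reloaded; retry with the fresh value */
	return (old & mask) != 0;
}

int main(void)
{
	_Atomic unsigned long word = 0x5;

	printf("old bit = %d\n", test_and_change_bit(0, &word));	/* 1 */
	printf("word    = %#lx\n", (unsigned long)atomic_load(&word)); /* 0x4 */
	return 0;
}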
index fdf6f710f94ec388f8c230afc8bff6139cc6295b..8e7ceb8ba33ddaeaf7487139fb6d9eab5f9788b4 100644
@@ -38,14 +38,14 @@ EXPORT_SYMBOL(__delay);
  */
 void __udelay(unsigned long usecs)
 {
-       signed long ioclk, stop;
+       unsigned long start, stop, cnt;
 
        /* usecs * CLK / 1E6 */
        stop = __muldiv64u(usecs, MN10300_TSCCLK, 1000000);
-       stop = TMTSCBC - stop;
+       start = TMTSCBC;
 
        do {
-               ioclk = TMTSCBC;
-       } while (stop < ioclk);
+               cnt = start - TMTSCBC;
+       } while (cnt < stop);
 }
 EXPORT_SYMBOL(__udelay);
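
The rewrite replaces a signed comparison against the free-running down-counter with an unsigned elapsed-tick count, which stays correct when TMTSCBC wraps past zero. A minimal sketch of the same pattern in plain C, with read_downcounter() as a hypothetical stand-in for the TMTSCBC register:

#include <stdint.h>

extern uint32_t read_downcounter(void);	/* stand-in for TMTSCBC */

/* Wraparound-safe busy-wait, as in the patched __udelay(): unsigned
 * subtraction keeps "elapsed" correct modulo 2^32 even when the
 * down-counter wraps during the wait. */
static void delay_ticks(uint32_t ticks)
{
	uint32_t start = read_downcounter();
	uint32_t elapsed;

	do {
		elapsed = start - read_downcounter();
	} while (elapsed < ticks);
}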
index e138994e1667dc9e45f4a1539b33ebe9ea70874a..1d27bba0cd8fced51a19fe2b77c539f95eaa2c76 100644 (file)
  */
 #include <asm/cache.h>
 
-        .section .text
-        .balign        L1_CACHE_BYTES
+       .section .text
+       .balign L1_CACHE_BYTES
 
 ###############################################################################
 #
-# unsigned int do_csum(const unsigned char *buff, size_t len)
+# unsigned int do_csum(const unsigned char *buff, int len)
 #
 ###############################################################################
        .globl  do_csum
-        .type  do_csum,@function
+       .type   do_csum,@function
 do_csum:
        movm    [d2,d3],(sp)
-       mov     d0,(12,sp)
-       mov     d1,(16,sp)
        mov     d1,d2                           # count
        mov     d0,a0                           # buff
+       mov     a0,a1
        clr     d1                              # accumulator
 
        cmp     +0,d2
-       beq     do_csum_done                    # return if zero-length buffer
+       ble     do_csum_done                    # check for zero length or negative
 
        # 4-byte align the buffer pointer
        btst    +3,a0
@@ -41,17 +40,15 @@ do_csum:
        inc     a0
        asl     +8,d0
        add     d0,d1
-       addc    +0,d1
        add     -1,d2
-do_csum_addr_not_odd:
 
+do_csum_addr_not_odd:
        cmp     +2,d2
        bcs     do_csum_fewer_than_4
        btst    +2,a0
        beq     do_csum_now_4b_aligned
        movhu   (a0+),d0
        add     d0,d1
-       addc    +0,d1
        add     -2,d2
        cmp     +4,d2
        bcs     do_csum_fewer_than_4
@@ -66,20 +63,20 @@ do_csum_now_4b_aligned:
 
 do_csum_loop:
        mov     (a0+),d0
-       add     d0,d1
        mov     (a0+),e0
-       addc    e0,d1
        mov     (a0+),e1
-       addc    e1,d1
        mov     (a0+),e3
+       add     d0,d1
+       addc    e0,d1
+       addc    e1,d1
        addc    e3,d1
        mov     (a0+),d0
-       addc    d0,d1
        mov     (a0+),e0
-       addc    e0,d1
        mov     (a0+),e1
-       addc    e1,d1
        mov     (a0+),e3
+       addc    d0,d1
+       addc    e0,d1
+       addc    e1,d1
        addc    e3,d1
        addc    +0,d1
 
@@ -94,12 +91,12 @@ do_csum_remainder:
        cmp     +16,d2
        bcs     do_csum_fewer_than_16
        mov     (a0+),d0
-       add     d0,d1
        mov     (a0+),e0
-       addc    e0,d1
        mov     (a0+),e1
-       addc    e1,d1
        mov     (a0+),e3
+       add     d0,d1
+       addc    e0,d1
+       addc    e1,d1
        addc    e3,d1
        addc    +0,d1
        add     -16,d2
@@ -131,9 +128,9 @@ do_csum_fewer_than_4:
        xor_cmp d0,d0,+2,d2
        bcs     do_csum_fewer_than_2
        movhu   (a0+),d0
-do_csum_fewer_than_2:
        and     +1,d2
        beq     do_csum_add_last_bit
+do_csum_fewer_than_2:
        movbu   (a0),d3
        add     d3,d0
 do_csum_add_last_bit:
@@ -142,21 +139,19 @@ do_csum_add_last_bit:
 
 do_csum_done:
        # compress the checksum down to 16 bits
-       mov     +0xffff0000,d2
-       and     d1,d2
+       mov     +0xffff0000,d0
+       and     d1,d0
        asl     +16,d1
-       add     d2,d1,d0
+       add     d1,d0
        addc    +0xffff,d0
        lsr     +16,d0
 
        # flip the halves of the word result if the buffer was oddly aligned
-       mov     (12,sp),d1
-       and     +1,d1
+       and     +1,a1
        beq     do_csum_not_oddly_aligned
        swaph   d0,d0                           # exchange bits 15:8 with 7:0
 
 do_csum_not_oddly_aligned:
        ret     [d2,d3],8
 
-do_csum_end:
-       .size   do_csum, do_csum_end-do_csum
+       .size   do_csum, .-do_csum
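
The do_csum_done sequence folds the 32-bit accumulator down to 16 bits with an end-around carry, then swaps the result's bytes if the buffer started on an odd address. An arithmetically equivalent C sketch (an illustration, not the kernel's csum_fold):

#include <stdint.h>

/* Fold a 32-bit ones'-complement accumulator to 16 bits, adding the
 * carries back in, as do_csum_done does with its add/addc/lsr
 * sequence; swap the bytes for oddly aligned buffers (swaph). */
static uint16_t fold32(uint32_t sum, int buf_was_odd)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* add the two halves */
	sum = (sum & 0xffff) + (sum >> 16);	/* absorb the carry */
	if (buf_was_odd)
		sum = ((sum << 8) | (sum >> 8)) & 0xffff;
	return (uint16_t)sum;
}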
diff --git a/arch/mn10300/mm/Kconfig.cache b/arch/mn10300/mm/Kconfig.cache
new file mode 100644 (file)
index 0000000..c4fd923
--- /dev/null
@@ -0,0 +1,101 @@
+#
+# MN10300 CPU cache options
+#
+
+choice
+       prompt "CPU Caching mode"
+       default MN10300_CACHE_WBACK
+       help
+         This option determines the caching mode for the kernel.
+
+         In Write-Back caching mode, all reads and writes cause the
+         affected cacheline to be read into the cache before being
+         operated upon. Memory is not updated by a write until the cache
+         is full and a cacheline has to be displaced from the cache to
+         make room; only at that point is it written back.
+
+         Write-Through caching only fetches cachelines from memory on a
+         read. Writes are always made directly to memory; if the affected
+         cacheline is also in the cache, it is updated too.
+
+         The final option is to turn off caching entirely.
+
+config MN10300_CACHE_WBACK
+       bool "Write-Back"
+       help
+         The dcache operates in delayed write-back mode.  It must be flushed
+         manually if data is written that subsequently needs to be executed
+         as code or DMA'd by a device.
+
+config MN10300_CACHE_WTHRU
+       bool "Write-Through"
+       help
+         The dcache operates in immediate write-through mode.  Writes are
+         committed to RAM immediately in addition to being stored in the
+         cache.  This means that the written data is immediately available for
+         execution or DMA.
+
+         This mode is not available for use with an SMP kernel unless cache
+         flushing and invalidation by the automatic purge registers is
+         selected.
+
+config MN10300_CACHE_DISABLED
+       bool "Disabled"
+       help
+         The icache and dcache are disabled.
+
+endchoice
+
+config MN10300_CACHE_ENABLED
+       def_bool y if !MN10300_CACHE_DISABLED
+
+
+choice
+       prompt "CPU cache flush/invalidate method"
+       default MN10300_CACHE_MANAGE_BY_TAG if !AM34_2
+       default MN10300_CACHE_MANAGE_BY_REG if AM34_2
+       depends on MN10300_CACHE_ENABLED
+       help
+         This determines the method by which CPU cache flushing and
+         invalidation is performed.
+
+config MN10300_CACHE_MANAGE_BY_TAG
+       bool "Use the cache tag registers directly"
+       depends on !(SMP && MN10300_CACHE_WTHRU)
+
+config MN10300_CACHE_MANAGE_BY_REG
+       bool "Flush areas by way of automatic purge registers (AM34 only)"
+       depends on AM34_2
+
+endchoice
+
+config MN10300_CACHE_INV_BY_TAG
+       def_bool y if MN10300_CACHE_MANAGE_BY_TAG && MN10300_CACHE_ENABLED
+
+config MN10300_CACHE_INV_BY_REG
+       def_bool y if MN10300_CACHE_MANAGE_BY_REG && MN10300_CACHE_ENABLED
+
+config MN10300_CACHE_FLUSH_BY_TAG
+       def_bool y if MN10300_CACHE_MANAGE_BY_TAG && MN10300_CACHE_WBACK
+
+config MN10300_CACHE_FLUSH_BY_REG
+       def_bool y if MN10300_CACHE_MANAGE_BY_REG && MN10300_CACHE_WBACK
+
+
+config MN10300_HAS_CACHE_SNOOP
+       def_bool n
+
+config MN10300_CACHE_SNOOP
+       bool "Use CPU Cache Snooping"
+       depends on MN10300_CACHE_ENABLED && MN10300_HAS_CACHE_SNOOP
+       default y
+
+config MN10300_CACHE_FLUSH_ICACHE
+       def_bool y if MN10300_CACHE_WBACK && !MN10300_CACHE_SNOOP
+       help
+         Set if the dcache must be flushed before the icache is invalidated.
+
+config MN10300_CACHE_INV_ICACHE
+       def_bool y if MN10300_CACHE_WTHRU && !MN10300_CACHE_SNOOP
+       help
+         Set if we need the icache to be invalidated, even if the dcache is in
+         write-through mode and doesn't need flushing.
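
The distinction these options draw matters whenever the kernel writes instructions: with a write-back dcache the bytes can linger in the dcache while the icache fetches stale RAM. A hedged sketch of the canonical fix; flush_icache_range() is the arch's own interface, while install_code() and its calling context are hypothetical:

#include <string.h>

extern void flush_icache_range(unsigned long start, unsigned long end);

/* Copy code into place, then make it fetchable.  Under
 * MN10300_CACHE_WBACK the flush writes the dcache back to RAM and
 * invalidates the icache; under write-through only the icache
 * invalidation is needed. */
static void install_code(void *dst, const void *src, unsigned long len)
{
	memcpy(dst, src, len);		/* lands in the dcache */
	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
}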
index 1557277fbc5c03962c56f39b7d1a5687bdea80bd..203fee23f7d70efebe0b6c11eb1bfdb6dc3cae6c 100644 (file)
@@ -2,11 +2,21 @@
 # Makefile for the MN10300-specific memory management code
 #
 
-cacheflush-y   := cache.o cache-mn10300.o
-cacheflush-$(CONFIG_MN10300_CACHE_WBACK) += cache-flush-mn10300.o
+cache-smp-wback-$(CONFIG_MN10300_CACHE_WBACK) := cache-smp-flush.o
+
+cacheflush-y   := cache.o
+cacheflush-$(CONFIG_SMP) += cache-smp.o cache-smp-inv.o $(cache-smp-wback-y)
+cacheflush-$(CONFIG_MN10300_CACHE_INV_ICACHE) += cache-inv-icache.o
+cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_ICACHE) += cache-flush-icache.o
+cacheflush-$(CONFIG_MN10300_CACHE_INV_BY_TAG) += cache-inv-by-tag.o
+cacheflush-$(CONFIG_MN10300_CACHE_INV_BY_REG) += cache-inv-by-reg.o
+cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_BY_TAG) += cache-flush-by-tag.o
+cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_BY_REG) += cache-flush-by-reg.o
 
 cacheflush-$(CONFIG_MN10300_CACHE_DISABLED) := cache-disabled.o
 
 obj-y := \
        init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \
        misalignment.o dma-alloc.o $(cacheflush-y)
+
+obj-$(CONFIG_SMP) += tlb-smp.o
diff --git a/arch/mn10300/mm/cache-flush-by-reg.S b/arch/mn10300/mm/cache-flush-by-reg.S
new file mode 100644 (file)
index 0000000..1dcae02
--- /dev/null
@@ -0,0 +1,308 @@
+/* MN10300 CPU core caching routines, using indirect regs on cache controller
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/smp.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/irqflags.h>
+
+       .am33_2
+
+#ifndef CONFIG_SMP
+       .globl mn10300_dcache_flush
+       .globl mn10300_dcache_flush_page
+       .globl mn10300_dcache_flush_range
+       .globl mn10300_dcache_flush_range2
+       .globl mn10300_dcache_flush_inv
+       .globl mn10300_dcache_flush_inv_page
+       .globl mn10300_dcache_flush_inv_range
+       .globl mn10300_dcache_flush_inv_range2
+
+mn10300_dcache_flush           = mn10300_local_dcache_flush
+mn10300_dcache_flush_page      = mn10300_local_dcache_flush_page
+mn10300_dcache_flush_range     = mn10300_local_dcache_flush_range
+mn10300_dcache_flush_range2    = mn10300_local_dcache_flush_range2
+mn10300_dcache_flush_inv       = mn10300_local_dcache_flush_inv
+mn10300_dcache_flush_inv_page  = mn10300_local_dcache_flush_inv_page
+mn10300_dcache_flush_inv_range = mn10300_local_dcache_flush_inv_range
+mn10300_dcache_flush_inv_range2        = mn10300_local_dcache_flush_inv_range2
+
+#endif /* !CONFIG_SMP */
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush(void)
+# Flush the entire data cache back to RAM
+#
+###############################################################################
+       ALIGN
+       .globl  mn10300_local_dcache_flush
+        .type  mn10300_local_dcache_flush,@function
+mn10300_local_dcache_flush:
+       movhu   (CHCTR),d0
+       btst    CHCTR_DCEN,d0
+       beq     mn10300_local_dcache_flush_end
+
+       mov     DCPGCR,a0
+
+       LOCAL_CLI_SAVE(d1)
+
+       # wait for busy bit of area purge
+       setlb
+       mov     (a0),d0
+       btst    DCPGCR_DCPGBSY,d0
+       lne
+
+       # set mask
+       clr     d0
+       mov     d0,(DCPGMR)
+
+       # area purge
+       #
+       # DCPGCR = DCPGCR_DCP
+       #
+       mov     DCPGCR_DCP,d0
+       mov     d0,(a0)
+
+       # wait for busy bit of area purge
+       setlb
+       mov     (a0),d0
+       btst    DCPGCR_DCPGBSY,d0
+       lne
+
+       LOCAL_IRQ_RESTORE(d1)
+
+mn10300_local_dcache_flush_end:
+       ret     [],0
+       .size   mn10300_local_dcache_flush,.-mn10300_local_dcache_flush
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_page(unsigned long start)
+# void mn10300_local_dcache_flush_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_flush_range2(unsigned long start, unsigned long size)
+# Flush a range of addresses on a page in the dcache
+#
+###############################################################################
+       ALIGN
+       .globl  mn10300_local_dcache_flush_page
+       .globl  mn10300_local_dcache_flush_range
+       .globl  mn10300_local_dcache_flush_range2
+       .type   mn10300_local_dcache_flush_page,@function
+       .type   mn10300_local_dcache_flush_range,@function
+       .type   mn10300_local_dcache_flush_range2,@function
+mn10300_local_dcache_flush_page:
+       and     ~(PAGE_SIZE-1),d0
+       mov     PAGE_SIZE,d1
+mn10300_local_dcache_flush_range2:
+       add     d0,d1
+mn10300_local_dcache_flush_range:
+       movm    [d2,d3,a2],(sp)
+
+       movhu   (CHCTR),d2
+       btst    CHCTR_DCEN,d2
+       beq     mn10300_local_dcache_flush_range_end
+
+       # calculate alignsize
+       #
+       # alignsize = L1_CACHE_BYTES;
+       # for (i = (end - start - 1) / L1_CACHE_BYTES ;  i > 0; i >>= 1)
+       #     alignsize <<= 1;
+       # d2 = alignsize;
+       #
+       mov     L1_CACHE_BYTES,d2
+       sub     d0,d1,d3
+       add     -1,d3
+       lsr     L1_CACHE_SHIFT,d3
+       beq     2f
+1:
+       add     d2,d2
+       lsr     1,d3
+       bne     1b
+2:
+       mov     d1,a1           # a1 = end
+
+       LOCAL_CLI_SAVE(d3)
+       mov     DCPGCR,a0
+
+       # wait for busy bit of area purge
+       setlb
+       mov     (a0),d1
+       btst    DCPGCR_DCPGBSY,d1
+       lne
+
+       # determine the mask
+       mov     d2,d1
+       add     -1,d1
+       not     d1              # d1 = mask = ~(alignsize-1)
+       mov     d1,(DCPGMR)
+
+       and     d1,d0,a2        # a2 = mask & start
+
+dcpgloop:
+       # area purge
+       mov     a2,d0
+       or      DCPGCR_DCP,d0
+       mov     d0,(a0)         # DCPGCR = (mask & start) | DCPGCR_DCP
+
+       # wait for busy bit of area purge
+       setlb
+       mov     (a0),d1
+       btst    DCPGCR_DCPGBSY,d1
+       lne
+
+       # check purge of end address
+       add     d2,a2           # a2 += alignsize
+       cmp     a1,a2           # if (a2 < end) goto dcpgloop
+       bns     dcpgloop
+
+       LOCAL_IRQ_RESTORE(d3)
+
+mn10300_local_dcache_flush_range_end:
+       ret     [d2,d3,a2],12
+
+       .size   mn10300_local_dcache_flush_page,.-mn10300_local_dcache_flush_page
+       .size   mn10300_local_dcache_flush_range,.-mn10300_local_dcache_flush_range
+       .size   mn10300_local_dcache_flush_range2,.-mn10300_local_dcache_flush_range2
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_inv(void)
+# Flush the entire data cache and invalidate all entries
+#
+###############################################################################
+       ALIGN
+       .globl  mn10300_local_dcache_flush_inv
+       .type   mn10300_local_dcache_flush_inv,@function
+mn10300_local_dcache_flush_inv:
+       movhu   (CHCTR),d0
+       btst    CHCTR_DCEN,d0
+       beq     mn10300_local_dcache_flush_inv_end
+
+       mov     DCPGCR,a0
+
+       LOCAL_CLI_SAVE(d1)
+
+       # wait for busy bit of area purge & invalidate
+       setlb
+       mov     (a0),d0
+       btst    DCPGCR_DCPGBSY,d0
+       lne
+
+       # set the mask to cover everything
+       clr     d0
+       mov     d0,(DCPGMR)
+
+       # area purge & invalidate
+       mov     DCPGCR_DCP|DCPGCR_DCI,d0
+       mov     d0,(a0)
+
+       # wait for busy bit of area purge & invalidate
+       setlb
+       mov     (a0),d0
+       btst    DCPGCR_DCPGBSY,d0
+       lne
+
+       LOCAL_IRQ_RESTORE(d1)
+
+mn10300_local_dcache_flush_inv_end:
+       ret     [],0
+       .size   mn10300_local_dcache_flush_inv,.-mn10300_local_dcache_flush_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_inv_page(unsigned long start)
+# void mn10300_local_dcache_flush_inv_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_flush_inv_range2(unsigned long start, unsigned long size)
+# Flush and invalidate a range of addresses on a page in the dcache
+#
+###############################################################################
+       ALIGN
+       .globl  mn10300_local_dcache_flush_inv_page
+       .globl  mn10300_local_dcache_flush_inv_range
+       .globl  mn10300_local_dcache_flush_inv_range2
+       .type   mn10300_local_dcache_flush_inv_page,@function
+       .type   mn10300_local_dcache_flush_inv_range,@function
+       .type   mn10300_local_dcache_flush_inv_range2,@function
+mn10300_local_dcache_flush_inv_page:
+       and     ~(PAGE_SIZE-1),d0
+       mov     PAGE_SIZE,d1
+mn10300_local_dcache_flush_inv_range2:
+       add     d0,d1
+mn10300_local_dcache_flush_inv_range:
+       movm    [d2,d3,a2],(sp)
+
+       movhu   (CHCTR),d2
+       btst    CHCTR_DCEN,d2
+       beq     mn10300_local_dcache_flush_inv_range_end
+
+       # calculate alignsize
+       #
+       # alignsize = L1_CACHE_BYTES;
+       # for (i = (end - start - 1) / L1_CACHE_BYTES; i > 0; i >>= 1)
+       #     alignsize <<= 1;
+       # d2 = alignsize
+       #
+       mov     L1_CACHE_BYTES,d2
+       sub     d0,d1,d3
+       add     -1,d3
+       lsr     L1_CACHE_SHIFT,d3
+       beq     2f
+1:
+       add     d2,d2
+       lsr     1,d3
+       bne     1b
+2:
+       mov     d1,a1           # a1 = end
+
+       LOCAL_CLI_SAVE(d3)
+       mov     DCPGCR,a0
+
+       # wait for busy bit of area purge & invalidate
+       setlb
+       mov     (a0),d1
+       btst    DCPGCR_DCPGBSY,d1
+       lne
+
+       # set the mask
+       mov     d2,d1
+       add     -1,d1
+       not     d1              # d1 = mask = ~(alignsize-1)
+       mov     d1,(DCPGMR)
+
+       and     d1,d0,a2        # a2 = mask & start
+
+dcpgivloop:
+       # area purge & invalidate
+       mov     a2,d0
+       or      DCPGCR_DCP|DCPGCR_DCI,d0
+       mov     d0,(a0)         # DCPGCR = (mask & start)|DCPGCR_DCP|DCPGCR_DCI
+
+       # wait for busy bit of area purge & invalidate
+       setlb
+       mov     (a0),d1
+       btst    DCPGCR_DCPGBSY,d1
+       lne
+
+       # check purge & invalidate of end address
+       add     d2,a2           # a2 += alignsize
+       cmp     a1,a2           # if (a2 < end) goto dcpgivloop
+       bns     dcpgivloop
+
+       LOCAL_IRQ_RESTORE(d3)
+
+mn10300_local_dcache_flush_inv_range_end:
+       ret     [d2,d3,a2],12
+       .size   mn10300_local_dcache_flush_inv_page,.-mn10300_local_dcache_flush_inv_page
+       .size   mn10300_local_dcache_flush_inv_range,.-mn10300_local_dcache_flush_inv_range
+       .size   mn10300_local_dcache_flush_inv_range2,.-mn10300_local_dcache_flush_inv_range2
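
The range variants above size the purge block from the range length: alignsize doubles until it covers (end - start - 1) cachelines, and ~(alignsize - 1) is written to DCPGMR. The same computation in C, a sketch mirroring the commented pseudo-code; the cacheline constants are illustrative:

#include <stdint.h>

#define L1_CACHE_SHIFT	4		/* illustrative; the real value is per-CPU */
#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)

/* Compute the purge mask used by mn10300_local_dcache_flush_range():
 * the smallest power-of-two block, at least one cacheline, covering
 * the range, complemented for the DCPGMR register. */
static uint32_t dcpgmr_mask(uint32_t start, uint32_t end)
{
	uint32_t alignsize = L1_CACHE_BYTES;
	uint32_t i;

	for (i = (end - start - 1) >> L1_CACHE_SHIFT; i > 0; i >>= 1)
		alignsize <<= 1;

	return ~(alignsize - 1);
}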
diff --git a/arch/mn10300/mm/cache-flush-by-tag.S b/arch/mn10300/mm/cache-flush-by-tag.S
new file mode 100644 (file)
index 0000000..5cd6a27
--- /dev/null
@@ -0,0 +1,251 @@
+/* MN10300 CPU core caching routines, using direct tag flushing
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/smp.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/irqflags.h>
+
+       .am33_2
+
+#ifndef CONFIG_SMP
+       .globl mn10300_dcache_flush
+       .globl mn10300_dcache_flush_page
+       .globl mn10300_dcache_flush_range
+       .globl mn10300_dcache_flush_range2
+       .globl mn10300_dcache_flush_inv
+       .globl mn10300_dcache_flush_inv_page
+       .globl mn10300_dcache_flush_inv_range
+       .globl mn10300_dcache_flush_inv_range2
+
+mn10300_dcache_flush           = mn10300_local_dcache_flush
+mn10300_dcache_flush_page      = mn10300_local_dcache_flush_page
+mn10300_dcache_flush_range     = mn10300_local_dcache_flush_range
+mn10300_dcache_flush_range2    = mn10300_local_dcache_flush_range2
+mn10300_dcache_flush_inv       = mn10300_local_dcache_flush_inv
+mn10300_dcache_flush_inv_page  = mn10300_local_dcache_flush_inv_page
+mn10300_dcache_flush_inv_range = mn10300_local_dcache_flush_inv_range
+mn10300_dcache_flush_inv_range2        = mn10300_local_dcache_flush_inv_range2
+
+#endif /* !CONFIG_SMP */
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush(void)
+# Flush the entire data cache back to RAM
+#
+###############################################################################
+       ALIGN
+       .globl  mn10300_local_dcache_flush
+        .type  mn10300_local_dcache_flush,@function
+mn10300_local_dcache_flush:
+       movhu   (CHCTR),d0
+       btst    CHCTR_DCEN,d0
+       beq     mn10300_local_dcache_flush_end
+
+       # read the addresses tagged in the cache's tag RAM and attempt to flush
+       # those addresses specifically
+       # - we rely on the hardware to filter out invalid tag entry addresses
+       mov     DCACHE_TAG(0,0),a0              # dcache tag RAM access address
+       mov     DCACHE_PURGE(0,0),a1            # dcache purge request address
+       mov     L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1  # total number of entries
+
+mn10300_local_dcache_flush_loop:
+       mov     (a0),d0
+       and     L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
+       or      L1_CACHE_TAG_VALID,d0           # retain valid entries in the
+                                               # cache
+       mov     d0,(a1)                         # conditional purge
+
+       add     L1_CACHE_BYTES,a0
+       add     L1_CACHE_BYTES,a1
+       add     -1,d1
+       bne     mn10300_local_dcache_flush_loop
+
+mn10300_local_dcache_flush_end:
+       ret     [],0
+       .size   mn10300_local_dcache_flush,.-mn10300_local_dcache_flush
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_page(unsigned long start)
+# void mn10300_local_dcache_flush_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_flush_range2(unsigned long start, unsigned long size)
+# Flush a range of addresses on a page in the dcache
+#
+###############################################################################
+       ALIGN
+       .globl  mn10300_local_dcache_flush_page
+       .globl  mn10300_local_dcache_flush_range
+       .globl  mn10300_local_dcache_flush_range2
+       .type   mn10300_local_dcache_flush_page,@function
+       .type   mn10300_local_dcache_flush_range,@function
+       .type   mn10300_local_dcache_flush_range2,@function
+mn10300_local_dcache_flush_page:
+       and     ~(PAGE_SIZE-1),d0
+       mov     PAGE_SIZE,d1
+mn10300_local_dcache_flush_range2:
+       add     d0,d1
+mn10300_local_dcache_flush_range:
+       movm    [d2],(sp)
+
+       movhu   (CHCTR),d2
+       btst    CHCTR_DCEN,d2
+       beq     mn10300_local_dcache_flush_range_end
+
+       sub     d0,d1,a0
+       cmp     MN10300_DCACHE_FLUSH_BORDER,a0
+       ble     1f
+
+       movm    (sp),[d2]
+       bra     mn10300_local_dcache_flush
+1:
+
+       # round start addr down
+       and     L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
+       mov     d0,a1
+
+       add     L1_CACHE_BYTES,d1                       # round end addr up
+       and     L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
+
+       # write a request to flush all instances of an address from the cache
+       mov     DCACHE_PURGE(0,0),a0
+       mov     a1,d0
+       and     L1_CACHE_TAG_ENTRY,d0
+       add     d0,a0                           # starting dcache purge control
+                                               # reg address
+
+       sub     a1,d1
+       lsr     L1_CACHE_SHIFT,d1               # total number of entries to
+                                               # examine
+
+       or      L1_CACHE_TAG_VALID,a1           # retain valid entries in the
+                                               # cache
+
+mn10300_local_dcache_flush_range_loop:
+       mov     a1,(L1_CACHE_WAYDISP*0,a0)      # conditionally purge this line
+                                               # all ways
+
+       add     L1_CACHE_BYTES,a0
+       add     L1_CACHE_BYTES,a1
+       and     ~L1_CACHE_WAYDISP,a0            # make sure we stay on way 0
+       add     -1,d1
+       bne     mn10300_local_dcache_flush_range_loop
+
+mn10300_local_dcache_flush_range_end:
+       ret     [d2],4
+
+       .size   mn10300_local_dcache_flush_page,.-mn10300_local_dcache_flush_page
+       .size   mn10300_local_dcache_flush_range,.-mn10300_local_dcache_flush_range
+       .size   mn10300_local_dcache_flush_range2,.-mn10300_local_dcache_flush_range2
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_inv(void)
+# Flush the entire data cache and invalidate all entries
+#
+###############################################################################
+       ALIGN
+       .globl  mn10300_local_dcache_flush_inv
+       .type   mn10300_local_dcache_flush_inv,@function
+mn10300_local_dcache_flush_inv:
+       movhu   (CHCTR),d0
+       btst    CHCTR_DCEN,d0
+       beq     mn10300_local_dcache_flush_inv_end
+
+       mov     L1_CACHE_NENTRIES,d1
+       clr     a1
+
+mn10300_local_dcache_flush_inv_loop:
+       mov     (DCACHE_PURGE_WAY0(0),a1),d0    # unconditional purge
+       mov     (DCACHE_PURGE_WAY1(0),a1),d0    # unconditional purge
+       mov     (DCACHE_PURGE_WAY2(0),a1),d0    # unconditional purge
+       mov     (DCACHE_PURGE_WAY3(0),a1),d0    # unconditional purge
+
+       add     L1_CACHE_BYTES,a1
+       add     -1,d1
+       bne     mn10300_local_dcache_flush_inv_loop
+
+mn10300_local_dcache_flush_inv_end:
+       ret     [],0
+       .size   mn10300_local_dcache_flush_inv,.-mn10300_local_dcache_flush_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_inv_page(unsigned long start)
+# void mn10300_local_dcache_flush_inv_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_flush_inv_range2(unsigned long start, unsigned long size)
+# Flush and invalidate a range of addresses on a page in the dcache
+#
+###############################################################################
+       ALIGN
+       .globl  mn10300_local_dcache_flush_inv_page
+       .globl  mn10300_local_dcache_flush_inv_range
+       .globl  mn10300_local_dcache_flush_inv_range2
+       .type   mn10300_local_dcache_flush_inv_page,@function
+       .type   mn10300_local_dcache_flush_inv_range,@function
+       .type   mn10300_local_dcache_flush_inv_range2,@function
+mn10300_local_dcache_flush_inv_page:
+       and     ~(PAGE_SIZE-1),d0
+       mov     PAGE_SIZE,d1
+mn10300_local_dcache_flush_inv_range2:
+       add     d0,d1
+mn10300_local_dcache_flush_inv_range:
+       movm    [d2],(sp)
+
+       movhu   (CHCTR),d2
+       btst    CHCTR_DCEN,d2
+       beq     mn10300_local_dcache_flush_inv_range_end
+
+       sub     d0,d1,a0
+       cmp     MN10300_DCACHE_FLUSH_INV_BORDER,a0
+       ble     1f
+
+       movm    (sp),[d2]
+       bra     mn10300_local_dcache_flush_inv
+1:
+
+       and     L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0      # round start
+                                                               # addr down
+       mov     d0,a1
+
+       add     L1_CACHE_BYTES,d1                       # round end addr up
+       and     L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
+
+       # write a request to flush and invalidate all instances of an address
+       # from the cache
+       mov     DCACHE_PURGE(0,0),a0
+       mov     a1,d0
+       and     L1_CACHE_TAG_ENTRY,d0
+       add     d0,a0                           # starting dcache purge control
+                                               # reg address
+
+       sub     a1,d1
+       lsr     L1_CACHE_SHIFT,d1               # total number of entries to
+                                               # examine
+
+mn10300_local_dcache_flush_inv_range_loop:
+       mov     a1,(L1_CACHE_WAYDISP*0,a0)      # conditionally purge this line
+                                               # in all ways
+
+       add     L1_CACHE_BYTES,a0
+       add     L1_CACHE_BYTES,a1
+       and     ~L1_CACHE_WAYDISP,a0            # make sure we stay on way 0
+       add     -1,d1
+       bne     mn10300_local_dcache_flush_inv_range_loop
+
+mn10300_local_dcache_flush_inv_range_end:
+       ret     [d2],4
+       .size   mn10300_local_dcache_flush_inv_page,.-mn10300_local_dcache_flush_inv_page
+       .size   mn10300_local_dcache_flush_inv_range,.-mn10300_local_dcache_flush_inv_range
+       .size   mn10300_local_dcache_flush_inv_range2,.-mn10300_local_dcache_flush_inv_range2
diff --git a/arch/mn10300/mm/cache-flush-icache.c b/arch/mn10300/mm/cache-flush-icache.c
new file mode 100644 (file)
index 0000000..fdb1a9d
--- /dev/null
@@ -0,0 +1,155 @@
+/* Flush dcache and invalidate icache when the dcache is in writeback mode
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include <asm/smp.h>
+#include "cache-smp.h"
+
+/**
+ * flush_icache_page - Flush a page from the dcache and invalidate the icache
+ * @vma: The VMA the page is part of.
+ * @page: The page to be flushed.
+ *
+ * Write a page back from the dcache and invalidate the icache so that we can
+ * run code that we've just written into that page.
+ */
+void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+       unsigned long start = page_to_phys(page);
+       unsigned long flags;
+
+       flags = smp_lock_cache();
+
+       mn10300_local_dcache_flush_page(start);
+       mn10300_local_icache_inv_page(start);
+
+       smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, start + PAGE_SIZE);
+       smp_unlock_cache(flags);
+}
+EXPORT_SYMBOL(flush_icache_page);
+
+/**
+ * flush_icache_page_range - Flush dcache and invalidate icache for part of a
+ *                             single page
+ * @start: The starting virtual address of the page part.
+ * @end: The ending virtual address of the page part.
+ *
+ * Flush the dcache and invalidate the icache for part of a single page, as
+ * determined by the virtual addresses given.  The page must be in the paged
+ * area.
+ */
+static void flush_icache_page_range(unsigned long start, unsigned long end)
+{
+       unsigned long addr, size, off;
+       struct page *page;
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *ppte, pte;
+
+       /* work out how much of the page to flush */
+       off = start & ~PAGE_MASK;
+       size = end - start;
+
+       /* get the physical address the page is mapped to from the page
+        * tables */
+       pgd = pgd_offset(current->mm, start);
+       if (!pgd || !pgd_val(*pgd))
+               return;
+
+       pud = pud_offset(pgd, start);
+       if (!pud || !pud_val(*pud))
+               return;
+
+       pmd = pmd_offset(pud, start);
+       if (!pmd || !pmd_val(*pmd))
+               return;
+
+       ppte = pte_offset_map(pmd, start);
+       if (!ppte)
+               return;
+       pte = *ppte;
+       pte_unmap(ppte);
+
+       if (pte_none(pte))
+               return;
+
+       page = pte_page(pte);
+       if (!page)
+               return;
+
+       addr = page_to_phys(page);
+
+       /* flush the dcache and invalidate the icache coverage on that
+        * region */
+       mn10300_local_dcache_flush_range2(addr + off, size);
+       mn10300_local_icache_inv_range2(addr + off, size);
+       smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, end);
+}
+
+/**
+ * flush_icache_range - Globally flush dcache and invalidate icache for region
+ * @start: The starting virtual address of the region.
+ * @end: The ending virtual address of the region.
+ *
+ * This is used by the kernel to globally flush some code it has just written
+ * from the dcache back to RAM and then to globally invalidate the icache over
+ * that region so that the code can be run on all CPUs in the system.
+ */
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+       unsigned long start_page, end_page;
+       unsigned long flags;
+
+       flags = smp_lock_cache();
+
+       if (end > 0x80000000UL) {
+               /* addresses above 0xa0000000 do not go through the cache */
+               if (end > 0xa0000000UL) {
+                       end = 0xa0000000UL;
+                       if (start >= end)
+                               goto done;
+               }
+
+               /* kernel addresses between 0x80000000 and 0x9fffffff do not
+                * require page tables, so we just map such addresses
+                * directly */
+               start_page = (start >= 0x80000000UL) ? start : 0x80000000UL;
+               mn10300_local_dcache_flush_range(start_page, end);
+               mn10300_local_icache_inv_range(start_page, end);
+               smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start_page, end);
+               if (start_page == start)
+                       goto done;
+               end = start_page;
+       }
+
+       start_page = start & PAGE_MASK;
+       end_page = (end - 1) & PAGE_MASK;
+
+       if (start_page == end_page) {
+               /* the first and last bytes are on the same page */
+               flush_icache_page_range(start, end);
+       } else if (start_page + 1 == end_page) {
+               /* split over two virtually contiguous pages */
+               flush_icache_page_range(start, end_page);
+               flush_icache_page_range(end_page, end);
+       } else {
+               /* more than 2 pages; just flush the entire cache */
+               mn10300_dcache_flush();
+               mn10300_icache_inv();
+               smp_cache_call(SMP_IDCACHE_INV_FLUSH, 0, 0);
+       }
+
+done:
+       smp_unlock_cache(flags);
+}
+EXPORT_SYMBOL(flush_icache_range);
diff --git a/arch/mn10300/mm/cache-flush-mn10300.S b/arch/mn10300/mm/cache-flush-mn10300.S
deleted file mode 100644 (file)
index c8ed1cb..0000000
+++ /dev/null
@@ -1,192 +0,0 @@
-/* MN10300 CPU core caching routines
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-
-#include <linux/sys.h>
-#include <linux/linkage.h>
-#include <asm/smp.h>
-#include <asm/page.h>
-#include <asm/cache.h>
-
-       .am33_2
-       .globl mn10300_dcache_flush
-       .globl mn10300_dcache_flush_page
-       .globl mn10300_dcache_flush_range
-       .globl mn10300_dcache_flush_range2
-       .globl mn10300_dcache_flush_inv
-       .globl mn10300_dcache_flush_inv_page
-       .globl mn10300_dcache_flush_inv_range
-       .globl mn10300_dcache_flush_inv_range2
-
-###############################################################################
-#
-# void mn10300_dcache_flush(void)
-# Flush the entire data cache back to RAM
-#
-###############################################################################
-       ALIGN
-mn10300_dcache_flush:
-       movhu   (CHCTR),d0
-       btst    CHCTR_DCEN,d0
-       beq     mn10300_dcache_flush_end
-
-       # read the addresses tagged in the cache's tag RAM and attempt to flush
-       # those addresses specifically
-       # - we rely on the hardware to filter out invalid tag entry addresses
-       mov     DCACHE_TAG(0,0),a0              # dcache tag RAM access address
-       mov     DCACHE_PURGE(0,0),a1            # dcache purge request address
-       mov     L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1  # total number of entries
-
-mn10300_dcache_flush_loop:
-       mov     (a0),d0
-       and     L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
-       or      L1_CACHE_TAG_VALID,d0           # retain valid entries in the
-                                               # cache
-       mov     d0,(a1)                         # conditional purge
-
-mn10300_dcache_flush_skip:
-       add     L1_CACHE_BYTES,a0
-       add     L1_CACHE_BYTES,a1
-       add     -1,d1
-       bne     mn10300_dcache_flush_loop
-
-mn10300_dcache_flush_end:
-       ret     [],0
-
-###############################################################################
-#
-# void mn10300_dcache_flush_page(unsigned start)
-# void mn10300_dcache_flush_range(unsigned start, unsigned end)
-# void mn10300_dcache_flush_range2(unsigned start, unsigned size)
-# Flush a range of addresses on a page in the dcache
-#
-###############################################################################
-       ALIGN
-mn10300_dcache_flush_page:
-       mov     PAGE_SIZE,d1
-mn10300_dcache_flush_range2:
-       add     d0,d1
-mn10300_dcache_flush_range:
-       movm    [d2,d3],(sp)
-
-       movhu   (CHCTR),d2
-       btst    CHCTR_DCEN,d2
-       beq     mn10300_dcache_flush_range_end
-
-       # round start addr down
-       and     L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
-       mov     d0,a1
-
-       add     L1_CACHE_BYTES,d1                       # round end addr up
-       and     L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
-
-       # write a request to flush all instances of an address from the cache
-       mov     DCACHE_PURGE(0,0),a0
-       mov     a1,d0
-       and     L1_CACHE_TAG_ENTRY,d0
-       add     d0,a0                           # starting dcache purge control
-                                               # reg address
-
-       sub     a1,d1
-       lsr     L1_CACHE_SHIFT,d1               # total number of entries to
-                                               # examine
-
-       or      L1_CACHE_TAG_VALID,a1           # retain valid entries in the
-                                               # cache
-
-mn10300_dcache_flush_range_loop:
-       mov     a1,(L1_CACHE_WAYDISP*0,a0)      # conditionally purge this line
-                                               # all ways
-
-       add     L1_CACHE_BYTES,a0
-       add     L1_CACHE_BYTES,a1
-       and     ~L1_CACHE_WAYDISP,a0            # make sure way stay on way 0
-       add     -1,d1
-       bne     mn10300_dcache_flush_range_loop
-
-mn10300_dcache_flush_range_end:
-       ret     [d2,d3],8
-
-###############################################################################
-#
-# void mn10300_dcache_flush_inv(void)
-# Flush the entire data cache and invalidate all entries
-#
-###############################################################################
-       ALIGN
-mn10300_dcache_flush_inv:
-       movhu   (CHCTR),d0
-       btst    CHCTR_DCEN,d0
-       beq     mn10300_dcache_flush_inv_end
-
-       # hit each line in the dcache with an unconditional purge
-       mov     DCACHE_PURGE(0,0),a1            # dcache purge request address
-       mov     L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1  # total number of entries
-
-mn10300_dcache_flush_inv_loop:
-       mov     (a1),d0                         # unconditional purge
-
-       add     L1_CACHE_BYTES,a1
-       add     -1,d1
-       bne     mn10300_dcache_flush_inv_loop
-
-mn10300_dcache_flush_inv_end:
-       ret     [],0
-
-###############################################################################
-#
-# void mn10300_dcache_flush_inv_page(unsigned start)
-# void mn10300_dcache_flush_inv_range(unsigned start, unsigned end)
-# void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size)
-# Flush and invalidate a range of addresses on a page in the dcache
-#
-###############################################################################
-       ALIGN
-mn10300_dcache_flush_inv_page:
-       mov     PAGE_SIZE,d1
-mn10300_dcache_flush_inv_range2:
-       add     d0,d1
-mn10300_dcache_flush_inv_range:
-       movm    [d2,d3],(sp)
-       movhu   (CHCTR),d2
-       btst    CHCTR_DCEN,d2
-       beq     mn10300_dcache_flush_inv_range_end
-
-       and     L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0      # round start
-                                                               # addr down
-       mov     d0,a1
-
-       add     L1_CACHE_BYTES,d1                       # round end addr up
-       and     L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
-
-       # write a request to flush and invalidate all instances of an address
-       # from the cache
-       mov     DCACHE_PURGE(0,0),a0
-       mov     a1,d0
-       and     L1_CACHE_TAG_ENTRY,d0
-       add     d0,a0                           # starting dcache purge control
-                                               # reg address
-
-       sub     a1,d1
-       lsr     L1_CACHE_SHIFT,d1               # total number of entries to
-                                               # examine
-
-mn10300_dcache_flush_inv_range_loop:
-       mov     a1,(L1_CACHE_WAYDISP*0,a0)      # conditionally purge this line
-                                               # in all ways
-
-       add     L1_CACHE_BYTES,a0
-       add     L1_CACHE_BYTES,a1
-       and     ~L1_CACHE_WAYDISP,a0            # make sure way stay on way 0
-       add     -1,d1
-       bne     mn10300_dcache_flush_inv_range_loop
-
-mn10300_dcache_flush_inv_range_end:
-       ret     [d2,d3],8
diff --git a/arch/mn10300/mm/cache-inv-by-reg.S b/arch/mn10300/mm/cache-inv-by-reg.S
new file mode 100644 (file)
index 0000000..c895086
--- /dev/null
@@ -0,0 +1,356 @@
+/* MN10300 CPU cache invalidation routines, using automatic purge registers
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/smp.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/irqflags.h>
+#include <asm/cacheflush.h>
+
+#define mn10300_local_dcache_inv_range_intr_interval \
+       +((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1)
+
+#if mn10300_local_dcache_inv_range_intr_interval > 0xff
+#error MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL must be 8 or less
+#endif
+
+       .am33_2
+
+#ifndef CONFIG_SMP
+       .globl  mn10300_icache_inv
+       .globl  mn10300_icache_inv_page
+       .globl  mn10300_icache_inv_range
+       .globl  mn10300_icache_inv_range2
+       .globl  mn10300_dcache_inv
+       .globl  mn10300_dcache_inv_page
+       .globl  mn10300_dcache_inv_range
+       .globl  mn10300_dcache_inv_range2
+
+mn10300_icache_inv             = mn10300_local_icache_inv
+mn10300_icache_inv_page                = mn10300_local_icache_inv_page
+mn10300_icache_inv_range       = mn10300_local_icache_inv_range
+mn10300_icache_inv_range2      = mn10300_local_icache_inv_range2
+mn10300_dcache_inv             = mn10300_local_dcache_inv
+mn10300_dcache_inv_page                = mn10300_local_dcache_inv_page
+mn10300_dcache_inv_range       = mn10300_local_dcache_inv_range
+mn10300_dcache_inv_range2      = mn10300_local_dcache_inv_range2
+
+#endif /* !CONFIG_SMP */
+
+###############################################################################
+#
+# void mn10300_local_icache_inv(void)
+# Invalidate the entire icache
+#
+###############################################################################
+       ALIGN
+       .globl  mn10300_local_icache_inv
+        .type  mn10300_local_icache_inv,@function
+mn10300_local_icache_inv:
+       mov     CHCTR,a0
+
+       movhu   (a0),d0
+       btst    CHCTR_ICEN,d0
+       beq     mn10300_local_icache_inv_end
+
+       # invalidate
+       or      CHCTR_ICINV,d0
+       movhu   d0,(a0)
+       movhu   (a0),d0
+
+mn10300_local_icache_inv_end:
+       ret     [],0
+       .size   mn10300_local_icache_inv,.-mn10300_local_icache_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_inv(void)
+# Invalidate the entire dcache
+#
+###############################################################################
+       ALIGN
+       .globl  mn10300_local_dcache_inv
+       .type   mn10300_local_dcache_inv,@function
+mn10300_local_dcache_inv:
+       mov     CHCTR,a0
+
+       movhu   (a0),d0
+       btst    CHCTR_DCEN,d0
+       beq     mn10300_local_dcache_inv_end
+
+       # invalidate
+       or      CHCTR_DCINV,d0
+       movhu   d0,(a0)
+       movhu   (a0),d0
+
+mn10300_local_dcache_inv_end:
+       ret     [],0
+       .size   mn10300_local_dcache_inv,.-mn10300_local_dcache_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_inv_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_inv_range2(unsigned long start, unsigned long size)
+# void mn10300_local_dcache_inv_page(unsigned long start)
+# Invalidate a range of addresses on a page in the dcache
+#
+###############################################################################
+       ALIGN
+       .globl  mn10300_local_dcache_inv_page
+       .globl  mn10300_local_dcache_inv_range
+       .globl  mn10300_local_dcache_inv_range2
+       .type   mn10300_local_dcache_inv_page,@function
+       .type   mn10300_local_dcache_inv_range,@function
+       .type   mn10300_local_dcache_inv_range2,@function
+mn10300_local_dcache_inv_page:
+       and     ~(PAGE_SIZE-1),d0
+       mov     PAGE_SIZE,d1
+mn10300_local_dcache_inv_range2:
+       add     d0,d1
+mn10300_local_dcache_inv_range:
+       # If we are in writeback mode we check the start and end alignments,
+       # and if they're not cacheline-aligned, we must flush any bits outside
+       # the range that share cachelines with stuff inside the range
+#ifdef CONFIG_MN10300_CACHE_WBACK
+       btst    ~(L1_CACHE_BYTES-1),d0
+       bne     1f
+       btst    ~(L1_CACHE_BYTES-1),d1
+       beq     2f
+1:
+       bra     mn10300_local_dcache_flush_inv_range
+2:
+#endif /* CONFIG_MN10300_CACHE_WBACK */
+
+       movm    [d2,d3,a2],(sp)
+
+       mov     CHCTR,a0
+       movhu   (a0),d2
+       btst    CHCTR_DCEN,d2
+       beq     mn10300_local_dcache_inv_range_end
+
+       # round the addresses out to full cachelines, unless we're in
+       # writeback mode, in which case we would have branched to the
+       # flush-and-invalidate path by now
+#ifndef CONFIG_MN10300_CACHE_WBACK
+       and     L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0      # round start
+                                                               # addr down
+
+       mov     L1_CACHE_BYTES-1,d2
+       add     d2,d1
+       and     L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1      # round end addr up
+#endif /* !CONFIG_MN10300_CACHE_WBACK */
+
+       sub     d0,d1,d2                # calculate the total size
+       mov     d0,a2                   # A2 = start address
+       mov     d1,a1                   # A1 = end address
+
+       LOCAL_CLI_SAVE(d3)
+
+       mov     DCPGCR,a0               # make sure the purger isn't busy
+       setlb
+       mov     (a0),d0
+       btst    DCPGCR_DCPGBSY,d0
+       lne
+
+       # skip initial address alignment calculation if address is zero
+       mov     d2,d1
+       cmp     0,a2
+       beq     1f
+
+dcivloop:
+       /* calculate alignsize
+        *
+        * alignsize = L1_CACHE_BYTES;
+        * while (!(start & alignsize)) {
+        *      alignsize <<= 1;
+        * }
+        * d1 = alignsize;
+        */
+       mov     L1_CACHE_BYTES,d1
+       lsr     1,d1
+       setlb
+       add     d1,d1
+       mov     d1,d0
+       and     a2,d0
+       leq
+
+1:
+       /* calculate invsize
+        *
+        * if (totalsize > alignsize) {
+        *      invsize = alignsize;
+        * } else {
+        *      invsize = totalsize;
+        *      tmp = 0x80000000;
+        *      while (!(invsize & tmp)) {
+        *              tmp >>= 1;
+        *      }
+        *      invsize = tmp;
+        * }
+        * d1 = invsize
+        */
+       cmp     d2,d1
+       bns     2f
+       mov     d2,d1
+
+       mov     0x80000000,d0           # start from 31bit=1
+       setlb
+       lsr     1,d0
+       mov     d0,e0
+       and     d1,e0
+       leq
+       mov     d0,d1
+
+2:
+       /* set mask
+        *
+        * mask = ~(invsize-1);
+        * DCPGMR = mask;
+        */
+       mov     d1,d0
+       add     -1,d0
+       not     d0
+       mov     d0,(DCPGMR)
+
+       # invalidate area
+       mov     a2,d0
+       or      DCPGCR_DCI,d0
+       mov     d0,(a0)                 # DCPGCR = (mask & start) | DCPGCR_DCI
+
+       setlb                           # wait for the purge to complete
+       mov     (a0),d0
+       btst    DCPGCR_DCPGBSY,d0
+       lne
+
+       sub     d1,d2                   # decrease size remaining
+       add     d1,a2                   # increase next start address
+
+       /* check invalidating of end address
+        *
+        * a2 = a2 + invsize
+        * if (a2 < end) {
+        *     goto dcivloop;
+        * } */
+       cmp     a1,a2
+       bns     dcivloop
+
+       LOCAL_IRQ_RESTORE(d3)
+
+mn10300_local_dcache_inv_range_end:
+       ret     [d2,d3,a2],12
+       .size   mn10300_local_dcache_inv_page,.-mn10300_local_dcache_inv_page
+       .size   mn10300_local_dcache_inv_range,.-mn10300_local_dcache_inv_range
+       .size   mn10300_local_dcache_inv_range2,.-mn10300_local_dcache_inv_range2
+
+###############################################################################
+#
+# void mn10300_local_icache_inv_page(unsigned long start)
+# void mn10300_local_icache_inv_range2(unsigned long start, unsigned long size)
+# void mn10300_local_icache_inv_range(unsigned long start, unsigned long end)
+# Invalidate a range of addresses on a page in the icache
+#
+###############################################################################
+       ALIGN
+       .globl  mn10300_local_icache_inv_page
+       .globl  mn10300_local_icache_inv_range
+       .globl  mn10300_local_icache_inv_range2
+       .type   mn10300_local_icache_inv_page,@function
+       .type   mn10300_local_icache_inv_range,@function
+       .type   mn10300_local_icache_inv_range2,@function
+mn10300_local_icache_inv_page:
+       and     ~(PAGE_SIZE-1),d0
+       mov     PAGE_SIZE,d1
+mn10300_local_icache_inv_range2:
+       add     d0,d1
+mn10300_local_icache_inv_range:
+       movm    [d2,d3,a2],(sp)
+
+       mov     CHCTR,a0
+       movhu   (a0),d2
+       btst    CHCTR_ICEN,d2
+       beq     mn10300_local_icache_inv_range_reg_end
+
+       /* calculate alignsize
+        *
+        * alignsize = L1_CACHE_BYTES;
+        * for (i = (end - start - 1) / L1_CACHE_BYTES ;  i > 0; i >>= 1) {
+        *     alignsize <<= 1;
+        * }
+        * d2 = alignsize;
+        */
+       mov     L1_CACHE_BYTES,d2
+       sub     d0,d1,d3
+       add     -1,d3
+       lsr     L1_CACHE_SHIFT,d3
+       beq     2f
+1:
+       add     d2,d2
+       lsr     1,d3
+       bne     1b
+2:
+
+       /* a1 = end */
+       mov     d1,a1
+
+       LOCAL_CLI_SAVE(d3)
+
+       mov     ICIVCR,a0
+       /* wait for busy bit of area invalidation */
+       setlb
+       mov     (a0),d1
+       btst    ICIVCR_ICIVBSY,d1
+       lne
+
+       /* set mask
+        *
+        * mask = ~(alignsize-1);
+        * ICIVMR = mask;
+        */
+       mov     d2,d1
+       add     -1,d1
+       not     d1
+       mov     d1,(ICIVMR)
+       /* a2 = mask & start */
+       and     d1,d0,a2
+
+icivloop:
+       /* area invalidate
+        *
+        * ICIVCR = (mask & start) | ICIVCR_ICI
+        */
+       mov     a2,d0
+       or      ICIVCR_ICI,d0
+       mov     d0,(a0)
+
+       /* wait for busy bit of area invalidation */
+       setlb
+       mov     (a0),d1
+       btst    ICIVCR_ICIVBSY,d1
+       lne
+
+       /* check invalidating of end address
+        *
+        * a2 = a2 + alignsize
+        * if (a2 < end) {
+        *     goto icivloop;
+        * } */
+       add     d2,a2
+       cmp     a1,a2
+       bns     icivloop
+
+       LOCAL_IRQ_RESTORE(d3)
+
+mn10300_local_icache_inv_range_reg_end:
+       ret     [d2,d3,a2],12
+       .size   mn10300_local_icache_inv_page,.-mn10300_local_icache_inv_page
+       .size   mn10300_local_icache_inv_range,.-mn10300_local_icache_inv_range
+       .size   mn10300_local_icache_inv_range2,.-mn10300_local_icache_inv_range2
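
The dcivloop above works through an arbitrarily aligned range in power-of-two chunks: each pass purges the largest power-of-two block that is aligned to the current start address and no larger than the remaining size. A C sketch of the chunk-size choice, mirroring the commented pseudo-code; the function name is illustrative, and "remaining" is assumed nonzero as it is in the loop:

#include <stdint.h>

/* One dcivloop step: pick the next invalidation chunk.  The chunk may
 * be no larger than the alignment of "start", nor than the largest
 * power of two <= "remaining".  The asm skips the alignment
 * calculation when start is zero, hence the guard. */
static uint32_t inv_chunk(uint32_t start, uint32_t remaining, uint32_t line)
{
	uint32_t alignsize;
	uint32_t tmp;

	if (start) {
		alignsize = line;
		while (!(start & alignsize))	/* alignment of start */
			alignsize <<= 1;
	} else {
		alignsize = remaining;
	}

	if (remaining > alignsize)
		return alignsize;

	tmp = 0x80000000u;			/* largest power of two <= remaining */
	while (!(remaining & tmp))
		tmp >>= 1;
	return tmp;
}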
diff --git a/arch/mn10300/mm/cache-inv-by-tag.S b/arch/mn10300/mm/cache-inv-by-tag.S
new file mode 100644 (file)
index 0000000..e9713b4
--- /dev/null
@@ -0,0 +1,348 @@
+/* MN10300 CPU core caching routines
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/smp.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/irqflags.h>
+#include <asm/cacheflush.h>
+
+#define mn10300_local_dcache_inv_range_intr_interval \
+       +((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1)
+
+#if mn10300_local_dcache_inv_range_intr_interval > 0xff
+#error MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL must be 8 or less
+#endif
+
+       .am33_2
+
+       .globl  mn10300_local_icache_inv_page
+       .globl  mn10300_local_icache_inv_range
+       .globl  mn10300_local_icache_inv_range2
+
+mn10300_local_icache_inv_page  = mn10300_local_icache_inv
+mn10300_local_icache_inv_range = mn10300_local_icache_inv
+mn10300_local_icache_inv_range2        = mn10300_local_icache_inv
+
+#ifndef CONFIG_SMP
+       .globl  mn10300_icache_inv
+       .globl  mn10300_icache_inv_page
+       .globl  mn10300_icache_inv_range
+       .globl  mn10300_icache_inv_range2
+       .globl  mn10300_dcache_inv
+       .globl  mn10300_dcache_inv_page
+       .globl  mn10300_dcache_inv_range
+       .globl  mn10300_dcache_inv_range2
+
+mn10300_icache_inv             = mn10300_local_icache_inv
+mn10300_icache_inv_page                = mn10300_local_icache_inv_page
+mn10300_icache_inv_range       = mn10300_local_icache_inv_range
+mn10300_icache_inv_range2      = mn10300_local_icache_inv_range2
+mn10300_dcache_inv             = mn10300_local_dcache_inv
+mn10300_dcache_inv_page                = mn10300_local_dcache_inv_page
+mn10300_dcache_inv_range       = mn10300_local_dcache_inv_range
+mn10300_dcache_inv_range2      = mn10300_local_dcache_inv_range2
+
+#endif /* !CONFIG_SMP */
+
+###############################################################################
+#
+# void mn10300_local_icache_inv(void)
+# Invalidate the entire icache
+#
+###############################################################################
+       ALIGN
+       .globl  mn10300_local_icache_inv
+        .type  mn10300_local_icache_inv,@function
+mn10300_local_icache_inv:
+       mov     CHCTR,a0
+
+       movhu   (a0),d0
+       btst    CHCTR_ICEN,d0
+       beq     mn10300_local_icache_inv_end
+
+#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
+       LOCAL_CLI_SAVE(d1)
+
+       # disable the icache
+       and     ~CHCTR_ICEN,d0
+       movhu   d0,(a0)
+
+       # and wait for it to calm down
+       setlb
+       movhu   (a0),d0
+       btst    CHCTR_ICBUSY,d0
+       lne
+
+       # invalidate
+       or      CHCTR_ICINV,d0
+       movhu   d0,(a0)
+
+       # wait for the cache to finish
+       mov     CHCTR,a0
+       setlb
+       movhu   (a0),d0
+       btst    CHCTR_ICBUSY,d0
+       lne
+
+       # and reenable it
+       and     ~CHCTR_ICINV,d0
+       or      CHCTR_ICEN,d0
+       movhu   d0,(a0)
+       movhu   (a0),d0
+
+       LOCAL_IRQ_RESTORE(d1)
+#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+       # invalidate
+       or      CHCTR_ICINV,d0
+       movhu   d0,(a0)
+       movhu   (a0),d0
+#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+
+mn10300_local_icache_inv_end:
+       ret     [],0
+       .size   mn10300_local_icache_inv,.-mn10300_local_icache_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_inv(void)
+# Invalidate the entire dcache
+#
+###############################################################################
+       ALIGN
+       .globl  mn10300_local_dcache_inv
+       .type   mn10300_local_dcache_inv,@function
+mn10300_local_dcache_inv:
+       mov     CHCTR,a0
+
+       movhu   (a0),d0
+       btst    CHCTR_DCEN,d0
+       beq     mn10300_local_dcache_inv_end
+
+#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
+       LOCAL_CLI_SAVE(d1)
+
+       # disable the dcache
+       and     ~CHCTR_DCEN,d0
+       movhu   d0,(a0)
+
+       # and wait for it to calm down
+       setlb
+       movhu   (a0),d0
+       btst    CHCTR_DCBUSY,d0
+       lne
+
+       # invalidate
+       or      CHCTR_DCINV,d0
+       movhu   d0,(a0)
+
+       # wait for the cache to finish
+       mov     CHCTR,a0
+       setlb
+       movhu   (a0),d0
+       btst    CHCTR_DCBUSY,d0
+       lne
+
+       # and reenable it
+       and     ~CHCTR_DCINV,d0
+       or      CHCTR_DCEN,d0
+       movhu   d0,(a0)
+       movhu   (a0),d0
+
+       LOCAL_IRQ_RESTORE(d1)
+#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+       # invalidate
+       or      CHCTR_DCINV,d0
+       movhu   d0,(a0)
+       movhu   (a0),d0
+#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+
+mn10300_local_dcache_inv_end:
+       ret     [],0
+       .size   mn10300_local_dcache_inv,.-mn10300_local_dcache_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_inv_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_inv_range2(unsigned long start, unsigned long size)
+# void mn10300_local_dcache_inv_page(unsigned long start)
+# Invalidate a range of addresses on a page in the dcache
+#
+###############################################################################
+       ALIGN
+       .globl  mn10300_local_dcache_inv_page
+       .globl  mn10300_local_dcache_inv_range
+       .globl  mn10300_local_dcache_inv_range2
+       .type   mn10300_local_dcache_inv_page,@function
+       .type   mn10300_local_dcache_inv_range,@function
+       .type   mn10300_local_dcache_inv_range2,@function
+mn10300_local_dcache_inv_page:
+       and     ~(PAGE_SIZE-1),d0
+       mov     PAGE_SIZE,d1
+mn10300_local_dcache_inv_range2:
+       add     d0,d1
+mn10300_local_dcache_inv_range:
+       # If we are in writeback mode we check the start and end alignments,
+       # and if they're not cacheline-aligned, we must flush any bits outside
+       # the range that share cachelines with stuff inside the range
+#ifdef CONFIG_MN10300_CACHE_WBACK
+       btst    ~(L1_CACHE_BYTES-1),d0
+       bne     1f
+       btst    ~(L1_CACHE_BYTES-1),d1
+       beq     2f
+1:
+       bra     mn10300_local_dcache_flush_inv_range
+2:
+#endif /* CONFIG_MN10300_CACHE_WBACK */
+
+       movm    [d2,d3,a2],(sp)
+
+       mov     CHCTR,a2
+       movhu   (a2),d2
+       btst    CHCTR_DCEN,d2
+       beq     mn10300_local_dcache_inv_range_end
+
+#ifndef CONFIG_MN10300_CACHE_WBACK
+       and     L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0      # round start
+                                                               # addr down
+
+       add     L1_CACHE_BYTES,d1               # round end addr up
+       and     L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
+#endif /* !CONFIG_MN10300_CACHE_WBACK */
+       mov     d0,a1
+
+       clr     d2                              # we're going to clear tag RAM
+                                               # entries
+
+       # read the tags from the tag RAM, and if they indicate a valid dirty
+       # cache line then invalidate that line
+       mov     DCACHE_TAG(0,0),a0
+       mov     a1,d0
+       and     L1_CACHE_TAG_ENTRY,d0
+       add     d0,a0                           # starting dcache tag RAM
+                                               # access address
+
+       sub     a1,d1
+       lsr     L1_CACHE_SHIFT,d1               # total number of entries to
+                                               # examine
+
+       and     ~(L1_CACHE_DISPARITY-1),a1      # determine comparator base
+
+mn10300_local_dcache_inv_range_outer_loop:
+       LOCAL_CLI_SAVE(d3)
+
+       # disable the dcache
+       movhu   (a2),d0
+       and     ~CHCTR_DCEN,d0
+       movhu   d0,(a2)
+
+       # and wait for it to calm down
+       setlb
+       movhu   (a2),d0
+       btst    CHCTR_DCBUSY,d0
+       lne
+
+mn10300_local_dcache_inv_range_loop:
+
+       # process the way 0 slot
+       mov     (L1_CACHE_WAYDISP*0,a0),d0      # read the tag in the way 0 slot
+       btst    L1_CACHE_TAG_VALID,d0
+       beq     mn10300_local_dcache_inv_range_skip_0   # jump if this cacheline
+                                               # is not valid
+
+       xor     a1,d0
+       lsr     12,d0
+       bne     mn10300_local_dcache_inv_range_skip_0   # jump if not this cacheline
+
+       mov     d2,(L1_CACHE_WAYDISP*0,a0)      # kill the tag
+
+mn10300_local_dcache_inv_range_skip_0:
+
+       # process the way 1 slot
+       mov     (L1_CACHE_WAYDISP*1,a0),d0      # read the tag in the way 1 slot
+       btst    L1_CACHE_TAG_VALID,d0
+       beq     mn10300_local_dcache_inv_range_skip_1   # jump if this cacheline
+                                               # is not valid
+
+       xor     a1,d0
+       lsr     12,d0
+       bne     mn10300_local_dcache_inv_range_skip_1   # jump if not this cacheline
+
+       mov     d2,(L1_CACHE_WAYDISP*1,a0)      # kill the tag
+
+mn10300_local_dcache_inv_range_skip_1:
+
+       # process the way 2 slot
+       mov     (L1_CACHE_WAYDISP*2,a0),d0      # read the tag in the way 2 slot
+       btst    L1_CACHE_TAG_VALID,d0
+       beq     mn10300_local_dcache_inv_range_skip_2   # jump if this cacheline
+                                               # is not valid
+
+       xor     a1,d0
+       lsr     12,d0
+       bne     mn10300_local_dcache_inv_range_skip_2   # jump if not this cacheline
+
+       mov     d2,(L1_CACHE_WAYDISP*2,a0)      # kill the tag
+
+mn10300_local_dcache_inv_range_skip_2:
+
+       # process the way 3 slot
+       mov     (L1_CACHE_WAYDISP*3,a0),d0      # read the tag in the way 3 slot
+       btst    L1_CACHE_TAG_VALID,d0
+       beq     mn10300_local_dcache_inv_range_skip_3   # jump if this cacheline
+                                               # is not valid
+
+       xor     a1,d0
+       lsr     12,d0
+       bne     mn10300_local_dcache_inv_range_skip_3   # jump if not this cacheline
+
+       mov     d2,(L1_CACHE_WAYDISP*3,a0)      # kill the tag
+
+mn10300_local_dcache_inv_range_skip_3:
+
+       # approx every N steps we re-enable the cache and see if there are any
+       # interrupts to be processed
+       # we also break out if we've reached the end of the loop
+       # (the bottom nibble of the count is zero in both cases)
+       add     L1_CACHE_BYTES,a0
+       add     L1_CACHE_BYTES,a1
+       and     ~L1_CACHE_WAYDISP,a0
+       add     -1,d1
+       btst    mn10300_local_dcache_inv_range_intr_interval,d1
+       bne     mn10300_local_dcache_inv_range_loop
+
+       # wait for the cache to finish what it's doing
+       setlb
+       movhu   (a2),d0
+       btst    CHCTR_DCBUSY,d0
+       lne
+
+       # and reenable it
+       or      CHCTR_DCEN,d0
+       movhu   d0,(a2)
+       movhu   (a2),d0
+
+       # re-enable interrupts
+       # - we don't bother with delay NOPs as we'll have enough instructions
+       #   before we disable interrupts again to give the interrupts a chance
+       #   to happen
+       LOCAL_IRQ_RESTORE(d3)
+
+       # go around again if the counter hasn't yet reached zero
+       add     0,d1
+       bne     mn10300_local_dcache_inv_range_outer_loop
+
+mn10300_local_dcache_inv_range_end:
+       ret     [d2,d3,a2],12
+       .size   mn10300_local_dcache_inv_page,.-mn10300_local_dcache_inv_page
+       .size   mn10300_local_dcache_inv_range,.-mn10300_local_dcache_inv_range
+       .size   mn10300_local_dcache_inv_range2,.-mn10300_local_dcache_inv_range2
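
The interrupt-latency batching that the routine above implements is easier to
follow in C.  This is a minimal illustrative sketch, not kernel code: the
helpers are hypothetical stand-ins for the CHCTR and EPSW manipulation, the
16-byte line size is an assumption, and the batch size mirrors
MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL = 4.

/* Hypothetical stand-ins for the CHCTR/EPSW accesses in the assembly. */
static unsigned long irq_save(void);
static void irq_restore(unsigned long flags);
static void cache_disable(void);
static void cache_enable(void);
static void invalidate_tag_entry(unsigned long addr);	/* all four ways */

#define LOG2_INTERVAL	4	/* MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL */
#define INTERVAL_MASK	((1 << LOG2_INTERVAL) - 1)
#define LINE_SHIFT	4	/* assumed L1_CACHE_SHIFT: 16-byte lines */
#define LINE_BYTES	(1 << LINE_SHIFT)

static void dcache_inv_range_sketch(unsigned long start, unsigned long end)
{
	unsigned long n = (end - start) >> LINE_SHIFT;	/* entries to examine */
	unsigned long addr = start, flags;

	while (n) {
		flags = irq_save();	/* IRQs off for one batch only */
		cache_disable();	/* the tag RAM can't be written live */
		do {
			invalidate_tag_entry(addr);
			addr += LINE_BYTES;
		} while (--n & INTERVAL_MASK);	/* bottom nibble reaches zero */
		cache_enable();
		irq_restore(flags);	/* window for pending interrupts */
	}
}
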
diff --git a/arch/mn10300/mm/cache-inv-icache.c b/arch/mn10300/mm/cache-inv-icache.c
new file mode 100644 (file)
index 0000000..a8933a6
--- /dev/null
@@ -0,0 +1,129 @@
+/* Invalidate icache when dcache doesn't need invalidation as it's in
+ * write-through mode
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include <asm/smp.h>
+#include "cache-smp.h"
+
+/**
+ * flush_icache_page_range - Invalidate the icache over part of a
+ *                             single page
+ * @start: The starting virtual address of the page part.
+ * @end: The ending virtual address of the page part.
+ *
+ * Invalidate the icache for part of a single page, as determined by the
+ * virtual addresses given.  The page must be in the paged area.  The dcache is
+ * not flushed as the cache must be in write-through mode to get here.
+ */
+static void flush_icache_page_range(unsigned long start, unsigned long end)
+{
+       unsigned long addr, size, off;
+       struct page *page;
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *ppte, pte;
+
+       /* work out how much of the page to flush */
+       off = start & ~PAGE_MASK;
+       size = end - start;
+
+       /* get the physical address the page is mapped to from the page
+        * tables */
+       pgd = pgd_offset(current->mm, start);
+       if (!pgd || !pgd_val(*pgd))
+               return;
+
+       pud = pud_offset(pgd, start);
+       if (!pud || !pud_val(*pud))
+               return;
+
+       pmd = pmd_offset(pud, start);
+       if (!pmd || !pmd_val(*pmd))
+               return;
+
+       ppte = pte_offset_map(pmd, start);
+       if (!ppte)
+               return;
+       pte = *ppte;
+       pte_unmap(ppte);
+
+       if (pte_none(pte))
+               return;
+
+       page = pte_page(pte);
+       if (!page)
+               return;
+
+       addr = page_to_phys(page);
+
+       /* invalidate the icache coverage on that region */
+       mn10300_local_icache_inv_range2(addr + off, size);
+       smp_cache_call(SMP_ICACHE_INV_RANGE, start, end);
+}
+
+/**
+ * flush_icache_range - Globally flush dcache and invalidate icache for region
+ * @start: The starting virtual address of the region.
+ * @end: The ending virtual address of the region.
+ *
+ * This is used by the kernel to globally flush some code it has just written
+ * from the dcache back to RAM and then to globally invalidate the icache over
+ * that region so that the code can be run on all CPUs in the system.
+ */
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+       unsigned long start_page, end_page;
+       unsigned long flags;
+
+       flags = smp_lock_cache();
+
+       if (end > 0x80000000UL) {
+               /* addresses above 0xa0000000 do not go through the cache */
+               if (end > 0xa0000000UL) {
+                       end = 0xa0000000UL;
+                       if (start >= end)
+                               goto done;
+               }
+
+               /* kernel addresses between 0x80000000 and 0x9fffffff do not
+                * require page tables, so we just map such addresses
+                * directly */
+               start_page = (start >= 0x80000000UL) ? start : 0x80000000UL;
+               mn10300_local_icache_inv_range(start_page, end);
+               smp_cache_call(SMP_ICACHE_INV_RANGE, start, end);
+               if (start_page == start)
+                       goto done;
+               end = start_page;
+       }
+
+       start_page = start & PAGE_MASK;
+       end_page = (end - 1) & PAGE_MASK;
+
+       if (start_page == end_page) {
+               /* the first and last bytes are on the same page */
+               flush_icache_page_range(start, end);
+       } else if (start_page + PAGE_SIZE == end_page) {
+               /* split over two virtually contiguous pages */
+               flush_icache_page_range(start, end_page);
+               flush_icache_page_range(end_page, end);
+       } else {
+               /* more than 2 pages; just flush the entire cache */
+               mn10300_local_icache_inv();
+               smp_cache_call(SMP_ICACHE_INV, 0, 0);
+       }
+
+done:
+       smp_unlock_cache(flags);
+}
+EXPORT_SYMBOL(flush_icache_range);
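
For reference, the caller-side contract of flush_icache_range() as a minimal
hedged sketch: install_insn() and its arguments are hypothetical, and only
the flush_icache_range() call is the interface exported above.  Any code
that writes instructions through the dcache must cover the written region
before executing it.

#include <linux/types.h>
#include <asm/cacheflush.h>

static void install_insn(u32 *patch_site, u32 insn)	/* hypothetical */
{
	*patch_site = insn;		/* written via the dcache */

	/* push it to RAM (trivial in write-through mode) and drop any
	 * stale copy from every CPU's icache before it gets executed */
	flush_icache_range((unsigned long)patch_site,
			   (unsigned long)(patch_site + 1));
}
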
diff --git a/arch/mn10300/mm/cache-mn10300.S b/arch/mn10300/mm/cache-mn10300.S
deleted file mode 100644 (file)
index e839d0a..0000000
+++ /dev/null
@@ -1,289 +0,0 @@
-/* MN10300 CPU core caching routines
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-#include <linux/sys.h>
-#include <linux/linkage.h>
-#include <asm/smp.h>
-#include <asm/page.h>
-#include <asm/cache.h>
-
-#define mn10300_dcache_inv_range_intr_interval \
-       +((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1)
-
-#if mn10300_dcache_inv_range_intr_interval > 0xff
-#error MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL must be 8 or less
-#endif
-
-       .am33_2
-
-       .globl mn10300_icache_inv
-       .globl mn10300_dcache_inv
-       .globl mn10300_dcache_inv_range
-       .globl mn10300_dcache_inv_range2
-       .globl mn10300_dcache_inv_page
-
-###############################################################################
-#
-# void mn10300_icache_inv(void)
-# Invalidate the entire icache
-#
-###############################################################################
-       ALIGN
-mn10300_icache_inv:
-       mov     CHCTR,a0
-
-       movhu   (a0),d0
-       btst    CHCTR_ICEN,d0
-       beq     mn10300_icache_inv_end
-
-       mov     epsw,d1
-       and     ~EPSW_IE,epsw
-       nop
-       nop
-
-       # disable the icache
-       and     ~CHCTR_ICEN,d0
-       movhu   d0,(a0)
-
-       # and wait for it to calm down
-       setlb
-       movhu   (a0),d0
-       btst    CHCTR_ICBUSY,d0
-       lne
-
-       # invalidate
-       or      CHCTR_ICINV,d0
-       movhu   d0,(a0)
-
-       # wait for the cache to finish
-       mov     CHCTR,a0
-       setlb
-       movhu   (a0),d0
-       btst    CHCTR_ICBUSY,d0
-       lne
-
-       # and reenable it
-       and     ~CHCTR_ICINV,d0
-       or      CHCTR_ICEN,d0
-       movhu   d0,(a0)
-       movhu   (a0),d0
-
-       mov     d1,epsw
-
-mn10300_icache_inv_end:
-       ret     [],0
-
-###############################################################################
-#
-# void mn10300_dcache_inv(void)
-# Invalidate the entire dcache
-#
-###############################################################################
-       ALIGN
-mn10300_dcache_inv:
-       mov     CHCTR,a0
-
-       movhu   (a0),d0
-       btst    CHCTR_DCEN,d0
-       beq     mn10300_dcache_inv_end
-
-       mov     epsw,d1
-       and     ~EPSW_IE,epsw
-       nop
-       nop
-
-       # disable the dcache
-       and     ~CHCTR_DCEN,d0
-       movhu   d0,(a0)
-
-       # and wait for it to calm down
-       setlb
-       movhu   (a0),d0
-       btst    CHCTR_DCBUSY,d0
-       lne
-
-       # invalidate
-       or      CHCTR_DCINV,d0
-       movhu   d0,(a0)
-
-       # wait for the cache to finish
-       mov     CHCTR,a0
-       setlb
-       movhu   (a0),d0
-       btst    CHCTR_DCBUSY,d0
-       lne
-
-       # and reenable it
-       and     ~CHCTR_DCINV,d0
-       or      CHCTR_DCEN,d0
-       movhu   d0,(a0)
-       movhu   (a0),d0
-
-       mov     d1,epsw
-
-mn10300_dcache_inv_end:
-       ret     [],0
-
-###############################################################################
-#
-# void mn10300_dcache_inv_range(unsigned start, unsigned end)
-# void mn10300_dcache_inv_range2(unsigned start, unsigned size)
-# void mn10300_dcache_inv_page(unsigned start)
-# Invalidate a range of addresses on a page in the dcache
-#
-###############################################################################
-       ALIGN
-mn10300_dcache_inv_page:
-       mov     PAGE_SIZE,d1
-mn10300_dcache_inv_range2:
-       add     d0,d1
-mn10300_dcache_inv_range:
-       movm    [d2,d3,a2],(sp)
-       mov     CHCTR,a2
-
-       movhu   (a2),d2
-       btst    CHCTR_DCEN,d2
-       beq     mn10300_dcache_inv_range_end
-
-       and     L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0      # round start
-                                                               # addr down
-       mov     d0,a1
-
-       add     L1_CACHE_BYTES,d1                       # round end addr up
-       and     L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
-
-       clr     d2                              # we're going to clear tag ram
-                                               # entries
-
-       # read the tags from the tag RAM, and if they indicate a valid dirty
-       # cache line then invalidate that line
-       mov     DCACHE_TAG(0,0),a0
-       mov     a1,d0
-       and     L1_CACHE_TAG_ENTRY,d0
-       add     d0,a0                           # starting dcache tag RAM
-                                               # access address
-
-       sub     a1,d1
-       lsr     L1_CACHE_SHIFT,d1               # total number of entries to
-                                               # examine
-
-       and     ~(L1_CACHE_DISPARITY-1),a1      # determine comparator base
-
-mn10300_dcache_inv_range_outer_loop:
-       # disable interrupts
-       mov     epsw,d3
-       and     ~EPSW_IE,epsw
-       nop                                     # note that reading CHCTR and
-                                               # AND'ing D0 occupy two delay
-                                               # slots after disabling
-                                               # interrupts
-
-       # disable the dcache
-       movhu   (a2),d0
-       and     ~CHCTR_DCEN,d0
-       movhu   d0,(a2)
-
-       # and wait for it to calm down
-       setlb
-       movhu   (a2),d0
-       btst    CHCTR_DCBUSY,d0
-       lne
-
-mn10300_dcache_inv_range_loop:
-
-       # process the way 0 slot
-       mov     (L1_CACHE_WAYDISP*0,a0),d0      # read the tag in the way 0 slot
-       btst    L1_CACHE_TAG_VALID,d0
-       beq     mn10300_dcache_inv_range_skip_0 # jump if this cacheline is not
-                                               # valid
-
-       xor     a1,d0
-       lsr     12,d0
-       bne     mn10300_dcache_inv_range_skip_0 # jump if not this cacheline
-
-       mov     d2,(a0)                         # kill the tag
-
-mn10300_dcache_inv_range_skip_0:
-
-       # process the way 1 slot
-       mov     (L1_CACHE_WAYDISP*1,a0),d0      # read the tag in the way 1 slot
-       btst    L1_CACHE_TAG_VALID,d0
-       beq     mn10300_dcache_inv_range_skip_1 # jump if this cacheline is not
-                                               # valid
-
-       xor     a1,d0
-       lsr     12,d0
-       bne     mn10300_dcache_inv_range_skip_1 # jump if not this cacheline
-
-       mov     d2,(a0)                         # kill the tag
-
-mn10300_dcache_inv_range_skip_1:
-
-       # process the way 2 slot
-       mov     (L1_CACHE_WAYDISP*2,a0),d0      # read the tag in the way 2 slot
-       btst    L1_CACHE_TAG_VALID,d0
-       beq     mn10300_dcache_inv_range_skip_2 # jump if this cacheline is not
-                                               # valid
-
-       xor     a1,d0
-       lsr     12,d0
-       bne     mn10300_dcache_inv_range_skip_2 # jump if not this cacheline
-
-       mov     d2,(a0)                         # kill the tag
-
-mn10300_dcache_inv_range_skip_2:
-
-       # process the way 3 slot
-       mov     (L1_CACHE_WAYDISP*3,a0),d0      # read the tag in the way 3 slot
-       btst    L1_CACHE_TAG_VALID,d0
-       beq     mn10300_dcache_inv_range_skip_3 # jump if this cacheline is not
-                                               # valid
-
-       xor     a1,d0
-       lsr     12,d0
-       bne     mn10300_dcache_inv_range_skip_3 # jump if not this cacheline
-
-       mov     d2,(a0)                         # kill the tag
-
-mn10300_dcache_inv_range_skip_3:
-
-       # approx every N steps we re-enable the cache and see if there are any
-       # interrupts to be processed
-       # we also break out if we've reached the end of the loop
-       # (the bottom nibble of the count is zero in both cases)
-       add     L1_CACHE_BYTES,a0
-       add     L1_CACHE_BYTES,a1
-       add     -1,d1
-       btst    mn10300_dcache_inv_range_intr_interval,d1
-       bne     mn10300_dcache_inv_range_loop
-
-       # wait for the cache to finish what it's doing
-       setlb
-       movhu   (a2),d0
-       btst    CHCTR_DCBUSY,d0
-       lne
-
-       # and reenable it
-       or      CHCTR_DCEN,d0
-       movhu   d0,(a2)
-       movhu   (a2),d0
-
-       # re-enable interrupts
-       # - we don't bother with delay NOPs as we'll have enough instructions
-       #   before we disable interrupts again to give the interrupts a chance
-       #   to happen
-       mov     d3,epsw
-
-       # go around again if the counter hasn't yet reached zero
-       add     0,d1
-       bne     mn10300_dcache_inv_range_outer_loop
-
-mn10300_dcache_inv_range_end:
-       ret     [d2,d3,a2],12
diff --git a/arch/mn10300/mm/cache-smp-flush.c b/arch/mn10300/mm/cache-smp-flush.c
new file mode 100644 (file)
index 0000000..fd51af5
--- /dev/null
@@ -0,0 +1,156 @@
+/* Functions for global dcache flush when writeback caching in SMP
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include "cache-smp.h"
+
+/**
+ * mn10300_dcache_flush - Globally flush data cache
+ *
+ * Flush the data cache on all CPUs.
+ */
+void mn10300_dcache_flush(void)
+{
+       unsigned long flags;
+
+       flags = smp_lock_cache();
+       mn10300_local_dcache_flush();
+       smp_cache_call(SMP_DCACHE_FLUSH, 0, 0);
+       smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_page - Globally flush a page of data cache
+ * @start: The address of the page of memory to be flushed.
+ *
+ * Flush a range of addresses in the data cache on all CPUs covering
+ * the page that includes the given address.
+ */
+void mn10300_dcache_flush_page(unsigned long start)
+{
+       unsigned long flags;
+
+       start &= ~(PAGE_SIZE-1);
+
+       flags = smp_lock_cache();
+       mn10300_local_dcache_flush_page(start);
+       smp_cache_call(SMP_DCACHE_FLUSH_RANGE, start, start + PAGE_SIZE);
+       smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_range - Globally flush range of data cache
+ * @start: The start address of the region to be flushed.
+ * @end: The end address of the region to be flushed.
+ *
+ * Flush a range of addresses in the data cache on all CPUs, between start and
+ * end-1 inclusive.
+ */
+void mn10300_dcache_flush_range(unsigned long start, unsigned long end)
+{
+       unsigned long flags;
+
+       flags = smp_lock_cache();
+       mn10300_local_dcache_flush_range(start, end);
+       smp_cache_call(SMP_DCACHE_FLUSH_RANGE, start, end);
+       smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_range2 - Globally flush range of data cache
+ * @start: The start address of the region to be flushed.
+ * @size: The size of the region to be flushed.
+ *
+ * Flush a range of addresses in the data cache on all CPUs, between start and
+ * start+size-1 inclusive.
+ */
+void mn10300_dcache_flush_range2(unsigned long start, unsigned long size)
+{
+       unsigned long flags;
+
+       flags = smp_lock_cache();
+       mn10300_local_dcache_flush_range2(start, size);
+       smp_cache_call(SMP_DCACHE_FLUSH_RANGE, start, start + size);
+       smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_inv - Globally flush and invalidate data cache
+ *
+ * Flush and invalidate the data cache on all CPUs.
+ */
+void mn10300_dcache_flush_inv(void)
+{
+       unsigned long flags;
+
+       flags = smp_lock_cache();
+       mn10300_local_dcache_flush_inv();
+       smp_cache_call(SMP_DCACHE_FLUSH_INV, 0, 0);
+       smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_inv_page - Globally flush and invalidate a page of data
+ *     cache
+ * @start: The address of the page of memory to be flushed and invalidated.
+ *
+ * Flush and invalidate a range of addresses in the data cache on all CPUs
+ * covering the page that includes the given address.
+ */
+void mn10300_dcache_flush_inv_page(unsigned long start)
+{
+       unsigned long flags;
+
+       start &= ~(PAGE_SIZE-1);
+
+       flags = smp_lock_cache();
+       mn10300_local_dcache_flush_inv_page(start);
+       smp_cache_call(SMP_DCACHE_FLUSH_INV_RANGE, start, start + PAGE_SIZE);
+       smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_inv_range - Globally flush and invalidate range of data
+ *     cache
+ * @start: The start address of the region to be flushed and invalidated.
+ * @end: The end address of the region to be flushed and invalidated.
+ *
+ * Flush and invalidate a range of addresses in the data cache on all CPUs,
+ * between start and end-1 inclusive.
+ */
+void mn10300_dcache_flush_inv_range(unsigned long start, unsigned long end)
+{
+       unsigned long flags;
+
+       flags = smp_lock_cache();
+       mn10300_local_dcache_flush_inv_range(start, end);
+       smp_cache_call(SMP_DCACHE_FLUSH_INV_RANGE, start, end);
+       smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_inv_range2 - Globally flush and invalidate range of data
+ *     cache
+ * @start: The start address of the region to be flushed and invalidated.
+ * @size: The size of the region to be flushed and invalidated.
+ *
+ * Flush and invalidate a range of addresses in the data cache on all CPUs,
+ * between start and start+size-1 inclusive.
+ */
+void mn10300_dcache_flush_inv_range2(unsigned long start, unsigned long size)
+{
+       unsigned long flags;
+
+       flags = smp_lock_cache();
+       mn10300_local_dcache_flush_inv_range2(start, size);
+       smp_cache_call(SMP_DCACHE_FLUSH_INV_RANGE, start, start + size);
+       smp_unlock_cache(flags);
+}
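
A typical use of these global flushes, again only a hedged sketch with the
device-facing helper hypothetical: before a bus master reads a buffer the
CPUs have written, the dirty writeback-cached lines must reach RAM on every
CPU.

#include <linux/types.h>
#include <asm/cacheflush.h>

static void start_device_read(void *buf, size_t len);	/* hypothetical */

static void sync_buffer_for_device(void *buf, size_t len)
{
	mn10300_dcache_flush_range((unsigned long)buf,
				   (unsigned long)buf + len);
	start_device_read(buf, len);
}
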
diff --git a/arch/mn10300/mm/cache-smp-inv.c b/arch/mn10300/mm/cache-smp-inv.c
new file mode 100644 (file)
index 0000000..ff17873
--- /dev/null
@@ -0,0 +1,153 @@
+/* Functions for global i/dcache invalidation when caching in SMP
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include "cache-smp.h"
+
+/**
+ * mn10300_icache_inv - Globally invalidate instruction cache
+ *
+ * Invalidate the instruction cache on all CPUs.
+ */
+void mn10300_icache_inv(void)
+{
+       unsigned long flags;
+
+       flags = smp_lock_cache();
+       mn10300_local_icache_inv();
+       smp_cache_call(SMP_ICACHE_INV, 0, 0);
+       smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_icache_inv_page - Globally invalidate a page of instruction cache
+ * @start: The address of the page of memory to be invalidated.
+ *
+ * Invalidate a range of addresses in the instruction cache on all CPUs
+ * covering the page that includes the given address.
+ */
+void mn10300_icache_inv_page(unsigned long start)
+{
+       unsigned long flags;
+
+       start &= ~(PAGE_SIZE-1);
+
+       flags = smp_lock_cache();
+       mn10300_local_icache_inv_page(start);
+       smp_cache_call(SMP_ICACHE_INV_RANGE, start, start + PAGE_SIZE);
+       smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_icache_inv_range - Globally invalidate range of instruction cache
+ * @start: The start address of the region to be invalidated.
+ * @end: The end address of the region to be invalidated.
+ *
+ * Invalidate a range of addresses in the instruction cache on all CPUs,
+ * between start and end-1 inclusive.
+ */
+void mn10300_icache_inv_range(unsigned long start, unsigned long end)
+{
+       unsigned long flags;
+
+       flags = smp_lock_cache();
+       mn10300_local_icache_inv_range(start, end);
+       smp_cache_call(SMP_ICACHE_INV_RANGE, start, end);
+       smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_icache_inv_range2 - Globally invalidate range of instruction cache
+ * @start: The start address of the region to be invalidated.
+ * @size: The size of the region to be invalidated.
+ *
+ * Invalidate a range of addresses in the instruction cache on all CPUs,
+ * between start and start+size-1 inclusive.
+ */
+void mn10300_icache_inv_range2(unsigned long start, unsigned long size)
+{
+       unsigned long flags;
+
+       flags = smp_lock_cache();
+       mn10300_local_icache_inv_range2(start, size);
+       smp_cache_call(SMP_ICACHE_INV_RANGE, start, start + size);
+       smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_inv - Globally invalidate data cache
+ *
+ * Invalidate the data cache on all CPUs.
+ */
+void mn10300_dcache_inv(void)
+{
+       unsigned long flags;
+
+       flags = smp_lock_cache();
+       mn10300_local_dcache_inv();
+       smp_cache_call(SMP_DCACHE_INV, 0, 0);
+       smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_inv_page - Globally invalidate a page of data cache
+ * @start: The address of the page of memory to be invalidated.
+ *
+ * Invalidate a range of addresses in the data cache on all CPUs covering the
+ * page that includes the given address.
+ */
+void mn10300_dcache_inv_page(unsigned long start)
+{
+       unsigned long flags;
+
+       start &= ~(PAGE_SIZE-1);
+
+       flags = smp_lock_cache();
+       mn10300_local_dcache_inv_page(start);
+       smp_cache_call(SMP_DCACHE_INV_RANGE, start, start + PAGE_SIZE);
+       smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_inv_range - Globally invalidate range of data cache
+ * @start: The start address of the region to be invalidated.
+ * @end: The end address of the region to be invalidated.
+ *
+ * Invalidate a range of addresses in the data cache on all CPUs, between start
+ * and end-1 inclusive.
+ */
+void mn10300_dcache_inv_range(unsigned long start, unsigned long end)
+{
+       unsigned long flags;
+
+       flags = smp_lock_cache();
+       mn10300_local_dcache_inv_range(start, end);
+       smp_cache_call(SMP_DCACHE_INV_RANGE, start, end);
+       smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_inv_range2 - Globally invalidate range of data cache
+ * @start: The start address of the region to be invalidated.
+ * @size: The size of the region to be invalidated.
+ *
+ * Invalidate a range of addresses in the data cache on all CPUs, between start
+ * and start+size-1 inclusive.
+ */
+void mn10300_dcache_inv_range2(unsigned long start, unsigned long size)
+{
+       unsigned long flags;
+
+       flags = smp_lock_cache();
+       mn10300_local_dcache_inv_range2(start, size);
+       smp_cache_call(SMP_DCACHE_INV_RANGE, start, start + size);
+       smp_unlock_cache(flags);
+}
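
The mirror-image case, sketched under the same caveats: once a device has
DMA'd data into a buffer, any stale cached copies must be discarded globally
before the CPUs read it.

#include <linux/types.h>
#include <asm/cacheflush.h>

static void sync_buffer_for_cpu(void *buf, size_t len)	/* hypothetical caller */
{
	mn10300_dcache_inv_range2((unsigned long)buf, len);
}
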
diff --git a/arch/mn10300/mm/cache-smp.c b/arch/mn10300/mm/cache-smp.c
new file mode 100644 (file)
index 0000000..4a6e9a4
--- /dev/null
@@ -0,0 +1,105 @@
+/* SMP global caching code
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/threads.h>
+#include <linux/interrupt.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/smp.h>
+#include "cache-smp.h"
+
+DEFINE_SPINLOCK(smp_cache_lock);
+static unsigned long smp_cache_mask;
+static unsigned long smp_cache_start;
+static unsigned long smp_cache_end;
+static cpumask_t smp_cache_ipi_map;            /* CPUs with cache IPIs still pending */
+
+/**
+ * smp_cache_interrupt - Handle IPI request to flush caches.
+ *
+ * Handle a request delivered by IPI to flush the current CPU's
+ * caches.  The parameters are stored in smp_cache_*.
+ */
+void smp_cache_interrupt(void)
+{
+       unsigned long opr_mask = smp_cache_mask;
+
+       switch ((enum smp_dcache_ops)(opr_mask & SMP_DCACHE_OP_MASK)) {
+       case SMP_DCACHE_NOP:
+               break;
+       case SMP_DCACHE_INV:
+               mn10300_local_dcache_inv();
+               break;
+       case SMP_DCACHE_INV_RANGE:
+               mn10300_local_dcache_inv_range(smp_cache_start, smp_cache_end);
+               break;
+       case SMP_DCACHE_FLUSH:
+               mn10300_local_dcache_flush();
+               break;
+       case SMP_DCACHE_FLUSH_RANGE:
+               mn10300_local_dcache_flush_range(smp_cache_start,
+                                                smp_cache_end);
+               break;
+       case SMP_DCACHE_FLUSH_INV:
+               mn10300_local_dcache_flush_inv();
+               break;
+       case SMP_DCACHE_FLUSH_INV_RANGE:
+               mn10300_local_dcache_flush_inv_range(smp_cache_start,
+                                                    smp_cache_end);
+               break;
+       }
+
+       switch ((enum smp_icache_ops)(opr_mask & SMP_ICACHE_OP_MASK)) {
+       case SMP_ICACHE_NOP:
+               break;
+       case SMP_ICACHE_INV:
+               mn10300_local_icache_inv();
+               break;
+       case SMP_ICACHE_INV_RANGE:
+               mn10300_local_icache_inv_range(smp_cache_start, smp_cache_end);
+               break;
+       }
+
+       cpu_clear(smp_processor_id(), smp_cache_ipi_map);
+}
+
+/**
+ * smp_cache_call - Issue an IPI to request the other CPUs flush caches
+ * @opr_mask: Cache operation flags
+ * @start: Start address of request
+ * @end: End address of request
+ *
+ * Send cache flush IPI to other CPUs.  This invokes smp_cache_interrupt()
+ * above on those other CPUs and then waits for them to finish.
+ *
+ * The caller must hold smp_cache_lock.
+ */
+void smp_cache_call(unsigned long opr_mask,
+                   unsigned long start, unsigned long end)
+{
+       smp_cache_mask = opr_mask;
+       smp_cache_start = start;
+       smp_cache_end = end;
+       smp_cache_ipi_map = cpu_online_map;
+       cpu_clear(smp_processor_id(), smp_cache_ipi_map);
+
+       send_IPI_allbutself(FLUSH_CACHE_IPI);
+
+       while (!cpus_empty(smp_cache_ipi_map))
+               /* nothing. lockup detection does not belong here */
+               mb();
+}
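
Because one icache op and one dcache op can be OR'd into a single opr_mask
(see cache-smp.h in the next hunk), a caller can combine both operations in
one IPI round trip.  A minimal sketch of that pattern, using only interfaces
defined by this patch:

#include <asm/cacheflush.h>
#include "cache-smp.h"

static void flush_and_inv_range_sketch(unsigned long start, unsigned long end)
{
	unsigned long flags = smp_lock_cache();

	mn10300_local_dcache_flush_range(start, end);
	mn10300_local_icache_inv_range(start, end);
	smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, end);
	smp_unlock_cache(flags);
}
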
diff --git a/arch/mn10300/mm/cache-smp.h b/arch/mn10300/mm/cache-smp.h
new file mode 100644 (file)
index 0000000..cb52892
--- /dev/null
@@ -0,0 +1,69 @@
+/* SMP caching definitions
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+
+/*
+ * Operation requests for smp_cache_call().
+ *
+ * One of smp_icache_ops and one of smp_dcache_ops can be OR'd together.
+ */
+enum smp_icache_ops {
+       SMP_ICACHE_NOP                  = 0x0000,
+       SMP_ICACHE_INV                  = 0x0001,
+       SMP_ICACHE_INV_RANGE            = 0x0002,
+};
+#define SMP_ICACHE_OP_MASK             0x0003
+
+enum smp_dcache_ops {
+       SMP_DCACHE_NOP                  = 0x0000,
+       SMP_DCACHE_INV                  = 0x0004,
+       SMP_DCACHE_INV_RANGE            = 0x0008,
+       SMP_DCACHE_FLUSH                = 0x000c,
+       SMP_DCACHE_FLUSH_RANGE          = 0x0010,
+       SMP_DCACHE_FLUSH_INV            = 0x0014,
+       SMP_DCACHE_FLUSH_INV_RANGE      = 0x0018,
+};
+#define SMP_DCACHE_OP_MASK             0x001c
+
+#define        SMP_IDCACHE_INV_FLUSH           (SMP_ICACHE_INV | SMP_DCACHE_FLUSH)
+#define SMP_IDCACHE_INV_FLUSH_RANGE    (SMP_ICACHE_INV_RANGE | SMP_DCACHE_FLUSH_RANGE)
+
+/*
+ * cache-smp.c
+ */
+#ifdef CONFIG_SMP
+extern spinlock_t smp_cache_lock;
+
+extern void smp_cache_call(unsigned long opr_mask,
+                          unsigned long addr, unsigned long end);
+
+static inline unsigned long smp_lock_cache(void)
+       __acquires(&smp_cache_lock)
+{
+       unsigned long flags;
+       spin_lock_irqsave(&smp_cache_lock, flags);
+       return flags;
+}
+
+static inline void smp_unlock_cache(unsigned long flags)
+       __releases(&smp_cache_lock)
+{
+       spin_unlock_irqrestore(&smp_cache_lock, flags);
+}
+
+#else
+static inline unsigned long smp_lock_cache(void) { return 0; }
+static inline void smp_unlock_cache(unsigned long flags) {}
+static inline void smp_cache_call(unsigned long opr_mask,
+                                 unsigned long addr, unsigned long end)
+{
+}
+#endif /* CONFIG_SMP */
index 9261217e8d2c5741bb500b829bbd7663859b5541..0a1f0aa92ebc78ff9881993e16d3015cb524cb96 100644 (file)
 #include <asm/cacheflush.h>
 #include <asm/io.h>
 #include <asm/uaccess.h>
+#include <asm/smp.h>
+#include "cache-smp.h"
 
 EXPORT_SYMBOL(mn10300_icache_inv);
+EXPORT_SYMBOL(mn10300_icache_inv_range);
+EXPORT_SYMBOL(mn10300_icache_inv_range2);
+EXPORT_SYMBOL(mn10300_icache_inv_page);
 EXPORT_SYMBOL(mn10300_dcache_inv);
 EXPORT_SYMBOL(mn10300_dcache_inv_range);
 EXPORT_SYMBOL(mn10300_dcache_inv_range2);
@@ -36,96 +41,6 @@ EXPORT_SYMBOL(mn10300_dcache_flush_range2);
 EXPORT_SYMBOL(mn10300_dcache_flush_page);
 #endif
 
-/*
- * write a page back from the dcache and invalidate the icache so that we can
- * run code from it that we've just written into it
- */
-void flush_icache_page(struct vm_area_struct *vma, struct page *page)
-{
-       mn10300_dcache_flush_page(page_to_phys(page));
-       mn10300_icache_inv();
-}
-EXPORT_SYMBOL(flush_icache_page);
-
-/*
- * write some code we've just written back from the dcache and invalidate the
- * icache so that we can run that code
- */
-void flush_icache_range(unsigned long start, unsigned long end)
-{
-#ifdef CONFIG_MN10300_CACHE_WBACK
-       unsigned long addr, size, base, off;
-       struct page *page;
-       pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd;
-       pte_t *ppte, pte;
-
-       if (end > 0x80000000UL) {
-               /* addresses above 0xa0000000 do not go through the cache */
-               if (end > 0xa0000000UL) {
-                       end = 0xa0000000UL;
-                       if (start >= end)
-                               return;
-               }
-
-               /* kernel addresses between 0x80000000 and 0x9fffffff do not
-                * require page tables, so we just map such addresses directly */
-               base = (start >= 0x80000000UL) ? start : 0x80000000UL;
-               mn10300_dcache_flush_range(base, end);
-               if (base == start)
-                       goto invalidate;
-               end = base;
-       }
-
-       for (; start < end; start += size) {
-               /* work out how much of the page to flush */
-               off = start & (PAGE_SIZE - 1);
-
-               size = end - start;
-               if (size > PAGE_SIZE - off)
-                       size = PAGE_SIZE - off;
-
-               /* get the physical address the page is mapped to from the page
-                * tables */
-               pgd = pgd_offset(current->mm, start);
-               if (!pgd || !pgd_val(*pgd))
-                       continue;
-
-               pud = pud_offset(pgd, start);
-               if (!pud || !pud_val(*pud))
-                       continue;
-
-               pmd = pmd_offset(pud, start);
-               if (!pmd || !pmd_val(*pmd))
-                       continue;
-
-               ppte = pte_offset_map(pmd, start);
-               if (!ppte)
-                       continue;
-               pte = *ppte;
-               pte_unmap(ppte);
-
-               if (pte_none(pte))
-                       continue;
-
-               page = pte_page(pte);
-               if (!page)
-                       continue;
-
-               addr = page_to_phys(page);
-
-               /* flush the dcache and invalidate the icache coverage on that
-                * region */
-               mn10300_dcache_flush_range2(addr + off, size);
-       }
-#endif
-
-invalidate:
-       mn10300_icache_inv();
-}
-EXPORT_SYMBOL(flush_icache_range);
-
 /*
  * allow userspace to flush the instruction cache
  */
index 81f153fa51b4a6ecb07111f1d20383fbf0b6eaf2..59c3da49d9d9e4ad08108bf2f1eebe8182f46e0c 100644 (file)
@@ -39,10 +39,6 @@ void bust_spinlocks(int yes)
 {
        if (yes) {
                oops_in_progress = 1;
-#ifdef CONFIG_SMP
-               /* Many serial drivers do __global_cli() */
-               global_irq_lock = 0;
-#endif
        } else {
                int loglevel_save = console_loglevel;
 #ifdef CONFIG_VT
@@ -100,8 +96,6 @@ static void print_pagetable_entries(pgd_t *pgdir, unsigned long address)
 }
 #endif
 
-asmlinkage void monitor_signal(struct pt_regs *);
-
 /*
  * This routine handles page faults.  It determines the address,
  * and the problem, and then passes it off to one of the appropriate
@@ -279,7 +273,6 @@ good_area:
  */
 bad_area:
        up_read(&mm->mmap_sem);
-       monitor_signal(regs);
 
        /* User mode accesses just cause a SIGSEGV */
        if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) {
@@ -292,7 +285,6 @@ bad_area:
        }
 
 no_context:
-       monitor_signal(regs);
        /* Are we prepared to handle this kernel fault?  */
        if (fixup_exception(regs))
                return;
@@ -338,14 +330,13 @@ no_context:
  */
 out_of_memory:
        up_read(&mm->mmap_sem);
-       if ((fault_code & MMUFCR_xFC_ACCESS) != MMUFCR_xFC_ACCESS_USR)
-               goto no_context;
-       pagefault_out_of_memory();
-       return;
+       printk(KERN_ALERT "VM: killing process %s\n", tsk->comm);
+       if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
+               do_exit(SIGKILL);
+       goto no_context;
 
 do_sigbus:
        up_read(&mm->mmap_sem);
-       monitor_signal(regs);
 
        /*
         * Send a sigbus, regardless of whether we were in kernel
index 6e6bc0e51521811895e4c3e11e73fdb2fdc49118..48907cc3bdb77311526c1b1ca173b20983091f8d 100644 (file)
@@ -41,6 +41,10 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
 unsigned long highstart_pfn, highend_pfn;
 
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+static struct vm_struct user_iomap_vm;
+#endif
+
 /*
  * set up paging
  */
@@ -73,7 +77,24 @@ void __init paging_init(void)
        /* pass the memory from the bootmem allocator to the main allocator */
        free_area_init(zones_size);
 
-       __flush_tlb_all();
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+       /* The Atomic Operation Unit registers need to be mapped to userspace
+        * for all processes.  The following uses vm_area_register_early() to
+        * reserve the first page of the vmalloc area and sets the pte for that
+        * page.
+        *
+        * glibc hardcodes this virtual mapping, so we're pretty much stuck with
+        * it from now on.
+        */
+       user_iomap_vm.flags = VM_USERMAP;
+       user_iomap_vm.size = 1 << PAGE_SHIFT;
+       vm_area_register_early(&user_iomap_vm, PAGE_SIZE);
+       ppte = kernel_vmalloc_ptes;
+       set_pte(ppte, pfn_pte(USER_ATOMIC_OPS_PAGE_ADDR >> PAGE_SHIFT,
+                             PAGE_USERIO));
+#endif
+
+       local_flush_tlb_all();
 }
 
 /*
@@ -84,8 +105,7 @@ void __init mem_init(void)
        int codesize, reservedpages, datasize, initsize;
        int tmp;
 
-       if (!mem_map)
-               BUG();
+       BUG_ON(!mem_map);
 
 #define START_PFN      (contig_page_data.bdata->node_min_pfn)
 #define MAX_LOW_PFN    (contig_page_data.bdata->node_low_pfn)
index 6dffbf97ac2601d40ceb91a2e4a5116fd1bceeb8..eef989c1d0c10baf8614edbd8f87ea09c4d2cf91 100644 (file)
@@ -449,8 +449,7 @@ found_opcode:
               regs->pc, opcode, pop->opcode, pop->params[0], pop->params[1]);
 
        tmp = format_tbl[pop->format].opsz;
-       if (tmp > noc)
-               BUG(); /* match was less complete than it ought to have been */
+       BUG_ON(tmp > noc); /* match was less complete than it ought to have been */
 
        if (tmp < noc) {
                tmp = noc - tmp;
index 36ba02191d408251f9eaefa366610824e34d3d69..a4f7d3dcc6e6ff8d5028e6e60de4a8281b219702 100644 (file)
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
 
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
 /*
  * list of the MMU contexts last allocated on each CPU
  */
 unsigned long mmu_context_cache[NR_CPUS] = {
-       [0 ... NR_CPUS - 1] = MMU_CONTEXT_FIRST_VERSION * 2 - 1,
+       [0 ... NR_CPUS - 1] =
+       MMU_CONTEXT_FIRST_VERSION * 2 - (1 - MMU_CONTEXT_TLBPID_LOCK_NR),
 };
-
-/*
- * flush the specified TLB entry
- */
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
-{
-       unsigned long pteu, cnx, flags;
-
-       addr &= PAGE_MASK;
-
-       /* make sure the context doesn't migrate and defend against
-        * interference from vmalloc'd regions */
-       local_irq_save(flags);
-
-       cnx = mm_context(vma->vm_mm);
-
-       if (cnx != MMU_NO_CONTEXT) {
-               pteu = addr | (cnx & 0x000000ffUL);
-               IPTEU = pteu;
-               DPTEU = pteu;
-               if (IPTEL & xPTEL_V)
-                       IPTEL = 0;
-               if (DPTEL & xPTEL_V)
-                       DPTEL = 0;
-       }
-
-       local_irq_restore(flags);
-}
+#endif /* CONFIG_MN10300_TLB_USE_PIDR */
 
 /*
  * preemptively set a TLB entry
@@ -63,10 +38,16 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t *pte
         * interference from vmalloc'd regions */
        local_irq_save(flags);
 
+       cnx = ~MMU_NO_CONTEXT;
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
        cnx = mm_context(vma->vm_mm);
+#endif
 
        if (cnx != MMU_NO_CONTEXT) {
-               pteu = addr | (cnx & 0x000000ffUL);
+               pteu = addr;
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+               pteu |= cnx & MMU_CONTEXT_TLBPID_MASK;
+#endif
                if (!(pte_val(pte) & _PAGE_NX)) {
                        IPTEU = pteu;
                        if (IPTEL & xPTEL_V)
index 9c1624c9e4e9fa05f33d621877c931dbb9ae1abb..450f7ba3f8f2ed8aaa39cfa111782be77df2c834 100644 (file)
@@ -59,7 +59,7 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
-       __flush_tlb_one(vaddr);
+       local_flush_tlb_one(vaddr);
 }
 
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
index 7095147dcb8ba2f83bbad869b7b2e4366dd4a6a5..b9940177d81be5f6026f38e769c05163f6d46215 100644 (file)
@@ -27,7 +27,6 @@
 ###############################################################################
        .type   itlb_miss,@function
 ENTRY(itlb_miss)
-       and     ~EPSW_NMID,epsw
 #ifdef CONFIG_GDBSTUB
        movm    [d2,d3,a2],(sp)
 #else
@@ -38,6 +37,12 @@ ENTRY(itlb_miss)
        nop
 #endif
 
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+       mov     (MMUCTR),d2
+       mov     d2,(MMUCTR)
+#endif
+
+       and     ~EPSW_NMID,epsw
        mov     (IPTEU),d3
        mov     (PTBR),a2
        mov     d3,d2
@@ -56,10 +61,16 @@ ENTRY(itlb_miss)
        btst    _PAGE_VALID,d2
        beq     itlb_miss_fault         # jump if doesn't point to a page
                                        # (might be a swap id)
+#if    ((_PAGE_ACCESSED & 0xffffff00) == 0)
        bset    _PAGE_ACCESSED,(0,a2)
-       and     ~(xPTEL_UNUSED1|xPTEL_UNUSED2),d2
+#elif  ((_PAGE_ACCESSED & 0xffff00ff) == 0)
+       bset    +(_PAGE_ACCESSED >> 8),(1,a2)
+#else
+#error "_PAGE_ACCESSED value is out of range"
+#endif
+       and     ~xPTEL2_UNUSED1,d2
 itlb_miss_set:
-       mov     d2,(IPTEL)              # change the TLB
+       mov     d2,(IPTEL2)             # change the TLB
 #ifdef CONFIG_GDBSTUB
        movm    (sp),[d2,d3,a2]
 #endif
@@ -79,7 +90,6 @@ itlb_miss_fault:
 ###############################################################################
        .type   dtlb_miss,@function
 ENTRY(dtlb_miss)
-       and     ~EPSW_NMID,epsw
 #ifdef CONFIG_GDBSTUB
        movm    [d2,d3,a2],(sp)
 #else
@@ -90,6 +100,12 @@ ENTRY(dtlb_miss)
        nop
 #endif
 
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+       mov     (MMUCTR),d2
+       mov     d2,(MMUCTR)
+#endif
+
+       and     ~EPSW_NMID,epsw
        mov     (DPTEU),d3
        mov     (PTBR),a2
        mov     d3,d2
@@ -108,10 +124,16 @@ ENTRY(dtlb_miss)
        btst    _PAGE_VALID,d2
        beq     dtlb_miss_fault         # jump if doesn't point to a page
                                        # (might be a swap id)
+#if    ((_PAGE_ACCESSED & 0xffffff00) == 0)
        bset    _PAGE_ACCESSED,(0,a2)
-       and     ~(xPTEL_UNUSED1|xPTEL_UNUSED2),d2
+#elif  ((_PAGE_ACCESSED & 0xffff00ff) == 0)
+       bset    +(_PAGE_ACCESSED >> 8),(1,a2)
+#else
+#error "_PAGE_ACCESSED value is out of range"
+#endif
+       and     ~xPTEL2_UNUSED1,d2
 dtlb_miss_set:
-       mov     d2,(DPTEL)              # change the TLB
+       mov     d2,(DPTEL2)             # change the TLB
 #ifdef CONFIG_GDBSTUB
        movm    (sp),[d2,d3,a2]
 #endif
@@ -130,9 +152,15 @@ dtlb_miss_fault:
 ###############################################################################
        .type   itlb_aerror,@function
 ENTRY(itlb_aerror)
-       and     ~EPSW_NMID,epsw
        add     -4,sp
        SAVE_ALL
+
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+       mov     (MMUCTR),d1
+       mov     d1,(MMUCTR)
+#endif
+
+       and     ~EPSW_NMID,epsw
        add     -4,sp                           # need to pass three params
 
        # calculate the fault code
@@ -140,15 +168,13 @@ ENTRY(itlb_aerror)
        or      0x00010000,d1                   # it's an instruction fetch
 
        # determine the page address
-       mov     (IPTEU),a2
-       mov     a2,d0
+       mov     (IPTEU),d0
        and     PAGE_MASK,d0
        mov     d0,(12,sp)
 
        clr     d0
-       mov     d0,(IPTEL)
+       mov     d0,(IPTEL2)
 
-       and     ~EPSW_NMID,epsw
        or      EPSW_IE,epsw
        mov     fp,d0
        call    do_page_fault[],0               # do_page_fault(regs,code,addr)
@@ -163,10 +189,16 @@ ENTRY(itlb_aerror)
 ###############################################################################
        .type   dtlb_aerror,@function
 ENTRY(dtlb_aerror)
-       and     ~EPSW_NMID,epsw
        add     -4,sp
        SAVE_ALL
+
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+       mov     (MMUCTR),d1
+       mov     d1,(MMUCTR)
+#endif
+
        add     -4,sp                           # need to pass three params
+       and     ~EPSW_NMID,epsw
 
        # calculate the fault code
        movhu   (MMUFCR_DFC),d1
@@ -178,9 +210,8 @@ ENTRY(dtlb_aerror)
        mov     d0,(12,sp)
 
        clr     d0
-       mov     d0,(DPTEL)
+       mov     d0,(DPTEL2)
 
-       and     ~EPSW_NMID,epsw
        or      EPSW_IE,epsw
        mov     fp,d0
        call    do_page_fault[],0               # do_page_fault(regs,code,addr)
diff --git a/arch/mn10300/mm/tlb-smp.c b/arch/mn10300/mm/tlb-smp.c
new file mode 100644 (file)
index 0000000..0b6a5ad
--- /dev/null
@@ -0,0 +1,214 @@
+/* SMP TLB support routines.
+ *
+ * Copyright (C) 2006-2008 Panasonic Corporation
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/cpumask.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/profile.h>
+#include <linux/smp.h>
+#include <asm/tlbflush.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/processor.h>
+#include <asm/bug.h>
+#include <asm/exceptions.h>
+#include <asm/hardirq.h>
+#include <asm/fpu.h>
+#include <asm/mmu_context.h>
+#include <asm/thread_info.h>
+#include <asm/cpu-regs.h>
+#include <asm/intctl-regs.h>
+
+/*
+ * For flush TLB
+ */
+#define FLUSH_ALL      0xffffffff
+
+static cpumask_t flush_cpumask;
+static struct mm_struct *flush_mm;
+static unsigned long flush_va;
+static DEFINE_SPINLOCK(tlbstate_lock);
+
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
+       &init_mm, 0
+};
+
+static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
+                            unsigned long va);
+static void do_flush_tlb_all(void *info);
+
+/**
+ * smp_flush_tlb - Callback to invalidate the TLB.
+ * @unused: Callback context (ignored).
+ */
+void smp_flush_tlb(void *unused)
+{
+       unsigned long cpu_id;
+
+       cpu_id = get_cpu();
+
+       if (!cpu_isset(cpu_id, flush_cpumask))
+               /* This was a BUG() but until someone can quote me the line
+                * from the intel manual that guarantees an IPI to multiple
+                * CPUs is retried _only_ on the erroring CPUs, it's staying as a
+                * return
+                *
+                * BUG();
+                */
+               goto out;
+
+       if (flush_va == FLUSH_ALL)
+               local_flush_tlb();
+       else
+               local_flush_tlb_page(flush_mm, flush_va);
+
+       smp_mb__before_clear_bit();
+       cpu_clear(cpu_id, flush_cpumask);
+       smp_mb__after_clear_bit();
+out:
+       put_cpu();
+}
+
+/**
+ * flush_tlb_others - Tell the specified CPUs to invalidate their TLBs
+ * @cpumask: The list of CPUs to target.
+ * @mm: The VM context to flush from (if va!=FLUSH_ALL).
+ * @va: Virtual address to flush or FLUSH_ALL to flush everything.
+ */
+static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
+                            unsigned long va)
+{
+       cpumask_t tmp;
+
+       /* A couple of sanity checks (to be removed):
+        * - mask must not be empty
+        * - current CPU must not be in mask
+        * - we do not send IPIs to as-yet unbooted CPUs.
+        */
+       BUG_ON(!mm);
+       BUG_ON(cpus_empty(cpumask));
+       BUG_ON(cpu_isset(smp_processor_id(), cpumask));
+
+       cpus_and(tmp, cpumask, cpu_online_map);
+       BUG_ON(!cpus_equal(cpumask, tmp));
+
+       /* I'm not happy about this global shared spinlock in the MM hot path,
+        * but we'll see how contended it is.
+        *
+        * Temporarily this turns IRQs off, so that lockups are detected by the
+        * NMI watchdog.
+        */
+       spin_lock(&tlbstate_lock);
+
+       flush_mm = mm;
+       flush_va = va;
+#if NR_CPUS <= BITS_PER_LONG
+       atomic_set_mask(cpumask.bits[0], &flush_cpumask.bits[0]);
+#else
+#error Not supported.
+#endif
+
+       /* FIXME: if NR_CPUS>=3, change send_IPI_mask */
+       smp_call_function(smp_flush_tlb, NULL, 1);
+
+       while (!cpus_empty(flush_cpumask))
+               /* Lockup detection does not belong here */
+               smp_mb();
+
+       flush_mm = NULL;
+       flush_va = 0;
+       spin_unlock(&tlbstate_lock);
+}
+
+/**
+ * flush_tlb_mm - Invalidate TLB of specified VM context
+ * @mm: The VM context to invalidate.
+ */
+void flush_tlb_mm(struct mm_struct *mm)
+{
+       cpumask_t cpu_mask;
+
+       preempt_disable();
+       cpu_mask = mm->cpu_vm_mask;
+       cpu_clear(smp_processor_id(), cpu_mask);
+
+       local_flush_tlb();
+       if (!cpus_empty(cpu_mask))
+               flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+
+       preempt_enable();
+}
+
+/**
+ * flush_tlb_current_task - Invalidate TLB of current task
+ */
+void flush_tlb_current_task(void)
+{
+       struct mm_struct *mm = current->mm;
+       cpumask_t cpu_mask;
+
+       preempt_disable();
+       cpu_mask = mm->cpu_vm_mask;
+       cpu_clear(smp_processor_id(), cpu_mask);
+
+       local_flush_tlb();
+       if (!cpus_empty(cpu_mask))
+               flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+
+       preempt_enable();
+}
+
+/**
+ * flush_tlb_page - Invalidate TLB of page
+ * @vma: The VM context to invalidate the page for.
+ * @va: The virtual address of the page to invalidate.
+ */
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
+{
+       struct mm_struct *mm = vma->vm_mm;
+       cpumask_t cpu_mask;
+
+       preempt_disable();
+       cpu_mask = mm->cpu_vm_mask;
+       cpu_clear(smp_processor_id(), cpu_mask);
+
+       local_flush_tlb_page(mm, va);
+       if (!cpus_empty(cpu_mask))
+               flush_tlb_others(cpu_mask, mm, va);
+
+       preempt_enable();
+}
+
+/**
+ * do_flush_tlb_all - Callback to completely invalidate a TLB
+ * @unused: Callback context (ignored).
+ */
+static void do_flush_tlb_all(void *unused)
+{
+       local_flush_tlb_all();
+}
+
+/**
+ * flush_tlb_all - Completely invalidate TLBs on all CPUs
+ */
+void flush_tlb_all(void)
+{
+       on_each_cpu(do_flush_tlb_all, NULL, 1);
+}
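
The shootdown handshake above is compact but easy to miss: flush_tlb_others() takes tlbstate_lock, publishes flush_mm/flush_va, atomically sets the target-CPU bits in flush_cpumask, kicks the targets, and spins until each target's smp_flush_tlb() clears its own bit back out. Below is a minimal userspace analogue of just that handshake, using pthreads and C11 atomics; every name in it is invented for illustration, and polling stands in for real IPIs.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NCPUS 4

    static pthread_mutex_t shoot_lock = PTHREAD_MUTEX_INITIALIZER; /* plays tlbstate_lock */
    static atomic_ulong targets;                                   /* plays flush_cpumask */
    static atomic_bool stop_flag;

    /* Plays smp_flush_tlb(): each worker is a "CPU" that acknowledges
     * by clearing its own bit after doing its local "flush". */
    static void *cpu_thread(void *arg)
    {
            unsigned long bit = 1UL << (uintptr_t)arg;

            while (!atomic_load(&stop_flag))
                    if (atomic_load(&targets) & bit)
                            /* local_flush_tlb() would run here */
                            atomic_fetch_and(&targets, ~bit);  /* cpu_clear() equivalent */
            return NULL;
    }

    /* Plays flush_tlb_others(): publish the mask, spin until every target acks. */
    static void shootdown_others(unsigned long mask)
    {
            pthread_mutex_lock(&shoot_lock);
            atomic_fetch_or(&targets, mask);    /* atomic_set_mask() equivalent */
            while (atomic_load(&targets) & mask)
                    ;                           /* the kernel spins with smp_mb() */
            pthread_mutex_unlock(&shoot_lock);
    }

    int main(void)
    {
            pthread_t t[NCPUS];

            for (uintptr_t i = 0; i < NCPUS; i++)
                    pthread_create(&t[i], NULL, cpu_thread, (void *)i);

            shootdown_others(0x0e);             /* CPU 0 targets CPUs 1-3 */
            printf("all targets acknowledged\n");

            atomic_store(&stop_flag, true);
            for (int i = 0; i < NCPUS; i++)
                    pthread_join(t[i], NULL);
            return 0;
    }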
index bdc1f9a59b4ccb360fddd5f88898a06d83cedafe..c1528004163ce8fe54418368ce3aea757e31104f 100644 (file)
  */
 #define MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL    4
 
+/*
+ * The size of range at which it becomes more economical to just flush the
+ * whole cache rather than trying to flush the specified range.
+ */
+#define MN10300_DCACHE_FLUSH_BORDER    \
+       +(L1_CACHE_NWAYS * L1_CACHE_NENTRIES * L1_CACHE_BYTES)
+#define MN10300_DCACHE_FLUSH_INV_BORDER        \
+       +(L1_CACHE_NWAYS * L1_CACHE_NENTRIES * L1_CACHE_BYTES)
+
 #endif /* _ASM_PROC_CACHE_H */
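
To make the border heuristic concrete: once a range spans more data than the cache can hold, walking it line by line costs more than purging everything. A sketch of a helper applying the constant, assuming the arch's mn10300_dcache_flush() / mn10300_dcache_flush_range() primitives; the wrapper itself is illustrative, not the actual implementation.

    /* Flush [start, end) the cheap way: past one cache's worth of data,
     * a whole-cache flush beats a per-line walk of the range. */
    static void dcache_flush_economical(unsigned long start, unsigned long end)
    {
            if (end - start > MN10300_DCACHE_FLUSH_BORDER)
                    mn10300_dcache_flush();                 /* purge the whole cache */
            else
                    mn10300_dcache_flush_range(start, end); /* flush only the covering lines */
    }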
index aa23e147d620f3d1d3b3fbce9641862d2dcb7e95..704a819f1f4b123c0b2a37171aa429c0abe48c81 100644 (file)
@@ -13,6 +13,4 @@
 
 #include <unit/clock.h>
 
-#define MN10300_WDCLK          MN10300_IOCLK
-
 #endif /* _ASM_PROC_CLOCK_H */
diff --git a/arch/mn10300/proc-mn103e010/include/proc/dmactl-regs.h b/arch/mn10300/proc-mn103e010/include/proc/dmactl-regs.h
new file mode 100644 (file)
index 0000000..d72d328
--- /dev/null
@@ -0,0 +1,102 @@
+/* MN103E010 on-board DMA controller registers
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _ASM_PROC_DMACTL_REGS_H
+#define _ASM_PROC_DMACTL_REGS_H
+
+#include <asm/cpu-regs.h>
+
+#ifdef __KERNEL__
+
+/* DMA registers */
+#define        DMxCTR(N)               __SYSREG(0xd2000000 + ((N) * 0x100), u32)       /* control reg */
+#define        DMxCTR_BG               0x0000001f      /* transfer request source */
+#define        DMxCTR_BG_SOFT          0x00000000      /* - software source */
+#define        DMxCTR_BG_SC0TX         0x00000002      /* - serial port 0 transmission */
+#define        DMxCTR_BG_SC0RX         0x00000003      /* - serial port 0 reception */
+#define        DMxCTR_BG_SC1TX         0x00000004      /* - serial port 1 transmission */
+#define        DMxCTR_BG_SC1RX         0x00000005      /* - serial port 1 reception */
+#define        DMxCTR_BG_SC2TX         0x00000006      /* - serial port 2 transmission */
+#define        DMxCTR_BG_SC2RX         0x00000007      /* - serial port 2 reception */
+#define        DMxCTR_BG_TM0UFLOW      0x00000008      /* - timer 0 underflow */
+#define        DMxCTR_BG_TM1UFLOW      0x00000009      /* - timer 1 underflow */
+#define        DMxCTR_BG_TM2UFLOW      0x0000000a      /* - timer 2 underflow */
+#define        DMxCTR_BG_TM3UFLOW      0x0000000b      /* - timer 3 underflow */
+#define        DMxCTR_BG_TM6ACMPCAP    0x0000000c      /* - timer 6A compare/capture */
+#define        DMxCTR_BG_AFE           0x0000000d      /* - analogue front-end interrupt source */
+#define        DMxCTR_BG_ADC           0x0000000e      /* - A/D conversion end interrupt source */
+#define        DMxCTR_BG_IRDA          0x0000000f      /* - IrDA interrupt source */
+#define        DMxCTR_BG_RTC           0x00000010      /* - RTC interrupt source */
+#define        DMxCTR_BG_XIRQ0         0x00000011      /* - XIRQ0 pin interrupt source */
+#define        DMxCTR_BG_XIRQ1         0x00000012      /* - XIRQ1 pin interrupt source */
+#define        DMxCTR_BG_XDMR0         0x00000013      /* - external request 0 source (XDMR0 pin) */
+#define        DMxCTR_BG_XDMR1         0x00000014      /* - external request 1 source (XDMR1 pin) */
+#define        DMxCTR_SAM              0x000000e0      /* DMA transfer src addr mode */
+#define        DMxCTR_SAM_INCR         0x00000000      /* - increment */
+#define        DMxCTR_SAM_DECR         0x00000020      /* - decrement */
+#define        DMxCTR_SAM_FIXED        0x00000040      /* - fixed */
+#define        DMxCTR_DAM              0x00000300      /* DMA transfer dest addr mode */
+#define        DMxCTR_DAM_INCR         0x00000000      /* - increment */
+#define        DMxCTR_DAM_DECR         0x00000100      /* - decrement */
+#define        DMxCTR_DAM_FIXED        0x00000200      /* - fixed */
+#define        DMxCTR_TM               0x00001800      /* DMA transfer mode */
+#define        DMxCTR_TM_BATCH         0x00000000      /* - batch transfer */
+#define        DMxCTR_TM_INTERM        0x00001000      /* - intermittent transfer */
+#define        DMxCTR_UT               0x00006000      /* DMA transfer unit */
+#define        DMxCTR_UT_1             0x00000000      /* - 1 byte */
+#define        DMxCTR_UT_2             0x00002000      /* - 2 byte */
+#define        DMxCTR_UT_4             0x00004000      /* - 4 byte */
+#define        DMxCTR_UT_16            0x00006000      /* - 16 byte */
+#define        DMxCTR_TEN              0x00010000      /* DMA channel transfer enable */
+#define        DMxCTR_RQM              0x00060000      /* external request input source mode */
+#define        DMxCTR_RQM_FALLEDGE     0x00000000      /* - falling edge */
+#define        DMxCTR_RQM_RISEEDGE     0x00020000      /* - rising edge */
+#define        DMxCTR_RQM_LOLEVEL      0x00040000      /* - low level */
+#define        DMxCTR_RQM_HILEVEL      0x00060000      /* - high level */
+#define        DMxCTR_RQF              0x01000000      /* DMA transfer request flag */
+#define        DMxCTR_XEND             0x80000000      /* DMA transfer end flag */
+
+#define        DMxSRC(N)               __SYSREG(0xd2000004 + ((N) * 0x100), u32)       /* source addr reg */
+
+#define        DMxDST(N)               __SYSREG(0xd2000008 + ((N) * 0x100), u32)       /* dest addr reg */
+
+#define        DMxSIZ(N)               __SYSREG(0xd200000c + ((N) * 0x100), u32)       /* size reg */
+#define DMxSIZ_CT              0x000fffff      /* number of bytes to transfer */
+
+#define        DMxCYC(N)               __SYSREG(0xd2000010 + ((N) * 0x100), u32)       /* intermittent
+                                                                                * size reg */
+#define DMxCYC_CYC             0x000000ff      /* number of intermittent transfers - 1 */
+
+#define DM0IRQ                 16              /* DMA channel 0 complete IRQ */
+#define DM1IRQ                 17              /* DMA channel 1 complete IRQ */
+#define DM2IRQ                 18              /* DMA channel 2 complete IRQ */
+#define DM3IRQ                 19              /* DMA channel 3 complete IRQ */
+
+#define        DM0ICR                  GxICR(DM0IRQ)   /* DMA channel 0 complete intr ctrl reg */
+#define        DM1ICR                  GxICR(DM1IRQ)   /* DMA channel 1 complete intr ctrl reg */
+#define        DM2ICR                  GxICR(DM2IRQ)   /* DMA channel 2 complete intr ctrl reg */
+#define        DM3ICR                  GxICR(DM3IRQ)   /* DMA channel 3 complete intr ctrl reg */
+
+#ifndef __ASSEMBLY__
+
+struct mn10300_dmactl_regs {
+       u32             ctr;
+       const void      *src;
+       void            *dst;
+       u32             siz;
+       u32             cyc;
+} __attribute__((aligned(0x100)));
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_PROC_DMACTL_REGS_H */
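
As a usage illustration of the channel layout above, a software-triggered memory-to-memory transfer on channel 0 would program the source, destination and size registers and then arm DMxCTR. This sequence is inferred from the register definitions, not taken from a real driver, which would normally sleep on the DMnIRQ completion interrupt rather than poll XEND.

    /* Illustrative software-kicked copy on DMA channel 0. */
    static void dma0_memcpy_sketch(const void *src, void *dst, size_t len)
    {
            DMxSRC(0) = (u32)(unsigned long)src;
            DMxDST(0) = (u32)(unsigned long)dst;
            DMxSIZ(0) = len & DMxSIZ_CT;            /* 20-bit byte count */
            DMxCTR(0) = DMxCTR_BG_SOFT |            /* software request source */
                        DMxCTR_SAM_INCR |           /* increment source address */
                        DMxCTR_DAM_INCR |           /* increment dest address */
                        DMxCTR_UT_4 |               /* 4-byte transfer units */
                        DMxCTR_TEN;                 /* enable the channel */
            while (!(DMxCTR(0) & DMxCTR_XEND))
                    ;                               /* poll the transfer-end flag */
    }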
diff --git a/arch/mn10300/proc-mn103e010/include/proc/intctl-regs.h b/arch/mn10300/proc-mn103e010/include/proc/intctl-regs.h
new file mode 100644 (file)
index 0000000..f537801
--- /dev/null
@@ -0,0 +1,29 @@
+#ifndef _ASM_PROC_INTCTL_REGS_H
+#define _ASM_PROC_INTCTL_REGS_H
+
+#ifndef _ASM_INTCTL_REGS_H
+# error "please don't include this file directly"
+#endif
+
+/* intr acceptance group reg */
+#define IAGR                   __SYSREG(0xd4000100, u16)
+
+/* group number register */
+#define IAGR_GN                        0x00fc
+
+#define __GET_XIRQ_TRIGGER(X, Z) (((Z) >> ((X) * 2)) & 3)
+
+#define __SET_XIRQ_TRIGGER(X, Y, Z)            \
+({                                             \
+       typeof(Z) x = (Z);                      \
+       x &= ~(3 << ((X) * 2));                 \
+       x |= ((Y) & 3) << ((X) * 2);            \
+       (Z) = x;                                \
+})
+
+/* external pin intr spec reg */
+#define EXTMD                  __SYSREG(0xd4000200, u16)
+#define GET_XIRQ_TRIGGER(X)    __GET_XIRQ_TRIGGER(X, EXTMD)
+#define SET_XIRQ_TRIGGER(X, Y) __SET_XIRQ_TRIGGER(X, Y, EXTMD)
+
+#endif /* _ASM_PROC_INTCTL_REGS_H */
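
Since each XIRQ line owns a 2-bit field in EXTMD, __SET_XIRQ_TRIGGER() is a read-modify-write; SET_XIRQ_TRIGGER(3, trig) expands, roughly, to the sequence below, where trig stands for one of the XIRQ_TRIGGER_* codes supplied by the wrapping asm/intctl-regs.h.

    u16 x = EXTMD;                  /* read the external-pin trigger spec reg */
    x &= ~(3 << (3 * 2));           /* clear XIRQ3's 2-bit trigger field */
    x |= (trig & 3) << (3 * 2);     /* install the new trigger code */
    EXTMD = x;                      /* write the whole register back */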
index 22a2b93f70b7415cc1b775b3bd2e92eb43d61d3d..39c4f8e7d2d322f3fd2b4bb3c25ab3ac8ca1ff6d 100644 (file)
@@ -12,7 +12,7 @@
 #ifndef _ASM_PROC_PROC_H
 #define _ASM_PROC_PROC_H
 
-#define PROCESSOR_VENDOR_NAME  "Matsushita"
+#define PROCESSOR_VENDOR_NAME  "Panasonic"
 #define PROCESSOR_MODEL_NAME   "mn103e010"
 
 #endif /* _ASM_PROC_PROC_H */
index 9a482efafa82d34c1b03c3d5b136eb7eda6551c8..27b97980dca4d15eb05080ceedb92f873009eabd 100644 (file)
@@ -9,7 +9,9 @@
  * 2 of the Licence, or (at your option) any later version.
  */
 #include <linux/kernel.h>
+#include <asm/fpu.h>
 #include <asm/rtc.h>
+#include <asm/busctl-regs.h>
 
 /*
  * initialise the on-silicon processor peripherals
@@ -28,6 +30,7 @@ asmlinkage void __init processor_init(void)
        __set_intr_stub(EXCEP_DAERROR,          dtlb_aerror);
        __set_intr_stub(EXCEP_BUSERROR,         raw_bus_error);
        __set_intr_stub(EXCEP_DOUBLE_FAULT,     double_fault);
+       __set_intr_stub(EXCEP_FPU_DISABLED,     fpu_disabled);
        __set_intr_stub(EXCEP_SYSCALL0,         system_call);
 
        __set_intr_stub(EXCEP_NMI,              nmi_handler);
@@ -73,3 +76,37 @@ asmlinkage void __init processor_init(void)
 
        calibrate_clock();
 }
+
+/*
+ * determine the memory size and base from the memory controller regs
+ */
+void __init get_mem_info(unsigned long *mem_base, unsigned long *mem_size)
+{
+       unsigned long base, size;
+
+       *mem_base = 0;
+       *mem_size = 0;
+
+       base = SDBASE(0);
+       if (base & SDBASE_CE) {
+               size = (base & SDBASE_CBAM) << SDBASE_CBAM_SHIFT;
+               size = ~size + 1;
+               base &= SDBASE_CBA;
+
+               printk(KERN_INFO "SDRAM[0]: %luMB @%08lx\n", size >> 20, base);
+               *mem_size += size;
+               *mem_base = base;
+       }
+
+       base = SDBASE(1);
+       if (base & SDBASE_CE) {
+               size = (base & SDBASE_CBAM) << SDBASE_CBAM_SHIFT;
+               size = ~size + 1;
+               base &= SDBASE_CBA;
+
+               printk(KERN_INFO "SDRAM[1]: %luMB @%08lx\n", size >> 20, base);
+               *mem_size += size;
+               if (*mem_base == 0)
+                       *mem_base = base;
+       }
+}
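
The two's-complement step deserves a worked example: the shifted SDBASE_CBAM field is the bank's address mask, so a 64 MiB bank yields 0xfc000000, and ~0xfc000000 + 1 = 0x04000000 — exactly 64 MiB. (SDBASE_CE, SDBASE_CBAM, SDBASE_CBAM_SHIFT and SDBASE_CBA are assumed here to come from the busctl-regs.h included above.)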
diff --git a/arch/mn10300/proc-mn2ws0050/Makefile b/arch/mn10300/proc-mn2ws0050/Makefile
new file mode 100644 (file)
index 0000000..d4ca133
--- /dev/null
@@ -0,0 +1,5 @@
+#
+# Makefile for the linux kernel.
+#
+
+obj-y   := proc-init.o
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
new file mode 100644 (file)
index 0000000..cafd7b5
--- /dev/null
@@ -0,0 +1,48 @@
+/* Cache specification
+ *
+ * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * Modified by Matsushita Electric Industrial Co., Ltd.
+ * Modifications:
+ *  13-Nov-2006 MEI Add L1_CACHE_SHIFT_MAX definition.
+ *  29-Jul-2008 MEI Add define for MN10300_HAS_AREAPURGE_REG.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_PROC_CACHE_H
+#define _ASM_PROC_CACHE_H
+
+/*
+ * L1 cache
+ */
+#define L1_CACHE_NWAYS         4               /* number of ways in caches */
+#define L1_CACHE_NENTRIES      128             /* number of entries in each way */
+#define L1_CACHE_BYTES         32              /* bytes per entry */
+#define L1_CACHE_SHIFT         5               /* shift for bytes per entry */
+#define L1_CACHE_WAYDISP       0x1000          /* distance from one way to the next */
+
+#define L1_CACHE_TAG_VALID     0x00000001      /* cache tag valid bit */
+#define L1_CACHE_TAG_DIRTY     0x00000008      /* data cache tag dirty bit */
+#define L1_CACHE_TAG_ENTRY     0x00000fe0      /* cache tag entry address mask */
+#define L1_CACHE_TAG_ADDRESS   0xfffff000      /* cache tag line address mask */
+
+/*
+ * specification of the interval between interrupt checks whilst managing
+ * the cache with interrupts disabled
+ */
+#define MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL    4
+
+/*
+ * The size of range at which it becomes more economical to just flush the
+ * whole cache rather than trying to flush the specified range.
+ */
+#define MN10300_DCACHE_FLUSH_BORDER \
+       +(L1_CACHE_NWAYS * L1_CACHE_NENTRIES * L1_CACHE_BYTES)
+#define MN10300_DCACHE_FLUSH_INV_BORDER        \
+       +(L1_CACHE_NWAYS * L1_CACHE_NENTRIES * L1_CACHE_BYTES)
+
+#endif /* _ASM_PROC_CACHE_H */
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/clock.h b/arch/mn10300/proc-mn2ws0050/include/proc/clock.h
new file mode 100644 (file)
index 0000000..fe4c0a4
--- /dev/null
@@ -0,0 +1,20 @@
+/* clock.h: proc-specific clocks
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * Modified by Matsushita Electric Industrial Co., Ltd.
+ * Modifications:
+ *  23-Feb-2007 MEI Delete define for watchdog timer.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_PROC_CLOCK_H
+#define _ASM_PROC_CLOCK_H
+
+#include <unit/clock.h>
+
+#endif /* _ASM_PROC_CLOCK_H */
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/dmactl-regs.h b/arch/mn10300/proc-mn2ws0050/include/proc/dmactl-regs.h
new file mode 100644 (file)
index 0000000..4c4319e
--- /dev/null
@@ -0,0 +1,103 @@
+/* MN2WS0050 on-board DMA controller registers
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_PROC_DMACTL_REGS_H
+#define _ASM_PROC_DMACTL_REGS_H
+
+#include <asm/cpu-regs.h>
+
+#ifdef __KERNEL__
+
+/* DMA registers */
+#define        DMxCTR(N)               __SYSREG(0xd4005000+(N*0x100), u32)     /* control reg */
+#define        DMxCTR_BG               0x0000001f      /* transfer request source */
+#define        DMxCTR_BG_SOFT          0x00000000      /* - software source */
+#define        DMxCTR_BG_SC0TX         0x00000002      /* - serial port 0 transmission */
+#define        DMxCTR_BG_SC0RX         0x00000003      /* - serial port 0 reception */
+#define        DMxCTR_BG_SC1TX         0x00000004      /* - serial port 1 transmission */
+#define        DMxCTR_BG_SC1RX         0x00000005      /* - serial port 1 reception */
+#define        DMxCTR_BG_SC2TX         0x00000006      /* - serial port 2 transmission */
+#define        DMxCTR_BG_SC2RX         0x00000007      /* - serial port 2 reception */
+#define        DMxCTR_BG_TM0UFLOW      0x00000008      /* - timer 0 underflow */
+#define        DMxCTR_BG_TM1UFLOW      0x00000009      /* - timer 1 underflow */
+#define        DMxCTR_BG_TM2UFLOW      0x0000000a      /* - timer 2 underflow */
+#define        DMxCTR_BG_TM3UFLOW      0x0000000b      /* - timer 3 underflow */
+#define        DMxCTR_BG_TM6ACMPCAP    0x0000000c      /* - timer 6A compare/capture */
+#define        DMxCTR_BG_RYBY          0x0000000d      /* - NAND Flash RY/BY request source */
+#define        DMxCTR_BG_RMC           0x0000000e      /* - remote controller output */
+#define        DMxCTR_BG_XIRQ12        0x00000011      /* - XIRQ12 pin interrupt source */
+#define        DMxCTR_BG_XIRQ13        0x00000012      /* - XIRQ13 pin interrupt source */
+#define        DMxCTR_BG_TCK           0x00000014      /* - tick timer underflow */
+#define        DMxCTR_BG_SC4TX         0x00000019      /* - serial port4 transmission */
+#define        DMxCTR_BG_SC4RX         0x0000001a      /* - serial port4 reception */
+#define        DMxCTR_BG_SC5TX         0x0000001b      /* - serial port5 transmission */
+#define        DMxCTR_BG_SC5RX         0x0000001c      /* - serial port5 reception */
+#define        DMxCTR_BG_SC6TX         0x0000001d      /* - serial port6 transmission */
+#define        DMxCTR_BG_SC6RX         0x0000001e      /* - serial port6 reception */
+#define        DMxCTR_BG_TMSUFLOW      0x0000001f      /* - timestamp timer underflow */
+#define        DMxCTR_SAM              0x00000060      /* DMA transfer src addr mode */
+#define        DMxCTR_SAM_INCR         0x00000000      /* - increment */
+#define        DMxCTR_SAM_DECR         0x00000020      /* - decrement */
+#define        DMxCTR_SAM_FIXED        0x00000040      /* - fixed */
+#define        DMxCTR_DAM              0x00000300      /* DMA transfer dest addr mode */
+#define        DMxCTR_DAM_INCR         0x00000000      /* - increment */
+#define        DMxCTR_DAM_DECR         0x00000100      /* - decrement */
+#define        DMxCTR_DAM_FIXED        0x00000200      /* - fixed */
+#define        DMxCTR_UT               0x00006000      /* DMA transfer unit */
+#define        DMxCTR_UT_1             0x00000000      /* - 1 byte */
+#define        DMxCTR_UT_2             0x00002000      /* - 2 byte */
+#define        DMxCTR_UT_4             0x00004000      /* - 4 byte */
+#define        DMxCTR_UT_16            0x00006000      /* - 16 byte */
+#define DMxCTR_RRE             0x00008000      /* DMA round robin enable */
+#define        DMxCTR_TEN              0x00010000      /* DMA channel transfer enable */
+#define        DMxCTR_RQM              0x00060000      /* external request input source mode */
+#define        DMxCTR_RQM_FALLEDGE     0x00000000      /* - falling edge */
+#define        DMxCTR_RQM_RISEEDGE     0x00020000      /* - rising edge */
+#define        DMxCTR_RQM_LOLEVEL      0x00040000      /* - low level */
+#define        DMxCTR_RQM_HILEVEL      0x00060000      /* - high level */
+#define        DMxCTR_RQF              0x01000000      /* DMA transfer request flag */
+#define        DMxCTR_PERR             0x40000000      /* DMA transfer parameter error flag */
+#define        DMxCTR_XEND             0x80000000      /* DMA transfer end flag */
+
+#define        DMxSRC(N)               __SYSREG(0xd4005004+((N)*0x100), u32)   /* source addr reg */
+
+#define        DMxDST(N)               __SYSREG(0xd4005008+((N)*0x100), u32)   /* dest addr reg */
+
+#define        DMxSIZ(N)               __SYSREG(0xd400500c+((N)*0x100), u32)   /* size reg */
+#define DMxSIZ_CT              0x000fffff      /* number of bytes to transfer */
+
+#define        DMxCYC(N)               __SYSREG(0xd4005010+((N)*0x100), u32)   /* intermittent size reg */
+#define DMxCYC_CYC             0x000000ff      /* number of intermittent transfers - 1 */
+
+#define DM0IRQ                 16              /* DMA channel 0 complete IRQ */
+#define DM1IRQ                 17              /* DMA channel 1 complete IRQ */
+#define DM2IRQ                 18              /* DMA channel 2 complete IRQ */
+#define DM3IRQ                 19              /* DMA channel 3 complete IRQ */
+
+#define        DM0ICR                  GxICR(DM0IRQ)   /* DMA channel 0 complete intr ctrl reg */
+#define        DM1ICR                  GxICR(DM1IRQ)   /* DMA channel 1 complete intr ctrl reg */
+#define        DM2ICR                  GxICR(DM2IRQ)   /* DMA channel 2 complete intr ctrl reg */
+#define        DM3ICR                  GxICR(DM3IRQ)   /* DMA channel 3 complete intr ctrl reg */
+
+#ifndef __ASSEMBLY__
+
+struct mn10300_dmactl_regs {
+       u32             ctr;
+       const void      *src;
+       void            *dst;
+       u32             siz;
+       u32             cyc;
+} __attribute__((aligned(0x100)));
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_PROC_DMACTL_REGS_H */
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/intctl-regs.h b/arch/mn10300/proc-mn2ws0050/include/proc/intctl-regs.h
new file mode 100644 (file)
index 0000000..a1e9772
--- /dev/null
@@ -0,0 +1,29 @@
+#ifndef _ASM_PROC_INTCTL_REGS_H
+#define _ASM_PROC_INTCTL_REGS_H
+
+#ifndef _ASM_INTCTL_REGS_H
+# error "please don't include this file directly"
+#endif
+
+/* intr acceptance group reg */
+#define IAGR                   __SYSREG(0xd4000100, u16)
+
+/* group number register */
+#define IAGR_GN                        0x003fc
+
+#define __GET_XIRQ_TRIGGER(X, Z) (((Z) >> ((X) * 2)) & 3)
+
+#define __SET_XIRQ_TRIGGER(X, Y, Z)            \
+({                                             \
+       typeof(Z) x = (Z);                      \
+       x &= ~(3 << ((X) * 2));                 \
+       x |= ((Y) & 3) << ((X) * 2);            \
+       (Z) = x;                                \
+})
+
+/* external pin intr spec reg */
+#define EXTMD0                 __SYSREG(0xd4000200, u32)
+#define GET_XIRQ_TRIGGER(X)    __GET_XIRQ_TRIGGER(X, EXTMD0)
+#define SET_XIRQ_TRIGGER(X, Y) __SET_XIRQ_TRIGGER(X, Y, EXTMD0)
+
+#endif /* _ASM_PROC_INTCTL_REGS_H */
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/irq.h b/arch/mn10300/proc-mn2ws0050/include/proc/irq.h
new file mode 100644 (file)
index 0000000..37777a8
--- /dev/null
@@ -0,0 +1,49 @@
+/* MN2WS0050 on-board interrupt controller registers
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * Modified by Matsushita Electric Industrial Co., Ltd.
+ * Modifications:
+ *  13-Nov-2006 MEI Define extended IRQ number for SMP support.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _PROC_IRQ_H
+#define _PROC_IRQ_H
+
+#ifdef __KERNEL__
+
+#define GxICR_NUM_IRQS         163
+#ifdef CONFIG_SMP
+#define GxICR_NUM_EXT_IRQS     197
+#endif  /* CONFIG_SMP */
+
+#define GxICR_NUM_XIRQS                16
+
+#define XIRQ0          34
+#define XIRQ1          35
+#define XIRQ2          36
+#define XIRQ3          37
+#define XIRQ4          38
+#define XIRQ5          39
+#define XIRQ6          40
+#define XIRQ7          41
+#define XIRQ8          42
+#define XIRQ9          43
+#define XIRQ10         44
+#define XIRQ11         45
+#define XIRQ12         46
+#define XIRQ13         47
+#define XIRQ14         48
+#define XIRQ15         49
+
+#define XIRQ2IRQ(num)  (XIRQ0 + (num))
+
+#endif /* __KERNEL__ */
+
+#endif /* _PROC_IRQ_H */
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/nand-regs.h b/arch/mn10300/proc-mn2ws0050/include/proc/nand-regs.h
new file mode 100644 (file)
index 0000000..84448f3
--- /dev/null
@@ -0,0 +1,120 @@
+/* NAND flash interface register definitions
+ *
+ * Copyright (C) 2008-2009 Panasonic Corporation
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef        _PROC_NAND_REGS_H_
+#define        _PROC_NAND_REGS_H_
+
+/* command register */
+#define FCOMMAND_0             __SYSREG(0xd8f00000, u8) /* fcommand[24:31] */
+#define FCOMMAND_1             __SYSREG(0xd8f00001, u8) /* fcommand[16:23] */
+#define FCOMMAND_2             __SYSREG(0xd8f00002, u8) /* fcommand[8:15] */
+#define FCOMMAND_3             __SYSREG(0xd8f00003, u8) /* fcommand[0:7] */
+
+/* for DMA 16-byte transfers, use the FCOMMAND2 register */
+#define FCOMMAND2_0            __SYSREG(0xd8f00110, u8) /* fcommand2[24:31] */
+#define FCOMMAND2_1            __SYSREG(0xd8f00111, u8) /* fcommand2[16:23] */
+#define FCOMMAND2_2            __SYSREG(0xd8f00112, u8) /* fcommand2[8:15] */
+#define FCOMMAND2_3            __SYSREG(0xd8f00113, u8) /* fcommand2[0:7] */
+
+#define FCOMMAND_FIEN          0x80            /* nand flash I/F enable */
+#define FCOMMAND_BW_8BIT       0x00            /* 8bit bus width */
+#define FCOMMAND_BW_16BIT      0x40            /* 16bit bus width */
+#define FCOMMAND_BLOCKSZ_SMALL 0x00            /* small block */
+#define FCOMMAND_BLOCKSZ_LARGE 0x20            /* large block */
+#define FCOMMAND_DMASTART      0x10            /* dma start */
+#define FCOMMAND_RYBY          0x08            /* ready/busy flag */
+#define FCOMMAND_RYBYINTMSK    0x04            /* mask ready/busy interrupt */
+#define FCOMMAND_XFWP          0x02            /* write protect enable */
+#define FCOMMAND_XFCE          0x01            /* flash device disable */
+#define FCOMMAND_SEQKILL       0x10            /* stop seq-read */
+#define FCOMMAND_ANUM          0x07            /* address cycle */
+#define FCOMMAND_ANUM_NONE     0x00            /* address cycle none */
+#define FCOMMAND_ANUM_1CYC     0x01            /* address cycle 1cycle */
+#define FCOMMAND_ANUM_2CYC     0x02            /* address cycle 2cycle */
+#define FCOMMAND_ANUM_3CYC     0x03            /* address cycle 3cycle */
+#define FCOMMAND_ANUM_4CYC     0x04            /* address cycle 4cycle */
+#define FCOMMAND_ANUM_5CYC     0x05            /* address cycle 5cycle */
+#define FCOMMAND_FCMD_READ0    0x00            /* read1 command */
+#define FCOMMAND_FCMD_SEQIN    0x80            /* page program 1st command */
+#define FCOMMAND_FCMD_PAGEPROG 0x10            /* page program 2nd command */
+#define FCOMMAND_FCMD_RESET    0xff            /* reset command */
+#define FCOMMAND_FCMD_ERASE1   0x60            /* erase 1st command */
+#define FCOMMAND_FCMD_ERASE2   0xd0            /* erase 2nd command */
+#define FCOMMAND_FCMD_STATUS   0x70            /* read status command */
+#define FCOMMAND_FCMD_READID   0x90            /* read id command */
+#define FCOMMAND_FCMD_READOOB  0x50            /* read3 command */
+/* address register */
+#define FADD                   __SYSREG(0xd8f00004, u32)
+/* address register 2 */
+#define FADD2                  __SYSREG(0xd8f00008, u32)
+/* error judgement register */
+#define FJUDGE                 __SYSREG(0xd8f0000c, u32)
+#define FJUDGE_NOERR           0x0             /* no error */
+#define FJUDGE_1BITERR         0x1             /* 1bit error in data area */
+#define FJUDGE_PARITYERR       0x2             /* parity error */
+#define FJUDGE_UNCORRECTABLE   0x3             /* uncorrectable error */
+#define FJUDGE_ERRJDG_MSK      0x3             /* mask of judgement result */
+/* 1st ECC store register */
+#define FECC11                 __SYSREG(0xd8f00010, u32)
+/* 2nd ECC store register */
+#define FECC12                 __SYSREG(0xd8f00014, u32)
+/* 3rd ECC store register */
+#define FECC21                 __SYSREG(0xd8f00018, u32)
+/* 4th ECC store register */
+#define FECC22                 __SYSREG(0xd8f0001c, u32)
+/* 5th ECC store register */
+#define FECC31                 __SYSREG(0xd8f00020, u32)
+/* 6th ECC store register */
+#define FECC32                 __SYSREG(0xd8f00024, u32)
+/* 7th ECC store register */
+#define FECC41                 __SYSREG(0xd8f00028, u32)
+/* 8th ECC store register */
+#define FECC42                 __SYSREG(0xd8f0002c, u32)
+/* data register */
+#define FDATA                  __SYSREG(0xd8f00030, u32)
+/* access pulse register */
+#define FPWS                   __SYSREG(0xd8f00100, u32)
+#define FPWS_PWS1W_2CLK                0x00000000 /* write pulse width 2clock */
+#define FPWS_PWS1W_3CLK                0x01000000 /* write pulse width 3clock */
+#define FPWS_PWS1W_4CLK                0x02000000 /* write pulse width 4clock */
+#define FPWS_PWS1W_5CLK                0x03000000 /* write pulse width 5clock */
+#define FPWS_PWS1W_6CLK                0x04000000 /* write pulse width 6clock */
+#define FPWS_PWS1W_7CLK                0x05000000 /* write pulse width 7clock */
+#define FPWS_PWS1W_8CLK                0x06000000 /* write pulse width 8clock */
+#define FPWS_PWS1R_3CLK                0x00010000 /* read pulse width 3clock */
+#define FPWS_PWS1R_4CLK                0x00020000 /* read pulse width 4clock */
+#define FPWS_PWS1R_5CLK                0x00030000 /* read pulse width 5clock */
+#define FPWS_PWS1R_6CLK                0x00040000 /* read pulse width 6clock */
+#define FPWS_PWS1R_7CLK                0x00050000 /* read pulse width 7clock */
+#define FPWS_PWS1R_8CLK                0x00060000 /* read pulse width 8clock */
+#define FPWS_PWS2W_2CLK                0x00000100 /* write pulse interval 2clock */
+#define FPWS_PWS2W_3CLK                0x00000200 /* write pulse interval 3clock */
+#define FPWS_PWS2W_4CLK                0x00000300 /* write pulse interval 4clock */
+#define FPWS_PWS2W_5CLK                0x00000400 /* write pulse interval 5clock */
+#define FPWS_PWS2W_6CLK                0x00000500 /* write pulse interval 6clock */
+#define FPWS_PWS2R_2CLK                0x00000001 /* read pulse interval 2clock */
+#define FPWS_PWS2R_3CLK                0x00000002 /* read pulse interval 3clock */
+#define FPWS_PWS2R_4CLK                0x00000003 /* read pulse interval 4clock */
+#define FPWS_PWS2R_5CLK                0x00000004 /* read pulse interval 5clock */
+#define FPWS_PWS2R_6CLK                0x00000005 /* read pulse interval 6clock */
+/* command register 2 */
+#define FCOMMAND2              __SYSREG(0xd8f00110, u32)
+/* transfer frequency register */
+#define FNUM                   __SYSREG(0xd8f00114, u32)
+#define FSDATA_ADDR            0xd8f00400
+/* active data register */
+#define FSDATA                 __SYSREG(FSDATA_ADDR, u32)
+
+#endif /* _PROC_NAND_REGS_H_ */
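
Of the registers above, FJUDGE is the one a driver consults after every page read. A hedged sketch of the decode — the function and its return convention are invented, only the register and mask names come from this header:

    /* Classify the ECC result of the last page read (illustrative only). */
    static int nand_check_ecc(void)
    {
            switch (FJUDGE & FJUDGE_ERRJDG_MSK) {
            case FJUDGE_NOERR:      return 0;   /* data clean */
            case FJUDGE_1BITERR:    return 1;   /* correctable via FECC11..FECC42 */
            default:                return -1;  /* parity or uncorrectable */
            }
    }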
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/proc.h b/arch/mn10300/proc-mn2ws0050/include/proc/proc.h
new file mode 100644 (file)
index 0000000..90d5cad
--- /dev/null
@@ -0,0 +1,18 @@
+/* proc.h: MN2WS0050 processor description
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_PROC_PROC_H
+#define _ASM_PROC_PROC_H
+
+#define PROCESSOR_VENDOR_NAME  "Panasonic"
+#define PROCESSOR_MODEL_NAME   "mn2ws0050"
+
+#endif /* _ASM_PROC_PROC_H */
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/smp-regs.h b/arch/mn10300/proc-mn2ws0050/include/proc/smp-regs.h
new file mode 100644 (file)
index 0000000..22f277f
--- /dev/null
@@ -0,0 +1,51 @@
+/* MN10300/AM33v2 Microcontroller SMP registers
+ *
+ * Copyright (C) 2006 Matsushita Electric Industrial Co., Ltd.
+ * All Rights Reserved.
+ * Created:
+ *  13-Nov-2006 MEI Add extended cache and atomic operation register
+ *                  for SMP support.
+ *  23-Feb-2007 MEI Add define for gdbstub SMP.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_PROC_SMP_REGS_H
+#define _ASM_PROC_SMP_REGS_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+#endif
+#include <asm/cpu-regs.h>
+
+/*
+ * Reference to the interrupt controllers of other CPUs
+ */
+#define CROSS_ICR_CPU_SHIFT    16
+
+#define CROSS_GxICR(X, CPU)    __SYSREG(0xc4000000 + (X) * 4 + \
+       ((X) >= 64 && (X) < 192) * 0xf00 + ((CPU) << CROSS_ICR_CPU_SHIFT), u16)
+#define CROSS_GxICR_u8(X, CPU) __SYSREG(0xc4000000 + (X) * 4 +         \
+       (((X) >= 64) && ((X) < 192)) * 0xf00 + ((CPU) << CROSS_ICR_CPU_SHIFT), u8)
+
+/* CPU ID register */
+#define CPUID          __SYSREGC(0xc0000054, u32)
+#define CPUID_MASK     0x00000007      /* CPU ID mask */
+
+/* extended cache control register */
+#define ECHCTR         __SYSREG(0xc0000c20, u32)
+#define ECHCTR_IBCM    0x00000001      /* instruction cache broadcast mask */
+#define ECHCTR_DBCM    0x00000002      /* data cache broadcast mask */
+#define ECHCTR_ISPM    0x00000004      /* instruction cache snoop mask */
+#define ECHCTR_DSPM    0x00000008      /* data cache snoop mask */
+
+#define NMIAGR         __SYSREG(0xd400013c, u16)
+#define NMIAGR_GN      0x03fc
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_PROC_SMP_REGS_H */
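
As a worked example of the cross-CPU addressing: CROSS_GxICR(65, 1) falls in the 64-191 window, so it resolves to __SYSREG(0xc4000000 + 65*4 + 0xf00 + (1 << 16), u16) — the u16 at 0xc4011004, i.e. IRQ 65's control register as seen in CPU 1's interrupt controller.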
diff --git a/arch/mn10300/proc-mn2ws0050/proc-init.c b/arch/mn10300/proc-mn2ws0050/proc-init.c
new file mode 100644 (file)
index 0000000..c58249b
--- /dev/null
@@ -0,0 +1,134 @@
+/* MN2WS0050 processor initialisation
+ *
+ * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/atomic.h>
+#include <asm/smp.h>
+#include <asm/pgalloc.h>
+#include <asm/busctl-regs.h>
+#include <unit/timex.h>
+#include <asm/fpu.h>
+#include <asm/rtc.h>
+
+#define MEMCONF __SYSREGC(0xdf800400, u32)
+
+/*
+ * initialise the on-silicon processor peripherals
+ */
+asmlinkage void __init processor_init(void)
+{
+       int loop;
+
+       /* set up the exception table first */
+       for (loop = 0x000; loop < 0x400; loop += 8)
+               __set_intr_stub(loop, __common_exception);
+
+       __set_intr_stub(EXCEP_ITLBMISS,         itlb_miss);
+       __set_intr_stub(EXCEP_DTLBMISS,         dtlb_miss);
+       __set_intr_stub(EXCEP_IAERROR,          itlb_aerror);
+       __set_intr_stub(EXCEP_DAERROR,          dtlb_aerror);
+       __set_intr_stub(EXCEP_BUSERROR,         raw_bus_error);
+       __set_intr_stub(EXCEP_DOUBLE_FAULT,     double_fault);
+       __set_intr_stub(EXCEP_FPU_DISABLED,     fpu_disabled);
+       __set_intr_stub(EXCEP_SYSCALL0,         system_call);
+
+       __set_intr_stub(EXCEP_NMI,              nmi_handler);
+       __set_intr_stub(EXCEP_WDT,              nmi_handler);
+       __set_intr_stub(EXCEP_IRQ_LEVEL0,       irq_handler);
+       __set_intr_stub(EXCEP_IRQ_LEVEL1,       irq_handler);
+       __set_intr_stub(EXCEP_IRQ_LEVEL2,       irq_handler);
+       __set_intr_stub(EXCEP_IRQ_LEVEL3,       irq_handler);
+       __set_intr_stub(EXCEP_IRQ_LEVEL4,       irq_handler);
+       __set_intr_stub(EXCEP_IRQ_LEVEL5,       irq_handler);
+       __set_intr_stub(EXCEP_IRQ_LEVEL6,       irq_handler);
+
+       IVAR0 = EXCEP_IRQ_LEVEL0;
+       IVAR1 = EXCEP_IRQ_LEVEL1;
+       IVAR2 = EXCEP_IRQ_LEVEL2;
+       IVAR3 = EXCEP_IRQ_LEVEL3;
+       IVAR4 = EXCEP_IRQ_LEVEL4;
+       IVAR5 = EXCEP_IRQ_LEVEL5;
+       IVAR6 = EXCEP_IRQ_LEVEL6;
+
+#ifndef CONFIG_MN10300_HAS_CACHE_SNOOP
+       mn10300_dcache_flush_inv();
+       mn10300_icache_inv();
+#endif
+
+       /* disable all interrupts and set to priority 6 (lowest) */
+#ifdef CONFIG_SMP
+       for (loop = 0; loop < GxICR_NUM_IRQS; loop++)
+               GxICR(loop) = GxICR_LEVEL_6 | GxICR_DETECT;
+#else  /* !CONFIG_SMP */
+       for (loop = 0; loop < NR_IRQS; loop++)
+               GxICR(loop) = GxICR_LEVEL_6 | GxICR_DETECT;
+#endif /* !CONFIG_SMP */
+
+       /* clear the timers */
+       TM0MD   = 0;
+       TM1MD   = 0;
+       TM2MD   = 0;
+       TM3MD   = 0;
+       TM4MD   = 0;
+       TM5MD   = 0;
+       TM6MD   = 0;
+       TM6MDA  = 0;
+       TM6MDB  = 0;
+       TM7MD   = 0;
+       TM8MD   = 0;
+       TM9MD   = 0;
+       TM10MD  = 0;
+       TM11MD  = 0;
+       TM12MD  = 0;
+       TM13MD  = 0;
+       TM14MD  = 0;
+       TM15MD  = 0;
+
+       calibrate_clock();
+}
+
+/*
+ * determine the memory size and base from the memory controller regs
+ */
+void __init get_mem_info(unsigned long *mem_base, unsigned long *mem_size)
+{
+       unsigned long memconf = MEMCONF;
+       unsigned long size = 0; /* unit: MB */
+
+       *mem_base = 0x90000000; /* fixed address */
+
+       switch (memconf & 0x00000003) {
+       case 0x01:
+               size = 256 / 8;         /* 256 Mbit per chip */
+               break;
+       case 0x02:
+               size = 512 / 8;         /* 512 Mbit per chip */
+               break;
+       case 0x03:
+               size = 1024 / 8;        /*   1 Gbit per chip */
+               break;
+       default:
+               panic("Invalid SDRAM size");
+               break;
+       }
+
+       printk(KERN_INFO "DDR2-SDRAM: %luMB x 2 @%08lx\n", size, *mem_base);
+
+       *mem_size = (size * 2) << 20;
+}
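
Worked through: if (memconf & 3) == 0x02 the chips are 512 Mbit, so size = 512 / 8 = 64 MB per chip; with the fixed pair of chips, *mem_size = (64 * 2) << 20 = 0x08000000, i.e. 128 MiB starting at the fixed base 0x90000000.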
index 2a0bf79ab968162eb514c2190dc9a8e136845645..0316907a012ef9ec14f5677ea9bdd3bb2de68917 100644 (file)
 
 #ifndef __ASSEMBLY__
 
-#ifdef CONFIG_MN10300_RTC
-
-extern unsigned long mn10300_ioclk;    /* IOCLK (crystal speed) in HZ */
-extern unsigned long mn10300_iobclk;
-extern unsigned long mn10300_tsc_per_HZ;
-
-#define MN10300_IOCLK          mn10300_ioclk
-/* If this processors has a another clock, uncomment the below. */
-/* #define MN10300_IOBCLK      mn10300_iobclk */
-
-#else /* !CONFIG_MN10300_RTC */
-
 #define MN10300_IOCLK          33333333UL
 /* #define MN10300_IOBCLK      66666666UL */
 
-#endif /* !CONFIG_MN10300_RTC */
-
-#define MN10300_JCCLK          MN10300_IOCLK
-#define MN10300_TSCCLK         MN10300_IOCLK
-
-#ifdef CONFIG_MN10300_RTC
-#define MN10300_TSC_PER_HZ     mn10300_tsc_per_HZ
-#else /* !CONFIG_MN10300_RTC */
-#define MN10300_TSC_PER_HZ     (MN10300_TSCCLK/HZ)
-#endif /* !CONFIG_MN10300_RTC */
-
 #endif /* !__ASSEMBLY__ */
 
+#define MN10300_WDCLK          MN10300_IOCLK
+
 #endif /* _ASM_UNIT_CLOCK_H */
index 047566cd2e36d2677b33803c13384f906f17c0b7..991e356bac5f91c11a7efa563217b48f61b130bc 100644 (file)
 
 #define SERIAL_IRQ     XIRQ0   /* Dual serial (PC16552)        (Hi) */
 
+/*
+ * The ASB2303 has an 18.432 MHz clock the UART
+ */
+#define BASE_BAUD      (18432000 / 16)
+
 /*
  * dispose of the /dev/ttyS0 and /dev/ttyS1 serial ports
  */
index f206b63c95b4f282114315870d12458a19c6a700..cc18fe7d8b90e2abc9061b430b7072386aeed0d8 100644 (file)
@@ -1,6 +1,6 @@
-/* ASB2303-specific timer specifcations
+/* ASB2303-specific timer specifications
  *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2007, 2010 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
  *
  * This program is free software; you can redistribute it and/or
 
 #include <asm/timer-regs.h>
 #include <unit/clock.h>
+#include <asm/param.h>
 
 /*
  * jiffies counter specifications
  */
 
 #define        TMJCBR_MAX              0xffff
-#define        TMJCBC                  TM01BC
-
-#define        TMJCMD                  TM01MD
-#define        TMJCBR                  TM01BR
 #define        TMJCIRQ                 TM1IRQ
 #define        TMJCICR                 TM1ICR
-#define        TMJCICR_LEVEL           GxICR_LEVEL_5
 
 #ifndef __ASSEMBLY__
 
-static inline void startup_jiffies_counter(void)
+#define MN10300_SRC_IOCLK      MN10300_IOCLK
+
+#ifndef HZ
+# error HZ undeclared.
+#endif /* !HZ */
+/* use as little prescaling as possible to avoid losing accuracy */
+#if (MN10300_SRC_IOCLK + HZ / 2) / HZ - 1 <= TMJCBR_MAX
+# define IOCLK_PRESCALE                1
+# define JC_TIMER_CLKSRC       TM0MD_SRC_IOCLK
+# define TSC_TIMER_CLKSRC      TM4MD_SRC_IOCLK
+#elif (MN10300_SRC_IOCLK / 8 + HZ / 2) / HZ - 1 <= TMJCBR_MAX
+# define IOCLK_PRESCALE                8
+# define JC_TIMER_CLKSRC       TM0MD_SRC_IOCLK_8
+# define TSC_TIMER_CLKSRC      TM4MD_SRC_IOCLK_8
+#elif (MN10300_SRC_IOCLK / 32 + HZ / 2) / HZ - 1 <= TMJCBR_MAX
+# define IOCLK_PRESCALE                32
+# define JC_TIMER_CLKSRC       TM0MD_SRC_IOCLK_32
+# define TSC_TIMER_CLKSRC      TM4MD_SRC_IOCLK_32
+#else
+# error No IOCLK prescale (1, 8 or 32) fits TMJCBR_MAX at this HZ.
+#endif
+
+#define MN10300_JCCLK          (MN10300_SRC_IOCLK / IOCLK_PRESCALE)
+#define MN10300_TSCCLK         (MN10300_SRC_IOCLK / IOCLK_PRESCALE)
+
+#define MN10300_JC_PER_HZ      ((MN10300_JCCLK + HZ / 2) / HZ)
+#define MN10300_TSC_PER_HZ     ((MN10300_TSCCLK + HZ / 2) / HZ)
+
+static inline void stop_jiffies_counter(void)
 {
-       unsigned rate;
-       u16 md, t16;
-
-       /* use as little prescaling as possible to avoid losing accuracy */
-       md = TM0MD_SRC_IOCLK;
-       rate = MN10300_JCCLK / HZ;
-
-       if (rate > TMJCBR_MAX) {
-               md = TM0MD_SRC_IOCLK_8;
-               rate = MN10300_JCCLK / 8 / HZ;
-
-               if (rate > TMJCBR_MAX) {
-                       md = TM0MD_SRC_IOCLK_32;
-                       rate = MN10300_JCCLK / 32 / HZ;
-
-                       if (rate > TMJCBR_MAX)
-                               BUG();
-               }
-       }
+       u16 tmp;
+       TM01MD = JC_TIMER_CLKSRC | TM1MD_SRC_TM0CASCADE << 8;
+       tmp = TM01MD;
+}
 
-       TMJCBR = rate - 1;
-       t16 = TMJCBR;
+static inline void reload_jiffies_counter(u32 cnt)
+{
+       u32 tmp;
 
-       TMJCMD =
-               md |
-               TM1MD_SRC_TM0CASCADE << 8 |
-               TM0MD_INIT_COUNTER |
-               TM1MD_INIT_COUNTER << 8;
+       TM01BR = cnt;
+       tmp = TM01BR;
 
-       TMJCMD =
-               md |
-               TM1MD_SRC_TM0CASCADE << 8 |
-               TM0MD_COUNT_ENABLE |
-               TM1MD_COUNT_ENABLE << 8;
+       TM01MD = JC_TIMER_CLKSRC |              \
+                TM1MD_SRC_TM0CASCADE << 8 |    \
+                TM0MD_INIT_COUNTER |           \
+                TM1MD_INIT_COUNTER << 8;
 
-       t16 = TMJCMD;
 
-       TMJCICR |= GxICR_ENABLE | GxICR_DETECT | GxICR_REQUEST;
-       t16 = TMJCICR;
-}
+       TM01MD = JC_TIMER_CLKSRC |              \
+                TM1MD_SRC_TM0CASCADE << 8 |    \
+                TM0MD_COUNT_ENABLE |           \
+                TM1MD_COUNT_ENABLE << 8;
 
-static inline void shutdown_jiffies_counter(void)
-{
+       tmp = TM01MD;
 }
 
 #endif /* !__ASSEMBLY__ */
@@ -94,29 +99,39 @@ static inline void shutdown_jiffies_counter(void)
 
 static inline void startup_timestamp_counter(void)
 {
+       u32 t32;
+
        /* set up timer 4 & 5 cascaded as a 32-bit counter to count real time
         * - count down from 4Gig-1 to 0 and wrap at IOCLK rate
         */
        TM45BR = TMTSCBR_MAX;
+       t32 = TM45BR;
 
-       TM4MD = TM4MD_SRC_IOCLK;
+       TM4MD = TSC_TIMER_CLKSRC;
        TM4MD |= TM4MD_INIT_COUNTER;
        TM4MD &= ~TM4MD_INIT_COUNTER;
        TM4ICR = 0;
+       t32 = TM4ICR;
 
        TM5MD = TM5MD_SRC_TM4CASCADE;
        TM5MD |= TM5MD_INIT_COUNTER;
        TM5MD &= ~TM5MD_INIT_COUNTER;
        TM5ICR = 0;
+       t32 = TM5ICR;
 
        TM5MD |= TM5MD_COUNT_ENABLE;
        TM4MD |= TM4MD_COUNT_ENABLE;
+       t32 = TM5MD;
+       t32 = TM4MD;
 }
 
 static inline void shutdown_timestamp_counter(void)
 {
+       u8 t8;
        TM4MD = 0;
        TM5MD = 0;
+       t8 = TM4MD;
+       t8 = TM5MD;
 }
 
 /*
@@ -127,7 +142,7 @@ typedef unsigned long cycles_t;
 
 static inline cycles_t read_timestamp_counter(void)
 {
-       return (cycles_t)TMTSCBC;
+       return (cycles_t)~TMTSCBC;
 }
 
 #endif /* !__ASSEMBLY__ */
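
Worked through for this unit, taking HZ = 100 for illustration: clock.h above fixes MN10300_IOCLK at 33333333, so the undivided reload value (33333333 + 50) / 100 - 1 = 333332 overflows TMJCBR_MAX (65535); the divide-by-8 tap gives (33333333 / 8 + 50) / 100 - 1 = 41666, which fits, so IOCLK_PRESCALE becomes 8 and MN10300_JC_PER_HZ works out to 41667 ticks per jiffy.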
index 70e8cb4ea266153e0d0217933ff84e4f61bed7e1..834a76aa551a06d37ceeae41d2b2d65f7b33409b 100644 (file)
@@ -31,6 +31,14 @@ asmlinkage void __init unit_init(void)
        SET_XIRQ_TRIGGER(3, XIRQ_TRIGGER_HILEVEL);
        SET_XIRQ_TRIGGER(4, XIRQ_TRIGGER_LOWLEVEL);
        SET_XIRQ_TRIGGER(5, XIRQ_TRIGGER_LOWLEVEL);
+
+#ifdef CONFIG_EXT_SERIAL_IRQ_LEVEL
+       set_intr_level(XIRQ0, NUM2GxICR_LEVEL(CONFIG_EXT_SERIAL_IRQ_LEVEL));
+#endif
+
+#ifdef CONFIG_ETHERNET_IRQ_LEVEL
+       set_intr_level(XIRQ3, NUM2GxICR_LEVEL(CONFIG_ETHERNET_IRQ_LEVEL));
+#endif
 }
 
 /*
@@ -51,7 +59,7 @@ void __init unit_init_IRQ(void)
                switch (GET_XIRQ_TRIGGER(extnum)) {
                case XIRQ_TRIGGER_HILEVEL:
                case XIRQ_TRIGGER_LOWLEVEL:
-                       set_intr_postackable(XIRQ2IRQ(extnum));
+                       mn10300_set_lateack_irq_type(XIRQ2IRQ(extnum));
                        break;
                default:
                        break;
index 67be3f2eb18e928b68363601ae5440bb737b9e2b..29e3425431cfc868b57f73b7d59602f176aa9615 100644 (file)
 
 #ifndef __ASSEMBLY__
 
-#ifdef CONFIG_MN10300_RTC
-
-extern unsigned long mn10300_ioclk;    /* IOCLK (crystal speed) in HZ */
-extern unsigned long mn10300_iobclk;
-extern unsigned long mn10300_tsc_per_HZ;
-
-#define MN10300_IOCLK          mn10300_ioclk
-/* If this processors has a another clock, uncomment the below. */
-/* #define MN10300_IOBCLK      mn10300_iobclk */
-
-#else /* !CONFIG_MN10300_RTC */
-
 #define MN10300_IOCLK          33333333UL
 /* #define MN10300_IOBCLK      66666666UL */
 
-#endif /* !CONFIG_MN10300_RTC */
-
-#define MN10300_JCCLK          MN10300_IOCLK
-#define MN10300_TSCCLK         MN10300_IOCLK
-
-#ifdef CONFIG_MN10300_RTC
-#define MN10300_TSC_PER_HZ     mn10300_tsc_per_HZ
-#else /* !CONFIG_MN10300_RTC */
-#define MN10300_TSC_PER_HZ     (MN10300_TSCCLK/HZ)
-#endif /* !CONFIG_MN10300_RTC */
-
 #endif /* !__ASSEMBLY__ */
 
+#define MN10300_WDCLK          MN10300_IOCLK
+
 #endif /* _ASM_UNIT_CLOCK_H */
index 8086cc092cecca946aac943387c0a019e8be8c4a..88c08219315f5b175869120ae1dd8b4a8f9c1722 100644 (file)
 
 #define SERIAL_IRQ     XIRQ0   /* Dual serial (PC16552)        (Hi) */
 
+/*
+ * The ASB2305 has an 18.432 MHz clock for the UART
+ */
+#define BASE_BAUD      (18432000 / 16)
+
 /*
  * dispose of the /dev/ttyS0 serial port
  */
index d1c72d59fa9fa92e4ce0710979c14b9c879d5e6d..758af30d1a16aad5b74820431e5abda8cc4ed1df 100644 (file)
@@ -1,6 +1,6 @@
-/* ASB2305 timer specifcations
+/* ASB2305-specific timer specifications
  *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2007, 2010 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
  *
  * This program is free software; you can redistribute it and/or
 
 #include <asm/timer-regs.h>
 #include <unit/clock.h>
+#include <asm/param.h>
 
 /*
  * jiffies counter specifications
  */
 
 #define        TMJCBR_MAX              0xffff
-#define        TMJCBC                  TM01BC
-
-#define        TMJCMD                  TM01MD
-#define        TMJCBR                  TM01BR
 #define        TMJCIRQ                 TM1IRQ
 #define        TMJCICR                 TM1ICR
-#define        TMJCICR_LEVEL           GxICR_LEVEL_5
 
 #ifndef __ASSEMBLY__
 
-static inline void startup_jiffies_counter(void)
+#define MN10300_SRC_IOCLK      MN10300_IOCLK
+
+#ifndef HZ
+# error HZ undeclared.
+#endif /* !HZ */
+/* use as little prescaling as possible to avoid losing accuracy */
+#if (MN10300_SRC_IOCLK + HZ / 2) / HZ - 1 <= TMJCBR_MAX
+# define IOCLK_PRESCALE                1
+# define JC_TIMER_CLKSRC       TM0MD_SRC_IOCLK
+# define TSC_TIMER_CLKSRC      TM4MD_SRC_IOCLK
+#elif (MN10300_SRC_IOCLK / 8 + HZ / 2) / HZ - 1 <= TMJCBR_MAX
+# define IOCLK_PRESCALE                8
+# define JC_TIMER_CLKSRC       TM0MD_SRC_IOCLK_8
+# define TSC_TIMER_CLKSRC      TM4MD_SRC_IOCLK_8
+#elif (MN10300_SRC_IOCLK / 32 + HZ / 2) / HZ - 1 <= TMJCBR_MAX
+# define IOCLK_PRESCALE                32
+# define JC_TIMER_CLKSRC       TM0MD_SRC_IOCLK_32
+# define TSC_TIMER_CLKSRC      TM4MD_SRC_IOCLK_32
+#else
+# error No IOCLK prescale (1, 8 or 32) fits TMJCBR_MAX at this HZ.
+#endif
+
+#define MN10300_JCCLK          (MN10300_SRC_IOCLK / IOCLK_PRESCALE)
+#define MN10300_TSCCLK         (MN10300_SRC_IOCLK / IOCLK_PRESCALE)
+
+#define MN10300_JC_PER_HZ      ((MN10300_JCCLK + HZ / 2) / HZ)
+#define MN10300_TSC_PER_HZ     ((MN10300_TSCCLK + HZ / 2) / HZ)
+
+static inline void stop_jiffies_counter(void)
 {
-       unsigned rate;
-       u16 md, t16;
-
-       /* use as little prescaling as possible to avoid losing accuracy */
-       md = TM0MD_SRC_IOCLK;
-       rate = MN10300_JCCLK / HZ;
-
-       if (rate > TMJCBR_MAX) {
-               md = TM0MD_SRC_IOCLK_8;
-               rate = MN10300_JCCLK / 8 / HZ;
-
-               if (rate > TMJCBR_MAX) {
-                       md = TM0MD_SRC_IOCLK_32;
-                       rate = MN10300_JCCLK / 32 / HZ;
-
-                       if (rate > TMJCBR_MAX)
-                               BUG();
-               }
-       }
+       u16 tmp;
+       TM01MD = JC_TIMER_CLKSRC | TM1MD_SRC_TM0CASCADE << 8;
+       tmp = TM01MD;
+}
 
-       TMJCBR = rate - 1;
-       t16 = TMJCBR;
+static inline void reload_jiffies_counter(u32 cnt)
+{
+       u32 tmp;
 
-       TMJCMD =
-               md |
-               TM1MD_SRC_TM0CASCADE << 8 |
-               TM0MD_INIT_COUNTER |
-               TM1MD_INIT_COUNTER << 8;
+       TM01BR = cnt;
+       tmp = TM01BR;
 
-       TMJCMD =
-               md |
-               TM1MD_SRC_TM0CASCADE << 8 |
-               TM0MD_COUNT_ENABLE |
-               TM1MD_COUNT_ENABLE << 8;
+       TM01MD = JC_TIMER_CLKSRC |              \
+                TM1MD_SRC_TM0CASCADE << 8 |    \
+                TM0MD_INIT_COUNTER |           \
+                TM1MD_INIT_COUNTER << 8;
 
-       t16 = TMJCMD;
 
-       TMJCICR |= GxICR_ENABLE | GxICR_DETECT | GxICR_REQUEST;
-       t16 = TMJCICR;
-}
+       TM01MD = JC_TIMER_CLKSRC |              \
+                TM1MD_SRC_TM0CASCADE << 8 |    \
+                TM0MD_COUNT_ENABLE |           \
+                TM1MD_COUNT_ENABLE << 8;
 
-static inline void shutdown_jiffies_counter(void)
-{
+       tmp = TM01MD;
 }
 
 #endif /* !__ASSEMBLY__ */
@@ -94,29 +99,39 @@ static inline void shutdown_jiffies_counter(void)
 
 static inline void startup_timestamp_counter(void)
 {
+       u32 t32;
+
        /* set up timer 4 & 5 cascaded as a 32-bit counter to count real time
         * - count down from 4Gig-1 to 0 and wrap at IOCLK rate
         */
        TM45BR = TMTSCBR_MAX;
+       t32 = TM45BR;
 
-       TM4MD = TM4MD_SRC_IOCLK;
+       TM4MD = TSC_TIMER_CLKSRC;
        TM4MD |= TM4MD_INIT_COUNTER;
        TM4MD &= ~TM4MD_INIT_COUNTER;
        TM4ICR = 0;
+       t32 = TM4ICR;
 
        TM5MD = TM5MD_SRC_TM4CASCADE;
        TM5MD |= TM5MD_INIT_COUNTER;
        TM5MD &= ~TM5MD_INIT_COUNTER;
        TM5ICR = 0;
+       t32 = TM5ICR;
 
        TM5MD |= TM5MD_COUNT_ENABLE;
        TM4MD |= TM4MD_COUNT_ENABLE;
+       t32 = TM5MD;
+       t32 = TM4MD;
 }
 
 static inline void shutdown_timestamp_counter(void)
 {
+       u8 t8;
        TM4MD = 0;
        TM5MD = 0;
+       t8 = TM4MD;
+       t8 = TM5MD;
 }
 
 /*
@@ -127,7 +142,7 @@ typedef unsigned long cycles_t;
 
 static inline cycles_t read_timestamp_counter(void)
 {
-       return (cycles_t) TMTSCBC;
+       return (cycles_t)~TMTSCBC;
 }
 
 #endif /* !__ASSEMBLY__ */
index 45b40ac6c4647b3bb0751a1352e21e2e0a66ae3e..8e6763e6f25011f9d4eb68509c12ccb0dcb5014a 100644 (file)
@@ -93,7 +93,7 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
        struct pci_bus *bus;
        struct pci_dev *dev;
        int idx;
-       struct resource *r, *pr;
+       struct resource *r;
 
        /* Depth-First Search on bus tree */
        list_for_each_entry(bus, bus_list, node) {
@@ -105,10 +105,8 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
                                r = &dev->resource[idx];
                                if (!r->flags)
                                        continue;
-                               pr = pci_find_parent_resource(dev, r);
                                if (!r->start ||
-                                   !pr ||
-                                   request_resource(pr, r) < 0) {
+                                   pci_claim_resource(dev, idx) < 0) {
                                        printk(KERN_ERR "PCI:"
                                               " Cannot allocate resource"
                                               " region %d of bridge %s\n",
@@ -131,7 +129,7 @@ static void __init pcibios_allocate_resources(int pass)
        struct pci_dev *dev = NULL;
        int idx, disabled;
        u16 command;
-       struct resource *r, *pr;
+       struct resource *r;
 
        for_each_pci_dev(dev) {
                pci_read_config_word(dev, PCI_COMMAND, &command);
@@ -150,8 +148,7 @@ static void __init pcibios_allocate_resources(int pass)
                                    " (f=%lx, d=%d, p=%d)\n",
                                    pci_name(dev), r->start, r->end, r->flags,
                                    disabled, pass);
-                               pr = pci_find_parent_resource(dev, r);
-                               if (!pr || request_resource(pr, r) < 0) {
+                               if (pci_claim_resource(dev, idx) < 0) {
                                        printk(KERN_ERR "PCI:"
                                               " Cannot allocate resource"
                                               " region %d of device %s\n",
@@ -184,7 +181,7 @@ static void __init pcibios_allocate_resources(int pass)
 static int __init pcibios_assign_resources(void)
 {
        struct pci_dev *dev = NULL;
-       struct resource *r, *pr;
+       struct resource *r;
 
        if (!(pci_probe & PCI_ASSIGN_ROMS)) {
                /* Try to use BIOS settings for ROMs, otherwise let
@@ -194,8 +191,7 @@ static int __init pcibios_assign_resources(void)
                        r = &dev->resource[PCI_ROM_RESOURCE];
                        if (!r->flags || !r->start)
                                continue;
-                       pr = pci_find_parent_resource(dev, r);
-                       if (!pr || request_resource(pr, r) < 0) {
+                       if (pci_claim_resource(dev, PCI_ROM_RESOURCE) < 0) {
                                r->end -= r->start;
                                r->start = 0;
                        }
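
For reference, pci_claim_resource(dev, idx) collapses the deleted two-step dance: it looks up the parent of dev->resource[idx] in the resource tree and request_resource()s the region against it, returning a negative errno on conflict — the same failure condition each removed pci_find_parent_resource()/request_resource() pair used to express.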
index 6d8720a0a59945bfa4b3c60d70c4d2ceb48c8a12..a4954fe82094be9404d8bdab02ea2bd1fc38ea3c 100644 (file)
@@ -503,7 +503,7 @@ asmlinkage void __init unit_pci_init(void)
        struct pci_ops *o = &pci_direct_ampci;
        u32 x;
 
-       set_intr_level(XIRQ1, GxICR_LEVEL_3);
+       set_intr_level(XIRQ1, NUM2GxICR_LEVEL(CONFIG_PCI_IRQ_LEVEL));
 
        memset(&bus, 0, sizeof(bus));
 
index a76c8e0ab90ff3aaf9a30d573b01259911d53edd..e1becd6b757132bd5664e82c5a676250ad660c3a 100644 (file)
@@ -26,8 +26,10 @@ asmlinkage void __init unit_init(void)
 {
 #ifndef CONFIG_GDBSTUB_ON_TTYSx
        /* set the 16550 interrupt line to level 3 if not being used for GDB */
-       set_intr_level(XIRQ0, GxICR_LEVEL_3);
+#ifdef CONFIG_EXT_SERIAL_IRQ_LEVEL
+       set_intr_level(XIRQ0, NUM2GxICR_LEVEL(CONFIG_EXT_SERIAL_IRQ_LEVEL));
 #endif
+#endif /* CONFIG_GDBSTUB_ON_TTYSx */
 }
 
 /*
@@ -51,7 +53,7 @@ void __init unit_init_IRQ(void)
                switch (GET_XIRQ_TRIGGER(extnum)) {
                case XIRQ_TRIGGER_HILEVEL:
                case XIRQ_TRIGGER_LOWLEVEL:
-                       set_intr_postackable(XIRQ2IRQ(extnum));
+                       mn10300_set_lateack_irq_type(XIRQ2IRQ(extnum));
                        break;
                default:
                        break;
diff --git a/arch/mn10300/unit-asb2364/Makefile b/arch/mn10300/unit-asb2364/Makefile
new file mode 100644 (file)
index 0000000..b3263ec
--- /dev/null
@@ -0,0 +1,12 @@
+#
+# Makefile for the linux kernel.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+# Note 2! The CFLAGS definitions are now in the main makefile...
+
+obj-y   := unit-init.o leds.o irq-fpga.o
+
+obj-$(CONFIG_SMSC911X) += smsc911x.o
diff --git a/arch/mn10300/unit-asb2364/include/unit/clock.h b/arch/mn10300/unit-asb2364/include/unit/clock.h
new file mode 100644 (file)
index 0000000..d34ac9a
--- /dev/null
@@ -0,0 +1,29 @@
+/* clock.h: unit-specific clocks
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * Modified by Matsushita Electric Industrial Co., Ltd.
+ * Modifications:
+ *     23-Feb-2007 MEI Add define for watchdog timer.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_UNIT_CLOCK_H
+#define _ASM_UNIT_CLOCK_H
+
+#ifndef __ASSEMBLY__
+
+#define MN10300_IOCLK          100000000UL             /* for DDR800 */
+/*#define MN10300_IOCLK                83333333UL */           /* for DDR667 */
+#define MN10300_IOBCLK         MN10300_IOCLK           /* IOBCLK is equal to IOCLK */
+
+#endif /* !__ASSEMBLY__ */
+
+#define MN10300_WDCLK          27000000UL
+
+#endif /* _ASM_UNIT_CLOCK_H */
diff --git a/arch/mn10300/unit-asb2364/include/unit/fpga-regs.h b/arch/mn10300/unit-asb2364/include/unit/fpga-regs.h
new file mode 100644 (file)
index 0000000..7cf1205
--- /dev/null
@@ -0,0 +1,52 @@
+/* ASB2364 FPGA registers
+ */
+
+#ifndef _ASM_UNIT_FPGA_REGS_H
+#define _ASM_UNIT_FPGA_REGS_H
+
+#include <asm/cpu-regs.h>
+
+#ifdef __KERNEL__
+
+#define ASB2364_FPGA_REG_RESET_LAN     __SYSREG(0xa9001300, u16)
+#define ASB2364_FPGA_REG_RESET_UART    __SYSREG(0xa9001304, u16)
+#define ASB2364_FPGA_REG_RESET_I2C     __SYSREG(0xa9001308, u16)
+#define ASB2364_FPGA_REG_RESET_USB     __SYSREG(0xa900130c, u16)
+#define ASB2364_FPGA_REG_RESET_AV      __SYSREG(0xa9001310, u16)
+
+#define ASB2364_FPGA_REG_IRQ(X)                __SYSREG(0xa9001590+((X)*4), u16)
+#define ASB2364_FPGA_REG_IRQ_LAN       ASB2364_FPGA_REG_IRQ(0)
+#define ASB2364_FPGA_REG_IRQ_UART      ASB2364_FPGA_REG_IRQ(1)
+#define ASB2364_FPGA_REG_IRQ_I2C       ASB2364_FPGA_REG_IRQ(2)
+#define ASB2364_FPGA_REG_IRQ_USB       ASB2364_FPGA_REG_IRQ(3)
+#define ASB2364_FPGA_REG_IRQ_FPGA      ASB2364_FPGA_REG_IRQ(5)
+
+#define ASB2364_FPGA_REG_MASK(X)       __SYSREG(0xa9001590+((X)*4), u16)
+#define ASB2364_FPGA_REG_MASK_LAN      ASB2364_FPGA_REG_MASK(0)
+#define ASB2364_FPGA_REG_MASK_UART     ASB2364_FPGA_REG_MASK(1)
+#define ASB2364_FPGA_REG_MASK_I2C      ASB2364_FPGA_REG_MASK(2)
+#define ASB2364_FPGA_REG_MASK_USB      ASB2364_FPGA_REG_MASK(3)
+#define ASB2364_FPGA_REG_MASK_FPGA     ASB2364_FPGA_REG_MASK(5)
+
+#define ASB2364_FPGA_REG_CPLD5_SET1    __SYSREG(0xa9002500, u16)
+#define ASB2364_FPGA_REG_CPLD5_SET2    __SYSREG(0xa9002504, u16)
+#define ASB2364_FPGA_REG_CPLD6_SET1    __SYSREG(0xa9002600, u16)
+#define ASB2364_FPGA_REG_CPLD6_SET2    __SYSREG(0xa9002604, u16)
+#define ASB2364_FPGA_REG_CPLD7_SET1    __SYSREG(0xa9002700, u16)
+#define ASB2364_FPGA_REG_CPLD7_SET2    __SYSREG(0xa9002704, u16)
+#define ASB2364_FPGA_REG_CPLD8_SET1    __SYSREG(0xa9002800, u16)
+#define ASB2364_FPGA_REG_CPLD8_SET2    __SYSREG(0xa9002804, u16)
+#define ASB2364_FPGA_REG_CPLD9_SET1    __SYSREG(0xa9002900, u16)
+#define ASB2364_FPGA_REG_CPLD9_SET2    __SYSREG(0xa9002904, u16)
+#define ASB2364_FPGA_REG_CPLD10_SET1   __SYSREG(0xa9002a00, u16)
+#define ASB2364_FPGA_REG_CPLD10_SET2   __SYSREG(0xa9002a04, u16)
+
+#define SyncExBus()                                    \
+       do {                                            \
+               unsigned short w;                       \
+               w = *(volatile short *)0xa9000000;      \
+       } while (0)
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_UNIT_FPGA_REGS_H */
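
SyncExBus() above works by issuing a throwaway 16-bit read of the
external bus; the later files in this patch use it to force a posted FPGA
register write to complete before the next access.  The usage convention,
as a sketch:

	/* release the LAN block from reset, then flush the posted write */
	ASB2364_FPGA_REG_RESET_LAN = 0x0001;
	SyncExBus();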
diff --git a/arch/mn10300/unit-asb2364/include/unit/irq.h b/arch/mn10300/unit-asb2364/include/unit/irq.h
new file mode 100644 (file)
index 0000000..786148e
--- /dev/null
@@ -0,0 +1,35 @@
+/* ASB2364 FPGA irq numbers
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef _UNIT_IRQ_H
+#define _UNIT_IRQ_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_SMP
+#define NR_CPU_IRQS    GxICR_NUM_EXT_IRQS
+#else
+#define NR_CPU_IRQS    GxICR_NUM_IRQS
+#endif
+
+enum {
+       FPGA_LAN_IRQ    = NR_CPU_IRQS,
+       FPGA_UART_IRQ,
+       FPGA_I2C_IRQ,
+       FPGA_USB_IRQ,
+       FPGA_RESERVED_IRQ,
+       FPGA_FPGA_IRQ,
+       NR_IRQS
+};
+
+extern void __init irq_fpga_init(void);
+
+#endif /* !__ASSEMBLY__ */
+#endif /* _UNIT_IRQ_H */
diff --git a/arch/mn10300/unit-asb2364/include/unit/leds.h b/arch/mn10300/unit-asb2364/include/unit/leds.h
new file mode 100644 (file)
index 0000000..03a3933
--- /dev/null
@@ -0,0 +1,54 @@
+/* Unit-specific leds
+ *
+ * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_UNIT_LEDS_H
+#define _ASM_UNIT_LEDS_H
+
+#include <asm/pio-regs.h>
+#include <asm/cpu-regs.h>
+#include <asm/exceptions.h>
+
+#define MN10300_USE_7SEGLEDS   0
+
+#define ASB2364_7SEGLEDS       __SYSREG(0xA9001630, u32)
+
+/*
+ * use the 7-segment LEDs to indicate states
+ */
+
+#if MN10300_USE_7SEGLEDS
+/* flip the 7-segment LEDs between "Gdb-" and "----" */
+#define mn10300_set_gdbleds(ONOFF)                                     \
+       do {                                                            \
+               ASB2364_7SEGLEDS = (ONOFF) ? 0x8543077f : 0x7f7f7f7f;   \
+       } while (0)
+#else
+#define mn10300_set_gdbleds(ONOFF) do {} while (0)
+#endif
+
+#if MN10300_USE_7SEGLEDS
+/* indicate double-fault by displaying "db-f" on the LEDs */
+#define mn10300_set_dbfleds                    \
+       mov     0x43077f1d,d0           ;       \
+       mov     d0,(ASB2364_7SEGLEDS)
+#else
+#define mn10300_set_dbfleds
+#endif
+
+#ifndef __ASSEMBLY__
+extern void peripheral_leds_display_exception(enum exception_code);
+extern void peripheral_leds_led_chase(void);
+extern void peripheral_leds7x4_display_dec(unsigned int, unsigned int);
+extern void peripheral_leds7x4_display_hex(unsigned int, unsigned int);
+extern void debug_to_serial(const char *, int);
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_UNIT_LEDS_H */
diff --git a/arch/mn10300/unit-asb2364/include/unit/serial.h b/arch/mn10300/unit-asb2364/include/unit/serial.h
new file mode 100644 (file)
index 0000000..7f048bb
--- /dev/null
@@ -0,0 +1,151 @@
+/* Unit-specific 8250 serial ports
+ *
+ * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_UNIT_SERIAL_H
+#define _ASM_UNIT_SERIAL_H
+
+#include <asm/cpu-regs.h>
+#include <proc/irq.h>
+#include <unit/fpga-regs.h>
+#include <linux/serial_reg.h>
+
+#define SERIAL_PORT0_BASE_ADDRESS      0xA8200000
+
+#define SERIAL_IRQ     XIRQ1   /* single serial (TL16C550C)    (Lo) */
+
+/*
+ * The ASB2364 has a 12.288 MHz clock
+ * for its UART.
+ *
+ * It'd be nice if someone built a serial card with a 24.576 MHz
+ * clock, since the 16550A can handle a top speed of 1.5
+ * megabits/second, but that requires the faster clock.
+ */
+#define BASE_BAUD (12288000 / 16)
+
+/*
+ * define the /dev/ttyS0 serial port (the ASB2364 has no /dev/ttyS1)
+ */
+#ifndef CONFIG_GDBSTUB_ON_TTYSx
+
+#define SERIAL_PORT_DFNS                                               \
+       {                                                               \
+               .baud_base      = BASE_BAUD,                            \
+               .irq            = SERIAL_IRQ,                           \
+               .flags          = STD_COM_FLAGS,                        \
+               .iomem_base     = (u8 *) SERIAL_PORT0_BASE_ADDRESS,     \
+               .iomem_reg_shift = 1,                                   \
+               .io_type        = SERIAL_IO_MEM,                        \
+       },
+
+#ifndef __ASSEMBLY__
+
+static inline void __debug_to_serial(const char *p, int n)
+{
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#else /* CONFIG_GDBSTUB_ON_TTYSx */
+
+#define SERIAL_PORT_DFNS /* stolen by gdb-stub */
+
+#if defined(CONFIG_GDBSTUB_ON_TTYS0)
+#define GDBPORT_SERIAL_RX      __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_RX  * 4, u8)
+#define GDBPORT_SERIAL_TX      __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_TX  * 4, u8)
+#define GDBPORT_SERIAL_DLL     __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_DLL * 4, u8)
+#define GDBPORT_SERIAL_DLM     __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_DLM * 4, u8)
+#define GDBPORT_SERIAL_IER     __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_IER * 4, u8)
+#define GDBPORT_SERIAL_IIR     __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_IIR * 4, u8)
+#define GDBPORT_SERIAL_FCR     __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_FCR * 4, u8)
+#define GDBPORT_SERIAL_LCR     __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_LCR * 4, u8)
+#define GDBPORT_SERIAL_MCR     __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_MCR * 4, u8)
+#define GDBPORT_SERIAL_LSR     __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_LSR * 4, u8)
+#define GDBPORT_SERIAL_MSR     __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_MSR * 4, u8)
+#define GDBPORT_SERIAL_SCR     __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_SCR * 4, u8)
+#define GDBPORT_SERIAL_IRQ     SERIAL_IRQ
+
+#elif defined(CONFIG_GDBSTUB_ON_TTYS1)
+#error The ASB2364 does not have a /dev/ttyS1
+#endif
+
+#ifndef __ASSEMBLY__
+
+static inline void __debug_to_serial(const char *p, int n)
+{
+       char ch;
+
+#define LSR_WAIT_FOR(STATE)    \
+       do {} while (!(GDBPORT_SERIAL_LSR & UART_LSR_##STATE))
+#define FLOWCTL_QUERY(LINE)    \
+       ({ GDBPORT_SERIAL_MSR & UART_MSR_##LINE; })
+#define FLOWCTL_WAIT_FOR(LINE) \
+       do {} while (!(GDBPORT_SERIAL_MSR & UART_MSR_##LINE))
+#define FLOWCTL_CLEAR(LINE)    \
+       do { GDBPORT_SERIAL_MCR &= ~UART_MCR_##LINE; } while (0)
+#define FLOWCTL_SET(LINE)      \
+       do { GDBPORT_SERIAL_MCR |= UART_MCR_##LINE; } while (0)
+
+       FLOWCTL_SET(DTR);
+
+       for (; n > 0; n--) {
+               LSR_WAIT_FOR(THRE);
+               FLOWCTL_WAIT_FOR(CTS);
+
+               ch = *p++;
+               if (ch == 0x0a) {
+                       GDBPORT_SERIAL_TX = 0x0d;
+                       LSR_WAIT_FOR(THRE);
+                       FLOWCTL_WAIT_FOR(CTS);
+               }
+               GDBPORT_SERIAL_TX = ch;
+       }
+
+       FLOWCTL_CLEAR(DTR);
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* CONFIG_GDBSTUB_ON_TTYSx */
+
+#define SERIAL_INITIALIZE                                      \
+do {                                                           \
+       /* release reset */                                     \
+       ASB2364_FPGA_REG_RESET_UART = 0x0001;                   \
+       SyncExBus();                                            \
+} while (0)
+
+#define SERIAL_CHECK_INTERRUPT                                 \
+do {                                                           \
+       if ((ASB2364_FPGA_REG_IRQ_UART & 0x0001) == 0x0001) {   \
+               return IRQ_NONE;                                \
+       }                                                       \
+} while (0)
+
+#define SERIAL_CLEAR_INTERRUPT                                 \
+do {                                                           \
+       ASB2364_FPGA_REG_IRQ_UART = 0x0001;                     \
+       SyncExBus();                                            \
+} while (0)
+
+#define SERIAL_SET_INT_MASK                                    \
+do {                                                           \
+       ASB2364_FPGA_REG_MASK_UART = 0x0001;                    \
+       SyncExBus();                                            \
+} while (0)
+
+#define SERIAL_CLEAR_INT_MASK                                  \
+do {                                                           \
+       ASB2364_FPGA_REG_MASK_UART = 0x0000;                    \
+       SyncExBus();                                            \
+} while (0)
+
+#endif /* _ASM_UNIT_SERIAL_H */
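
BASE_BAUD is the 16550 divisor reference: the UART divides its input
clock by 16 times the divisor-latch value.  A worked example (baud rate
chosen for illustration only):

	/* divisor = BASE_BAUD / baud; 768000 / 9600 = 80 */
	unsigned int divisor = BASE_BAUD / 9600;

The 12.288 MHz clock divides evenly for the standard rates up to
38400 baud.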
diff --git a/arch/mn10300/unit-asb2364/include/unit/smsc911x.h b/arch/mn10300/unit-asb2364/include/unit/smsc911x.h
new file mode 100644 (file)
index 0000000..4c1ede5
--- /dev/null
@@ -0,0 +1,171 @@
+/* Support for the SMSC911x NIC
+ *
+ * Copyright (C) 2006 Matsushita Electric Industrial Co., Ltd.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_UNIT_SMSC911X_H
+#define _ASM_UNIT_SMSC911X_H
+
+#include <linux/netdevice.h>
+#include <proc/irq.h>
+#include <unit/fpga-regs.h>
+
+#define MN10300_USE_EXT_EEPROM
+
+
+#define SMSC911X_BASE          0xA8000000UL
+#define SMSC911X_BASE_END      0xA8000100UL
+#define SMSC911X_IRQ           FPGA_LAN_IRQ
+
+/*
+ * Allow the FPGA to be initialised by the SMSC911x driver
+ */
+#undef SMSC_INITIALIZE
+#define SMSC_INITIALIZE()                                      \
+do {                                                           \
+       /* release reset */                                     \
+       ASB2364_FPGA_REG_RESET_LAN = 0x0001;                    \
+       SyncExBus();                                            \
+} while (0)
+
+#ifdef MN10300_USE_EXT_EEPROM
+#include <linux/delay.h>
+#include <unit/clock.h>
+
+#define EEPROM_ADDRESS 0xA0
+#define MAC_OFFSET     0x0008
+#define USE_IIC_CH     0       /* 0 or 1 */
+#define IIC_OFFSET     (0x80000 * USE_IIC_CH)
+#define IIC_DTRM       __SYSREG(0xd8400000 + IIC_OFFSET, u32)
+#define IIC_DREC       __SYSREG(0xd8400004 + IIC_OFFSET, u32)
+#define IIC_MYADD      __SYSREG(0xd8400008 + IIC_OFFSET, u32)
+#define IIC_CLK                __SYSREG(0xd840000c + IIC_OFFSET, u32)
+#define IIC_BRST       __SYSREG(0xd8400010 + IIC_OFFSET, u32)
+#define IIC_HOLD       __SYSREG(0xd8400014 + IIC_OFFSET, u32)
+#define IIC_BSTS       __SYSREG(0xd8400018 + IIC_OFFSET, u32)
+#define IIC_ICR                __SYSREG(0xd4000080 + 4 * USE_IIC_CH, u16)
+
+#define IIC_CLK_PLS    ((unsigned short)(MN10300_IOCLK / 100000 - 1))
+#define IIC_CLK_LOW    ((unsigned short)(IIC_CLK_PLS / 2))
+
+#define SYS_IIC_DTRM_Bit_STA   ((unsigned short)0x0400)
+#define SYS_IIC_DTRM_Bit_STO   ((unsigned short)0x0200)
+#define SYS_IIC_DTRM_Bit_ACK   ((unsigned short)0x0100)
+#define SYS_IIC_DTRM_Bit_DATA  ((unsigned short)0x00FF)
+
+static inline void POLL_INT_REQ(volatile u16 *icr)
+{
+       unsigned long flags;
+       u16 tmp;
+
+       while (!(*icr & GxICR_REQUEST))
+               ;
+       flags = arch_local_cli_save();
+       tmp = *icr;
+       *icr = (tmp & GxICR_LEVEL) | GxICR_DETECT;
+       tmp = *icr;
+       arch_local_irq_restore(flags);
+}
+
+/*
+ * Implement the SMSC911x hook for MAC address retrieval
+ */
+#undef smsc_get_mac
+static inline int smsc_get_mac(struct net_device *dev)
+{
+       unsigned char *mac_buf = dev->dev_addr;
+       int i;
+       unsigned short value;
+       unsigned int data;
+       int mac_length = 6;
+       int check;
+       u16 orig_gicr, tmp;
+       unsigned long flags;
+
+       /* save original GnICR and clear GnICR.IE */
+       flags = arch_local_cli_save();
+       orig_gicr = IIC_ICR;
+       IIC_ICR = orig_gicr & GxICR_LEVEL;
+       tmp = IIC_ICR;
+       arch_local_irq_restore(flags);
+
+       IIC_MYADD = 0x00000008;
+       IIC_CLK = (IIC_CLK_LOW << 16) + (IIC_CLK_PLS);
+       /* bus hung recovery */
+
+       while (1) {
+               check = 0;
+               for (i = 0; i < 3; i++) {
+                       if ((IIC_BSTS & 0x00000003) == 0x00000003)
+                               check++;
+                       udelay(3);
+               }
+
+               if (check == 3) {
+                       IIC_BRST = 0x00000003;
+                       break;
+               } else {
+                       for (i = 0; i < 3; i++) {
+                               IIC_BRST = 0x00000002;
+                               udelay(8);
+                               IIC_BRST = 0x00000003;
+                               udelay(8);
+                       }
+               }
+       }
+
+       IIC_BRST = 0x00000002;
+       IIC_BRST = 0x00000003;
+
+       value   =  SYS_IIC_DTRM_Bit_STA | SYS_IIC_DTRM_Bit_ACK;
+       value   |= (((unsigned short)EEPROM_ADDRESS & SYS_IIC_DTRM_Bit_DATA) |
+                   (unsigned short)0x0000);
+       IIC_DTRM = value;
+       POLL_INT_REQ(&IIC_ICR);
+
+       /* send the offset of the MAC address within the EEPROM */
+       IIC_DTRM = (unsigned char)((MAC_OFFSET & 0xFF00) >> 8);
+       POLL_INT_REQ(&IIC_ICR);
+
+       IIC_DTRM = (unsigned char)(MAC_OFFSET & 0x00FF);
+       POLL_INT_REQ(&IIC_ICR);
+
+       udelay(1000);
+
+       value   =  SYS_IIC_DTRM_Bit_STA;
+       value   |= (((unsigned short)EEPROM_ADDRESS & SYS_IIC_DTRM_Bit_DATA) |
+                   (unsigned short)0x0001);
+       IIC_DTRM = value;
+       POLL_INT_REQ(&IIC_ICR);
+
+       IIC_DTRM = 0x00000000;
+       while (mac_length > 0) {
+               POLL_INT_REQ(&IIC_ICR);
+
+               data = IIC_DREC;
+               mac_length--;
+               if (mac_length == 0)
+                       value = 0x00000300;     /* stop IIC bus */
+               else if (mac_length == 1)
+                       value = 0x00000100;     /* no ack */
+               else
+                       value = 0x00000000;     /* ack */
+               IIC_DTRM = value;
+               *mac_buf++ = (unsigned char)(data & 0xff);
+       }
+
+       /* restore GnICR.LV and GnICR.IE */
+       flags = arch_local_cli_save();
+       IIC_ICR = (orig_gicr & (GxICR_LEVEL | GxICR_ENABLE));
+       tmp = IIC_ICR;
+       arch_local_irq_restore(flags);
+
+       return 0;
+}
+#endif /* MN10300_USE_EXT_EEPROM */
+#endif /* _ASM_UNIT_SMSC911X_H */
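
smsc_get_mac() above is a bit-banged two-phase I2C EEPROM read: transmit
the 16-bit MAC_OFFSET, then repeated-START with the read bit set and
clock in six bytes, suppressing the ACK on the final byte before STOP.
How a transmit word is composed from the DTRM bit defines, as a sketch
(matches the first transmit in the function):

	/* START + EEPROM write address (R/W bit 0 = write), expect ACK */
	unsigned short dtrm = SYS_IIC_DTRM_Bit_STA
			    | SYS_IIC_DTRM_Bit_ACK
			    | (EEPROM_ADDRESS & SYS_IIC_DTRM_Bit_DATA);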
diff --git a/arch/mn10300/unit-asb2364/include/unit/timex.h b/arch/mn10300/unit-asb2364/include/unit/timex.h
new file mode 100644 (file)
index 0000000..ddb7ed0
--- /dev/null
@@ -0,0 +1,159 @@
+/* timex.h: MN2WS0038 architecture timer specifications
+ *
+ * Copyright (C) 2002, 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_UNIT_TIMEX_H
+#define _ASM_UNIT_TIMEX_H
+
+#ifndef __ASSEMBLY__
+#include <linux/irq.h>
+#endif /* __ASSEMBLY__ */
+
+#include <asm/timer-regs.h>
+#include <unit/clock.h>
+#include <asm/param.h>
+
+/*
+ * jiffies counter specifications
+ */
+
+#define        TMJCBR_MAX              0xffffff        /* 24bit */
+#define        TMJCIRQ                 TMTIRQ
+
+#ifndef __ASSEMBLY__
+
+#define MN10300_SRC_IOBCLK     MN10300_IOBCLK
+
+#ifndef HZ
+# error HZ undeclared.
+#endif /* !HZ */
+
+#define MN10300_JCCLK          (MN10300_SRC_IOBCLK)
+#define MN10300_TSCCLK         (MN10300_SRC_IOBCLK)
+
+#define MN10300_JC_PER_HZ      ((MN10300_JCCLK + HZ / 2) / HZ)
+#define MN10300_TSC_PER_HZ     ((MN10300_TSCCLK + HZ / 2) / HZ)
+
+/* check that the MTM interval value fits within the base register */
+#if (MN10300_JC_PER_HZ - 1) > TMJCBR_MAX
+# error MTM tick timer interval value overflows TMJCBR_MAX.
+#endif
+
+static inline void stop_jiffies_counter(void)
+{
+       u16 tmp;
+       TMTMD = 0;
+       tmp = TMTMD;
+}
+
+static inline void reload_jiffies_counter(u32 cnt)
+{
+       u32 tmp;
+
+       TMTBR = cnt;
+       tmp = TMTBR;
+
+       TMTMD = TMTMD_TMTLDE;
+       TMTMD = TMTMD_TMTCNE;
+       tmp = TMTMD;
+}
+
+#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_CLOCKEVENTS) && \
+    !defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+/*
+ * If we aren't using broadcasting, each core needs its own event timer.
+ * Since CPU0 uses the 24-bit tick timer, we cascade timers 4 & 5 into a
+ * 32-bit timer for CPU1 (though we only really use 24 bits, to match
+ * CPU0).
+ */
+
+#define        TMJC1IRQ                TM5IRQ
+
+static inline void stop_jiffies_counter1(void)
+{
+       u8 tmp;
+       TM4MD = 0;
+       TM5MD = 0;
+       tmp = TM4MD;
+       tmp = TM5MD;
+}
+
+static inline void reload_jiffies_counter1(u32 cnt)
+{
+       u32 tmp;
+
+       TM45BR = cnt;
+       tmp = TM45BR;
+
+       TM4MD = TM4MD_INIT_COUNTER;
+       tmp = TM4MD;
+
+       TM5MD = TM5MD_SRC_TM4CASCADE | TM5MD_INIT_COUNTER;
+       TM5MD = TM5MD_SRC_TM4CASCADE | TM5MD_COUNT_ENABLE;
+       tmp = TM5MD;
+
+       TM4MD = TM4MD_COUNT_ENABLE;
+       tmp = TM4MD;
+}
+#endif /* CONFIG_SMP&GENERIC_CLOCKEVENTS&!GENERIC_CLOCKEVENTS_BROADCAST */
+
+#endif /* !__ASSEMBLY__ */
+
+
+/*
+ * timestamp counter specifications
+ */
+#define        TMTSCBR_MAX     0xffffffff
+
+#ifndef __ASSEMBLY__
+
+/* Use 32-bit timestamp counter */
+#define        TMTSCMD         TMSMD
+#define        TMTSCBR         TMSBR
+#define        TMTSCBC         TMSBC
+#define        TMTSCICR        TMSICR
+
+static inline void startup_timestamp_counter(void)
+{
+       u32 sync;
+
+       /* set up the TMS (timestamp) 32-bit timer register to count real
+        * time - count down from 2^32-1 to 0, wrapping at the IOBCLK rate
+        */
+
+       TMTSCBR = TMTSCBR_MAX;
+       sync = TMTSCBR;
+
+       TMTSCICR = 0;
+       sync = TMTSCICR;
+
+       TMTSCMD = TMTMD_TMTLDE;
+       TMTSCMD = TMTMD_TMTCNE;
+       sync = TMTSCMD;
+}
+
+static inline void shutdown_timestamp_counter(void)
+{
+       TMTSCMD = 0;
+}
+
+/*
+ * we use the 32-bit down-counting timestamp timer to count I/O
+ * clock cycles for the purposes of time keeping
+ */
+typedef unsigned long cycles_t;
+
+static inline cycles_t read_timestamp_counter(void)
+{
+       return (cycles_t)~TMTSCBC;
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_UNIT_TIMEX_H */
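
Note the dummy read-back (tmp = TMTMD and friends) after every timer
register write: the read forces the posted write to the timer block to
complete, the same convention SyncExBus() applies to the FPGA.  Roughly
how the clock-event code is expected to arm the tick from these helpers
(a sketch under that assumption):

	stop_jiffies_counter();
	/* 24-bit down-counter: interval of MN10300_JC_PER_HZ cycles */
	reload_jiffies_counter(MN10300_JC_PER_HZ - 1);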
diff --git a/arch/mn10300/unit-asb2364/irq-fpga.c b/arch/mn10300/unit-asb2364/irq-fpga.c
new file mode 100644 (file)
index 0000000..fcf2975
--- /dev/null
@@ -0,0 +1,96 @@
+/* ASB2364 FPGA interrupt multiplexing
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <unit/fpga-regs.h>
+
+/*
+ * FPGA PIC operations
+ */
+static void asb2364_fpga_mask(unsigned int irq)
+{
+       ASB2364_FPGA_REG_MASK(irq - NR_CPU_IRQS) = 0x0001;
+       SyncExBus();
+}
+
+static void asb2364_fpga_ack(unsigned int irq)
+{
+       ASB2364_FPGA_REG_IRQ(irq - NR_CPU_IRQS) = 0x0001;
+       SyncExBus();
+}
+
+static void asb2364_fpga_mask_ack(unsigned int irq)
+{
+       ASB2364_FPGA_REG_MASK(irq - NR_CPU_IRQS) = 0x0001;
+       SyncExBus();
+       ASB2364_FPGA_REG_IRQ(irq - NR_CPU_IRQS) = 0x0001;
+       SyncExBus();
+}
+
+static void asb2364_fpga_unmask(unsigned int irq)
+{
+       ASB2364_FPGA_REG_MASK(irq - NR_CPU_IRQS) = 0x0000;
+       SyncExBus();
+}
+
+static struct irq_chip asb2364_fpga_pic = {
+       .name           = "fpga",
+       .ack            = asb2364_fpga_ack,
+       .mask           = asb2364_fpga_mask,
+       .mask_ack       = asb2364_fpga_mask_ack,
+       .unmask         = asb2364_fpga_unmask,
+};
+
+/*
+ * FPGA PIC interrupt handler
+ */
+static irqreturn_t fpga_interrupt(int irq, void *_mask)
+{
+       if ((ASB2364_FPGA_REG_IRQ_LAN  & 0x0001) != 0x0001)
+               generic_handle_irq(FPGA_LAN_IRQ);
+       if ((ASB2364_FPGA_REG_IRQ_UART & 0x0001) != 0x0001)
+               generic_handle_irq(FPGA_UART_IRQ);
+       if ((ASB2364_FPGA_REG_IRQ_I2C  & 0x0001) != 0x0001)
+               generic_handle_irq(FPGA_I2C_IRQ);
+       if ((ASB2364_FPGA_REG_IRQ_USB  & 0x0001) != 0x0001)
+               generic_handle_irq(FPGA_USB_IRQ);
+       if ((ASB2364_FPGA_REG_IRQ_FPGA & 0x0001) != 0x0001)
+               generic_handle_irq(FPGA_FPGA_IRQ);
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * Define an interrupt action for each FPGA PIC output
+ */
+static struct irqaction fpga_irq[]  = {
+       [0] = {
+               .handler        = fpga_interrupt,
+               .flags          = IRQF_DISABLED | IRQF_SHARED,
+               .name           = "fpga",
+       },
+};
+
+/*
+ * Initialise the FPGA's PIC
+ */
+void __init irq_fpga_init(void)
+{
+       int irq;
+
+       for (irq = NR_CPU_IRQS; irq < NR_IRQS; irq++)
+               set_irq_chip_and_handler(irq, &asb2364_fpga_pic, handle_level_irq);
+
+       /* the FPGA drives the XIRQ1 input on the CPU PIC */
+       setup_irq(XIRQ1, &fpga_irq[0]);
+}
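
fpga_interrupt() above is a chained demultiplexer: the FPGA ORs its five
sources onto XIRQ1, and the handler polls each active-low status register
(hence the != 0x0001 tests) and redispatches via generic_handle_irq().
A driver behind the FPGA PIC then requests its demuxed line like any
other IRQ; a sketch, where lan_interrupt_sketch and netdev are
hypothetical names:

	int err = request_irq(FPGA_LAN_IRQ, lan_interrupt_sketch, 0,
			      "asb2364-lan", netdev);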
diff --git a/arch/mn10300/unit-asb2364/leds.c b/arch/mn10300/unit-asb2364/leds.c
new file mode 100644 (file)
index 0000000..1ff830c
--- /dev/null
@@ -0,0 +1,98 @@
+/* leds.c: ASB2364 peripheral 7seg LEDs x4 support
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/intctl-regs.h>
+#include <asm/rtc-regs.h>
+#include <unit/leds.h>
+
+#if MN10300_USE_7SEGLEDS
+static const u8 asb2364_led_hex_tbl[16] = {
+       0x80, 0xf2, 0x48, 0x60, 0x32, 0x24, 0x04, 0xf0,
+       0x00, 0x20, 0x10, 0x06, 0x8c, 0x42, 0x0c, 0x1c
+};
+
+static const u32 asb2364_led_chase_tbl[6] = {
+       ~0x02020202,    /* top          - segA */
+       ~0x04040404,    /* right top    - segB */
+       ~0x08080808,    /* right bottom - segC */
+       ~0x10101010,    /* bottom       - segD */
+       ~0x20202020,    /* left bottom  - segE */
+       ~0x40404040,    /* left top     - segF */
+};
+
+static unsigned asb2364_led_chase;
+
+void peripheral_leds7x4_display_dec(unsigned int val, unsigned int points)
+{
+       u32 leds;
+
+       leds = asb2364_led_hex_tbl[(val/1000) % 10];
+       leds <<= 8;
+       leds |= asb2364_led_hex_tbl[(val/100) % 10];
+       leds <<= 8;
+       leds |= asb2364_led_hex_tbl[(val/10) % 10];
+       leds <<= 8;
+       leds |= asb2364_led_hex_tbl[val % 10];
+       leds |= points^0x01010101;
+
+       ASB2364_7SEGLEDS = leds;
+}
+
+void peripheral_leds7x4_display_hex(unsigned int val, unsigned int points)
+{
+       u32 leds;
+
+       leds = asb2364_led_hex_tbl[(val >> 12) & 0xf];
+       leds <<= 8;
+       leds |= asb2364_led_hex_tbl[(val >> 8) & 0xf];
+       leds <<= 8;
+       leds |= asb2364_led_hex_tbl[(val >> 4) & 0xf];
+       leds <<= 8;
+       leds |= asb2364_led_hex_tbl[val & 0xf];
+       leds |= points^0x01010101;
+
+       ASB2364_7SEGLEDS = leds;
+}
+
+/* display triple horizontal bar and exception code */
+void peripheral_leds_display_exception(enum exception_code code)
+{
+       u32 leds;
+
+       leds = asb2364_led_hex_tbl[(code/0x100) % 0x10];
+       leds <<= 8;
+       leds |= asb2364_led_hex_tbl[(code/0x10) % 0x10];
+       leds <<= 8;
+       leds |= asb2364_led_hex_tbl[code % 0x10];
+       leds |= 0x6d010101;
+
+       ASB2364_7SEGLEDS = leds;
+}
+
+void peripheral_leds_led_chase(void)
+{
+       ASB2364_7SEGLEDS = asb2364_led_chase_tbl[asb2364_led_chase];
+       asb2364_led_chase++;
+       if (asb2364_led_chase >= 6)
+               asb2364_led_chase = 0;
+}
+#else  /* MN10300_USE_7SEGLEDS */
+void peripheral_leds7x4_display_dec(unsigned int val, unsigned int points) { }
+void peripheral_leds7x4_display_hex(unsigned int val, unsigned int points) { }
+void peripheral_leds_display_exception(enum exception_code code) { }
+void peripheral_leds_led_chase(void) { }
+#endif /* MN10300_USE_7SEGLEDS */
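
A quick usage sketch for the LED helpers above (values illustrative):

	/* show "002A" on the four digits, all decimal points dark */
	peripheral_leds7x4_display_hex(0x002a, 0);
	/* advance the segment-chase animation by one step */
	peripheral_leds_led_chase();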
diff --git a/arch/mn10300/unit-asb2364/smsc911x.c b/arch/mn10300/unit-asb2364/smsc911x.c
new file mode 100644 (file)
index 0000000..544a73e
--- /dev/null
@@ -0,0 +1,58 @@
+/* Specification for the SMSC911x NIC
+ *
+ * Copyright (C) 2006 Matsushita Electric Industrial Co., Ltd.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/smsc911x.h>
+#include <unit/smsc911x.h>
+
+static struct smsc911x_platform_config smsc911x_config = {
+       .irq_polarity   = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
+       .irq_type       = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
+       .flags          = SMSC911X_USE_32BIT,
+};
+
+static struct resource smsc911x_resources[] = {
+       [0] = {
+               .start  = SMSC911X_BASE,
+               .end    = SMSC911X_BASE_END,
+               .flags  = IORESOURCE_MEM,
+       },
+       [1] = {
+               .start  = SMSC911X_IRQ,
+               .end    = SMSC911X_IRQ,
+               .flags  = IORESOURCE_IRQ,
+       },
+};
+
+static struct platform_device smsc911x_device = {
+       .name           = "smsc911x",
+       .id             = 0,
+       .num_resources  = ARRAY_SIZE(smsc911x_resources),
+       .resource       = smsc911x_resources,
+       .dev            = {
+               .platform_data = &smsc911x_config,
+       }
+};
+
+/*
+ * add platform devices
+ */
+static int __init unit_device_init(void)
+{
+       platform_device_register(&smsc911x_device);
+       return 0;
+}
+
+device_initcall(unit_device_init);
diff --git a/arch/mn10300/unit-asb2364/unit-init.c b/arch/mn10300/unit-asb2364/unit-init.c
new file mode 100644 (file)
index 0000000..1144080
--- /dev/null
@@ -0,0 +1,88 @@
+/* ASB2364 initialisation
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+
+#include <asm/io.h>
+#include <asm/setup.h>
+#include <asm/processor.h>
+#include <asm/irq.h>
+#include <asm/intctl-regs.h>
+#include <unit/fpga-regs.h>
+
+/*
+ * initialise some of the unit hardware before gdbstub is set up
+ */
+asmlinkage void __init unit_init(void)
+{
+       /* set up the external interrupts */
+
+       /* XIRQ[0]: NAND RXBY */
+       /* SET_XIRQ_TRIGGER(0, XIRQ_TRIGGER_LOWLEVEL); */
+
+       /* XIRQ[1]: LAN, UART, I2C, USB, PCI, FPGA */
+       SET_XIRQ_TRIGGER(1, XIRQ_TRIGGER_LOWLEVEL);
+
+       /* XIRQ[2]: Extend Slot 1-9 */
+       /* SET_XIRQ_TRIGGER(2, XIRQ_TRIGGER_LOWLEVEL); */
+
+#if defined(CONFIG_EXT_SERIAL_IRQ_LEVEL) &&    \
+    defined(CONFIG_ETHERNET_IRQ_LEVEL) &&      \
+    (CONFIG_EXT_SERIAL_IRQ_LEVEL != CONFIG_ETHERNET_IRQ_LEVEL)
+# error CONFIG_EXT_SERIAL_IRQ_LEVEL != CONFIG_ETHERNET_IRQ_LEVEL
+#endif
+
+#if defined(CONFIG_EXT_SERIAL_IRQ_LEVEL)
+       set_intr_level(XIRQ1, NUM2GxICR_LEVEL(CONFIG_EXT_SERIAL_IRQ_LEVEL));
+#elif defined(CONFIG_ETHERNET_IRQ_LEVEL)
+       set_intr_level(XIRQ1, NUM2GxICR_LEVEL(CONFIG_ETHERNET_IRQ_LEVEL));
+#endif
+}
+
+/*
+ * initialise the rest of the unit hardware after gdbstub is ready
+ */
+asmlinkage void __init unit_setup(void)
+{
+
+}
+
+/*
+ * initialise the external interrupts used by a unit of this type
+ */
+void __init unit_init_IRQ(void)
+{
+       unsigned int extnum;
+
+       for (extnum = 0 ; extnum < NR_XIRQS ; extnum++) {
+               switch (GET_XIRQ_TRIGGER(extnum)) {
+                       /* LEVEL triggered interrupts should be made
+                        * post-ACK'able as they hold their lines until
+                        * serviced
+                        */
+               case XIRQ_TRIGGER_HILEVEL:
+               case XIRQ_TRIGGER_LOWLEVEL:
+                       mn10300_set_lateack_irq_type(XIRQ2IRQ(extnum));
+                       break;
+               default:
+                       break;
+               }
+       }
+
+#define IRQCTL __SYSREG(0xd5000090, u32)
+       IRQCTL |= 0x02;
+
+       irq_fpga_init();
+}
index c4f49e45129dee568e4d75c99e26934fac16e523..2905b1f52d30b0c135b8794444473830acf2b620 100644 (file)
@@ -110,7 +110,8 @@ void user_enable_block_step(struct task_struct *task)
        pa_psw(task)->l = 0;
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        unsigned long tmp;
        long ret = -EIO;
@@ -120,11 +121,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
        /* Read the word at location addr in the USER area.  For ptraced
           processes, the kernel saves all regs on a syscall. */
        case PTRACE_PEEKUSR:
-               if ((addr & (sizeof(long)-1)) ||
-                   (unsigned long) addr >= sizeof(struct pt_regs))
+               if ((addr & (sizeof(unsigned long)-1)) ||
+                    addr >= sizeof(struct pt_regs))
                        break;
                tmp = *(unsigned long *) ((char *) task_regs(child) + addr);
-               ret = put_user(tmp, (unsigned long *) data);
+               ret = put_user(tmp, (unsigned long __user *) data);
                break;
 
        /* Write the word at location addr in the USER area.  This will need
@@ -151,8 +152,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                        break;
                }
 
-               if ((addr & (sizeof(long)-1)) ||
-                   (unsigned long) addr >= sizeof(struct pt_regs))
+               if ((addr & (sizeof(unsigned long)-1)) ||
+                    addr >= sizeof(struct pt_regs))
                        break;
                if ((addr >= PT_GR1 && addr <= PT_GR31) ||
                                addr == PT_IAOQ0 || addr == PT_IAOQ1 ||
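
The long -> unsigned long prototype change does more than drop casts: the
bounds check relies on an unsigned comparison so that a negative offset
from the tracer wraps to a huge value and is rejected.  An illustration
in plain C with an illustrative value:

	unsigned long addr = (unsigned long)-8;	/* tracer passed -8 */

	if ((addr & (sizeof(unsigned long) - 1)) ||
	    addr >= sizeof(struct pt_regs))
		return -EIO;	/* wrapped value fails the range check */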
index 8bdc6a9e57733109eb75ec47d664cd37be22e94f..1cf20bdfbecaada5bb2b88d439d776c32af2558c 100644 (file)
@@ -124,23 +124,23 @@ static inline u64 cputime64_to_jiffies64(const cputime_t ct)
 }
 
 /*
- * Convert cputime <-> milliseconds
+ * Convert cputime <-> microseconds
  */
 extern u64 __cputime_msec_factor;
 
-static inline unsigned long cputime_to_msecs(const cputime_t ct)
+static inline unsigned long cputime_to_usecs(const cputime_t ct)
 {
-       return mulhdu(ct, __cputime_msec_factor);
+       return mulhdu(ct, __cputime_msec_factor) * USEC_PER_MSEC;
 }
 
-static inline cputime_t msecs_to_cputime(const unsigned long ms)
+static inline cputime_t usecs_to_cputime(const unsigned long us)
 {
        cputime_t ct;
        unsigned long sec;
 
        /* have to be a little careful about overflow */
-       ct = ms % 1000;
-       sec = ms / 1000;
+       ct = us % 1000000;
+       sec = us / 1000000;
        if (ct) {
                ct *= tb_ticks_per_sec;
-               do_div(ct, 1000);
+               do_div(ct, 1000000);
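
The "careful about overflow" split works because only the sub-second
remainder is multiplied by the timebase frequency.  The same arithmetic
as a standalone sketch (64-bit userspace math; a 512 MHz timebase is
assumed for the worked number):

	unsigned long long usecs_to_ticks(unsigned long us,
					  unsigned long long tps)
	{
		unsigned long long ct = us % 1000000;	/* < 1e6 */
		unsigned long sec = us / 1000000;

		ct = ct * tps / 1000000;	/* 1us @ 512MHz = 512 ticks */
		return (unsigned long long)sec * tps + ct;
	}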
diff --git a/arch/powerpc/include/asm/fsldma.h b/arch/powerpc/include/asm/fsldma.h
deleted file mode 100644 (file)
index debc5ed..0000000
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Freescale MPC83XX / MPC85XX DMA Controller
- *
- * Copyright (c) 2009 Ira W. Snyder <iws@ovro.caltech.edu>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#ifndef __ARCH_POWERPC_ASM_FSLDMA_H__
-#define __ARCH_POWERPC_ASM_FSLDMA_H__
-
-#include <linux/slab.h>
-#include <linux/dmaengine.h>
-
-/*
- * Definitions for the Freescale DMA controller's DMA_SLAVE implementation
- *
- * The Freescale DMA_SLAVE implementation was designed to handle many-to-many
- * transfers. An example usage would be an accelerated copy between two
- * scatterlists. Another example use would be an accelerated copy from
- * multiple non-contiguous device buffers into a single scatterlist.
- *
- * A DMA_SLAVE transaction is defined by a struct fsl_dma_slave. This
- * structure contains a list of hardware addresses that should be copied
- * to/from the scatterlist passed into device_prep_slave_sg(). The structure
- * also has some fields to enable hardware-specific features.
- */
-
-/**
- * struct fsl_dma_hw_addr
- * @entry: linked list entry
- * @address: the hardware address
- * @length: length to transfer
- *
- * Holds a single physical hardware address / length pair for use
- * with the DMAEngine DMA_SLAVE API.
- */
-struct fsl_dma_hw_addr {
-       struct list_head entry;
-
-       dma_addr_t address;
-       size_t length;
-};
-
-/**
- * struct fsl_dma_slave
- * @addresses: a linked list of struct fsl_dma_hw_addr structures
- * @request_count: value for DMA request count
- * @src_loop_size: setup and enable constant source-address DMA transfers
- * @dst_loop_size: setup and enable constant destination address DMA transfers
- * @external_start: enable externally started DMA transfers
- * @external_pause: enable externally paused DMA transfers
- *
- * Holds a list of address / length pairs for use with the DMAEngine
- * DMA_SLAVE API implementation for the Freescale DMA controller.
- */
-struct fsl_dma_slave {
-
-       /* List of hardware address/length pairs */
-       struct list_head addresses;
-
-       /* Support for extra controller features */
-       unsigned int request_count;
-       unsigned int src_loop_size;
-       unsigned int dst_loop_size;
-       bool external_start;
-       bool external_pause;
-};
-
-/**
- * fsl_dma_slave_append - add an address/length pair to a struct fsl_dma_slave
- * @slave: the &struct fsl_dma_slave to add to
- * @address: the hardware address to add
- * @length: the length of bytes to transfer from @address
- *
- * Add a hardware address/length pair to a struct fsl_dma_slave. Returns 0 on
- * success, -ERRNO otherwise.
- */
-static inline int fsl_dma_slave_append(struct fsl_dma_slave *slave,
-                                      dma_addr_t address, size_t length)
-{
-       struct fsl_dma_hw_addr *addr;
-
-       addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
-       if (!addr)
-               return -ENOMEM;
-
-       INIT_LIST_HEAD(&addr->entry);
-       addr->address = address;
-       addr->length = length;
-
-       list_add_tail(&addr->entry, &slave->addresses);
-       return 0;
-}
-
-/**
- * fsl_dma_slave_free - free a struct fsl_dma_slave
- * @slave: the struct fsl_dma_slave to free
- *
- * Free a struct fsl_dma_slave and all associated address/length pairs
- */
-static inline void fsl_dma_slave_free(struct fsl_dma_slave *slave)
-{
-       struct fsl_dma_hw_addr *addr, *tmp;
-
-       if (slave) {
-               list_for_each_entry_safe(addr, tmp, &slave->addresses, entry) {
-                       list_del(&addr->entry);
-                       kfree(addr);
-               }
-
-               kfree(slave);
-       }
-}
-
-/**
- * fsl_dma_slave_alloc - allocate a struct fsl_dma_slave
- * @gfp: the flags to pass to kmalloc when allocating this structure
- *
- * Allocate a struct fsl_dma_slave for use by the DMA_SLAVE API. Returns a new
- * struct fsl_dma_slave on success, or NULL on failure.
- */
-static inline struct fsl_dma_slave *fsl_dma_slave_alloc(gfp_t gfp)
-{
-       struct fsl_dma_slave *slave;
-
-       slave = kzalloc(sizeof(*slave), gfp);
-       if (!slave)
-               return NULL;
-
-       INIT_LIST_HEAD(&slave->addresses);
-       return slave;
-}
-
-#endif /* __ARCH_POWERPC_ASM_FSLDMA_H__ */
index 286d9783d93f3ed2806036e14b5b6690d159865a..a9b32967cff64d81700c252f9a04ced31a389271 100644 (file)
@@ -1406,37 +1406,42 @@ static long ppc_del_hwdebug(struct task_struct *child, long addr, long data)
  * Here are the old "legacy" powerpc specific getregs/setregs ptrace calls,
  * we mark them as obsolete now, they will be removed in a future version
  */
-static long arch_ptrace_old(struct task_struct *child, long request, long addr,
-                           long data)
+static long arch_ptrace_old(struct task_struct *child, long request,
+                           unsigned long addr, unsigned long data)
 {
+       void __user *datavp = (void __user *) data;
+
        switch (request) {
        case PPC_PTRACE_GETREGS:        /* Get GPRs 0 - 31. */
                return copy_regset_to_user(child, &user_ppc_native_view,
                                           REGSET_GPR, 0, 32 * sizeof(long),
-                                          (void __user *) data);
+                                          datavp);
 
        case PPC_PTRACE_SETREGS:        /* Set GPRs 0 - 31. */
                return copy_regset_from_user(child, &user_ppc_native_view,
                                             REGSET_GPR, 0, 32 * sizeof(long),
-                                            (const void __user *) data);
+                                            datavp);
 
        case PPC_PTRACE_GETFPREGS:      /* Get FPRs 0 - 31. */
                return copy_regset_to_user(child, &user_ppc_native_view,
                                           REGSET_FPR, 0, 32 * sizeof(double),
-                                          (void __user *) data);
+                                          datavp);
 
        case PPC_PTRACE_SETFPREGS:      /* Set FPRs 0 - 31. */
                return copy_regset_from_user(child, &user_ppc_native_view,
                                             REGSET_FPR, 0, 32 * sizeof(double),
-                                            (const void __user *) data);
+                                            datavp);
        }
 
        return -EPERM;
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        int ret = -EPERM;
+       void __user *datavp = (void __user *) data;
+       unsigned long __user *datalp = datavp;
 
        switch (request) {
        /* read the word at location addr in the USER area. */
@@ -1446,11 +1451,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                ret = -EIO;
                /* convert to index and check */
 #ifdef CONFIG_PPC32
-               index = (unsigned long) addr >> 2;
+               index = addr >> 2;
                if ((addr & 3) || (index > PT_FPSCR)
                    || (child->thread.regs == NULL))
 #else
-               index = (unsigned long) addr >> 3;
+               index = addr >> 3;
                if ((addr & 7) || (index > PT_FPSCR))
 #endif
                        break;
@@ -1463,7 +1468,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                        tmp = ((unsigned long *)child->thread.fpr)
                                [TS_FPRWIDTH * (index - PT_FPR0)];
                }
-               ret = put_user(tmp,(unsigned long __user *) data);
+               ret = put_user(tmp, datalp);
                break;
        }
 
@@ -1474,11 +1479,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                ret = -EIO;
                /* convert to index and check */
 #ifdef CONFIG_PPC32
-               index = (unsigned long) addr >> 2;
+               index = addr >> 2;
                if ((addr & 3) || (index > PT_FPSCR)
                    || (child->thread.regs == NULL))
 #else
-               index = (unsigned long) addr >> 3;
+               index = addr >> 3;
                if ((addr & 7) || (index > PT_FPSCR))
 #endif
                        break;
@@ -1525,11 +1530,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                dbginfo.features = 0;
 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
 
-               if (!access_ok(VERIFY_WRITE, data,
+               if (!access_ok(VERIFY_WRITE, datavp,
                               sizeof(struct ppc_debug_info)))
                        return -EFAULT;
-               ret = __copy_to_user((struct ppc_debug_info __user *)data,
-                                    &dbginfo, sizeof(struct ppc_debug_info)) ?
+               ret = __copy_to_user(datavp, &dbginfo,
+                                    sizeof(struct ppc_debug_info)) ?
                      -EFAULT : 0;
                break;
        }
@@ -1537,11 +1542,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
        case PPC_PTRACE_SETHWDEBUG: {
                struct ppc_hw_breakpoint bp_info;
 
-               if (!access_ok(VERIFY_READ, data,
+               if (!access_ok(VERIFY_READ, datavp,
                               sizeof(struct ppc_hw_breakpoint)))
                        return -EFAULT;
-               ret = __copy_from_user(&bp_info,
-                                      (struct ppc_hw_breakpoint __user *)data,
+               ret = __copy_from_user(&bp_info, datavp,
                                       sizeof(struct ppc_hw_breakpoint)) ?
                      -EFAULT : 0;
                if (!ret)
@@ -1560,11 +1564,9 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                if (addr > 0)
                        break;
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
-               ret = put_user(child->thread.dac1,
-                              (unsigned long __user *)data);
+               ret = put_user(child->thread.dac1, datalp);
 #else
-               ret = put_user(child->thread.dabr,
-                              (unsigned long __user *)data);
+               ret = put_user(child->thread.dabr, datalp);
 #endif
                break;
        }
@@ -1580,7 +1582,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                return copy_regset_to_user(child, &user_ppc_native_view,
                                           REGSET_GPR,
                                           0, sizeof(struct pt_regs),
-                                          (void __user *) data);
+                                          datavp);
 
 #ifdef CONFIG_PPC64
        case PTRACE_SETREGS64:
@@ -1589,19 +1591,19 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                return copy_regset_from_user(child, &user_ppc_native_view,
                                             REGSET_GPR,
                                             0, sizeof(struct pt_regs),
-                                            (const void __user *) data);
+                                            datavp);
 
        case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
                return copy_regset_to_user(child, &user_ppc_native_view,
                                           REGSET_FPR,
                                           0, sizeof(elf_fpregset_t),
-                                          (void __user *) data);
+                                          datavp);
 
        case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
                return copy_regset_from_user(child, &user_ppc_native_view,
                                             REGSET_FPR,
                                             0, sizeof(elf_fpregset_t),
-                                            (const void __user *) data);
+                                            datavp);
 
 #ifdef CONFIG_ALTIVEC
        case PTRACE_GETVRREGS:
@@ -1609,40 +1611,40 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                                           REGSET_VMX,
                                           0, (33 * sizeof(vector128) +
                                               sizeof(u32)),
-                                          (void __user *) data);
+                                          datavp);
 
        case PTRACE_SETVRREGS:
                return copy_regset_from_user(child, &user_ppc_native_view,
                                             REGSET_VMX,
                                             0, (33 * sizeof(vector128) +
                                                 sizeof(u32)),
-                                            (const void __user *) data);
+                                            datavp);
 #endif
 #ifdef CONFIG_VSX
        case PTRACE_GETVSRREGS:
                return copy_regset_to_user(child, &user_ppc_native_view,
                                           REGSET_VSX,
                                           0, 32 * sizeof(double),
-                                          (void __user *) data);
+                                          datavp);
 
        case PTRACE_SETVSRREGS:
                return copy_regset_from_user(child, &user_ppc_native_view,
                                             REGSET_VSX,
                                             0, 32 * sizeof(double),
-                                            (const void __user *) data);
+                                            datavp);
 #endif
 #ifdef CONFIG_SPE
        case PTRACE_GETEVRREGS:
                /* Get the child spe register state. */
                return copy_regset_to_user(child, &user_ppc_native_view,
                                           REGSET_SPE, 0, 35 * sizeof(u32),
-                                          (void __user *) data);
+                                          datavp);
 
        case PTRACE_SETEVRREGS:
                /* Set the child spe register state. */
                return copy_regset_from_user(child, &user_ppc_native_view,
                                             REGSET_SPE, 0, 35 * sizeof(u32),
-                                            (const void __user *) data);
+                                            datavp);
 #endif
 
        /* Old reverse args ptrace calls */
index b0848b462bbceb6678a071889cc774e40f77cafd..e7450bdbe83a9380264fc149c4831b587226cd36 100644 (file)
@@ -62,7 +62,7 @@ void __kunmap_atomic(void *kvaddr)
                return;
        }
 
-       type = kmap_atomic_idx_pop();
+       type = kmap_atomic_idx();
 
 #ifdef CONFIG_DEBUG_HIGHMEM
        {
@@ -79,6 +79,8 @@ void __kunmap_atomic(void *kvaddr)
                local_flush_tlb_page(NULL, vaddr);
        }
 #endif
+
+       kmap_atomic_idx_pop();
        pagefault_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
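
The fix above narrows a race: the index is now popped only after the
mapping has been invalidated, so a nested kmap_atomic() from an interrupt
cannot reuse the slot while its stale TLB entry survives.  The corrected
teardown order as a sketch (kpte_clear_flush_sketch is a hypothetical
stand-in for the PTE-clear + TLB-flush step):

	int type = kmap_atomic_idx();		/* peek, do not pop yet */

	kpte_clear_flush_sketch(type, vaddr);	/* invalidate first */
	kmap_atomic_idx_pop();			/* slot reusable only now */
	pagefault_enable();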
index 412763672d23faa754e26d5b655cccd21046f4ad..9725369d432a752bcf12436f708a64fcd1c540e3 100644 (file)
@@ -50,6 +50,7 @@
 #define RIO_ATMU_REGS_OFFSET   0x10c00
 #define RIO_P_MSG_REGS_OFFSET  0x11000
 #define RIO_S_MSG_REGS_OFFSET  0x13000
+#define RIO_GCCSR              0x13c
 #define RIO_ESCSR              0x158
 #define RIO_CCSR               0x15c
 #define RIO_LTLEDCSR           0x0608
@@ -87,6 +88,9 @@
 #define RIO_IPWSR_PWD          0x00000008
 #define RIO_IPWSR_PWB          0x00000004
 
+#define RIO_EPWISR_PINT                0x80000000
+#define RIO_EPWISR_PW          0x00000001
+
 #define RIO_MSG_DESC_SIZE      32
 #define RIO_MSG_BUFFER_SIZE    4096
 #define RIO_MIN_TX_RING_SIZE   2
@@ -1082,18 +1086,12 @@ fsl_rio_port_write_handler(int irq, void *dev_instance)
        struct rio_priv *priv = port->priv;
        u32 epwisr, tmp;
 
-       ipwmr = in_be32(&priv->msg_regs->pwmr);
-       ipwsr = in_be32(&priv->msg_regs->pwsr);
-
        epwisr = in_be32(priv->regs_win + RIO_EPWISR);
-       if (epwisr & 0x80000000) {
-               tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
-               pr_info("RIO_LTLEDCSR = 0x%x\n", tmp);
-               out_be32(priv->regs_win + RIO_LTLEDCSR, 0);
-       }
+       if (!(epwisr & RIO_EPWISR_PW))
+               goto pw_done;
 
-       if (!(epwisr & 0x00000001))
-               return IRQ_HANDLED;
+       ipwmr = in_be32(&priv->msg_regs->pwmr);
+       ipwsr = in_be32(&priv->msg_regs->pwsr);
 
 #ifdef DEBUG_PW
        pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr);
@@ -1109,20 +1107,6 @@ fsl_rio_port_write_handler(int irq, void *dev_instance)
                pr_debug(" PWB");
        pr_debug(" )\n");
 #endif
-       out_be32(&priv->msg_regs->pwsr,
-                ipwsr & (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD));
-
-       if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) {
-               priv->port_write_msg.err_count++;
-               pr_info("RIO: Port-Write Transaction Err (%d)\n",
-                        priv->port_write_msg.err_count);
-       }
-       if (ipwsr & RIO_IPWSR_PWD) {
-               priv->port_write_msg.discard_count++;
-               pr_info("RIO: Port Discarded Port-Write Msg(s) (%d)\n",
-                        priv->port_write_msg.discard_count);
-       }
-
        /* Schedule deferred processing if PW was received */
        if (ipwsr & RIO_IPWSR_QFI) {
                /* Save PW message (if there is room in FIFO),
@@ -1134,16 +1118,43 @@ fsl_rio_port_write_handler(int irq, void *dev_instance)
                                 RIO_PW_MSG_SIZE);
                } else {
                        priv->port_write_msg.discard_count++;
-                       pr_info("RIO: ISR Discarded Port-Write Msg(s) (%d)\n",
+                       pr_debug("RIO: ISR Discarded Port-Write Msg(s) (%d)\n",
                                 priv->port_write_msg.discard_count);
                }
+               /* Clear interrupt and issue Clear Queue command. This allows
+                * another port-write to be received.
+                */
+               out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_QFI);
+               out_be32(&priv->msg_regs->pwmr, ipwmr | RIO_IPWMR_CQ);
+
                schedule_work(&priv->pw_work);
        }
 
-       /* Issue Clear Queue command. This allows another
-        * port-write to be received.
-        */
-       out_be32(&priv->msg_regs->pwmr, ipwmr | RIO_IPWMR_CQ);
+       if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) {
+               priv->port_write_msg.err_count++;
+               pr_debug("RIO: Port-Write Transaction Err (%d)\n",
+                        priv->port_write_msg.err_count);
+               /* Clear Transaction Error: port-write controller should be
+                * disabled when clearing this error
+                */
+               out_be32(&priv->msg_regs->pwmr, ipwmr & ~RIO_IPWMR_PWE);
+               out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_TE);
+               out_be32(&priv->msg_regs->pwmr, ipwmr);
+       }
+
+       if (ipwsr & RIO_IPWSR_PWD) {
+               priv->port_write_msg.discard_count++;
+               pr_debug("RIO: Port Discarded Port-Write Msg(s) (%d)\n",
+                        priv->port_write_msg.discard_count);
+               out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_PWD);
+       }
+
+pw_done:
+       if (epwisr & RIO_EPWISR_PINT) {
+               tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
+               pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
+               out_be32(priv->regs_win + RIO_LTLEDCSR, 0);
+       }
 
        return IRQ_HANDLED;
 }
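The reworked handler checks the summary register first: if RIO_EPWISR_PW is clear it skips the inbound port-write block entirely, and each condition (queue full, transaction error, discarded message) is now acknowledged by writing its own status bit back instead of clearing everything up front. A minimal sketch of the resulting control flow (illustrative only, not the literal driver code):

    static irqreturn_t pw_isr_sketch(struct rio_priv *priv)
    {
            u32 epwisr = in_be32(priv->regs_win + RIO_EPWISR);

            if (epwisr & RIO_EPWISR_PW) {
                    /* inbound port-write pending: read IPWMR/IPWSR, then
                     * handle QFI, TE and PWD separately, acking each
                     * status bit individually */
            }
            if (epwisr & RIO_EPWISR_PINT) {
                    /* local port interrupt: log and clear RIO_LTLEDCSR */
                    out_be32(priv->regs_win + RIO_LTLEDCSR, 0);
            }
            return IRQ_HANDLED;
    }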
@@ -1461,6 +1472,7 @@ int fsl_rio_setup(struct platform_device *dev)
        port->host_deviceid = fsl_rio_get_hdid(port->id);
 
        port->priv = priv;
+       port->phys_efptr = 0x100;
        rio_register_mport(port);
 
        priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1);
@@ -1508,6 +1520,12 @@ int fsl_rio_setup(struct platform_device *dev)
        dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n",
                        port->sys_size ? 65536 : 256);
 
+       if (port->host_deviceid >= 0)
+               out_be32(priv->regs_win + RIO_GCCSR, RIO_PORT_GEN_HOST |
+                       RIO_PORT_GEN_MASTER | RIO_PORT_GEN_DISCOVERED);
+       else
+               out_be32(priv->regs_win + RIO_GCCSR, 0x00000000);
+
        priv->atmu_regs = (struct rio_atmu_regs *)(priv->regs_win
                                        + RIO_ATMU_REGS_OFFSET);
        priv->maint_atmu_regs = priv->atmu_regs + 1;
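Two setup changes ride along: phys_efptr = 0x100 tells the generic RapidIO core where this controller's extended-features block starts in config space, and RIO_GCCSR is programmed according to the port's role, advertising host/master/discovered when the port owns a valid host device ID and clearing the register (agent mode) otherwise.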
index 8b1a52a137c5b2558bd9223cf87ac16708aae7c7..40e2ab0fa3f0bbb4416e10d3c5314a7f1e03dd16 100644 (file)
@@ -73,18 +73,18 @@ cputime64_to_jiffies64(cputime64_t cputime)
 }
 
 /*
- * Convert cputime to milliseconds and back.
+ * Convert cputime to microseconds and back.
  */
 static inline unsigned int
-cputime_to_msecs(const cputime_t cputime)
+cputime_to_usecs(const cputime_t cputime)
 {
-       return cputime_div(cputime, 4096000);
+       return cputime_div(cputime, 4096);
 }
 
 static inline cputime_t
-msecs_to_cputime(const unsigned int m)
+usecs_to_cputime(const unsigned int m)
 {
-       return (cputime_t) m * 4096000;
+       return (cputime_t) m * 4096;
 }
 
 /*
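The ia64 cputime interface is renamed from millisecond to microsecond granularity; the visible factor change (4096000 to 4096) means cputime_t here carries 4096 units per microsecond. A quick round trip under that reading:

    cputime_t ct = usecs_to_cputime(250);    /* 250 * 4096 = 1024000 */
    unsigned int us = cputime_to_usecs(ct);  /* back to 250 */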
index 83339d33c4b127b0aaf2e36a7e56aef91b57bbfa..019bb714db49f758d4fef00f5671c218cab171a4 100644 (file)
@@ -343,7 +343,8 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
        return __poke_user(child, addr, data);
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        ptrace_area parea; 
        int copied, ret;
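This hunk is one instance of a tree-wide change: arch_ptrace() now takes addr and data as unsigned long on every architecture, matching what the syscall actually delivers. Besides removing scattered (unsigned long) casts, it lets bounds checks drop their negative half, as the tile and x86 hunks below show:

    /* old: signed addr needed two comparisons */
    if (addr < 0 || addr >= PTREGS_SIZE)
            break;
    /* new: one unsigned comparison rejects the same values, since
     * former negatives wrap to values far above PTREGS_SIZE */
    if (addr >= PTREGS_SIZE)
            break;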
index 174c6422b096e9cb9c183232a8809541308929b9..55836188b217c170c3141558d843e1d4259bf165 100644 (file)
@@ -325,7 +325,8 @@ void ptrace_disable(struct task_struct *child)
 }
 
 long
-arch_ptrace(struct task_struct *child, long request, long addr, long data)
+arch_ptrace(struct task_struct *child, long request,
+           unsigned long addr, unsigned long data)
 {
        int ret;
        unsigned long __user *datap = (void __user *)data;
@@ -335,14 +336,14 @@ arch_ptrace(struct task_struct *child, long request, long addr, long data)
                ret = copy_regset_to_user(child, &user_score_native_view,
                                                REGSET_GENERAL,
                                                0, sizeof(struct pt_regs),
-                                               (void __user *)datap);
+                                               datap);
                break;
 
        case PTRACE_SETREGS:
                ret = copy_regset_from_user(child, &user_score_native_view,
                                                REGSET_GENERAL,
                                                0, sizeof(struct pt_regs),
-                                               (const void __user *)datap);
+                                               datap);
                break;
 
        default:
index 2cd42b58cb204993a08b67d21266fcbf294db818..90a15d29feebdceb4aa019bc7cc8e4f649f08721 100644 (file)
@@ -365,9 +365,9 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
        return &user_sh_native_view;
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
-       struct user * dummy = NULL;
        unsigned long __user *datap = (unsigned long __user *)data;
        int ret;
 
@@ -383,17 +383,20 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 
                if (addr < sizeof(struct pt_regs))
                        tmp = get_stack_long(child, addr);
-               else if (addr >= (long) &dummy->fpu &&
-                        addr < (long) &dummy->u_fpvalid) {
+               else if (addr >= offsetof(struct user, fpu) &&
+                        addr < offsetof(struct user, u_fpvalid)) {
                        if (!tsk_used_math(child)) {
-                               if (addr == (long)&dummy->fpu.fpscr)
+                               if (addr == offsetof(struct user, fpu.fpscr))
                                        tmp = FPSCR_INIT;
                                else
                                        tmp = 0;
-                       } else
-                               tmp = ((long *)child->thread.xstate)
-                                       [(addr - (long)&dummy->fpu) >> 2];
-               } else if (addr == (long) &dummy->u_fpvalid)
+                       } else {
+                               unsigned long index;
+                               index = addr - offsetof(struct user, fpu);
+                               tmp = ((unsigned long *)child->thread.xstate)
+                                       [index >> 2];
+                       }
+               } else if (addr == offsetof(struct user, u_fpvalid))
                        tmp = !!tsk_used_math(child);
                else if (addr == PT_TEXT_ADDR)
                        tmp = child->mm->start_code;
@@ -417,13 +420,15 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 
                if (addr < sizeof(struct pt_regs))
                        ret = put_stack_long(child, addr, data);
-               else if (addr >= (long) &dummy->fpu &&
-                        addr < (long) &dummy->u_fpvalid) {
+               else if (addr >= offsetof(struct user, fpu) &&
+                        addr < offsetof(struct user, u_fpvalid)) {
+                       unsigned long index;
+                       index = addr - offsetof(struct user, fpu);
                        set_stopped_child_used_math(child);
-                       ((long *)child->thread.xstate)
-                               [(addr - (long)&dummy->fpu) >> 2] = data;
+                       ((unsigned long *)child->thread.xstate)
+                               [index >> 2] = data;
                        ret = 0;
-               } else if (addr == (long) &dummy->u_fpvalid) {
+               } else if (addr == offsetof(struct user, u_fpvalid)) {
                        conditional_stopped_child_used_math(data, child);
                        ret = 0;
                }
@@ -433,35 +438,35 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                return copy_regset_to_user(child, &user_sh_native_view,
                                           REGSET_GENERAL,
                                           0, sizeof(struct pt_regs),
-                                          (void __user *)data);
+                                          datap);
        case PTRACE_SETREGS:
                return copy_regset_from_user(child, &user_sh_native_view,
                                             REGSET_GENERAL,
                                             0, sizeof(struct pt_regs),
-                                            (const void __user *)data);
+                                            datap);
 #ifdef CONFIG_SH_FPU
        case PTRACE_GETFPREGS:
                return copy_regset_to_user(child, &user_sh_native_view,
                                           REGSET_FPU,
                                           0, sizeof(struct user_fpu_struct),
-                                          (void __user *)data);
+                                          datap);
        case PTRACE_SETFPREGS:
                return copy_regset_from_user(child, &user_sh_native_view,
                                             REGSET_FPU,
                                             0, sizeof(struct user_fpu_struct),
-                                            (const void __user *)data);
+                                            datap);
 #endif
 #ifdef CONFIG_SH_DSP
        case PTRACE_GETDSPREGS:
                return copy_regset_to_user(child, &user_sh_native_view,
                                           REGSET_DSP,
                                           0, sizeof(struct pt_dspregs),
-                                          (void __user *)data);
+                                          datap);
        case PTRACE_SETDSPREGS:
                return copy_regset_from_user(child, &user_sh_native_view,
                                             REGSET_DSP,
                                             0, sizeof(struct pt_dspregs),
-                                            (const void __user *)data);
+                                            datap);
 #endif
        default:
                ret = ptrace_request(child, request, addr, data);
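The sh conversion also swaps the old null-pointer trick for computing member offsets (taking the address of a field through struct user *dummy = NULL, which is undefined behaviour) for the standard offsetof() macro. In spirit:

    /* old: off = (long)&dummy->fpu;  with dummy == NULL */
    /* new, well-defined C: */
    unsigned long off   = offsetof(struct user, fpu);
    unsigned long index = (addr - off) >> 2;  /* word index into the FPU area */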
index e0fb065914aa18ce7fefcb38b54aa06f327a3431..4436eacddb1536e6aaf5a00299a659dc12a210c6 100644 (file)
@@ -383,9 +383,11 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
        return &user_sh64_native_view;
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        int ret;
+       unsigned long __user *datap = (unsigned long __user *) data;
 
        switch (request) {
        /* read the word at location addr in the USER area. */
@@ -400,13 +402,15 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                        tmp = get_stack_long(child, addr);
                else if ((addr >= offsetof(struct user, fpu)) &&
                         (addr <  offsetof(struct user, u_fpvalid))) {
-                       tmp = get_fpu_long(child, addr - offsetof(struct user, fpu));
+                       unsigned long index;
+                       index = addr - offsetof(struct user, fpu);
+                       tmp = get_fpu_long(child, index);
                } else if (addr == offsetof(struct user, u_fpvalid)) {
                        tmp = !!tsk_used_math(child);
                } else {
                        break;
                }
-               ret = put_user(tmp, (unsigned long *)data);
+               ret = put_user(tmp, datap);
                break;
        }
 
@@ -437,7 +441,9 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                }
                else if ((addr >= offsetof(struct user, fpu)) &&
                         (addr <  offsetof(struct user, u_fpvalid))) {
-                       ret = put_fpu_long(child, addr - offsetof(struct user, fpu), data);
+                       unsigned long index;
+                       index = addr - offsetof(struct user, fpu);
+                       ret = put_fpu_long(child, index, data);
                }
                break;
 
@@ -445,23 +451,23 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                return copy_regset_to_user(child, &user_sh64_native_view,
                                           REGSET_GENERAL,
                                           0, sizeof(struct pt_regs),
-                                          (void __user *)data);
+                                          datap);
        case PTRACE_SETREGS:
                return copy_regset_from_user(child, &user_sh64_native_view,
                                             REGSET_GENERAL,
                                             0, sizeof(struct pt_regs),
-                                            (const void __user *)data);
+                                            datap);
 #ifdef CONFIG_SH_FPU
        case PTRACE_GETFPREGS:
                return copy_regset_to_user(child, &user_sh64_native_view,
                                           REGSET_FPU,
                                           0, sizeof(struct user_fpu_struct),
-                                          (void __user *)data);
+                                          datap);
        case PTRACE_SETFPREGS:
                return copy_regset_from_user(child, &user_sh64_native_view,
                                             REGSET_FPU,
                                             0, sizeof(struct user_fpu_struct),
-                                            (const void __user *)data);
+                                            datap);
 #endif
        default:
                ret = ptrace_request(child, request, addr, data);
@@ -471,7 +477,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
        return ret;
 }
 
-asmlinkage int sh64_ptrace(long request, long pid, long addr, long data)
+asmlinkage int sh64_ptrace(long request, long pid,
+                          unsigned long addr, unsigned long data)
 {
 #define WPC_DBRMODE 0x0d104008
        static unsigned long first_call;
index 2889574608db72f36b3ae1ea61309f77e22e282f..c2ced21c9dc19a7c284960d6a180f30345e5120f 100644 (file)
@@ -207,6 +207,21 @@ _memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
 
 #define memset_io(d,c,sz)      _memset_io(d,c,sz)
 
+static inline void
+_sbus_memcpy_fromio(void *dst, const volatile void __iomem *src,
+                   __kernel_size_t n)
+{
+       char *d = dst;
+
+       while (n--) {
+               char tmp = sbus_readb(src);
+               *d++ = tmp;
+               src++;
+       }
+}
+
+#define sbus_memcpy_fromio(d, s, sz)   _sbus_memcpy_fromio(d, s, sz)
+
 static inline void
 _memcpy_fromio(void *dst, const volatile void __iomem *src, __kernel_size_t n)
 {
@@ -221,6 +236,22 @@ _memcpy_fromio(void *dst, const volatile void __iomem *src, __kernel_size_t n)
 
 #define memcpy_fromio(d,s,sz)  _memcpy_fromio(d,s,sz)
 
+static inline void
+_sbus_memcpy_toio(volatile void __iomem *dst, const void *src,
+                 __kernel_size_t n)
+{
+       const char *s = src;
+       volatile void __iomem *d = dst;
+
+       while (n--) {
+               char tmp = *s++;
+               sbus_writeb(tmp, d);
+               d++;
+       }
+}
+
+#define sbus_memcpy_toio(d, s, sz)     _sbus_memcpy_toio(d, s, sz)
+
 static inline void
 _memcpy_toio(volatile void __iomem *dst, const void *src, __kernel_size_t n)
 {
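The new sbus_memcpy_fromio()/sbus_memcpy_toio() helpers mirror memcpy_fromio()/memcpy_toio() but issue sbus_readb()/sbus_writeb() accesses, giving SBUS drivers a byte-wise bulk copy. A hypothetical use (buf, regs and DATA_OFF are made-up names):

    u8 buf[64];
    sbus_memcpy_fromio(buf, regs + DATA_OFF, sizeof(buf));
    /* ... inspect or modify buf ... */
    sbus_memcpy_toio(regs + DATA_OFF, buf, sizeof(buf));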
index 9517d063c79c604d99d21967981f07b92cb1c70c..9c8965415f0a5187cd550435341029f553bdbfe2 100644 (file)
@@ -418,6 +418,21 @@ _memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
 
 #define memset_io(d,c,sz)      _memset_io(d,c,sz)
 
+static inline void
+_sbus_memcpy_fromio(void *dst, const volatile void __iomem *src,
+                   __kernel_size_t n)
+{
+       char *d = dst;
+
+       while (n--) {
+               char tmp = sbus_readb(src);
+               *d++ = tmp;
+               src++;
+       }
+}
+
+#define sbus_memcpy_fromio(d, s, sz)   _sbus_memcpy_fromio(d, s, sz)
+
 static inline void
 _memcpy_fromio(void *dst, const volatile void __iomem *src, __kernel_size_t n)
 {
@@ -432,6 +447,22 @@ _memcpy_fromio(void *dst, const volatile void __iomem *src, __kernel_size_t n)
 
 #define memcpy_fromio(d,s,sz)  _memcpy_fromio(d,s,sz)
 
+static inline void
+_sbus_memcpy_toio(volatile void __iomem *dst, const void *src,
+                 __kernel_size_t n)
+{
+       const char *s = src;
+       volatile void __iomem *d = dst;
+
+       while (n--) {
+               char tmp = *s++;
+               sbus_writeb(tmp, d);
+               d++;
+       }
+}
+
+#define sbus_memcpy_toio(d, s, sz)     _sbus_memcpy_toio(d, s, sz)
+
 static inline void
 _memcpy_toio(volatile void __iomem *dst, const void *src, __kernel_size_t n)
 {
index 5312782f0b5e426418323f2371cedf2055528693..948b686ec0894a89e30bdb1716626934a570d2fb 100644 (file)
@@ -38,7 +38,7 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
  * types on sparc64.  However, it requires that the device
  * can drive enough of the 64 bits.
  */
-#define PCI64_REQUIRED_MASK    (~(dma64_addr_t)0)
+#define PCI64_REQUIRED_MASK    (~(u64)0)
 #define PCI64_ADDR_BASE                0xfffc000000000000UL
 
 #ifdef CONFIG_PCI
index e608f397e11f68db6b980d0d9f97ca55416c41dd..27b9e93d012168e8084d2af5e9b111d458e47ba8 100644 (file)
@@ -323,18 +323,35 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
        return &user_sparc32_view;
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+struct fps {
+       unsigned long regs[32];
+       unsigned long fsr;
+       unsigned long flags;
+       unsigned long extra;
+       unsigned long fpqd;
+       struct fq {
+               unsigned long *insnaddr;
+               unsigned long insn;
+       } fpq[16];
+};
+
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        unsigned long addr2 = current->thread.kregs->u_regs[UREG_I4];
+       void __user *addr2p;
        const struct user_regset_view *view;
+       struct pt_regs __user *pregs;
+       struct fps __user *fps;
        int ret;
 
        view = task_user_regset_view(current);
+       addr2p = (void __user *) addr2;
+       pregs = (struct pt_regs __user *) addr;
+       fps = (struct fps __user *) addr;
 
        switch(request) {
        case PTRACE_GETREGS: {
-               struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
-
                ret = copy_regset_to_user(child, view, REGSET_GENERAL,
                                          32 * sizeof(u32),
                                          4 * sizeof(u32),
@@ -348,8 +365,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
        }
 
        case PTRACE_SETREGS: {
-               struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
-
                ret = copy_regset_from_user(child, view, REGSET_GENERAL,
                                            32 * sizeof(u32),
                                            4 * sizeof(u32),
@@ -363,19 +378,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
        }
 
        case PTRACE_GETFPREGS: {
-               struct fps {
-                       unsigned long regs[32];
-                       unsigned long fsr;
-                       unsigned long flags;
-                       unsigned long extra;
-                       unsigned long fpqd;
-                       struct fq {
-                               unsigned long *insnaddr;
-                               unsigned long insn;
-                       } fpq[16];
-               };
-               struct fps __user *fps = (struct fps __user *) addr;
-
                ret = copy_regset_to_user(child, view, REGSET_FP,
                                          0 * sizeof(u32),
                                          32 * sizeof(u32),
@@ -397,19 +399,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
        }
 
        case PTRACE_SETFPREGS: {
-               struct fps {
-                       unsigned long regs[32];
-                       unsigned long fsr;
-                       unsigned long flags;
-                       unsigned long extra;
-                       unsigned long fpqd;
-                       struct fq {
-                               unsigned long *insnaddr;
-                               unsigned long insn;
-                       } fpq[16];
-               };
-               struct fps __user *fps = (struct fps __user *) addr;
-
                ret = copy_regset_from_user(child, view, REGSET_FP,
                                            0 * sizeof(u32),
                                            32 * sizeof(u32),
@@ -424,8 +413,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 
        case PTRACE_READTEXT:
        case PTRACE_READDATA:
-               ret = ptrace_readdata(child, addr,
-                                     (void __user *) addr2, data);
+               ret = ptrace_readdata(child, addr, addr2p, data);
 
                if (ret == data)
                        ret = 0;
@@ -435,8 +423,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 
        case PTRACE_WRITETEXT:
        case PTRACE_WRITEDATA:
-               ret = ptrace_writedata(child, (void __user *) addr2,
-                                      addr, data);
+               ret = ptrace_writedata(child, addr2p, addr, data);
 
                if (ret == data)
                        ret = 0;
index aa90da08bf61c84d54876e6a0e7962ef87b374c7..9ccc812bc09e66ecc61500842282be1dbfeed52e 100644 (file)
@@ -969,16 +969,19 @@ struct fps {
        unsigned long fsr;
 };
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        const struct user_regset_view *view = task_user_regset_view(current);
        unsigned long addr2 = task_pt_regs(current)->u_regs[UREG_I4];
        struct pt_regs __user *pregs;
        struct fps __user *fps;
+       void __user *addr2p;
        int ret;
 
-       pregs = (struct pt_regs __user *) (unsigned long) addr;
-       fps = (struct fps __user *) (unsigned long) addr;
+       pregs = (struct pt_regs __user *) addr;
+       fps = (struct fps __user *) addr;
+       addr2p = (void __user *) addr2;
 
        switch (request) {
        case PTRACE_PEEKUSR:
@@ -1029,8 +1032,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 
        case PTRACE_READTEXT:
        case PTRACE_READDATA:
-               ret = ptrace_readdata(child, addr,
-                                     (char __user *)addr2, data);
+               ret = ptrace_readdata(child, addr, addr2p, data);
                if (ret == data)
                        ret = 0;
                else if (ret >= 0)
@@ -1039,8 +1041,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 
        case PTRACE_WRITETEXT:
        case PTRACE_WRITEDATA:
-               ret = ptrace_writedata(child, (char __user *) addr2,
-                                      addr, data);
+               ret = ptrace_writedata(child, addr2p, addr, data);
                if (ret == data)
                        ret = 0;
                else if (ret >= 0)
index 5e50c09b7dcea796aceea1b88fbd3c0338955abf..4730eac0747b50491876a0bc7e3c2c348c02b067 100644 (file)
@@ -75,7 +75,7 @@ void __kunmap_atomic(void *kvaddr)
                return;
        }
 
-       type = kmap_atomic_idx_pop();
+       type = kmap_atomic_idx();
 
 #ifdef CONFIG_DEBUG_HIGHMEM
        {
@@ -104,6 +104,8 @@ void __kunmap_atomic(void *kvaddr)
 #endif
        }
 #endif
+
+       kmap_atomic_idx_pop();
        pagefault_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
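The same reordering appears on every highmem architecture in this merge (arm here, then tile, x86 highmem and iomap): kmap_atomic_idx() merely peeks at the top of the per-CPU index stack, and kmap_atomic_idx_pop() is deferred until after the mapping has been torn down. Popping first opened a window in which an interrupt could reuse the slot while the old translation was still live:

    type = kmap_atomic_idx();                /* peek: slot stays reserved */
    idx  = type + KM_TYPE_NR * smp_processor_id();
    kpte_clear_flush(kmap_pte - idx, vaddr); /* tear down the mapping */
    kmap_atomic_idx_pop();                   /* release only after flush */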
index 89cfee07efa98fc9dc78e42c0f6dc3cb36a651e2..7e8c2844e093af2112862a206bc21a6995b63ad1 100644 (file)
@@ -58,6 +58,9 @@ config ARCH_SUPPORTS_OPTIMIZED_INLINING
 config ARCH_PHYS_ADDR_T_64BIT
        def_bool y
 
+config ARCH_DMA_ADDR_T_64BIT
+       def_bool y
+
 config LOCKDEP_SUPPORT
        def_bool y
 
index 5b20c2874d51e84da57c151f1244a42e386952bd..9cd29884c09f2bfec1cddbacb67579bbd1e821c6 100644 (file)
@@ -45,7 +45,8 @@ void ptrace_disable(struct task_struct *child)
        clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        unsigned long __user *datap = (long __user __force *)data;
        unsigned long tmp;
@@ -57,7 +58,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
        switch (request) {
 
        case PTRACE_PEEKUSR:  /* Read register from pt_regs. */
-               if (addr < 0 || addr >= PTREGS_SIZE)
+               if (addr >= PTREGS_SIZE)
                        break;
                childreg = (char *)task_pt_regs(child) + addr;
 #ifdef CONFIG_COMPAT
@@ -76,7 +77,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                break;
 
        case PTRACE_POKEUSR:  /* Write register in pt_regs. */
-               if (addr < 0 || addr >= PTREGS_SIZE)
+               if (addr >= PTREGS_SIZE)
                        break;
                childreg = (char *)task_pt_regs(child) + addr;
 #ifdef CONFIG_COMPAT
@@ -98,7 +99,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                if (!access_ok(VERIFY_WRITE, datap, PTREGS_SIZE))
                        break;
                childregs = (long *)task_pt_regs(child);
-               for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) {
+               for (i = 0; i < sizeof(struct pt_regs)/sizeof(unsigned long);
+                               ++i) {
                        ret = __put_user(childregs[i], &datap[i]);
                        if (ret != 0)
                                break;
@@ -109,7 +111,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                if (!access_ok(VERIFY_READ, datap, PTREGS_SIZE))
                        break;
                childregs = (long *)task_pt_regs(child);
-               for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) {
+               for (i = 0; i < sizeof(struct pt_regs)/sizeof(unsigned long);
+                               ++i) {
                        ret = __get_user(childregs[i], &datap[i]);
                        if (ret != 0)
                                break;
index f3a50e74f9a4a0768058d81564c9da9c625938a5..ae51cad12da0d6b445666d1796925816b8e7e285 100644 (file)
@@ -30,8 +30,6 @@
 #include <linux/timex.h>
 #include <asm/setup.h>
 #include <asm/sections.h>
-#include <asm/sections.h>
-#include <asm/cacheflush.h>
 #include <asm/cacheflush.h>
 #include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
index 8ef6595e162c1348ca65aa21e349bd2d47b81b42..abb57331cf6e1b25fdeccfd295e25ea7464ebbf5 100644 (file)
@@ -241,7 +241,7 @@ void __kunmap_atomic(void *kvaddr)
                pte_t pteval = *pte;
                int idx, type;
 
-               type = kmap_atomic_idx_pop();
+               type = kmap_atomic_idx();
                idx = type + KM_TYPE_NR*smp_processor_id();
 
                /*
@@ -252,6 +252,7 @@ void __kunmap_atomic(void *kvaddr)
                BUG_ON(!pte_present(pteval) && !pte_migrating(pteval));
                kmap_atomic_unregister(pte_page(pteval), vaddr);
                kpte_clear_flush(pte, vaddr);
+               kmap_atomic_idx_pop();
        } else {
                /* Must be a lowmem page */
                BUG_ON(vaddr < PAGE_OFFSET);
index e0510496596c9a047f3fd19860716906caf14f69..a5e33f29bbeb7ef71e9a25697e5c8bfd3e664370 100644 (file)
@@ -42,10 +42,12 @@ void ptrace_disable(struct task_struct *child)
 extern int peek_user(struct task_struct * child, long addr, long data);
 extern int poke_user(struct task_struct * child, long addr, long data);
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        int i, ret;
-       unsigned long __user *p = (void __user *)(unsigned long)data;
+       unsigned long __user *p = (void __user *)data;
+       void __user *vp = p;
 
        switch (request) {
        /* read word at location addr. */
@@ -107,24 +109,20 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 #endif
 #ifdef PTRACE_GETFPREGS
        case PTRACE_GETFPREGS: /* Get the child FPU state. */
-               ret = get_fpregs((struct user_i387_struct __user *) data,
-                                child);
+               ret = get_fpregs(vp, child);
                break;
 #endif
 #ifdef PTRACE_SETFPREGS
        case PTRACE_SETFPREGS: /* Set the child FPU state. */
-               ret = set_fpregs((struct user_i387_struct __user *) data,
-                                child);
+               ret = set_fpregs(vp, child);
                break;
 #endif
        case PTRACE_GET_THREAD_AREA:
-               ret = ptrace_get_thread_area(child, addr,
-                                            (struct user_desc __user *) data);
+               ret = ptrace_get_thread_area(child, addr, vp);
                break;
 
        case PTRACE_SET_THREAD_AREA:
-               ret = ptrace_set_thread_area(child, addr,
-                                            (struct user_desc __user *) data);
+               ret = ptrace_set_thread_area(child, addr, vp);
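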
                break;
 
        case PTRACE_FAULTINFO: {
@@ -134,7 +132,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                 * On i386, ptrace_faultinfo is smaller!
                 */
                ret = copy_to_user(p, &child->thread.arch.faultinfo,
-                                  sizeof(struct ptrace_faultinfo));
+                                  sizeof(struct ptrace_faultinfo)) ?
+                       -EIO : 0;
                break;
        }
 
@@ -158,7 +157,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 #ifdef PTRACE_ARCH_PRCTL
        case PTRACE_ARCH_PRCTL:
                /* XXX Calls ptrace on the host - needs some SMP thinking */
-               ret = arch_prctl(child, data, (void *) addr);
+               ret = arch_prctl(child, data, (void __user *) addr);
                break;
 #endif
        default:
index c9b176534d65bd15d97a86769cf978c510b7c475..d23b2d3ea3841cff23de5ed43edbcc2b1fe68375 100644 (file)
@@ -203,8 +203,8 @@ int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
                                     (unsigned long *) &fpregs);
 }
 
-long subarch_ptrace(struct task_struct *child, long request, long addr,
-                   long data)
+long subarch_ptrace(struct task_struct *child, long request,
+                   unsigned long addr, unsigned long data)
 {
        return -EIO;
 }
index f3458d7d1c5ac4a39d5d8aab4fe94c80bfa593e3..f43613643cdb71bf3100065830471965cc246541 100644 (file)
@@ -175,19 +175,18 @@ int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
        return restore_fp_registers(userspace_pid[cpu], fpregs);
 }
 
-long subarch_ptrace(struct task_struct *child, long request, long addr,
-                   long data)
+long subarch_ptrace(struct task_struct *child, long request,
+                   unsigned long addr, unsigned long data)
 {
        int ret = -EIO;
+       void __user *datap = (void __user *) data;
 
        switch (request) {
        case PTRACE_GETFPXREGS: /* Get the child FPU state. */
-               ret = get_fpregs((struct user_i387_struct __user *) data,
-                                child);
+               ret = get_fpregs(datap, child);
                break;
        case PTRACE_SETFPXREGS: /* Set the child FPU state. */
-               ret = set_fpregs((struct user_i387_struct __user *) data,
-                                child);
+               ret = set_fpregs(datap, child);
                break;
        }
 
index dfabfefc21c48981c456faa683108dd9b6b7a9a8..299fbc86f570ba2c39ff34b3711a6c0289b4cdd9 100644 (file)
@@ -347,6 +347,7 @@ endif
 
 config X86_VSMP
        bool "ScaleMP vSMP"
+       select PARAVIRT_GUEST
        select PARAVIRT
        depends on X86_64 && PCI
        depends on X86_EXTENDED_PLATFORM
index 0bf5b00836502fb6512609bbad05fbc3eaa96320..13b0ebaa512f77764e06956632b32755f8ca2bfa 100644 (file)
@@ -21,10 +21,8 @@ static inline int irq_canonicalize(int irq)
 
 #ifdef CONFIG_X86_32
 extern void irq_ctx_init(int cpu);
-extern void irq_ctx_exit(int cpu);
 #else
 # define irq_ctx_init(cpu) do { } while (0)
-# define irq_ctx_exit(cpu) do { } while (0)
 #endif
 
 #define __ARCH_HAS_DO_SOFTIRQ
index 83c4bb1d917d1f3839e350cc563d2d044048e7b2..3ea3dc4870474bdbbb7d0a14e1c43e52c428ddd4 100644 (file)
 #define MSR_AMD64_IBSDCLINAD           0xc0011038
 #define MSR_AMD64_IBSDCPHYSAD          0xc0011039
 #define MSR_AMD64_IBSCTL               0xc001103a
+#define MSR_AMD64_IBSBRTARGET          0xc001103b
 
 /* Fam 10h MSRs */
 #define MSR_FAM10H_MMIO_CONF_BASE      0xc0010058
index 6e742cc4251b49b2474830107da0378951c874be..550e26b1dbb3593f324910f0197402966ae91299 100644 (file)
@@ -111,17 +111,18 @@ union cpuid10_edx {
 #define X86_PMC_IDX_FIXED_BTS                          (X86_PMC_IDX_FIXED + 16)
 
 /* IbsFetchCtl bits/masks */
-#define IBS_FETCH_RAND_EN              (1ULL<<57)
-#define IBS_FETCH_VAL                  (1ULL<<49)
-#define IBS_FETCH_ENABLE               (1ULL<<48)
-#define IBS_FETCH_CNT                  0xFFFF0000ULL
-#define IBS_FETCH_MAX_CNT              0x0000FFFFULL
+#define IBS_FETCH_RAND_EN      (1ULL<<57)
+#define IBS_FETCH_VAL          (1ULL<<49)
+#define IBS_FETCH_ENABLE       (1ULL<<48)
+#define IBS_FETCH_CNT          0xFFFF0000ULL
+#define IBS_FETCH_MAX_CNT      0x0000FFFFULL
 
 /* IbsOpCtl bits */
-#define IBS_OP_CNT_CTL                 (1ULL<<19)
-#define IBS_OP_VAL                     (1ULL<<18)
-#define IBS_OP_ENABLE                  (1ULL<<17)
-#define IBS_OP_MAX_CNT                 0x0000FFFFULL
+#define IBS_OP_CNT_CTL         (1ULL<<19)
+#define IBS_OP_VAL             (1ULL<<18)
+#define IBS_OP_ENABLE          (1ULL<<17)
+#define IBS_OP_MAX_CNT         0x0000FFFFULL
+#define IBS_OP_MAX_CNT_EXT     0x007FFFFFULL   /* not a register bit mask */
 
 #ifdef CONFIG_PERF_EVENTS
 extern void init_hw_perf_events(void);
index 4cfc908240684c561b4554281c36850643fc139f..4c2f63c7fc1b2ec071d3d32465f389a6d9df24dd 100644 (file)
@@ -50,7 +50,7 @@ struct smp_ops {
        void (*smp_prepare_cpus)(unsigned max_cpus);
        void (*smp_cpus_done)(unsigned max_cpus);
 
-       void (*smp_send_stop)(void);
+       void (*stop_other_cpus)(int wait);
        void (*smp_send_reschedule)(int cpu);
 
        int (*cpu_up)(unsigned cpu);
@@ -73,7 +73,12 @@ extern struct smp_ops smp_ops;
 
 static inline void smp_send_stop(void)
 {
-       smp_ops.smp_send_stop();
+       smp_ops.stop_other_cpus(0);
+}
+
+static inline void stop_other_cpus(void)
+{
+       smp_ops.stop_other_cpus(1);
 }
 
 static inline void smp_prepare_boot_cpu(void)
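The smp_send_stop() op is generalized into stop_other_cpus(int wait): smp_send_stop() keeps its fire-and-forget semantics as the wait=0 wrapper, while reboot/shutdown paths that must know the other CPUs have really stopped call the wait=1 variant (see native_machine_shutdown() below).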
index c1e8c7a5116493568e06ba353dd4150d6c027d9a..ed6310183efb4337de7489362b350f81031ca2c8 100644 (file)
@@ -237,6 +237,7 @@ struct x86_pmu {
         * Intel DebugStore bits
         */
        int             bts, pebs;
+       int             bts_active, pebs_active;
        int             pebs_record_size;
        void            (*drain_pebs)(struct pt_regs *regs);
        struct event_constraint *pebs_constraints;
@@ -380,7 +381,7 @@ static void release_pmc_hardware(void) {}
 
 #endif
 
-static int reserve_ds_buffers(void);
+static void reserve_ds_buffers(void);
 static void release_ds_buffers(void);
 
 static void hw_perf_event_destroy(struct perf_event *event)
@@ -477,7 +478,7 @@ static int x86_setup_perfctr(struct perf_event *event)
        if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
            (hwc->sample_period == 1)) {
                /* BTS is not supported by this architecture. */
-               if (!x86_pmu.bts)
+               if (!x86_pmu.bts_active)
                        return -EOPNOTSUPP;
 
                /* BTS is currently only allowed for user-mode. */
@@ -496,12 +497,13 @@ static int x86_pmu_hw_config(struct perf_event *event)
                int precise = 0;
 
                /* Support for constant skid */
-               if (x86_pmu.pebs)
+               if (x86_pmu.pebs_active) {
                        precise++;
 
-               /* Support for IP fixup */
-               if (x86_pmu.lbr_nr)
-                       precise++;
+                       /* Support for IP fixup */
+                       if (x86_pmu.lbr_nr)
+                               precise++;
+               }
 
                if (event->attr.precise_ip > precise)
                        return -EOPNOTSUPP;
@@ -543,11 +545,8 @@ static int __x86_pmu_event_init(struct perf_event *event)
                if (atomic_read(&active_events) == 0) {
                        if (!reserve_pmc_hardware())
                                err = -EBUSY;
-                       else {
-                               err = reserve_ds_buffers();
-                               if (err)
-                                       release_pmc_hardware();
-                       }
+                       else
+                               reserve_ds_buffers();
                }
                if (!err)
                        atomic_inc(&active_events);
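The new bts_active/pebs_active flags split "the CPU advertises BTS/PEBS" (bts, pebs) from "we actually have buffers for it". That lets reserve_ds_buffers() become void: a failed allocation no longer aborts event creation, it simply leaves the feature disabled, and every runtime path now tests the _active flag instead of the capability bit.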
index 4977f9c400e5738cb668efc937c69cde22a2772d..b7dcd9f2b8a04b8204762e50700264570e8ce498 100644 (file)
@@ -74,6 +74,107 @@ static void fini_debug_store_on_cpu(int cpu)
        wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
 }
 
+static int alloc_pebs_buffer(int cpu)
+{
+       struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+       int node = cpu_to_node(cpu);
+       int max, thresh = 1; /* always use a single PEBS record */
+       void *buffer;
+
+       if (!x86_pmu.pebs)
+               return 0;
+
+       buffer = kmalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
+       if (unlikely(!buffer))
+               return -ENOMEM;
+
+       max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;
+
+       ds->pebs_buffer_base = (u64)(unsigned long)buffer;
+       ds->pebs_index = ds->pebs_buffer_base;
+       ds->pebs_absolute_maximum = ds->pebs_buffer_base +
+               max * x86_pmu.pebs_record_size;
+
+       ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
+               thresh * x86_pmu.pebs_record_size;
+
+       return 0;
+}
+
+static void release_pebs_buffer(int cpu)
+{
+       struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+
+       if (!ds || !x86_pmu.pebs)
+               return;
+
+       kfree((void *)(unsigned long)ds->pebs_buffer_base);
+       ds->pebs_buffer_base = 0;
+}
+
+static int alloc_bts_buffer(int cpu)
+{
+       struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+       int node = cpu_to_node(cpu);
+       int max, thresh;
+       void *buffer;
+
+       if (!x86_pmu.bts)
+               return 0;
+
+       buffer = kmalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
+       if (unlikely(!buffer))
+               return -ENOMEM;
+
+       max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
+       thresh = max / 16;
+
+       ds->bts_buffer_base = (u64)(unsigned long)buffer;
+       ds->bts_index = ds->bts_buffer_base;
+       ds->bts_absolute_maximum = ds->bts_buffer_base +
+               max * BTS_RECORD_SIZE;
+       ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
+               thresh * BTS_RECORD_SIZE;
+
+       return 0;
+}
+
+static void release_bts_buffer(int cpu)
+{
+       struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+
+       if (!ds || !x86_pmu.bts)
+               return;
+
+       kfree((void *)(unsigned long)ds->bts_buffer_base);
+       ds->bts_buffer_base = 0;
+}
+
+static int alloc_ds_buffer(int cpu)
+{
+       int node = cpu_to_node(cpu);
+       struct debug_store *ds;
+
+       ds = kmalloc_node(sizeof(*ds), GFP_KERNEL | __GFP_ZERO, node);
+       if (unlikely(!ds))
+               return -ENOMEM;
+
+       per_cpu(cpu_hw_events, cpu).ds = ds;
+
+       return 0;
+}
+
+static void release_ds_buffer(int cpu)
+{
+       struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+
+       if (!ds)
+               return;
+
+       per_cpu(cpu_hw_events, cpu).ds = NULL;
+       kfree(ds);
+}
+
 static void release_ds_buffers(void)
 {
        int cpu;
@@ -82,93 +183,77 @@ static void release_ds_buffers(void)
                return;
 
        get_online_cpus();
-
        for_each_online_cpu(cpu)
                fini_debug_store_on_cpu(cpu);
 
        for_each_possible_cpu(cpu) {
-               struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
-
-               if (!ds)
-                       continue;
-
-               per_cpu(cpu_hw_events, cpu).ds = NULL;
-
-               kfree((void *)(unsigned long)ds->pebs_buffer_base);
-               kfree((void *)(unsigned long)ds->bts_buffer_base);
-               kfree(ds);
+               release_pebs_buffer(cpu);
+               release_bts_buffer(cpu);
+               release_ds_buffer(cpu);
        }
-
        put_online_cpus();
 }
 
-static int reserve_ds_buffers(void)
+static void reserve_ds_buffers(void)
 {
-       int cpu, err = 0;
+       int bts_err = 0, pebs_err = 0;
+       int cpu;
+
+       x86_pmu.bts_active = 0;
+       x86_pmu.pebs_active = 0;
 
        if (!x86_pmu.bts && !x86_pmu.pebs)
-               return 0;
+               return;
+
+       if (!x86_pmu.bts)
+               bts_err = 1;
+
+       if (!x86_pmu.pebs)
+               pebs_err = 1;
 
        get_online_cpus();
 
        for_each_possible_cpu(cpu) {
-               struct debug_store *ds;
-               void *buffer;
-               int max, thresh;
+               if (alloc_ds_buffer(cpu)) {
+                       bts_err = 1;
+                       pebs_err = 1;
+               }
+
+               if (!bts_err && alloc_bts_buffer(cpu))
+                       bts_err = 1;
 
-               err = -ENOMEM;
-               ds = kzalloc(sizeof(*ds), GFP_KERNEL);
-               if (unlikely(!ds))
+               if (!pebs_err && alloc_pebs_buffer(cpu))
+                       pebs_err = 1;
+
+               if (bts_err && pebs_err)
                        break;
-               per_cpu(cpu_hw_events, cpu).ds = ds;
-
-               if (x86_pmu.bts) {
-                       buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
-                       if (unlikely(!buffer))
-                               break;
-
-                       max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
-                       thresh = max / 16;
-
-                       ds->bts_buffer_base = (u64)(unsigned long)buffer;
-                       ds->bts_index = ds->bts_buffer_base;
-                       ds->bts_absolute_maximum = ds->bts_buffer_base +
-                               max * BTS_RECORD_SIZE;
-                       ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
-                               thresh * BTS_RECORD_SIZE;
-               }
+       }
 
-               if (x86_pmu.pebs) {
-                       buffer = kzalloc(PEBS_BUFFER_SIZE, GFP_KERNEL);
-                       if (unlikely(!buffer))
-                               break;
-
-                       max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;
-
-                       ds->pebs_buffer_base = (u64)(unsigned long)buffer;
-                       ds->pebs_index = ds->pebs_buffer_base;
-                       ds->pebs_absolute_maximum = ds->pebs_buffer_base +
-                               max * x86_pmu.pebs_record_size;
-                       /*
-                        * Always use single record PEBS
-                        */
-                       ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
-                               x86_pmu.pebs_record_size;
-               }
+       if (bts_err) {
+               for_each_possible_cpu(cpu)
+                       release_bts_buffer(cpu);
+       }
 
-               err = 0;
+       if (pebs_err) {
+               for_each_possible_cpu(cpu)
+                       release_pebs_buffer(cpu);
        }
 
-       if (err)
-               release_ds_buffers();
-       else {
+       if (bts_err && pebs_err) {
+               for_each_possible_cpu(cpu)
+                       release_ds_buffer(cpu);
+       } else {
+               if (x86_pmu.bts && !bts_err)
+                       x86_pmu.bts_active = 1;
+
+               if (x86_pmu.pebs && !pebs_err)
+                       x86_pmu.pebs_active = 1;
+
                for_each_online_cpu(cpu)
                        init_debug_store_on_cpu(cpu);
        }
 
        put_online_cpus();
-
-       return err;
 }
 
 /*
@@ -233,7 +318,7 @@ static int intel_pmu_drain_bts_buffer(void)
        if (!event)
                return 0;
 
-       if (!ds)
+       if (!x86_pmu.bts_active)
                return 0;
 
        at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
@@ -503,7 +588,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
        struct pebs_record_core *at, *top;
        int n;
 
-       if (!ds || !x86_pmu.pebs)
+       if (!x86_pmu.pebs_active)
                return;
 
        at  = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
@@ -545,7 +630,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
        u64 status = 0;
        int bit, n;
 
-       if (!ds || !x86_pmu.pebs)
+       if (!x86_pmu.pebs_active)
                return;
 
        at  = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
@@ -630,9 +715,8 @@ static void intel_ds_init(void)
 
 #else /* CONFIG_CPU_SUP_INTEL */
 
-static int reserve_ds_buffers(void)
+static void reserve_ds_buffers(void)
 {
-       return 0;
 }
 
 static void release_ds_buffers(void)
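The refactor splits the allocation into per-CPU helpers and tracks BTS and PEBS failures independently: whichever side fails gets all of its per-CPU buffers released again, and only a double failure tears down the debug-store areas themselves. The helpers also switch from plain kzalloc() to node-local allocation so each CPU's buffers live on its own NUMA node:

    int node = cpu_to_node(cpu);
    buffer = kmalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);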
index 0f6376ffa2d9b6da338a6145c38f8b3307fa6e36..1bc7f75a5bdaf823999f7b90271808819b6f3ad3 100644 (file)
@@ -82,11 +82,11 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
                if (kstack_end(stack))
                        break;
                if (i && ((i % STACKSLOTS_PER_LINE) == 0))
-                       printk("\n%s", log_lvl);
-               printk(" %08lx", *stack++);
+                       printk(KERN_CONT "\n");
+               printk(KERN_CONT " %08lx", *stack++);
                touch_nmi_watchdog();
        }
-       printk("\n");
+       printk(KERN_CONT "\n");
        show_trace_log_lvl(task, regs, sp, bp, log_lvl);
 }
 
index 57a21f11c791b38a2b88559349cd041935e262da..6a340485249a965f29686ac84117458d6cb72be6 100644 (file)
@@ -265,20 +265,20 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
                if (stack >= irq_stack && stack <= irq_stack_end) {
                        if (stack == irq_stack_end) {
                                stack = (unsigned long *) (irq_stack_end[-1]);
-                               printk(" <EOI> ");
+                               printk(KERN_CONT " <EOI> ");
                        }
                } else {
                if (((long) stack & (THREAD_SIZE-1)) == 0)
                        break;
                }
                if (i && ((i % STACKSLOTS_PER_LINE) == 0))
-                       printk("\n%s", log_lvl);
-               printk(" %016lx", *stack++);
+                       printk(KERN_CONT "\n");
+               printk(KERN_CONT " %016lx", *stack++);
                touch_nmi_watchdog();
        }
        preempt_enable();
 
-       printk("\n");
+       printk(KERN_CONT "\n");
        show_trace_log_lvl(task, regs, sp, bp, log_lvl);
 }
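Both dumpstack variants get the same treatment: without KERN_CONT each printk() starts a new record and may be given its own log-level prefix, which could break a stack dump into fragments. Continuation output is now marked explicitly:

    printk(KERN_DEBUG "stack:");
    printk(KERN_CONT " %08lx", value);  /* appends to the same line */
    printk(KERN_CONT "\n");             /* terminates the record */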
 
index 50fbbe60e5074efcc11600616e32e21e3ca31551..64668dbf00a46185f7ef9d96a6a58996509e523d 100644 (file)
@@ -60,9 +60,6 @@ union irq_ctx {
 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
 
-static DEFINE_PER_CPU_MULTIPAGE_ALIGNED(union irq_ctx, hardirq_stack, THREAD_SIZE);
-static DEFINE_PER_CPU_MULTIPAGE_ALIGNED(union irq_ctx, softirq_stack, THREAD_SIZE);
-
 static void call_on_stack(void *func, void *stack)
 {
        asm volatile("xchgl     %%ebx,%%esp     \n"
@@ -128,7 +125,7 @@ void __cpuinit irq_ctx_init(int cpu)
        if (per_cpu(hardirq_ctx, cpu))
                return;
 
-       irqctx = &per_cpu(hardirq_stack, cpu);
+       irqctx = (union irq_ctx *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER);
        irqctx->tinfo.task              = NULL;
        irqctx->tinfo.exec_domain       = NULL;
        irqctx->tinfo.cpu               = cpu;
@@ -137,7 +134,7 @@ void __cpuinit irq_ctx_init(int cpu)
 
        per_cpu(hardirq_ctx, cpu) = irqctx;
 
-       irqctx = &per_cpu(softirq_stack, cpu);
+       irqctx = (union irq_ctx *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER);
        irqctx->tinfo.task              = NULL;
        irqctx->tinfo.exec_domain       = NULL;
        irqctx->tinfo.cpu               = cpu;
@@ -150,11 +147,6 @@ void __cpuinit irq_ctx_init(int cpu)
               cpu, per_cpu(hardirq_ctx, cpu),  per_cpu(softirq_ctx, cpu));
 }
 
-void irq_ctx_exit(int cpu)
-{
-       per_cpu(hardirq_ctx, cpu) = NULL;
-}
-
 asmlinkage void do_softirq(void)
 {
        unsigned long flags;
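The 32-bit irq stacks move from THREAD_SIZE-aligned static per-CPU data (DEFINE_PER_CPU_MULTIPAGE_ALIGNED), which inflated the per-CPU area for every CPU, to pages grabbed once with __get_free_pages(THREAD_FLAGS, THREAD_ORDER). Because irq_ctx_init() returns early when a context already exists, the allocation survives CPU hotplug, so irq_ctx_exit() becomes dead weight; it is removed here and its caller in play_dead_common() disappears below.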
index 70c4872cd8aa0cc5a21cd3d7b1d9b8cc9a2575fb..45892dc4b72a37f01627725db2654591fc4dad6b 100644 (file)
@@ -801,7 +801,8 @@ void ptrace_disable(struct task_struct *child)
 static const struct user_regset_view user_x86_32_view; /* Initialized below. */
 #endif
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        int ret;
        unsigned long __user *datap = (unsigned long __user *)data;
@@ -812,8 +813,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                unsigned long tmp;
 
                ret = -EIO;
-               if ((addr & (sizeof(data) - 1)) || addr < 0 ||
-                   addr >= sizeof(struct user))
+               if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
                        break;
 
                tmp = 0;  /* Default return condition */
@@ -830,8 +830,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 
        case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
                ret = -EIO;
-               if ((addr & (sizeof(data) - 1)) || addr < 0 ||
-                   addr >= sizeof(struct user))
+               if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
                        break;
 
                if (addr < sizeof(struct user_regs_struct))
@@ -888,17 +887,17 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 
 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
        case PTRACE_GET_THREAD_AREA:
-               if (addr < 0)
+               if ((int) addr < 0)
                        return -EIO;
                ret = do_get_thread_area(child, addr,
-                                        (struct user_desc __user *) data);
+                                       (struct user_desc __user *)data);
                break;
 
        case PTRACE_SET_THREAD_AREA:
-               if (addr < 0)
+               if ((int) addr < 0)
                        return -EIO;
                ret = do_set_thread_area(child, addr,
-                                        (struct user_desc __user *) data, 0);
+                                       (struct user_desc __user *)data, 0);
                break;
 #endif
 
index f7f53dcd3e0a47eb1465ca80e301775a97979aa3..c495aa8d48159718ca1cbb6dec1ed1ca988039bc 100644 (file)
@@ -635,7 +635,7 @@ void native_machine_shutdown(void)
        /* O.K Now that I'm on the appropriate processor,
         * stop all of the others.
         */
-       smp_send_stop();
+       stop_other_cpus();
 #endif
 
        lapic_shutdown();
index d801210945d6f5d93e3a8c672243a67792bca3cd..513deac7228d2262b3e07c2915c042a791abb7f8 100644 (file)
@@ -159,10 +159,10 @@ asmlinkage void smp_reboot_interrupt(void)
        irq_exit();
 }
 
-static void native_smp_send_stop(void)
+static void native_stop_other_cpus(int wait)
 {
        unsigned long flags;
-       unsigned long wait;
+       unsigned long timeout;
 
        if (reboot_force)
                return;
@@ -179,9 +179,12 @@ static void native_smp_send_stop(void)
        if (num_online_cpus() > 1) {
                apic->send_IPI_allbutself(REBOOT_VECTOR);
 
-               /* Don't wait longer than a second */
-               wait = USEC_PER_SEC;
-               while (num_online_cpus() > 1 && wait--)
+               /*
+                * Don't wait longer than a second if the caller
+                * didn't ask us to wait.
+                */
+               timeout = USEC_PER_SEC;
+               while (num_online_cpus() > 1 && (wait || timeout--))
                        udelay(1);
        }
 
@@ -227,7 +230,7 @@ struct smp_ops smp_ops = {
        .smp_prepare_cpus       = native_smp_prepare_cpus,
        .smp_cpus_done          = native_smp_cpus_done,
 
-       .smp_send_stop          = native_smp_send_stop,
+       .stop_other_cpus        = native_stop_other_cpus,
        .smp_send_reschedule    = native_smp_send_reschedule,
 
        .cpu_up                 = native_cpu_up,
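In the native implementation the one-second cap now applies only to the non-waiting case: with wait != 0 the "wait ||" short-circuits the decrement and the loop spins until the other CPUs leave the online mask.

    timeout = USEC_PER_SEC;
    while (num_online_cpus() > 1 && (wait || timeout--))
            udelay(1);      /* wait==1: spin until all CPUs stop;
                             * wait==0: give up after ~1s */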
index 6c7faecd9e4aba5fa26919714a696f215d7d97d3..083e99d1b7df2aba236563467f47ebb21a09943d 100644 (file)
@@ -1373,7 +1373,6 @@ void play_dead_common(void)
 {
        idle_task_exit();
        reset_lazy_tlbstate();
-       irq_ctx_exit(raw_smp_processor_id());
        c1e_remove_cpu(raw_smp_processor_id());
 
        mb();
index d723e369003cb3658eb60f4e7eed1a94a6a4afda..b49962662101a0cf7361f0035e1b017333efc22a 100644 (file)
@@ -74,7 +74,7 @@ void __kunmap_atomic(void *kvaddr)
            vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
                int idx, type;
 
-               type = kmap_atomic_idx_pop();
+               type = kmap_atomic_idx();
                idx = type + KM_TYPE_NR * smp_processor_id();
 
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -87,6 +87,7 @@ void __kunmap_atomic(void *kvaddr)
                 * attributes or becomes a protected page in a hypervisor.
                 */
                kpte_clear_flush(kmap_pte-idx, vaddr);
+               kmap_atomic_idx_pop();
        }
 #ifdef CONFIG_DEBUG_HIGHMEM
        else {
index 84346200e783bee9a9caf37e1d9a46d7936d97ba..71a59296af80779f56d3f31c98f0ce2d790fe373 100644 (file)
@@ -51,7 +51,6 @@
 #include <asm/numa.h>
 #include <asm/cacheflush.h>
 #include <asm/init.h>
-#include <linux/bootmem.h>
 
 static int __init parse_direct_gbpages_off(char *arg)
 {
index 75a3d7f24a2cc1d635c4820b89a1fd4873698ca9..7b179b499fa30944adfbacf0b19a1e042fd4ebe8 100644 (file)
@@ -98,7 +98,7 @@ iounmap_atomic(void __iomem *kvaddr)
            vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
                int idx, type;
 
-               type = kmap_atomic_idx_pop();
+               type = kmap_atomic_idx();
                idx = type + KM_TYPE_NR * smp_processor_id();
 
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -111,6 +111,7 @@ iounmap_atomic(void __iomem *kvaddr)
                 * attributes or becomes a protected page in a hypervisor.
                 */
                kpte_clear_flush(kmap_pte-idx, vaddr);
+               kmap_atomic_idx_pop();
        }
 
        pagefault_enable();
index bd1489c3ce09b7416c400701f7fb407eae673615..4e8baad36d37739e32b71da0be2932924bacfe69 100644 (file)
@@ -726,6 +726,12 @@ int __init op_nmi_init(struct oprofile_operations *ops)
                case 0x11:
                        cpu_type = "x86-64/family11h";
                        break;
+               case 0x12:
+                       cpu_type = "x86-64/family12h";
+                       break;
+               case 0x14:
+                       cpu_type = "x86-64/family14h";
+                       break;
                default:
                        return -ENODEV;
                }
index 42fb46f8388304d5ab7a16281f58a530f3b7211c..a011bcc0f94331d82c8abfa7d4afdbbd0c59eff5 100644 (file)
@@ -48,17 +48,24 @@ static unsigned long reset_value[NUM_VIRT_COUNTERS];
 
 static u32 ibs_caps;
 
-struct op_ibs_config {
+struct ibs_config {
        unsigned long op_enabled;
        unsigned long fetch_enabled;
        unsigned long max_cnt_fetch;
        unsigned long max_cnt_op;
        unsigned long rand_en;
        unsigned long dispatched_ops;
+       unsigned long branch_target;
 };
 
-static struct op_ibs_config ibs_config;
-static u64 ibs_op_ctl;
+struct ibs_state {
+       u64             ibs_op_ctl;
+       int             branch_target;
+       unsigned long   sample_size;
+};
+
+static struct ibs_config ibs_config;
+static struct ibs_state ibs_state;
 
 /*
  * IBS cpuid feature detection
@@ -71,8 +78,16 @@ static u64 ibs_op_ctl;
  * bit 0 is used to indicate the existence of IBS.
  */
 #define IBS_CAPS_AVAIL                 (1U<<0)
+#define IBS_CAPS_FETCHSAM              (1U<<1)
+#define IBS_CAPS_OPSAM                 (1U<<2)
 #define IBS_CAPS_RDWROPCNT             (1U<<3)
 #define IBS_CAPS_OPCNT                 (1U<<4)
+#define IBS_CAPS_BRNTRGT               (1U<<5)
+#define IBS_CAPS_OPCNTEXT              (1U<<6)
+
+#define IBS_CAPS_DEFAULT               (IBS_CAPS_AVAIL         \
+                                        | IBS_CAPS_FETCHSAM    \
+                                        | IBS_CAPS_OPSAM)
 
 /*
  * IBS APIC setup
@@ -99,12 +114,12 @@ static u32 get_ibs_caps(void)
        /* check IBS cpuid feature flags */
        max_level = cpuid_eax(0x80000000);
        if (max_level < IBS_CPUID_FEATURES)
-               return IBS_CAPS_AVAIL;
+               return IBS_CAPS_DEFAULT;
 
        ibs_caps = cpuid_eax(IBS_CPUID_FEATURES);
        if (!(ibs_caps & IBS_CAPS_AVAIL))
                /* cpuid flags not valid */
-               return IBS_CAPS_AVAIL;
+               return IBS_CAPS_DEFAULT;
 
        return ibs_caps;
 }
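
The detection path now degrades to IBS_CAPS_DEFAULT (available + fetch + op sampling) rather than bare IBS_CAPS_AVAIL when the IBS feature leaf is absent or invalid, so older AMD CPUs still get both sampling modes. The same probe can be written as a stand-alone userspace check with GCC's <cpuid.h>; the leaf number 0x8000001b is an assumption here, as the hunk only references it by name:

    #include <cpuid.h>
    #include <stdio.h>

    #define IBS_CPUID_FEATURES 0x8000001b            /* assumed leaf number */
    #define IBS_CAPS_AVAIL     (1U << 0)
    #define IBS_CAPS_FETCHSAM  (1U << 1)
    #define IBS_CAPS_OPSAM     (1U << 2)
    #define IBS_CAPS_DEFAULT   (IBS_CAPS_AVAIL | IBS_CAPS_FETCHSAM | IBS_CAPS_OPSAM)

    static unsigned int get_ibs_caps(void)
    {
            unsigned int eax, ebx, ecx, edx;

            /* Feature leaf missing entirely: assume the base capabilities. */
            if (!__get_cpuid(0x80000000, &eax, &ebx, &ecx, &edx) ||
                eax < IBS_CPUID_FEATURES)
                    return IBS_CAPS_DEFAULT;

            __get_cpuid(IBS_CPUID_FEATURES, &eax, &ebx, &ecx, &edx);
            return (eax & IBS_CAPS_AVAIL) ? eax : IBS_CAPS_DEFAULT;
    }

    int main(void)
    {
            printf("ibs caps: %#x\n", get_ibs_caps());
            return 0;
    }
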
@@ -197,8 +212,8 @@ op_amd_handle_ibs(struct pt_regs * const regs,
                rdmsrl(MSR_AMD64_IBSOPCTL, ctl);
                if (ctl & IBS_OP_VAL) {
                        rdmsrl(MSR_AMD64_IBSOPRIP, val);
-                       oprofile_write_reserve(&entry, regs, val,
-                                              IBS_OP_CODE, IBS_OP_SIZE);
+                       oprofile_write_reserve(&entry, regs, val, IBS_OP_CODE,
+                                              ibs_state.sample_size);
                        oprofile_add_data64(&entry, val);
                        rdmsrl(MSR_AMD64_IBSOPDATA, val);
                        oprofile_add_data64(&entry, val);
@@ -210,10 +225,14 @@ op_amd_handle_ibs(struct pt_regs * const regs,
                        oprofile_add_data64(&entry, val);
                        rdmsrl(MSR_AMD64_IBSDCPHYSAD, val);
                        oprofile_add_data64(&entry, val);
+                       if (ibs_state.branch_target) {
+                               rdmsrl(MSR_AMD64_IBSBRTARGET, val);
+                               oprofile_add_data(&entry, (unsigned long)val);
+                       }
                        oprofile_write_commit(&entry);
 
                        /* reenable the IRQ */
-                       ctl = op_amd_randomize_ibs_op(ibs_op_ctl);
+                       ctl = op_amd_randomize_ibs_op(ibs_state.ibs_op_ctl);
                        wrmsrl(MSR_AMD64_IBSOPCTL, ctl);
                }
        }
@@ -226,21 +245,32 @@ static inline void op_amd_start_ibs(void)
        if (!ibs_caps)
                return;
 
+       memset(&ibs_state, 0, sizeof(ibs_state));
+
+       /*
+        * Note: Since the max count settings may be out of range, we
+        * write back the actual values used so that userland can read
+        * them.
+        */
+
        if (ibs_config.fetch_enabled) {
-               val = (ibs_config.max_cnt_fetch >> 4) & IBS_FETCH_MAX_CNT;
+               val = ibs_config.max_cnt_fetch >> 4;
+               val = min(val, IBS_FETCH_MAX_CNT);
+               ibs_config.max_cnt_fetch = val << 4;
                val |= ibs_config.rand_en ? IBS_FETCH_RAND_EN : 0;
                val |= IBS_FETCH_ENABLE;
                wrmsrl(MSR_AMD64_IBSFETCHCTL, val);
        }
 
        if (ibs_config.op_enabled) {
-               ibs_op_ctl = ibs_config.max_cnt_op >> 4;
+               val = ibs_config.max_cnt_op >> 4;
                if (!(ibs_caps & IBS_CAPS_RDWROPCNT)) {
                        /*
                         * IbsOpCurCnt not supported.  See
                         * op_amd_randomize_ibs_op() for details.
                         */
-                       ibs_op_ctl = clamp(ibs_op_ctl, 0x0081ULL, 0xFF80ULL);
+                       val = clamp(val, 0x0081ULL, 0xFF80ULL);
+                       ibs_config.max_cnt_op = val << 4;
                } else {
                        /*
                         * The start value is randomized with a
@@ -248,13 +278,24 @@ static inline void op_amd_start_ibs(void)
                         * with the half of the randomized range. Also
                         * avoid underflows.
                         */
-                       ibs_op_ctl = min(ibs_op_ctl + IBS_RANDOM_MAXCNT_OFFSET,
-                                        IBS_OP_MAX_CNT);
+                       val += IBS_RANDOM_MAXCNT_OFFSET;
+                       if (ibs_caps & IBS_CAPS_OPCNTEXT)
+                               val = min(val, IBS_OP_MAX_CNT_EXT);
+                       else
+                               val = min(val, IBS_OP_MAX_CNT);
+                       ibs_config.max_cnt_op =
+                               (val - IBS_RANDOM_MAXCNT_OFFSET) << 4;
+               }
+               val = ((val & ~IBS_OP_MAX_CNT) << 4) | (val & IBS_OP_MAX_CNT);
+               val |= ibs_config.dispatched_ops ? IBS_OP_CNT_CTL : 0;
+               val |= IBS_OP_ENABLE;
+               ibs_state.ibs_op_ctl = val;
+               ibs_state.sample_size = IBS_OP_SIZE;
+               if (ibs_config.branch_target) {
+                       ibs_state.branch_target = 1;
+                       ibs_state.sample_size++;
                }
-               if (ibs_caps & IBS_CAPS_OPCNT && ibs_config.dispatched_ops)
-                       ibs_op_ctl |= IBS_OP_CNT_CTL;
-               ibs_op_ctl |= IBS_OP_ENABLE;
-               val = op_amd_randomize_ibs_op(ibs_op_ctl);
+               val = op_amd_randomize_ibs_op(ibs_state.ibs_op_ctl);
                wrmsrl(MSR_AMD64_IBSOPCTL, val);
        }
 }
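
A second effect of the rewrite, flagged by the new comment, is that out-of-range max_count settings are clamped and then written back into ibs_config, so the oprofilefs files reflect the value actually programmed rather than a silently masked one. The fetch-side arithmetic in isolation, as a plain C sketch (IBS_FETCH_MAX_CNT taken to be the 16-bit count mask, an assumption):

    #include <stdint.h>

    #define IBS_FETCH_MAX_CNT 0xFFFFULL              /* assumed field mask */

    /* Clamp a requested fetch max_count; report back the value used. */
    static uint64_t program_fetch_cnt(uint64_t *max_cnt_fetch)
    {
            uint64_t val = *max_cnt_fetch >> 4;      /* hardware counts in 16s */

            if (val > IBS_FETCH_MAX_CNT)
                    val = IBS_FETCH_MAX_CNT;
            *max_cnt_fetch = val << 4;               /* write back actual value */
            return val;                              /* low bits of IBSFETCHCTL */
    }
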
@@ -281,29 +322,25 @@ static inline int eilvt_is_available(int offset)
 
 static inline int ibs_eilvt_valid(void)
 {
-       u64 val;
        int offset;
+       u64 val;
 
        rdmsrl(MSR_AMD64_IBSCTL, val);
+       offset = val & IBSCTL_LVT_OFFSET_MASK;
+
        if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
-               pr_err(FW_BUG "cpu %d, invalid IBS "
-                      "interrupt offset %d (MSR%08X=0x%016llx)",
-                      smp_processor_id(), offset,
-                      MSR_AMD64_IBSCTL, val);
+               pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
+                      smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
                return 0;
        }
 
-       offset = val & IBSCTL_LVT_OFFSET_MASK;
-
-       if (eilvt_is_available(offset))
-               return !0;
-
-       pr_err(FW_BUG "cpu %d, IBS interrupt offset %d "
-              "not available (MSR%08X=0x%016llx)",
-              smp_processor_id(), offset,
-              MSR_AMD64_IBSCTL, val);
+       if (!eilvt_is_available(offset)) {
+               pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
+                      smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
+               return 0;
+       }
 
-       return 0;
+       return 1;
 }
 
 static inline int get_ibs_offset(void)
@@ -630,28 +667,33 @@ static int setup_ibs_files(struct super_block *sb, struct dentry *root)
        /* model specific files */
 
        /* setup some reasonable defaults */
+       memset(&ibs_config, 0, sizeof(ibs_config));
        ibs_config.max_cnt_fetch = 250000;
-       ibs_config.fetch_enabled = 0;
        ibs_config.max_cnt_op = 250000;
-       ibs_config.op_enabled = 0;
-       ibs_config.dispatched_ops = 0;
-
-       dir = oprofilefs_mkdir(sb, root, "ibs_fetch");
-       oprofilefs_create_ulong(sb, dir, "enable",
-                               &ibs_config.fetch_enabled);
-       oprofilefs_create_ulong(sb, dir, "max_count",
-                               &ibs_config.max_cnt_fetch);
-       oprofilefs_create_ulong(sb, dir, "rand_enable",
-                               &ibs_config.rand_en);
-
-       dir = oprofilefs_mkdir(sb, root, "ibs_op");
-       oprofilefs_create_ulong(sb, dir, "enable",
-                               &ibs_config.op_enabled);
-       oprofilefs_create_ulong(sb, dir, "max_count",
-                               &ibs_config.max_cnt_op);
-       if (ibs_caps & IBS_CAPS_OPCNT)
-               oprofilefs_create_ulong(sb, dir, "dispatched_ops",
-                                       &ibs_config.dispatched_ops);
+
+       if (ibs_caps & IBS_CAPS_FETCHSAM) {
+               dir = oprofilefs_mkdir(sb, root, "ibs_fetch");
+               oprofilefs_create_ulong(sb, dir, "enable",
+                                       &ibs_config.fetch_enabled);
+               oprofilefs_create_ulong(sb, dir, "max_count",
+                                       &ibs_config.max_cnt_fetch);
+               oprofilefs_create_ulong(sb, dir, "rand_enable",
+                                       &ibs_config.rand_en);
+       }
+
+       if (ibs_caps & IBS_CAPS_OPSAM) {
+               dir = oprofilefs_mkdir(sb, root, "ibs_op");
+               oprofilefs_create_ulong(sb, dir, "enable",
+                                       &ibs_config.op_enabled);
+               oprofilefs_create_ulong(sb, dir, "max_count",
+                                       &ibs_config.max_cnt_op);
+               if (ibs_caps & IBS_CAPS_OPCNT)
+                       oprofilefs_create_ulong(sb, dir, "dispatched_ops",
+                                               &ibs_config.dispatched_ops);
+               if (ibs_caps & IBS_CAPS_BRNTRGT)
+                       oprofilefs_create_ulong(sb, dir, "branch_target",
+                                               &ibs_config.branch_target);
+       }
 
        return 0;
 }
index 44ab12dc2a12ee57a2f966d420e2c4cc46e0b704..70ddeaeb1ef366c7e84ba520488f85a546d3e40a 100644 (file)
@@ -59,7 +59,6 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/reboot.h>
-#include <asm/setup.h>
 #include <asm/stackprotector.h>
 #include <asm/hypervisor.h>
 
@@ -1016,7 +1015,7 @@ static void xen_reboot(int reason)
        struct sched_shutdown r = { .reason = reason };
 
 #ifdef CONFIG_SMP
-       smp_send_stop();
+       stop_other_cpus();
 #endif
 
        if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
index 25f232b18a823a5e3f015c2dfac4b38a39dcb2ed..f4d01003146581c1f58cf4835ed989de7854def5 100644 (file)
@@ -400,9 +400,9 @@ static void stop_self(void *v)
        BUG();
 }
 
-static void xen_smp_send_stop(void)
+static void xen_stop_other_cpus(int wait)
 {
-       smp_call_function(stop_self, NULL, 0);
+       smp_call_function(stop_self, NULL, wait);
 }
 
 static void xen_smp_send_reschedule(int cpu)
@@ -470,7 +470,7 @@ static const struct smp_ops xen_smp_ops __initdata = {
        .cpu_disable = xen_cpu_disable,
        .play_dead = xen_play_dead,
 
-       .smp_send_stop = xen_smp_send_stop,
+       .stop_other_cpus = xen_stop_other_cpus,
        .smp_send_reschedule = xen_smp_send_reschedule,
 
        .send_call_func_ipi = xen_smp_send_call_function_ipi,
index 9d4e1ceb3f09db9234919dc3b0279dd5178f71e4..c72c9473ef9913f9666ff54f869fe35fdfd27509 100644 (file)
@@ -256,9 +256,11 @@ int ptrace_pokeusr(struct task_struct *child, long regno, long val)
        return 0;
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
 {
        int ret = -EPERM;
+       void __user *datap = (void __user *) data;
 
        switch (request) {
        case PTRACE_PEEKTEXT:   /* read word at location addr. */
@@ -267,7 +269,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                break;
 
        case PTRACE_PEEKUSR:    /* read register specified by addr. */
-               ret = ptrace_peekusr(child, addr, (void __user *) data);
+               ret = ptrace_peekusr(child, addr, datap);
                break;
 
        case PTRACE_POKETEXT:   /* write the word at location addr. */
@@ -280,19 +282,19 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                break;
 
        case PTRACE_GETREGS:
-               ret = ptrace_getregs(child, (void __user *) data);
+               ret = ptrace_getregs(child, datap);
                break;
 
        case PTRACE_SETREGS:
-               ret = ptrace_setregs(child, (void __user *) data);
+               ret = ptrace_setregs(child, datap);
                break;
 
        case PTRACE_GETXTREGS:
-               ret = ptrace_getxregs(child, (void __user *) data);
+               ret = ptrace_getxregs(child, datap);
                break;
 
        case PTRACE_SETXTREGS:
-               ret = ptrace_setxregs(child, (void __user *) data);
+               ret = ptrace_setxregs(child, datap);
                break;
 
        default:
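
This hunk is part of the tree-wide arch_ptrace() conversion visible in the arch/*/kernel/ptrace.c entries of the file list: addr and data become unsigned long, and data is cast to a __user pointer once at the top instead of at every call site. Reduced to a sketch, with the generic fallback included:

    /* Sketch of the cast-once pattern; the register helpers are the
     * arch-specific ones named in the hunk.
     */
    long arch_ptrace_sketch(struct task_struct *child, long request,
                            unsigned long addr, unsigned long data)
    {
            void __user *datap = (void __user *)data;   /* one cast, reused */

            switch (request) {
            case PTRACE_GETREGS:
                    return ptrace_getregs(child, datap);
            case PTRACE_SETREGS:
                    return ptrace_setregs(child, datap);
            default:
                    return ptrace_request(child, request, addr, data);
            }
    }
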
index 5de2ed13b35d46dbaeff1074d7233ba6dd4a38bb..1b11abbb5c912c2fe004eda04cd6d0244f7b8a17 100644 (file)
@@ -24,19 +24,6 @@ config ASYNC_RAID6_RECOV
        select ASYNC_PQ
        select ASYNC_XOR
 
-config ASYNC_RAID6_TEST
-       tristate "Self test for hardware accelerated raid6 recovery"
-       depends on ASYNC_RAID6_RECOV
-       select ASYNC_MEMCPY
-       ---help---
-         This is a one-shot self test that permutes through the
-         recovery of all the possible two disk failure scenarios for a
-         N-disk array.  Recovery is performed with the asynchronous
-         raid6 recovery routines, and will optionally use an offload
-         engine if one is available.
-
-         If unsure, say N.
-
 config ASYNC_TX_DISABLE_PQ_VAL_DMA
        bool
 
index 80f9f3659e4d83f246bafb936c2490ff15455c4a..97c5898cd76e894350dfcc3a444de9ce2d202e35 100644 (file)
@@ -1736,9 +1736,10 @@ static int __devinit eni_do_init(struct atm_dev *dev)
                eprom = (base+EPROM_SIZE-sizeof(struct midway_eprom));
                if (readl(&eprom->magic) != ENI155_MAGIC) {
                        printk("\n");
-                       printk(KERN_ERR KERN_ERR DEV_LABEL "(itf %d): bad "
-                           "magic - expected 0x%x, got 0x%x\n",dev->number,
-                           ENI155_MAGIC,(unsigned) readl(&eprom->magic));
+                       printk(KERN_ERR DEV_LABEL
+                              "(itf %d): bad magic - expected 0x%x, got 0x%x\n",
+                              dev->number, ENI155_MAGIC,
+                              (unsigned)readl(&eprom->magic));
                        error = -EINVAL;
                        goto unmap;
                }
index e7ba774beda6b8598a47dd77faf52e5283437037..25373df1dcf8c256404626dc4376c56253039089 100644 (file)
@@ -566,6 +566,7 @@ static ssize_t ac_read (struct file *filp, char __user *buf, size_t count, loff_
                                struct mailbox mailbox;
 
                                /* Got a packet for us */
+                               memset(&st_loc, 0, sizeof(st_loc));
                                ret = do_ac_read(i, buf, &st_loc, &mailbox);
                                spin_unlock_irqrestore(&apbs[i].mutex, flags);
                                set_current_state(TASK_RUNNING);
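
The added memset() is the same class of fix as the "cciss: fix information leak to userland" entry in the merge summary: a structure copied out to userspace must be zeroed first, because compiler-inserted padding (and any field the kernel never writes) otherwise carries stale stack bytes. A self-contained userspace illustration of the padding problem, with a hypothetical struct:

    #include <string.h>

    struct report {             /* hypothetical layout */
            char tag;           /* 1 byte ... */
            long value;         /* ... then 7 padding bytes on LP64 */
    };

    static void fill(struct report *r)
    {
            memset(r, 0, sizeof(*r));  /* without this, the padding between
                                        * tag and value keeps whatever was
                                        * on the stack and would be copied
                                        * out verbatim by copy_to_user() */
            r->tag = 'x';
            r->value = 42;
    }
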
index 3afd62e856ebfea311eba94c496ac6628f586458..e9cba13ee800672275f40e7722b6e5b87b73c278 100644 (file)
@@ -713,7 +713,6 @@ static int khvcd(void *unused)
        struct hvc_struct *hp;
 
        set_freezable();
-       __set_current_state(TASK_RUNNING);
        do {
                poll_mask = 0;
                hvc_kicked = 0;
index bc397d92b499cd6c848d246f8f251aa4aa4670a6..7b78e0dfc5b08beb68b5865b017237c3b0ded620 100644 (file)
@@ -4,5 +4,5 @@
 
 obj-$(CONFIG_COMPUTONE)         += ip2.o
 
-ip2-objs                       := ip2main.o
+ip2-y                  := ip2main.o
 
index eb8a1a8c188e482571a185488f60884c91c9878e..16a93648d54e3b25ccc78f45cb56412ae9c8723e 100644 (file)
@@ -2,7 +2,7 @@
 # Makefile for the ipmi drivers.
 #
 
-ipmi_si-objs := ipmi_si_intf.o ipmi_kcs_sm.o ipmi_smic_sm.o ipmi_bt_sm.o
+ipmi_si-y := ipmi_si_intf.o ipmi_kcs_sm.o ipmi_smic_sm.o ipmi_bt_sm.o
 
 obj-$(CONFIG_IPMI_HANDLER) += ipmi_msghandler.o
 obj-$(CONFIG_IPMI_DEVICE_INTERFACE) += ipmi_devintf.o
index 1fc8876af1f529ef945cbf0df553cfc76490e454..2aa3977aae5e35d28f897416d89e53731a5c38ee 100644 (file)
@@ -916,7 +916,7 @@ static struct ipmi_smi_watcher smi_watcher =
        .smi_gone = ipmi_smi_gone,
 };
 
-static __init int init_ipmi_devintf(void)
+static int __init init_ipmi_devintf(void)
 {
        int rv;
 
@@ -954,7 +954,7 @@ static __init int init_ipmi_devintf(void)
 }
 module_init(init_ipmi_devintf);
 
-static __exit void cleanup_ipmi(void)
+static void __exit cleanup_ipmi(void)
 {
        struct ipmi_reg_list *entry, *entry2;
        mutex_lock(&reg_list_mutex);
index 4f3f8c9ec2629d52de7c538b5bad895628a61299..2fe72f8edf4475e346ac6658fc81a960693b0b77 100644 (file)
@@ -4442,13 +4442,13 @@ static int ipmi_init_msghandler(void)
        return 0;
 }
 
-static __init int ipmi_init_msghandler_mod(void)
+static int __init ipmi_init_msghandler_mod(void)
 {
        ipmi_init_msghandler();
        return 0;
 }
 
-static __exit void cleanup_ipmi(void)
+static void __exit cleanup_ipmi(void)
 {
        int count;
 
index b293d57d30a79ee11d6177fb9296a2250c64f1a3..035da9e64a1704fce0d28e35cb74a5bdd8ef0593 100644 (file)
@@ -1846,7 +1846,7 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
        return rv;
 }
 
-static __devinit void hardcode_find_bmc(void)
+static void __devinit hardcode_find_bmc(void)
 {
        int             i;
        struct smi_info *info;
@@ -2029,7 +2029,7 @@ struct SPMITable {
        s8      spmi_id[1]; /* A '\0' terminated array starts here. */
 };
 
-static __devinit int try_init_spmi(struct SPMITable *spmi)
+static int __devinit try_init_spmi(struct SPMITable *spmi)
 {
        struct smi_info  *info;
 
@@ -2112,7 +2112,7 @@ static __devinit int try_init_spmi(struct SPMITable *spmi)
        return 0;
 }
 
-static __devinit void spmi_find_bmc(void)
+static void __devinit spmi_find_bmc(void)
 {
        acpi_status      status;
        struct SPMITable *spmi;
@@ -2325,7 +2325,7 @@ static int __devinit decode_dmi(const struct dmi_header *dm,
        return 0;
 }
 
-static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
+static void __devinit try_init_dmi(struct dmi_ipmi_data *ipmi_data)
 {
        struct smi_info *info;
 
@@ -3012,7 +3012,7 @@ static __devinitdata struct ipmi_default_vals
        { .port = 0 }
 };
 
-static __devinit void default_find_bmc(void)
+static void __devinit default_find_bmc(void)
 {
        struct smi_info *info;
        int             i;
@@ -3312,7 +3312,7 @@ static int try_smi_init(struct smi_info *new_smi)
        return rv;
 }
 
-static __devinit int init_ipmi_si(void)
+static int __devinit init_ipmi_si(void)
 {
        int  i;
        char *str;
@@ -3525,7 +3525,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
        kfree(to_clean);
 }
 
-static __exit void cleanup_ipmi_si(void)
+static void __exit cleanup_ipmi_si(void)
 {
        struct smi_info *e, *tmp_e;
 
index c070b53984e45f77a2805e0428027fbe594622bd..e6d75627c6c815a794b54924e5f03c96feb7625e 100644 (file)
@@ -176,9 +176,9 @@ static void mmtimer_setup_int_2(int cpu, u64 expires)
  * in order to ensure that the setup succeeds in a deterministic time frame.
  * It will check if the interrupt setup succeeded.
  */
-static int mmtimer_setup(int cpu, int comparator, unsigned long expires)
+static int mmtimer_setup(int cpu, int comparator, unsigned long expires,
+       u64 *set_completion_time)
 {
-
        switch (comparator) {
        case 0:
                mmtimer_setup_int_0(cpu, expires);
@@ -191,7 +191,8 @@ static int mmtimer_setup(int cpu, int comparator, unsigned long expires)
                break;
        }
        /* We might've missed our expiration time */
-       if (rtc_time() <= expires)
+       *set_completion_time = rtc_time();
+       if (*set_completion_time <= expires)
                return 1;
 
        /*
@@ -227,6 +228,8 @@ static int mmtimer_disable_int(long nasid, int comparator)
 #define TIMER_OFF      0xbadcabLL      /* Timer is not setup */
 #define TIMER_SET      0               /* Comparator is set for this timer */
 
+#define MMTIMER_INTERVAL_RETRY_INCREMENT_DEFAULT 40
+
 /* There is one of these for each timer */
 struct mmtimer {
        struct rb_node list;
@@ -242,6 +245,11 @@ struct mmtimer_node {
 };
 static struct mmtimer_node *timers;
 
+static unsigned mmtimer_interval_retry_increment =
+       MMTIMER_INTERVAL_RETRY_INCREMENT_DEFAULT;
+module_param(mmtimer_interval_retry_increment, uint, 0644);
+MODULE_PARM_DESC(mmtimer_interval_retry_increment,
+       "RTC ticks to add to expiration on interval retry (default 40)");
 
 /*
  * Add a new mmtimer struct to the node's mmtimer list.
@@ -289,7 +297,8 @@ static void mmtimer_set_next_timer(int nodeid)
        struct mmtimer_node *n = &timers[nodeid];
        struct mmtimer *x;
        struct k_itimer *t;
-       int o;
+       u64 expires, exp, set_completion_time;
+       int i;
 
 restart:
        if (n->next == NULL)
@@ -300,7 +309,8 @@ restart:
        if (!t->it.mmtimer.incr) {
                /* Not an interval timer */
                if (!mmtimer_setup(x->cpu, COMPARATOR,
-                                       t->it.mmtimer.expires)) {
+                                       t->it.mmtimer.expires,
+                                       &set_completion_time)) {
                        /* Late setup, fire now */
                        tasklet_schedule(&n->tasklet);
                }
@@ -308,14 +318,23 @@ restart:
        }
 
        /* Interval timer */
-       o = 0;
-       while (!mmtimer_setup(x->cpu, COMPARATOR, t->it.mmtimer.expires)) {
-               unsigned long e, e1;
-               struct rb_node *next;
-               t->it.mmtimer.expires += t->it.mmtimer.incr << o;
-               t->it_overrun += 1 << o;
-               o++;
-               if (o > 20) {
+       i = 0;
+       expires = exp = t->it.mmtimer.expires;
+       while (!mmtimer_setup(x->cpu, COMPARATOR, expires,
+                               &set_completion_time)) {
+               int to;
+
+               i++;
+               expires = set_completion_time +
+                               mmtimer_interval_retry_increment + (1 << i);
+               /* Calculate overruns as we go. */
+               to = ((u64)(expires - exp) / t->it.mmtimer.incr);
+               if (to) {
+                       t->it_overrun += to;
+                       t->it.mmtimer.expires += t->it.mmtimer.incr * to;
+                       exp = t->it.mmtimer.expires;
+               }
+               if (i > 20) {
                        printk(KERN_ALERT "mmtimer: cannot reschedule timer\n");
                        t->it.mmtimer.clock = TIMER_OFF;
                        n->next = rb_next(&x->list);
@@ -323,21 +342,6 @@ restart:
                        kfree(x);
                        goto restart;
                }
-
-               e = t->it.mmtimer.expires;
-               next = rb_next(&x->list);
-
-               if (next == NULL)
-                       continue;
-
-               e1 = rb_entry(next, struct mmtimer, list)->
-                       timer->it.mmtimer.expires;
-               if (e > e1) {
-                       n->next = next;
-                       rb_erase(&x->list, &n->timer_head);
-                       mmtimer_add_list(x);
-                       goto restart;
-               }
        }
 }
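
The old retry loop doubled the increment on every failed setup (expires += incr << o) and then re-sorted the timer, which could walk expiry far past where it needed to be. The new loop restarts from the RTC value observed at setup completion plus a tunable slack, and derives the POSIX overrun count from how far expiry actually moved. That accounting step, sketched on its own:

    #include <stdint.h>

    /* Overruns accumulated when expiry moves from *exp to expires,
     * mirroring the hunk's "calculate overruns as we go" step.
     */
    static uint64_t account_overruns(uint64_t *exp, uint64_t expires,
                                     uint64_t incr, uint64_t *it_overrun)
    {
            uint64_t to = (expires - *exp) / incr;   /* whole intervals skipped */

            if (to) {
                    *it_overrun += to;
                    *exp += incr * to;               /* advance in incr steps */
            }
            return to;
    }
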
 
index 754c9e2058ed66bba4df34128cb9005cad9d4bb7..26b4fce217b6475ecb3e8a696cb82e4f9f0cccb6 100644 (file)
@@ -6,10 +6,10 @@
 
 obj-$(CONFIG_MWAVE) += mwave.o
 
-mwave-objs := mwavedd.o smapi.o tp3780i.o 3780i.o
+mwave-y := mwavedd.o smapi.o tp3780i.o 3780i.o
 
 # To have the mwave driver disable other uarts if necessary
 # EXTRA_CFLAGS += -DMWAVE_FUTZ_WITH_OTHER_DEVICES
 
 # To compile in lots (~20 KiB) of run-time enablable printk()s for debugging:
-EXTRA_CFLAGS += -DMW_TRACE
+ccflags-y := -DMW_TRACE
index 463df27494bd950d7503be8bd6b18b647dd8e8a8..dd9d75351cd6a3362a0e768770222262fcf8a796 100644 (file)
@@ -303,6 +303,7 @@ static void mxser_enable_must_enchance_mode(unsigned long baseio)
        outb(oldlcr, baseio + UART_LCR);
 }
 
+#ifdef CONFIG_PCI
 static void mxser_disable_must_enchance_mode(unsigned long baseio)
 {
        u8 oldlcr;
@@ -317,6 +318,7 @@ static void mxser_disable_must_enchance_mode(unsigned long baseio)
        outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
        outb(oldlcr, baseio + UART_LCR);
 }
+#endif
 
 static void mxser_set_must_xon1_value(unsigned long baseio, u8 value)
 {
@@ -388,6 +390,7 @@ static void mxser_set_must_enum_value(unsigned long baseio, u8 value)
        outb(oldlcr, baseio + UART_LCR);
 }
 
+#ifdef CONFIG_PCI
 static void mxser_get_must_hardware_id(unsigned long baseio, u8 *pId)
 {
        u8 oldlcr;
@@ -404,6 +407,7 @@ static void mxser_get_must_hardware_id(unsigned long baseio, u8 *pId)
        *pId = inb(baseio + MOXA_MUST_HWID_REGISTER);
        outb(oldlcr, baseio + UART_LCR);
 }
+#endif
 
 static void SET_MOXA_MUST_NO_SOFTWARE_FLOW_CONTROL(unsigned long baseio)
 {
index b71eb593643d61077f41c7b4599207c03d95d123..db80873d7f201a72438cb182a89992bc75b1cd8f 100644 (file)
@@ -6,5 +6,5 @@
 
 obj-$(CONFIG_IPWIRELESS) += ipwireless.o
 
-ipwireless-objs := hardware.o main.o network.o tty.o
+ipwireless-y := hardware.o main.o network.o tty.o
 
index 723152d978a9482ac3c1e48c2724894a9d6cceb4..f176dbaeb15adfaaceda46ad6bd97e030065ccd8 100644 (file)
@@ -613,6 +613,7 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
        case PPGETTIME:
                to_jiffies = pp->pdev->timeout;
+               memset(&par_timeout, 0, sizeof(par_timeout));
                par_timeout.tv_sec = to_jiffies / HZ;
                par_timeout.tv_usec = (to_jiffies % (long)HZ) * (1000000/HZ);
                if (copy_to_user (argp, &par_timeout, sizeof(struct timeval)))
index 74f00b5ffa362dc20943b572c13d802c6867e49d..73dcb0ee41fdaebcbefee2b0ddf705bd5a26b1e9 100644 (file)
@@ -25,6 +25,8 @@
 #include <linux/time.h>
 #include <linux/io.h>
 #include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/ramoops.h>
 
 #define RAMOOPS_KERNMSG_HDR "===="
 #define RAMOOPS_HEADER_SIZE   (5 + sizeof(struct timeval))
@@ -91,11 +93,17 @@ static void ramoops_do_dump(struct kmsg_dumper *dumper,
        cxt->count = (cxt->count + 1) % cxt->max_count;
 }
 
-static int __init ramoops_init(void)
+static int __init ramoops_probe(struct platform_device *pdev)
 {
+       struct ramoops_platform_data *pdata = pdev->dev.platform_data;
        struct ramoops_context *cxt = &oops_cxt;
        int err = -EINVAL;
 
+       if (pdata) {
+               mem_size = pdata->mem_size;
+               mem_address = pdata->mem_address;
+       }
+
        if (!mem_size) {
                printk(KERN_ERR "ramoops: invalid size specification");
                goto fail3;
@@ -142,7 +150,7 @@ fail3:
        return err;
 }
 
-static void __exit ramoops_exit(void)
+static int __exit ramoops_remove(struct platform_device *pdev)
 {
        struct ramoops_context *cxt = &oops_cxt;
 
@@ -151,8 +159,26 @@ static void __exit ramoops_exit(void)
 
        iounmap(cxt->virt_addr);
        release_mem_region(cxt->phys_addr, cxt->size);
+       return 0;
 }
 
+static struct platform_driver ramoops_driver = {
+       .remove         = __exit_p(ramoops_remove),
+       .driver         = {
+               .name   = "ramoops",
+               .owner  = THIS_MODULE,
+       },
+};
+
+static int __init ramoops_init(void)
+{
+       return platform_driver_probe(&ramoops_driver, ramoops_probe);
+}
+
+static void __exit ramoops_exit(void)
+{
+       platform_driver_unregister(&ramoops_driver);
+}
 
 module_init(ramoops_init);
 module_exit(ramoops_exit);
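
ramoops is converted from a bare module into a platform driver so boards can pass mem_size/mem_address through platform data, with the module parameters kept as a fallback. Because registration goes through platform_driver_probe(), the probe routine is not stored in the driver struct and may stay in __init memory. The registration skeleton, as a kernel-style sketch:

    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int __init sketch_probe(struct platform_device *pdev)
    {
            /* read pdev->dev.platform_data, map the reserved RAM, ... */
            return 0;
    }

    static int __exit sketch_remove(struct platform_device *pdev)
    {
            return 0;
    }

    static struct platform_driver sketch_driver = {
            .remove = __exit_p(sketch_remove),
            .driver = { .name = "ramoops", .owner = THIS_MODULE },
    };

    static int __init sketch_init(void)
    {
            /* probe passed separately so it can live in .init.text */
            return platform_driver_probe(&sketch_driver, sketch_probe);
    }

    static void __exit sketch_exit(void)
    {
            platform_driver_unregister(&sketch_driver);
    }

    module_init(sketch_init);
    module_exit(sketch_exit);
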
index 2d1c5a7cba7d9f8ac8a4126284c406d3bb594bb3..1661875883fb723bfcc51af0d7a53e487c7cf920 100644 (file)
@@ -8,5 +8,5 @@
 
 obj-$(CONFIG_RIO) += rio.o
 
-rio-objs := rio_linux.o rioinit.o rioboot.o riocmd.o rioctrl.o riointr.o \
+rio-y := rio_linux.o rioinit.o rioboot.o riocmd.o rioctrl.o riointr.o \
             rioparam.o rioroute.o riotable.o riotty.o
index 7c79d243acc9b3fac68d6b4062fc9a43bb93eb65..86308830ac42ac450176de5e8b451c686850a3af 100644 (file)
@@ -2345,7 +2345,7 @@ static int __init rp_init(void)
        ret = tty_register_driver(rocket_driver);
        if (ret < 0) {
                printk(KERN_ERR "Couldn't install tty RocketPort driver\n");
-               goto err_tty;
+               goto err_controller;
        }
 
 #ifdef ROCKET_DEBUG_OPEN
@@ -2380,6 +2380,9 @@ static int __init rp_init(void)
        return 0;
 err_ttyu:
        tty_unregister_driver(rocket_driver);
+err_controller:
+       if (controller)
+               release_region(controller, 4);
 err_tty:
        put_tty_driver(rocket_driver);
 err:
index 1746d91205f7e01fe148aa335c7f2f1fec352249..d01fffeac95171f53cf8d4e7f52eb239989c27ea 100644 (file)
@@ -301,6 +301,8 @@ struct slgt_info {
        unsigned int rx_pio;
        unsigned int if_mode;
        unsigned int base_clock;
+       unsigned int xsync;
+       unsigned int xctrl;
 
        /* device status */
 
@@ -405,6 +407,8 @@ static MGSL_PARAMS default_params = {
 #define TDCSR 0x94 /* tx DMA control/status */
 #define RDDAR 0x98 /* rx DMA descriptor address */
 #define TDDAR 0x9c /* tx DMA descriptor address */
+#define XSR   0x40 /* extended sync pattern */
+#define XCR   0x44 /* extended control */
 
 #define RXIDLE      BIT14
 #define RXBREAK     BIT14
@@ -517,6 +521,10 @@ static int  set_interface(struct slgt_info *info, int if_mode);
 static int  set_gpio(struct slgt_info *info, struct gpio_desc __user *gpio);
 static int  get_gpio(struct slgt_info *info, struct gpio_desc __user *gpio);
 static int  wait_gpio(struct slgt_info *info, struct gpio_desc __user *gpio);
+static int  get_xsync(struct slgt_info *info, int __user *if_mode);
+static int  set_xsync(struct slgt_info *info, int if_mode);
+static int  get_xctrl(struct slgt_info *info, int __user *if_mode);
+static int  set_xctrl(struct slgt_info *info, int if_mode);
 
 /*
  * driver functions
@@ -1056,6 +1064,14 @@ static int ioctl(struct tty_struct *tty, struct file *file,
                return get_gpio(info, argp);
        case MGSL_IOCWAITGPIO:
                return wait_gpio(info, argp);
+       case MGSL_IOCGXSYNC:
+               return get_xsync(info, argp);
+       case MGSL_IOCSXSYNC:
+               return set_xsync(info, (int)arg);
+       case MGSL_IOCGXCTRL:
+               return get_xctrl(info, argp);
+       case MGSL_IOCSXCTRL:
+               return set_xctrl(info, (int)arg);
        }
        mutex_lock(&info->port.mutex);
        switch (cmd) {
@@ -1132,6 +1148,7 @@ static long get_params32(struct slgt_info *info, struct MGSL_PARAMS32 __user *us
        struct MGSL_PARAMS32 tmp_params;
 
        DBGINFO(("%s get_params32\n", info->device_name));
+       memset(&tmp_params, 0, sizeof(tmp_params));
        tmp_params.mode            = (compat_ulong_t)info->params.mode;
        tmp_params.loopback        = info->params.loopback;
        tmp_params.flags           = info->params.flags;
@@ -1212,12 +1229,16 @@ static long slgt_compat_ioctl(struct tty_struct *tty, struct file *file,
        case MGSL_IOCSGPIO:
        case MGSL_IOCGGPIO:
        case MGSL_IOCWAITGPIO:
+       case MGSL_IOCGXSYNC:
+       case MGSL_IOCGXCTRL:
        case MGSL_IOCSTXIDLE:
        case MGSL_IOCTXENABLE:
        case MGSL_IOCRXENABLE:
        case MGSL_IOCTXABORT:
        case TIOCMIWAIT:
        case MGSL_IOCSIF:
+       case MGSL_IOCSXSYNC:
+       case MGSL_IOCSXCTRL:
                rc = ioctl(tty, file, cmd, arg);
                break;
        }
@@ -1617,6 +1638,8 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        if (cmd != SIOCWANDEV)
                return hdlc_ioctl(dev, ifr, cmd);
 
+       memset(&new_line, 0, sizeof(new_line));
+
        switch(ifr->ifr_settings.type) {
        case IF_GET_IFACE: /* return current sync_serial_settings */
 
@@ -1958,6 +1981,7 @@ static void bh_handler(struct work_struct *work)
                        case MGSL_MODE_RAW:
                        case MGSL_MODE_MONOSYNC:
                        case MGSL_MODE_BISYNC:
+                       case MGSL_MODE_XSYNC:
                                while(rx_get_buf(info));
                                break;
                        }
@@ -2357,26 +2381,27 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
 
        DBGISR(("slgt_interrupt irq=%d entry\n", info->irq_level));
 
-       spin_lock(&info->lock);
-
        while((gsr = rd_reg32(info, GSR) & 0xffffff00)) {
                DBGISR(("%s gsr=%08x\n", info->device_name, gsr));
                info->irq_occurred = true;
                for(i=0; i < info->port_count ; i++) {
                        if (info->port_array[i] == NULL)
                                continue;
+                       spin_lock(&info->port_array[i]->lock);
                        if (gsr & (BIT8 << i))
                                isr_serial(info->port_array[i]);
                        if (gsr & (BIT16 << (i*2)))
                                isr_rdma(info->port_array[i]);
                        if (gsr & (BIT17 << (i*2)))
                                isr_tdma(info->port_array[i]);
+                       spin_unlock(&info->port_array[i]->lock);
                }
        }
 
        if (info->gpio_present) {
                unsigned int state;
                unsigned int changed;
+               spin_lock(&info->lock);
                while ((changed = rd_reg32(info, IOSR)) != 0) {
                        DBGISR(("%s iosr=%08x\n", info->device_name, changed));
                        /* read latched state of GPIO signals */
@@ -2388,22 +2413,24 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
                                        isr_gpio(info->port_array[i], changed, state);
                        }
                }
+               spin_unlock(&info->lock);
        }
 
        for(i=0; i < info->port_count ; i++) {
                struct slgt_info *port = info->port_array[i];
-
-               if (port && (port->port.count || port->netcount) &&
+               if (port == NULL)
+                       continue;
+               spin_lock(&port->lock);
+               if ((port->port.count || port->netcount) &&
                    port->pending_bh && !port->bh_running &&
                    !port->bh_requested) {
                        DBGISR(("%s bh queued\n", port->device_name));
                        schedule_work(&port->task);
                        port->bh_requested = true;
                }
+               spin_unlock(&port->lock);
        }
 
-       spin_unlock(&info->lock);
-
        DBGISR(("slgt_interrupt irq=%d exit\n", info->irq_level));
        return IRQ_HANDLED;
 }
@@ -2883,6 +2910,69 @@ static int set_interface(struct slgt_info *info, int if_mode)
        return 0;
 }
 
+static int get_xsync(struct slgt_info *info, int __user *xsync)
+{
+       DBGINFO(("%s get_xsync=%x\n", info->device_name, info->xsync));
+       if (put_user(info->xsync, xsync))
+               return -EFAULT;
+       return 0;
+}
+
+/*
+ * set extended sync pattern (1 to 4 bytes) for extended sync mode
+ *
+ * sync pattern is contained in least significant bytes of value
+ * most significant byte of sync pattern is oldest (1st sent/detected)
+ */
+static int set_xsync(struct slgt_info *info, int xsync)
+{
+       unsigned long flags;
+
+       DBGINFO(("%s set_xsync=%x)\n", info->device_name, xsync));
+       spin_lock_irqsave(&info->lock, flags);
+       info->xsync = xsync;
+       wr_reg32(info, XSR, xsync);
+       spin_unlock_irqrestore(&info->lock, flags);
+       return 0;
+}
+
+static int get_xctrl(struct slgt_info *info, int __user *xctrl)
+{
+       DBGINFO(("%s get_xctrl=%x\n", info->device_name, info->xctrl));
+       if (put_user(info->xctrl, xctrl))
+               return -EFAULT;
+       return 0;
+}
+
+/*
+ * set extended control options
+ *
+ * xctrl[31:19] reserved, must be zero
+ * xctrl[18:17] extended sync pattern length in bytes
+ *              00 = 1 byte  in xsr[7:0]
+ *              01 = 2 bytes in xsr[15:0]
+ *              10 = 3 bytes in xsr[23:0]
+ *              11 = 4 bytes in xsr[31:0]
+ * xctrl[16]    1 = enable terminal count, 0=disabled
+ * xctrl[15:0]  receive terminal count for fixed length packets
+ *              value is count minus one (0 = 1 byte packet)
+ *              when terminal count is reached, receiver
+ *              automatically returns to hunt mode and receive
+ *              FIFO contents are flushed to DMA buffers with
+ *              end of frame (EOF) status
+ */
+static int set_xctrl(struct slgt_info *info, int xctrl)
+{
+       unsigned long flags;
+
+       DBGINFO(("%s set_xctrl=%x)\n", info->device_name, xctrl));
+       spin_lock_irqsave(&info->lock, flags);
+       info->xctrl = xctrl;
+       wr_reg32(info, XCR, xctrl);
+       spin_unlock_irqrestore(&info->lock, flags);
+       return 0;
+}
+
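
Given the xctrl layout documented above, composing a register value is plain bit packing; for instance, a 2-byte sync pattern with terminal count enabled for fixed 16-byte packets. The field positions below come from the comment in this hunk, not from any vendor documentation verified here:

    #include <stdint.h>

    /* Pack xctrl: [18:17] sync length in bytes minus one,
     * [16] terminal-count enable, [15:0] count minus one.
     */
    static uint32_t make_xctrl(unsigned sync_bytes, int tc_enable,
                               unsigned packet_len)
    {
            return ((uint32_t)(sync_bytes - 1) << 17) |
                   ((uint32_t)(tc_enable ? 1 : 0) << 16) |
                   ((uint32_t)(packet_len - 1) & 0xffff);
    }

    /* make_xctrl(2, 1, 16) == (1 << 17) | (1 << 16) | 15 == 0x3000f */
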
 /*
  * set general purpose IO pin state and direction
  *
@@ -2906,7 +2996,7 @@ static int set_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
                 info->device_name, gpio.state, gpio.smask,
                 gpio.dir, gpio.dmask));
 
-       spin_lock_irqsave(&info->lock,flags);
+       spin_lock_irqsave(&info->port_array[0]->lock, flags);
        if (gpio.dmask) {
                data = rd_reg32(info, IODR);
                data |= gpio.dmask & gpio.dir;
@@ -2919,7 +3009,7 @@ static int set_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
                data &= ~(gpio.smask & ~gpio.state);
                wr_reg32(info, IOVR, data);
        }
-       spin_unlock_irqrestore(&info->lock,flags);
+       spin_unlock_irqrestore(&info->port_array[0]->lock, flags);
 
        return 0;
 }
@@ -3020,7 +3110,7 @@ static int wait_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
                return -EINVAL;
        init_cond_wait(&wait, gpio.smask);
 
-       spin_lock_irqsave(&info->lock, flags);
+       spin_lock_irqsave(&info->port_array[0]->lock, flags);
        /* enable interrupts for watched pins */
        wr_reg32(info, IOER, rd_reg32(info, IOER) | gpio.smask);
        /* get current pin states */
@@ -3032,20 +3122,20 @@ static int wait_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
        } else {
                /* wait for target state */
                add_cond_wait(&info->gpio_wait_q, &wait);
-               spin_unlock_irqrestore(&info->lock, flags);
+               spin_unlock_irqrestore(&info->port_array[0]->lock, flags);
                schedule();
                if (signal_pending(current))
                        rc = -ERESTARTSYS;
                else
                        gpio.state = wait.data;
-               spin_lock_irqsave(&info->lock, flags);
+               spin_lock_irqsave(&info->port_array[0]->lock, flags);
                remove_cond_wait(&info->gpio_wait_q, &wait);
        }
 
        /* disable all GPIO interrupts if no waiting processes */
        if (info->gpio_wait_q == NULL)
                wr_reg32(info, IOER, 0);
-       spin_unlock_irqrestore(&info->lock,flags);
+       spin_unlock_irqrestore(&info->port_array[0]->lock, flags);
 
        if ((rc == 0) && copy_to_user(user_gpio, &gpio, sizeof(gpio)))
                rc = -EFAULT;
@@ -3578,7 +3668,6 @@ static void device_init(int adapter_num, struct pci_dev *pdev)
 
                /* copy resource information from first port to others */
                for (i = 1; i < port_count; ++i) {
-                       port_array[i]->lock      = port_array[0]->lock;
                        port_array[i]->irq_level = port_array[0]->irq_level;
                        port_array[i]->reg_addr  = port_array[0]->reg_addr;
                        alloc_dma_bufs(port_array[i]);
@@ -3763,7 +3852,9 @@ module_exit(slgt_exit);
 #define CALC_REGADDR() \
        unsigned long reg_addr = ((unsigned long)info->reg_addr) + addr; \
        if (addr >= 0x80) \
-               reg_addr += (info->port_num) * 32;
+               reg_addr += (info->port_num) * 32; \
+       else if (addr >= 0x40)  \
+               reg_addr += (info->port_num) * 16;
 
 static __u8 rd_reg8(struct slgt_info *info, unsigned int addr)
 {
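
With XSR/XCR landing at 0x40/0x44, the register map now has three regions, and CALC_REGADDR gains a second stride: common registers below 0x40, per-port registers from 0x40 spaced 16 bytes apart, and per-port registers from 0x80 spaced 32 bytes apart. The same computation as an ordinary function, for readability:

    /* Per-port register address, mirroring the updated CALC_REGADDR. */
    static unsigned long calc_regaddr(unsigned long base, unsigned int addr,
                                      unsigned int port_num)
    {
            unsigned long reg_addr = base + addr;

            if (addr >= 0x80)
                    reg_addr += port_num * 32;       /* 32-byte stride block */
            else if (addr >= 0x40)
                    reg_addr += port_num * 16;       /* new 16-byte stride block */
            return reg_addr;
    }
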
@@ -4182,7 +4273,13 @@ static void sync_mode(struct slgt_info *info)
 
        /* TCR (tx control)
         *
-        * 15..13  mode, 000=HDLC 001=raw 010=async 011=monosync 100=bisync
+        * 15..13  mode
+        *         000=HDLC/SDLC
+        *         001=raw bit synchronous
+        *         010=asynchronous/isochronous
+        *         011=monosync byte synchronous
+        *         100=bisync byte synchronous
+        *         101=xsync byte synchronous
         * 12..10  encoding
         * 09      CRC enable
         * 08      CRC32
@@ -4197,6 +4294,9 @@ static void sync_mode(struct slgt_info *info)
        val = BIT2;
 
        switch(info->params.mode) {
+       case MGSL_MODE_XSYNC:
+               val |= BIT15 + BIT13;
+               break;
        case MGSL_MODE_MONOSYNC: val |= BIT14 + BIT13; break;
        case MGSL_MODE_BISYNC:   val |= BIT15; break;
        case MGSL_MODE_RAW:      val |= BIT13; break;
@@ -4251,7 +4351,13 @@ static void sync_mode(struct slgt_info *info)
 
        /* RCR (rx control)
         *
-        * 15..13  mode, 000=HDLC 001=raw 010=async 011=monosync 100=bisync
+        * 15..13  mode
+        *         000=HDLC/SDLC
+        *         001=raw bit synchronous
+        *         010=asynchronous/isochronous
+        *         011=monosync byte synchronous
+        *         100=bisync byte synchronous
+        *         101=xsync byte synchronous
         * 12..10  encoding
         * 09      CRC enable
         * 08      CRC32
@@ -4263,6 +4369,9 @@ static void sync_mode(struct slgt_info *info)
        val = 0;
 
        switch(info->params.mode) {
+       case MGSL_MODE_XSYNC:
+               val |= BIT15 + BIT13;
+               break;
        case MGSL_MODE_MONOSYNC: val |= BIT14 + BIT13; break;
        case MGSL_MODE_BISYNC:   val |= BIT15; break;
        case MGSL_MODE_RAW:      val |= BIT13; break;
@@ -4679,6 +4788,7 @@ static bool rx_get_buf(struct slgt_info *info)
        switch(info->params.mode) {
        case MGSL_MODE_MONOSYNC:
        case MGSL_MODE_BISYNC:
+       case MGSL_MODE_XSYNC:
                /* ignore residue in byte synchronous modes */
                if (desc_residue(info->rbufs[i]))
                        count--;
index 38df8c19e74cc56903d5985cdbee7d52df3dc0d9..6b68a0fb461178a671a4dd1c1f74be0c10216fbf 100644 (file)
@@ -503,6 +503,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
        struct kbd_struct * kbd;
        unsigned int console;
        unsigned char ucval;
+       unsigned int uival;
        void __user *up = (void __user *)arg;
        int i, perm;
        int ret = 0;
@@ -657,7 +658,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                break;
 
        case KDGETMODE:
-               ucval = vc->vc_mode;
+               uival = vc->vc_mode;
                goto setint;
 
        case KDMAPDISP:
@@ -695,7 +696,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                break;
 
        case KDGKBMODE:
-               ucval = ((kbd->kbdmode == VC_RAW) ? K_RAW :
+               uival = ((kbd->kbdmode == VC_RAW) ? K_RAW :
                                 (kbd->kbdmode == VC_MEDIUMRAW) ? K_MEDIUMRAW :
                                 (kbd->kbdmode == VC_UNICODE) ? K_UNICODE :
                                 K_XLATE);
@@ -717,9 +718,9 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                break;
 
        case KDGKBMETA:
-               ucval = (vc_kbd_mode(kbd, VC_META) ? K_ESCPREFIX : K_METABIT);
+               uival = (vc_kbd_mode(kbd, VC_META) ? K_ESCPREFIX : K_METABIT);
        setint:
-               ret = put_user(ucval, (int __user *)arg);
+               ret = put_user(uival, (int __user *)arg);
                break;
 
        case KDGETKEYCODE:
@@ -949,7 +950,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                for (i = 0; i < MAX_NR_CONSOLES; ++i)
                        if (! VT_IS_IN_USE(i))
                                break;
-               ucval = i < MAX_NR_CONSOLES ? (i+1) : -1;
+               uival = i < MAX_NR_CONSOLES ? (i+1) : -1;
                goto setint;             
 
        /*
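
The ucval-to-uival switch fixes a truncation, not just a type mismatch: these cases all jump to setint, which does put_user(..., (int __user *)arg), and storing -1 through an unsigned char first (the VT_OPENQRY no-free-console case) turns it into 255 before it is widened back to int. A tiny userspace demonstration of that truncation:

    #include <stdio.h>

    int main(void)
    {
            unsigned char ucval = -1;   /* truncates to 255 */
            int uival = -1;             /* keeps -1, as setint intends */

            printf("%d %d\n", ucval, uival);   /* prints: 255 -1 */
            return 0;
    }
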
index 210338ea222f38e408d093f2aceaf9e0c539e5bc..81270d221e5ae5aac86b3fb676bc5ede51e9a7f5 100644 (file)
 #include <linux/connector.h>
 #include <linux/delay.h>
 
-
-/*
- * This job is sent to the kevent workqueue.
- * While no event is once sent to any callback, the connector workqueue
- * is not created to avoid a useless waiting kernel task.
- * Once the first event is received, we create this dedicated workqueue which
- * is necessary because the flow of data can be high and we don't want
- * to encumber keventd with that.
- */
-static void cn_queue_create(struct work_struct *work)
-{
-       struct cn_queue_dev *dev;
-
-       dev = container_of(work, struct cn_queue_dev, wq_creation);
-
-       dev->cn_queue = create_singlethread_workqueue(dev->name);
-       /* If we fail, we will use keventd for all following connector jobs */
-       WARN_ON(!dev->cn_queue);
-}
-
-/*
- * Queue a data sent to a callback.
- * If the connector workqueue is already created, we queue the job on it.
- * Otherwise, we queue the job to kevent and queue the connector workqueue
- * creation too.
- */
-int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work)
-{
-       struct cn_queue_dev *pdev = cbq->pdev;
-
-       if (likely(pdev->cn_queue))
-               return queue_work(pdev->cn_queue, work);
-
-       /* Don't create the connector workqueue twice */
-       if (atomic_inc_return(&pdev->wq_requested) == 1)
-               schedule_work(&pdev->wq_creation);
-       else
-               atomic_dec(&pdev->wq_requested);
-
-       return schedule_work(work);
-}
-
 void cn_queue_wrapper(struct work_struct *work)
 {
        struct cn_callback_entry *cbq =
@@ -111,11 +69,7 @@ cn_queue_alloc_callback_entry(char *name, struct cb_id *id,
 
 static void cn_queue_free_callback(struct cn_callback_entry *cbq)
 {
-       /* The first jobs have been sent to kevent, flush them too */
-       flush_scheduled_work();
-       if (cbq->pdev->cn_queue)
-               flush_workqueue(cbq->pdev->cn_queue);
-
+       flush_workqueue(cbq->pdev->cn_queue);
        kfree(cbq);
 }
 
@@ -193,11 +147,14 @@ struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls)
        atomic_set(&dev->refcnt, 0);
        INIT_LIST_HEAD(&dev->queue_list);
        spin_lock_init(&dev->queue_lock);
-       init_waitqueue_head(&dev->wq_created);
 
        dev->nls = nls;
 
-       INIT_WORK(&dev->wq_creation, cn_queue_create);
+       dev->cn_queue = alloc_ordered_workqueue(dev->name, 0);
+       if (!dev->cn_queue) {
+               kfree(dev);
+               return NULL;
+       }
 
        return dev;
 }
@@ -205,25 +162,9 @@ struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls)
 void cn_queue_free_dev(struct cn_queue_dev *dev)
 {
        struct cn_callback_entry *cbq, *n;
-       long timeout;
-       DEFINE_WAIT(wait);
-
-       /* Flush the first pending jobs queued on kevent */
-       flush_scheduled_work();
-
-       /* If the connector workqueue creation is still pending, wait for it */
-       prepare_to_wait(&dev->wq_created, &wait, TASK_UNINTERRUPTIBLE);
-       if (atomic_read(&dev->wq_requested) && !dev->cn_queue) {
-               timeout = schedule_timeout(HZ * 2);
-               if (!timeout && !dev->cn_queue)
-                       WARN_ON(1);
-       }
-       finish_wait(&dev->wq_created, &wait);
 
-       if (dev->cn_queue) {
-               flush_workqueue(dev->cn_queue);
-               destroy_workqueue(dev->cn_queue);
-       }
+       flush_workqueue(dev->cn_queue);
+       destroy_workqueue(dev->cn_queue);
 
        spin_lock_bh(&dev->queue_lock);
        list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry)
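
In line with the "aoe: don't use flush_scheduled_work()" cleanup elsewhere in this merge, the connector drops its lazy keventd-backed queue creation entirely: the ordered workqueue is allocated up front in cn_queue_alloc_dev(), allocation failure is fatal, and teardown shrinks to a plain flush + destroy. The resulting lifecycle, as a kernel-style sketch:

    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct cn_dev_sketch {
            struct workqueue_struct *cn_queue;
    };

    static struct cn_dev_sketch *dev_alloc(const char *name)
    {
            struct cn_dev_sketch *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

            if (!dev)
                    return NULL;
            dev->cn_queue = alloc_ordered_workqueue(name, 0);
            if (!dev->cn_queue) {       /* fail hard; no keventd fallback */
                    kfree(dev);
                    return NULL;
            }
            return dev;
    }

    static void dev_free(struct cn_dev_sketch *dev)
    {
            flush_workqueue(dev->cn_queue);   /* drain queued callbacks */
            destroy_workqueue(dev->cn_queue);
            kfree(dev);
    }
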
index 1d48f40342cbfadd6834b5edaf06d39dddabe315..e16c3fa8d2e3e3649a6cab79fb34db9323aae711 100644 (file)
@@ -133,7 +133,8 @@ static int cn_call_callback(struct sk_buff *skb)
                                        __cbq->data.skb == NULL)) {
                                __cbq->data.skb = skb;
 
-                               if (queue_cn_work(__cbq, &__cbq->work))
+                               if (queue_work(dev->cbdev->cn_queue,
+                                              &__cbq->work))
                                        err = 0;
                                else
                                        err = -EINVAL;
@@ -148,13 +149,11 @@ static int cn_call_callback(struct sk_buff *skb)
                                        d->callback = __cbq->data.callback;
                                        d->free = __new_cbq;
 
-                                       __new_cbq->pdev = __cbq->pdev;
-
                                        INIT_WORK(&__new_cbq->work,
                                                        &cn_queue_wrapper);
 
-                                       if (queue_cn_work(__new_cbq,
-                                                   &__new_cbq->work))
+                                       if (queue_work(dev->cbdev->cn_queue,
+                                                      &__new_cbq->work))
                                                err = 0;
                                        else {
                                                kfree(__new_cbq);
index 9520cf02edc840f51c6aef659a9dbd5ef6a42806..79d1542f31c0aaaff95c114a63bdb2135f8524ee 100644 (file)
@@ -46,15 +46,22 @@ config INTEL_MID_DMAC
 
          If unsure, say N.
 
-config ASYNC_TX_DISABLE_CHANNEL_SWITCH
+config ASYNC_TX_ENABLE_CHANNEL_SWITCH
        bool
 
+config AMBA_PL08X
+       bool "ARM PrimeCell PL080 or PL081 support"
+       depends on ARM_AMBA && EXPERIMENTAL
+       select DMA_ENGINE
+       help
+         Platform has a PL08x DMAC device
+         which can provide DMA engine support
+
 config INTEL_IOATDMA
        tristate "Intel I/OAT DMA support"
        depends on PCI && X86
        select DMA_ENGINE
        select DCA
-       select ASYNC_TX_DISABLE_CHANNEL_SWITCH
        select ASYNC_TX_DISABLE_PQ_VAL_DMA
        select ASYNC_TX_DISABLE_XOR_VAL_DMA
        help
@@ -69,6 +76,7 @@ config INTEL_IOP_ADMA
        tristate "Intel IOP ADMA support"
        depends on ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX
        select DMA_ENGINE
+       select ASYNC_TX_ENABLE_CHANNEL_SWITCH
        help
          Enable support for the Intel(R) IOP Series RAID engines.
 
@@ -93,6 +101,7 @@ config FSL_DMA
        tristate "Freescale Elo and Elo Plus DMA support"
        depends on FSL_SOC
        select DMA_ENGINE
+       select ASYNC_TX_ENABLE_CHANNEL_SWITCH
        ---help---
          Enable support for the Freescale Elo and Elo Plus DMA controllers.
          The Elo is the DMA controller on some 82xx and 83xx parts, and the
@@ -109,6 +118,7 @@ config MV_XOR
        bool "Marvell XOR engine support"
        depends on PLAT_ORION
        select DMA_ENGINE
+       select ASYNC_TX_ENABLE_CHANNEL_SWITCH
        ---help---
          Enable support for the Marvell XOR engine.
 
@@ -166,6 +176,7 @@ config AMCC_PPC440SPE_ADMA
        depends on 440SPe || 440SP
        select DMA_ENGINE
        select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
+       select ASYNC_TX_ENABLE_CHANNEL_SWITCH
        help
          Enable support for the AMCC PPC440SPe RAID engines.
 
@@ -195,6 +206,22 @@ config PCH_DMA
        help
          Enable support for the Topcliff PCH DMA engine.
 
+config IMX_SDMA
+       tristate "i.MX SDMA support"
+       depends on ARCH_MX25 || ARCH_MX3 || ARCH_MX5
+       select DMA_ENGINE
+       help
+         Support the i.MX SDMA engine. This engine is integrated into
+         Freescale i.MX25/31/35/51 chips.
+
+config IMX_DMA
+       tristate "i.MX DMA support"
+       depends on ARCH_MX1 || ARCH_MX21 || MACH_MX27
+       select DMA_ENGINE
+       help
+         Support the i.MX DMA engine. This engine is integrated into
+         Freescale i.MX1/21/27 chips.
+
 config DMA_ENGINE
        bool
 
index 72bd70384d8a23b4b0f996eb6c12c54e99abab84..a8a84f4587f20d7437af9ce12932dbf8e92a4e1b 100644 (file)
@@ -21,7 +21,10 @@ obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
 obj-$(CONFIG_SH_DMAE) += shdma.o
 obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
 obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
+obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
+obj-$(CONFIG_IMX_DMA) += imx-dma.o
 obj-$(CONFIG_TIMB_DMA) += timb_dma.o
 obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
 obj-$(CONFIG_PL330_DMA) += pl330.o
 obj-$(CONFIG_PCH_DMA) += pch_dma.o
+obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
new file mode 100644 (file)
index 0000000..b605cc9
--- /dev/null
@@ -0,0 +1,2167 @@
+/*
+ * Copyright (c) 2006 ARM Ltd.
+ * Copyright (c) 2010 ST-Ericsson SA
+ *
+ * Author: Peter Pearse <peter.pearse@arm.com>
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ *
+ * The full GNU General Public License is in this distribution in the
+ * file called COPYING.
+ *
+ * Documentation: ARM DDI 0196G == PL080
+ * Documentation: ARM DDI 0218E == PL081
+ *
+ * PL080 & PL081 both have 16 sets of DMA signals that can be routed to
+ * any channel.
+ *
+ * The PL080 has 8 channels available for simultaneous use, and the PL081
+ * has only two channels. So on these DMA controllers the number of channels
+ * and the number of incoming DMA signals are two totally different things.
+ * It is usually not possible to handle all physical signals simultaneously,
+ * so a multiplexing scheme with possible denial of use is necessary.
+ *
+ * The PL080 has a dual bus master, PL081 has a single master.
+ *
+ * Memory to peripheral transfer may be visualized as
+ *     Get data from memory to DMAC
+ *     Until no data left
+ *             On burst request from peripheral
+ *                     Destination burst from DMAC to peripheral
+ *                     Clear burst request
+ *     Raise terminal count interrupt
+ *
+ * For peripherals with a FIFO:
+ * Source      burst size == half the depth of the peripheral FIFO
+ * Destination burst size == the depth of the peripheral FIFO
+ *
+ * (Bursts are irrelevant for mem to mem transfers - there are no burst
+ * signals, the DMA controller will simply facilitate its AHB master.)
+ *
+ * ASSUMES default (little) endianness for DMA transfers
+ *
+ * Only DMAC flow control is implemented
+ *
+ * Global TODO:
+ * - Break out common code from arch/arm/mach-s3c64xx and share
+ */
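
For the FIFO rule stated in the header comment, deriving burst sizes from a peripheral's FIFO depth is a pair of one-liners; the helper names here are illustrative, not part of the driver:

    /* Burst sizes per the rule above: source burst half the FIFO
     * depth, destination burst the full depth.
     */
    static inline unsigned int fifo_src_burst(unsigned int fifo_depth)
    {
            return fifo_depth / 2;
    }

    static inline unsigned int fifo_dst_burst(unsigned int fifo_depth)
    {
            return fifo_depth;
    }
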
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/dmapool.h>
+#include <linux/amba/bus.h>
+#include <linux/dmaengine.h>
+#include <linux/amba/pl08x.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include <asm/hardware/pl080.h>
+#include <asm/dma.h>
+#include <asm/mach/dma.h>
+#include <asm/atomic.h>
+#include <asm/processor.h>
+#include <asm/cacheflush.h>
+
+#define DRIVER_NAME    "pl08xdmac"
+
+/**
+ * struct vendor_data - vendor-specific config parameters
+ * for PL08x derivatives
+ * @name: the name of this specific variant
+ * @channels: the number of channels available in this variant
+ * @dualmaster: whether this version supports dual AHB masters
+ * or not.
+ */
+struct vendor_data {
+       char *name;
+       u8 channels;
+       bool dualmaster;
+};
+
+/*
+ * PL08X private data structures
+ * An LLI struct - see pl08x TRM
+ * Note that next uses bit[0] as a bus bit,
+ * start & end do not - their bus bit info
+ * is in cctl
+ */
+struct lli {
+       dma_addr_t src;
+       dma_addr_t dst;
+       dma_addr_t next;
+       u32 cctl;
+};
+
+/**
+ * struct pl08x_driver_data - the local state holder for the PL08x
+ * @slave: slave engine for this instance
+ * @memcpy: memcpy engine for this instance
+ * @base: virtual memory base (remapped) for the PL08x
+ * @adev: the corresponding AMBA (PrimeCell) bus entry
+ * @vd: vendor data for this PL08x variant
+ * @pd: platform data passed in from the platform/machine
+ * @phy_chans: array of data for the physical channels
+ * @pool: a pool for the LLI descriptors
+ * @pool_ctr: counter of LLIs in the pool
+ * @lock: a spinlock for this struct
+ */
+struct pl08x_driver_data {
+       struct dma_device slave;
+       struct dma_device memcpy;
+       void __iomem *base;
+       struct amba_device *adev;
+       struct vendor_data *vd;
+       struct pl08x_platform_data *pd;
+       struct pl08x_phy_chan *phy_chans;
+       struct dma_pool *pool;
+       int pool_ctr;
+       spinlock_t lock;
+};
+
+/*
+ * PL08X specific defines
+ */
+
+/*
+ * Memory boundaries: the manual for PL08x says that the controller
+ * cannot read past a 1KiB boundary, so these defines are used to
+ * create transfer LLIs that do not cross such boundaries.
+ */
+#define PL08X_BOUNDARY_SHIFT           (10)    /* 1KiB = 0x400 */
+#define PL08X_BOUNDARY_SIZE            (1 << PL08X_BOUNDARY_SHIFT)
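+
+/*
+ * Worked example: a transfer of 0x10 bytes starting at 0x3f8 crosses
+ * the 1KiB boundary at 0x400, so it is split into one LLI of 8 bytes
+ * (0x3f8..0x3ff) and a second LLI for the remaining 8 bytes.
+ */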
+
+/* Minimum period between work queue runs */
+#define PL08X_WQ_PERIODMIN     20
+
+/* Size (bytes) of each LLI buffer allocated for one transfer */
+#define PL08X_LLI_TSFR_SIZE    0x2000
+
+/* Maximum times we call dma_pool_alloc on this pool without freeing */
+#define PL08X_MAX_ALLOCS       0x40
+#define MAX_NUM_TSFR_LLIS      (PL08X_LLI_TSFR_SIZE/sizeof(struct lli))
+#define PL08X_ALIGN            8
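+
+/*
+ * Assuming a 32-bit dma_addr_t (as on ARM32), struct lli is 16 bytes,
+ * so the 0x2000 byte buffer above gives MAX_NUM_TSFR_LLIS == 512
+ * LLIs per transfer.
+ */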
+
+static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
+{
+       return container_of(chan, struct pl08x_dma_chan, chan);
+}
+
+/*
+ * Physical channel handling
+ */
+
+/* Whether a certain channel is busy or not */
+static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
+{
+       unsigned int val;
+
+       val = readl(ch->base + PL080_CH_CONFIG);
+       return val & PL080_CONFIG_ACTIVE;
+}
+
+/*
+ * Set the initial DMA register values, i.e. those for the first LLI.
+ * The next LLI pointer and the configuration interrupt bit have
+ * already been set when the LLIs were constructed.
+ */
+static void pl08x_set_cregs(struct pl08x_driver_data *pl08x,
+                           struct pl08x_phy_chan *ch)
+{
+       /* Wait for channel inactive */
+       while (pl08x_phy_channel_busy(ch))
+               cpu_relax();
+
+       dev_vdbg(&pl08x->adev->dev,
+               "WRITE channel %d: csrc=%08x, cdst=%08x, "
+                "cctl=%08x, clli=%08x, ccfg=%08x\n",
+               ch->id,
+               ch->csrc,
+               ch->cdst,
+               ch->cctl,
+               ch->clli,
+               ch->ccfg);
+
+       writel(ch->csrc, ch->base + PL080_CH_SRC_ADDR);
+       writel(ch->cdst, ch->base + PL080_CH_DST_ADDR);
+       writel(ch->clli, ch->base + PL080_CH_LLI);
+       writel(ch->cctl, ch->base + PL080_CH_CONTROL);
+       writel(ch->ccfg, ch->base + PL080_CH_CONFIG);
+}
+
+static inline void pl08x_config_phychan_for_txd(struct pl08x_dma_chan *plchan)
+{
+       struct pl08x_channel_data *cd = plchan->cd;
+       struct pl08x_phy_chan *phychan = plchan->phychan;
+       struct pl08x_txd *txd = plchan->at;
+
+       /* Copy the basic control register calculated at transfer config */
+       phychan->csrc = txd->csrc;
+       phychan->cdst = txd->cdst;
+       phychan->clli = txd->clli;
+       phychan->cctl = txd->cctl;
+
+       /* Assign the signal to the proper control registers */
+       phychan->ccfg = cd->ccfg;
+       phychan->ccfg &= ~PL080_CONFIG_SRC_SEL_MASK;
+       phychan->ccfg &= ~PL080_CONFIG_DST_SEL_MASK;
+       /* If no direction was set (e.g. memcpy), don't route a signal */
+       if (txd->direction == DMA_TO_DEVICE)
+               /* Select signal as destination */
+               phychan->ccfg |=
+                       (phychan->signal << PL080_CONFIG_DST_SEL_SHIFT);
+       else if (txd->direction == DMA_FROM_DEVICE)
+               /* Select signal as source */
+               phychan->ccfg |=
+                       (phychan->signal << PL080_CONFIG_SRC_SEL_SHIFT);
+       /* Always enable error interrupts */
+       phychan->ccfg |= PL080_CONFIG_ERR_IRQ_MASK;
+       /* Always enable terminal interrupts */
+       phychan->ccfg |= PL080_CONFIG_TC_IRQ_MASK;
+}
+
+/*
+ * Enable the DMA channel
+ * Assumes all other configuration bits have been set
+ * as desired before this code is called
+ */
+static void pl08x_enable_phy_chan(struct pl08x_driver_data *pl08x,
+                                 struct pl08x_phy_chan *ch)
+{
+       u32 val;
+
+       /*
+        * Do not access config register until channel shows as disabled
+        */
+       while (readl(pl08x->base + PL080_EN_CHAN) & (1 << ch->id))
+               cpu_relax();
+
+       /*
+        * Do not access config register until channel shows as inactive
+        */
+       val = readl(ch->base + PL080_CH_CONFIG);
+       while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
+               val = readl(ch->base + PL080_CH_CONFIG);
+
+       writel(val | PL080_CONFIG_ENABLE, ch->base + PL080_CH_CONFIG);
+}
+
+/*
+ * Overall DMAC remains enabled always.
+ *
+ * Disabling individual channels could lose data.
+ *
+ * Disable the peripheral DMA after disabling the DMAC in order to allow
+ * the DMAC FIFO to drain, and hence allow the channel to show inactive.
+ */
+static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
+{
+       u32 val;
+
+       /* Set the HALT bit and wait for the FIFO to drain */
+       val = readl(ch->base + PL080_CH_CONFIG);
+       val |= PL080_CONFIG_HALT;
+       writel(val, ch->base + PL080_CH_CONFIG);
+
+       /* Wait for channel inactive */
+       while (pl08x_phy_channel_busy(ch))
+               cpu_relax();
+}
+
+static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
+{
+       u32 val;
+
+       /* Clear the HALT bit */
+       val = readl(ch->base + PL080_CH_CONFIG);
+       val &= ~PL080_CONFIG_HALT;
+       writel(val, ch->base + PL080_CH_CONFIG);
+}
+
+/* Stops the channel */
+static void pl08x_stop_phy_chan(struct pl08x_phy_chan *ch)
+{
+       u32 val;
+
+       pl08x_pause_phy_chan(ch);
+
+       /* Disable channel */
+       val = readl(ch->base + PL080_CH_CONFIG);
+       val &= ~PL080_CONFIG_ENABLE;
+       val &= ~PL080_CONFIG_ERR_IRQ_MASK;
+       val &= ~PL080_CONFIG_TC_IRQ_MASK;
+       writel(val, ch->base + PL080_CH_CONFIG);
+}
+
+static inline u32 get_bytes_in_cctl(u32 cctl)
+{
+       /* The source width defines the number of bytes */
+       u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;
+
+       /* Mask off the other cctl bits before decoding the source width */
+       switch ((cctl & PL080_CONTROL_SWIDTH_MASK) >>
+               PL080_CONTROL_SWIDTH_SHIFT) {
+       case PL080_WIDTH_8BIT:
+               break;
+       case PL080_WIDTH_16BIT:
+               bytes *= 2;
+               break;
+       case PL080_WIDTH_32BIT:
+               bytes *= 4;
+               break;
+       }
+       return bytes;
+}
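+
+/*
+ * For example, a cctl encoding a 16-bit source width and a transfer
+ * size field of 0x100 describes 0x100 * 2 = 0x200 bytes.
+ */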
+
+/* The channel should be paused when calling this */
+static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
+{
+       struct pl08x_phy_chan *ch;
+       struct pl08x_txd *txdi = NULL;
+       struct pl08x_txd *txd;
+       unsigned long flags;
+       u32 bytes = 0;
+
+       spin_lock_irqsave(&plchan->lock, flags);
+
+       ch = plchan->phychan;
+       txd = plchan->at;
+
+       /*
+        * Next follow the LLIs to get the number of pending bytes in the
+        * currently active transaction.
+        */
+       if (ch && txd) {
+               struct lli *llis_va = txd->llis_va;
+               struct lli *llis_bus = (struct lli *) txd->llis_bus;
+               u32 clli = readl(ch->base + PL080_CH_LLI);
+
+               /* First get the bytes in the current active LLI */
+               bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));
+
+               if (clli) {
+                       int i = 0;
+
+                       /* Forward to the LLI pointed to by clli */
+                       while ((clli != (u32) &(llis_bus[i])) &&
+                              (i < MAX_NUM_TSFR_LLIS))
+                               i++;
+
+                       while (clli) {
+                               bytes += get_bytes_in_cctl(llis_va[i].cctl);
+                               /*
+                                * A clli of 0x00000000 will terminate the
+                                * LLI list
+                                */
+                               clli = llis_va[i].next;
+                               i++;
+                       }
+               }
+       }
+
+       /* Sum up all queued transactions */
+       if (!list_empty(&plchan->desc_list)) {
+               list_for_each_entry(txdi, &plchan->desc_list, node) {
+                       bytes += txdi->len;
+               }
+
+       }
+
+       spin_unlock_irqrestore(&plchan->lock, flags);
+
+       return bytes;
+}
+
+/*
+ * Allocate a physical channel for a virtual channel
+ */
+static struct pl08x_phy_chan *
+pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
+                     struct pl08x_dma_chan *virt_chan)
+{
+       struct pl08x_phy_chan *ch = NULL;
+       unsigned long flags;
+       int i;
+
+       /*
+        * Try to locate a physical channel to be used for
+        * this transfer. If all are taken return NULL and
+        * the requester will have to cope by using some fallback
+        * PIO mode or retrying later.
+        */
+       for (i = 0; i < pl08x->vd->channels; i++) {
+               ch = &pl08x->phy_chans[i];
+
+               spin_lock_irqsave(&ch->lock, flags);
+
+               if (!ch->serving) {
+                       ch->serving = virt_chan;
+                       ch->signal = -1;
+                       spin_unlock_irqrestore(&ch->lock, flags);
+                       break;
+               }
+
+               spin_unlock_irqrestore(&ch->lock, flags);
+       }
+
+       if (i == pl08x->vd->channels) {
+               /* No physical channel available, cope with it */
+               return NULL;
+       }
+
+       return ch;
+}
+
+static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
+                                        struct pl08x_phy_chan *ch)
+{
+       unsigned long flags;
+
+       /* Stop the channel and clear its interrupts */
+       pl08x_stop_phy_chan(ch);
+       writel((1 << ch->id), pl08x->base + PL080_ERR_CLEAR);
+       writel((1 << ch->id), pl08x->base + PL080_TC_CLEAR);
+
+       /* Mark it as free */
+       spin_lock_irqsave(&ch->lock, flags);
+       ch->serving = NULL;
+       spin_unlock_irqrestore(&ch->lock, flags);
+}
+
+/*
+ * LLI handling
+ */
+
+static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
+{
+       switch (coded) {
+       case PL080_WIDTH_8BIT:
+               return 1;
+       case PL080_WIDTH_16BIT:
+               return 2;
+       case PL080_WIDTH_32BIT:
+               return 4;
+       default:
+               break;
+       }
+       BUG();
+       return 0;
+}
+
+static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
+                                 u32 tsize)
+{
+       u32 retbits = cctl;
+
+       /* Remove all src, dst and transfersize bits */
+       retbits &= ~PL080_CONTROL_DWIDTH_MASK;
+       retbits &= ~PL080_CONTROL_SWIDTH_MASK;
+       retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;
+
+       /* Then set the bits according to the parameters */
+       switch (srcwidth) {
+       case 1:
+               retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT;
+               break;
+       case 2:
+               retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT;
+               break;
+       case 4:
+               retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT;
+               break;
+       default:
+               BUG();
+               break;
+       }
+
+       switch (dstwidth) {
+       case 1:
+               retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
+               break;
+       case 2:
+               retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
+               break;
+       case 4:
+               retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
+               break;
+       default:
+               BUG();
+               break;
+       }
+
+       retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
+       return retbits;
+}
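+
+/*
+ * For example (illustrative values only), pl08x_cctl_bits(cctl, 4, 2, 8)
+ * encodes a 32-bit source width, a 16-bit destination width and a
+ * transfer size of 8 into the returned control word, leaving the
+ * other cctl bits untouched.
+ */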
+
+/*
+ * Autoselect a master bus to use for the transfer.
+ * This prefers the destination bus if both are available; if one bus
+ * has a fixed (non-incrementing) address, the other is chosen as master.
+ */
+static void pl08x_choose_master_bus(struct pl08x_bus_data *src_bus,
+       struct pl08x_bus_data *dst_bus, struct pl08x_bus_data **mbus,
+       struct pl08x_bus_data **sbus, u32 cctl)
+{
+       if (!(cctl & PL080_CONTROL_DST_INCR)) {
+               *mbus = src_bus;
+               *sbus = dst_bus;
+       } else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
+               *mbus = dst_bus;
+               *sbus = src_bus;
+       } else {
+               if (dst_bus->buswidth == 4) {
+                       *mbus = dst_bus;
+                       *sbus = src_bus;
+               } else if (src_bus->buswidth == 4) {
+                       *mbus = src_bus;
+                       *sbus = dst_bus;
+               } else if (dst_bus->buswidth == 2) {
+                       *mbus = dst_bus;
+                       *sbus = src_bus;
+               } else if (src_bus->buswidth == 2) {
+                       *mbus = src_bus;
+                       *sbus = dst_bus;
+               } else {
+                       /* src_bus->buswidth == 1 */
+                       *mbus = dst_bus;
+                       *sbus = src_bus;
+               }
+       }
+}
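+
+/*
+ * For instance, in a memory-to-peripheral transfer the peripheral
+ * address is fixed (PL080_CONTROL_DST_INCR clear), so the first
+ * branch above makes the incrementing memory side the master bus.
+ */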
+
+/*
+ * Fills in one LLI for a certain transfer descriptor
+ * and advances the LLI counter
+ */
+static int pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
+                           struct pl08x_txd *txd, int num_llis, int len,
+                           u32 cctl, u32 *remainder)
+{
+       struct lli *llis_va = txd->llis_va;
+       struct lli *llis_bus = (struct lli *) txd->llis_bus;
+
+       BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);
+
+       llis_va[num_llis].cctl          = cctl;
+       llis_va[num_llis].src           = txd->srcbus.addr;
+       llis_va[num_llis].dst           = txd->dstbus.addr;
+
+       /*
+        * On versions with dual masters, you can optionally OR
+        * PL080_LLI_LM_AHB2 into the LLI to tell the hardware to fetch
+        * LLIs using that master, but we always try to
+        * choose AHB1 to point into memory. The idea is to have AHB2
+        * fixed on the peripheral and AHB1 messing around in the
+        * memory. So we don't manipulate this bit currently.
+        */
+
+       llis_va[num_llis].next =
+               (dma_addr_t)((u32) &(llis_bus[num_llis + 1]));
+
+       if (cctl & PL080_CONTROL_SRC_INCR)
+               txd->srcbus.addr += len;
+       if (cctl & PL080_CONTROL_DST_INCR)
+               txd->dstbus.addr += len;
+
+       *remainder -= len;
+
+       return num_llis + 1;
+}
+
+/*
+ * Return number of bytes to fill to boundary, or len
+ */
+static inline u32 pl08x_pre_boundary(u32 addr, u32 len)
+{
+       u32 boundary;
+
+       boundary = ((addr >> PL08X_BOUNDARY_SHIFT) + 1)
+               << PL08X_BOUNDARY_SHIFT;
+
+       if (boundary < addr + len)
+               return boundary - addr;
+       else
+               return len;
+}
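+
+/*
+ * Worked examples: pl08x_pre_boundary(0x3f8, 0x40) returns 0x8
+ * (the bytes up to the 1KiB boundary at 0x400), while
+ * pl08x_pre_boundary(0x400, 0x40) returns 0x40, since the whole
+ * length fits below the next boundary at 0x800.
+ */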
+
+/*
+ * This fills in the table of LLIs for the transfer descriptor.
+ * Note that we assume we never have to change the burst sizes.
+ * Returns 0 on error.
+ */
+static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
+                             struct pl08x_txd *txd)
+{
+       struct pl08x_channel_data *cd;
+       struct pl08x_bus_data *mbus, *sbus;
+       u32 remainder;
+       int num_llis = 0;
+       u32 cctl;
+       int max_bytes_per_lli;
+       int total_bytes = 0;
+       struct lli *llis_va;
+       struct lli *llis_bus;
+
+       if (!txd) {
+               dev_err(&pl08x->adev->dev, "%s no descriptor\n", __func__);
+               return 0;
+       }
+       /* Only dereference the descriptor once we know it is valid */
+       cd = txd->cd;
+
+       txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT,
+                                     &txd->llis_bus);
+       if (!txd->llis_va) {
+               dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
+               return 0;
+       }
+
+       pl08x->pool_ctr++;
+
+       /*
+        * Initialize bus values for this transfer
+        * from the passed optimal values
+        */
+       if (!cd) {
+               dev_err(&pl08x->adev->dev, "%s no channel data\n", __func__);
+               return 0;
+       }
+
+       /* Get the default CCTL from the platform data */
+       cctl = cd->cctl;
+
+       /*
+        * On the PL080 we have two bus masters and we
+        * should select one for source and one for
+        * destination. We try to use AHB2 for the
+        * bus which does not increment (typically the
+        * peripheral) else we just choose something.
+        */
+       cctl &= ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);
+       if (pl08x->vd->dualmaster) {
+               if (cctl & PL080_CONTROL_SRC_INCR)
+                       /* Source increments, use AHB2 for destination */
+                       cctl |= PL080_CONTROL_DST_AHB2;
+               else if (cctl & PL080_CONTROL_DST_INCR)
+                       /* Destination increments, use AHB2 for source */
+                       cctl |= PL080_CONTROL_SRC_AHB2;
+               else
+                       /* Just pick something, source AHB1 dest AHB2 */
+                       cctl |= PL080_CONTROL_DST_AHB2;
+       }
+
+       /* Find maximum width of the source bus */
+       txd->srcbus.maxwidth =
+               pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
+                                      PL080_CONTROL_SWIDTH_SHIFT);
+
+       /* Find maximum width of the destination bus */
+       txd->dstbus.maxwidth =
+               pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
+                                      PL080_CONTROL_DWIDTH_SHIFT);
+
+       /* Set up the bus widths to the maximum */
+       txd->srcbus.buswidth = txd->srcbus.maxwidth;
+       txd->dstbus.buswidth = txd->dstbus.maxwidth;
+       dev_vdbg(&pl08x->adev->dev,
+                "%s source bus is %d bytes wide, dest bus is %d bytes wide\n",
+                __func__, txd->srcbus.buswidth, txd->dstbus.buswidth);
+
+       /*
+        * Bytes transferred == tsize * MIN(buswidths), not max(buswidths)
+        */
+       max_bytes_per_lli = min(txd->srcbus.buswidth, txd->dstbus.buswidth) *
+               PL080_CONTROL_TRANSFER_SIZE_MASK;
+       dev_vdbg(&pl08x->adev->dev,
+                "%s max bytes per lli = %d\n",
+                __func__, max_bytes_per_lli);
+
+       /* We need to count this down to zero */
+       remainder = txd->len;
+       dev_vdbg(&pl08x->adev->dev,
+                "%s remainder = %d\n",
+                __func__, remainder);
+
+       /*
+        * Choose bus to align to
+        * - prefers destination bus if both available
+        * - if fixed address on one bus chooses other
+        * - modifies cctl to choose an appropriate master
+        */
+       pl08x_choose_master_bus(&txd->srcbus, &txd->dstbus,
+                               &mbus, &sbus, cctl);
+
+       /*
+        * The lowest bit of the LLI register
+        * is also used to indicate which master to
+        * use for reading the LLIs.
+        */
+
+       if (txd->len < mbus->buswidth) {
+               /*
+                * Less than a bus width available
+                * - send as single bytes
+                */
+               while (remainder) {
+                       dev_vdbg(&pl08x->adev->dev,
+                                "%s single byte LLIs for a transfer of "
+                                "less than a bus width (remain %08x)\n",
+                                __func__, remainder);
+                       cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
+                       num_llis =
+                               pl08x_fill_lli_for_desc(pl08x, txd, num_llis, 1,
+                                       cctl, &remainder);
+                       total_bytes++;
+               }
+       } else {
+               /*
+                *  Make one byte LLIs until master bus is aligned
+                *  - slave will then be aligned also
+                */
+               while ((mbus->addr) % (mbus->buswidth)) {
+                       dev_vdbg(&pl08x->adev->dev,
+                               "%s adjustment lli for less than bus width "
+                                "(remain %08x)\n",
+                                __func__, remainder);
+                       cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
+                       num_llis = pl08x_fill_lli_for_desc
+                               (pl08x, txd, num_llis, 1, cctl, &remainder);
+                       total_bytes++;
+               }
+
+               /*
+                *  Master now aligned
+                * - if slave is not then we must set its width down
+                */
+               if (sbus->addr % sbus->buswidth) {
+                       dev_dbg(&pl08x->adev->dev,
+                               "%s set down bus width to one byte\n",
+                                __func__);
+
+                       sbus->buswidth = 1;
+               }
+
+               /*
+                * Make largest possible LLIs until less than one bus
+                * width left
+                */
+               while (remainder > (mbus->buswidth - 1)) {
+                       int lli_len, target_len;
+                       int tsize;
+                       int odd_bytes;
+
+                       /*
+                        * If enough left try to send max possible,
+                        * otherwise try to send the remainder
+                        */
+                       target_len = remainder;
+                       if (remainder > max_bytes_per_lli)
+                               target_len = max_bytes_per_lli;
+
+                       /*
+                        * Set bus lengths for incrementing buses
+                        * to number of bytes which fill to next memory
+                        * boundary
+                        */
+                       if (cctl & PL080_CONTROL_SRC_INCR)
+                               txd->srcbus.fill_bytes =
+                                       pl08x_pre_boundary(
+                                               txd->srcbus.addr,
+                                               remainder);
+                       else
+                               txd->srcbus.fill_bytes =
+                                       max_bytes_per_lli;
+
+                       if (cctl & PL080_CONTROL_DST_INCR)
+                               txd->dstbus.fill_bytes =
+                                       pl08x_pre_boundary(
+                                               txd->dstbus.addr,
+                                               remainder);
+                       else
+                               txd->dstbus.fill_bytes =
+                                               max_bytes_per_lli;
+
+                       /*
+                        * Take the smaller of the two fill lengths
+                        */
+                       lli_len = min(txd->srcbus.fill_bytes,
+                               txd->dstbus.fill_bytes);
+
+                       BUG_ON(lli_len > remainder);
+
+                       if (lli_len <= 0) {
+                               dev_err(&pl08x->adev->dev,
+                                       "%s lli_len is %d, <= 0\n",
+                                               __func__, lli_len);
+                               return 0;
+                       }
+
+                       if (lli_len == target_len) {
+                               /*
+                                * Can send what we wanted - maintain
+                                * alignment on the master bus width
+                                */
+                               lli_len = (lli_len/mbus->buswidth) *
+                                                       mbus->buswidth;
+                               odd_bytes = 0;
+                       } else {
+                               /*
+                                * We now know how many bytes to transfer
+                                * to reach the nearest boundary; the next
+                                * LLI will pass the boundary. However we
+                                * may be working to a boundary on the
+                                * slave bus, so we must keep the master
+                                * aligned and transfer in multiples of
+                                * the bus widths.
+                                */
+                               odd_bytes = lli_len % mbus->buswidth;
+                               lli_len -= odd_bytes;
+                       }
+
+                       if (lli_len) {
+                               /*
+                                * Check against minimum bus alignment:
+                                * calculate the actual transfer size in
+                                * relation to bus width and get a maximum
+                                * remainder of the smallest bus width - 1
+                                */
+                               /* FIXME: use round_down()? */
+                               tsize = lli_len / min(mbus->buswidth,
+                                                     sbus->buswidth);
+                               lli_len = tsize * min(mbus->buswidth,
+                                                     sbus->buswidth);
+
+                               if (target_len != lli_len) {
+                                       dev_vdbg(&pl08x->adev->dev,
+                                       "%s can't send what we want. Desired %08x, lli of %08x bytes in txd of %08x\n",
+                                       __func__, target_len, lli_len, txd->len);
+                               }
+
+                               cctl = pl08x_cctl_bits(cctl,
+                                                      txd->srcbus.buswidth,
+                                                      txd->dstbus.buswidth,
+                                                      tsize);
+
+                               dev_vdbg(&pl08x->adev->dev,
+                                       "%s fill lli with single lli chunk of size %08x (remainder %08x)\n",
+                                       __func__, lli_len, remainder);
+                               num_llis = pl08x_fill_lli_for_desc(pl08x, txd,
+                                               num_llis, lli_len, cctl,
+                                               &remainder);
+                               total_bytes += lli_len;
+                       }
+
+                       if (odd_bytes) {
+                               /*
+                                * Creep past the boundary,
+                                * maintaining master alignment
+                                */
+                               int j;
+                               for (j = 0; (j < mbus->buswidth)
+                                               && (remainder); j++) {
+                                       cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
+                                       dev_vdbg(&pl08x->adev->dev,
+                                               "%s align with boundary, single byte (remain %08x)\n",
+                                               __func__, remainder);
+                                       num_llis =
+                                               pl08x_fill_lli_for_desc(pl08x,
+                                                       txd, num_llis, 1,
+                                                       cctl, &remainder);
+                                       total_bytes++;
+                               }
+                       }
+               }
+
+               /*
+                * Send any odd bytes
+                */
+               /* remainder is unsigned, so cast to detect underflow */
+               if ((s32) remainder < 0) {
+                       dev_err(&pl08x->adev->dev, "%s remainder did not fit, 0x%08x bytes left\n",
+                                       __func__, remainder);
+                       return 0;
+               }
+
+               while (remainder) {
+                       cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
+                       dev_vdbg(&pl08x->adev->dev,
+                               "%s align with boundary, single odd byte (remain %d)\n",
+                               __func__, remainder);
+                       num_llis = pl08x_fill_lli_for_desc(pl08x, txd, num_llis,
+                                       1, cctl, &remainder);
+                       total_bytes++;
+               }
+       }
+       if (total_bytes != txd->len) {
+               dev_err(&pl08x->adev->dev,
+                       "%s size of encoded LLIs doesn't match total txd, transferred 0x%08x from size 0x%08x\n",
+                       __func__, total_bytes, txd->len);
+               return 0;
+       }
+
+       if (num_llis >= MAX_NUM_TSFR_LLIS) {
+               dev_err(&pl08x->adev->dev,
+                       "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
+                       __func__, (u32) MAX_NUM_TSFR_LLIS);
+               return 0;
+       }
+       /*
+        * Decide whether this is a loop or a terminated transfer
+        */
+       llis_va = txd->llis_va;
+       llis_bus = (struct lli *) txd->llis_bus;
+
+       if (cd->circular_buffer) {
+               /*
+                * Loop the circular buffer so that the next element
+                * points back to the beginning of the LLI.
+                */
+               llis_va[num_llis - 1].next =
+                       (dma_addr_t)((unsigned int)&(llis_bus[0]));
+       } else {
+               /*
+                * On non-circular buffers, the final LLI terminates
+                * the list.
+                */
+               llis_va[num_llis - 1].next = 0;
+               /*
+                * The final LLI element shall also fire an interrupt
+                */
+               llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;
+       }
+
+       /* Now store the channel register values */
+       txd->csrc = llis_va[0].src;
+       txd->cdst = llis_va[0].dst;
+       if (num_llis > 1)
+               txd->clli = llis_va[0].next;
+       else
+               txd->clli = 0;
+
+       txd->cctl = llis_va[0].cctl;
+       /* ccfg will be set at physical channel allocation time */
+
+#ifdef VERBOSE_DEBUG
+       {
+               int i;
+
+               for (i = 0; i < num_llis; i++) {
+                       dev_vdbg(&pl08x->adev->dev,
+                                "lli %d @%p: csrc=%08x, cdst=%08x, cctl=%08x, clli=%08x\n",
+                                i,
+                                &llis_va[i],
+                                llis_va[i].src,
+                                llis_va[i].dst,
+                                llis_va[i].cctl,
+                                llis_va[i].next
+                               );
+               }
+       }
+#endif
+
+       return num_llis;
+}
+
+/* You should call this with the struct pl08x lock held */
+static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
+                          struct pl08x_txd *txd)
+{
+       if (!txd) {
+               dev_err(&pl08x->adev->dev,
+                       "%s no descriptor to free\n",
+                       __func__);
+               return;
+       }
+
+       /* Free the LLI */
+       dma_pool_free(pl08x->pool, txd->llis_va,
+                     txd->llis_bus);
+
+       pl08x->pool_ctr--;
+
+       kfree(txd);
+}
+
+static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
+                               struct pl08x_dma_chan *plchan)
+{
+       struct pl08x_txd *txdi = NULL;
+       struct pl08x_txd *next;
+
+       if (!list_empty(&plchan->desc_list)) {
+               list_for_each_entry_safe(txdi,
+                                        next, &plchan->desc_list, node) {
+                       list_del(&txdi->node);
+                       pl08x_free_txd(pl08x, txdi);
+               }
+
+       }
+}
+
+/*
+ * The DMA ENGINE API
+ */
+static int pl08x_alloc_chan_resources(struct dma_chan *chan)
+{
+       return 0;
+}
+
+static void pl08x_free_chan_resources(struct dma_chan *chan)
+{
+}
+
+/*
+ * This should be called with the channel plchan->lock held
+ */
+static int prep_phy_channel(struct pl08x_dma_chan *plchan,
+                           struct pl08x_txd *txd)
+{
+       struct pl08x_driver_data *pl08x = plchan->host;
+       struct pl08x_phy_chan *ch;
+       int ret;
+
+       /* Check if we already have a channel */
+       if (plchan->phychan)
+               return 0;
+
+       ch = pl08x_get_phy_channel(pl08x, plchan);
+       if (!ch) {
+               /* No physical channel available, cope with it */
+               dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
+               return -EBUSY;
+       }
+
+       /*
+        * OK we have a physical channel: for memcpy() this is all we
+        * need, but for slaves the physical signals may be muxed!
+        * Can the platform allow us to use this channel?
+        */
+       if (plchan->slave &&
+           ch->signal < 0 &&
+           pl08x->pd->get_signal) {
+               ret = pl08x->pd->get_signal(plchan);
+               if (ret < 0) {
+                       dev_dbg(&pl08x->adev->dev,
+                               "unable to use physical channel %d for transfer on %s due to platform restrictions\n",
+                               ch->id, plchan->name);
+                       /* Release physical channel & return */
+                       pl08x_put_phy_channel(pl08x, ch);
+                       return -EBUSY;
+               }
+               ch->signal = ret;
+       }
+
+       dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
+                ch->id,
+                ch->signal,
+                plchan->name);
+
+       plchan->phychan = ch;
+
+       return 0;
+}
+
+static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+       struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
+
+       atomic_inc(&plchan->last_issued);
+       tx->cookie = atomic_read(&plchan->last_issued);
+       /* This unlock follows the lock in the prep() function */
+       spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);
+
+       return tx->cookie;
+}
+
+static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
+               struct dma_chan *chan, unsigned long flags)
+{
+       /* Interrupt "descriptors" are not supported by this driver */
+       return NULL;
+}
+
+/*
+ * Code accessing dma_async_is_complete() in a tight loop
+ * may give problems - could schedule where indicated.
+ * If slaves are relying on interrupts to signal completion this
+ * function must not be called with interrupts disabled
+ */
+static enum dma_status
+pl08x_dma_tx_status(struct dma_chan *chan,
+                   dma_cookie_t cookie,
+                   struct dma_tx_state *txstate)
+{
+       struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+       dma_cookie_t last_used;
+       dma_cookie_t last_complete;
+       enum dma_status ret;
+       u32 bytesleft = 0;
+
+       last_used = atomic_read(&plchan->last_issued);
+       last_complete = plchan->lc;
+
+       ret = dma_async_is_complete(cookie, last_complete, last_used);
+       if (ret == DMA_SUCCESS) {
+               dma_set_tx_state(txstate, last_complete, last_used, 0);
+               return ret;
+       }
+
+       /*
+        * schedule(); could be inserted here
+        */
+
+       /*
+        * This cookie is not complete yet
+        */
+       last_used = atomic_read(&plchan->last_issued);
+       last_complete = plchan->lc;
+
+       /* Get number of bytes left in the active transactions and queue */
+       bytesleft = pl08x_getbytes_chan(plchan);
+
+       dma_set_tx_state(txstate, last_complete, last_used,
+                        bytesleft);
+
+       if (plchan->state == PL08X_CHAN_PAUSED)
+               return DMA_PAUSED;
+
+       /* Whether waiting or running, we're in progress */
+       return DMA_IN_PROGRESS;
+}
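+
+/*
+ * A client could poll for completion along these lines (a sketch only;
+ * chan and cookie come from earlier channel request and tx_submit()
+ * calls):
+ *
+ *     dma_cookie_t last, used;
+ *
+ *     while (dma_async_is_tx_complete(chan, cookie, &last, &used)
+ *            == DMA_IN_PROGRESS)
+ *             schedule();
+ */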
+
+/* PrimeCell DMA extension */
+struct burst_table {
+       int burstwords;
+       u32 reg;
+};
+
+static const struct burst_table burst_sizes[] = {
+       {
+               .burstwords = 256,
+               .reg = (PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT) |
+                       (PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT),
+       },
+       {
+               .burstwords = 128,
+               .reg = (PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT) |
+                       (PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT),
+       },
+       {
+               .burstwords = 64,
+               .reg = (PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT) |
+                       (PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT),
+       },
+       {
+               .burstwords = 32,
+               .reg = (PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT) |
+                       (PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT),
+       },
+       {
+               .burstwords = 16,
+               .reg = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT) |
+                       (PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT),
+       },
+       {
+               .burstwords = 8,
+               .reg = (PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT) |
+                       (PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT),
+       },
+       {
+               .burstwords = 4,
+               .reg = (PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT) |
+                       (PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT),
+       },
+       {
+               .burstwords = 1,
+               .reg = (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
+                       (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT),
+       },
+};
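+
+/*
+ * The table is ordered largest-first and scanned downwards, so a
+ * requested maxburst between two entries is rounded down: a maxburst
+ * of e.g. 20 words selects the 16-word entry.
+ */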
+
+static void dma_set_runtime_config(struct dma_chan *chan,
+                              struct dma_slave_config *config)
+{
+       struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+       struct pl08x_driver_data *pl08x = plchan->host;
+       struct pl08x_channel_data *cd = plchan->cd;
+       enum dma_slave_buswidth addr_width;
+       u32 maxburst;
+       u32 cctl = 0;
+       /* Mask out all except src and dst channel */
+       u32 ccfg = cd->ccfg & 0x000003DEU;
+       int i = 0;
+
+       /* Transfer direction */
+       plchan->runtime_direction = config->direction;
+       if (config->direction == DMA_TO_DEVICE) {
+               plchan->runtime_addr = config->dst_addr;
+               cctl |= PL080_CONTROL_SRC_INCR;
+               ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+               addr_width = config->dst_addr_width;
+               maxburst = config->dst_maxburst;
+       } else if (config->direction == DMA_FROM_DEVICE) {
+               plchan->runtime_addr = config->src_addr;
+               cctl |= PL080_CONTROL_DST_INCR;
+               ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+               addr_width = config->src_addr_width;
+               maxburst = config->src_maxburst;
+       } else {
+               dev_err(&pl08x->adev->dev,
+                       "bad runtime_config: alien transfer direction\n");
+               return;
+       }
+
+       switch (addr_width) {
+       case DMA_SLAVE_BUSWIDTH_1_BYTE:
+               cctl |= (PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT) |
+                       (PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT);
+               break;
+       case DMA_SLAVE_BUSWIDTH_2_BYTES:
+               cctl |= (PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT) |
+                       (PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT);
+               break;
+       case DMA_SLAVE_BUSWIDTH_4_BYTES:
+               cctl |= (PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT) |
+                       (PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT);
+               break;
+       default:
+               dev_err(&pl08x->adev->dev,
+                       "bad runtime_config: alien address width\n");
+               return;
+       }
+
+       /*
+        * Now decide on a maxburst:
+        * If this channel will only request single transfers, set
+        * this down to ONE element.
+        */
+       if (plchan->cd->single) {
+               cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
+                       (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT);
+       } else {
+               /*
+                * Scan down for the closest burst size at or below
+                * maxburst; stop at the last entry so an undersized
+                * maxburst cannot index past the end of the table.
+                */
+               while (i < ARRAY_SIZE(burst_sizes) - 1 &&
+                      burst_sizes[i].burstwords > maxburst)
+                       i++;
+               cctl |= burst_sizes[i].reg;
+       }
+
+       /* Access the cell in privileged mode, non-bufferable, non-cacheable */
+       cctl &= ~PL080_CONTROL_PROT_MASK;
+       cctl |= PL080_CONTROL_PROT_SYS;
+
+       /* Modify the default channel data to fit PrimeCell request */
+       cd->cctl = cctl;
+       cd->ccfg = ccfg;
+
+       dev_dbg(&pl08x->adev->dev,
+               "configured channel %s (%s) for %s, data width %d, "
+               "maxburst %d words, LE, CCTL=%08x, CCFG=%08x\n",
+               dma_chan_name(chan), plchan->name,
+               (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
+               addr_width,
+               maxburst,
+               cctl, ccfg);
+}
+
+/*
+ * Slave transactions call back to the slave device to allow
+ * synchronization of slave DMA signals with the DMAC enable
+ */
+static void pl08x_issue_pending(struct dma_chan *chan)
+{
+       struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+       struct pl08x_driver_data *pl08x = plchan->host;
+       unsigned long flags;
+
+       spin_lock_irqsave(&plchan->lock, flags);
+       /* Something is already active */
+       if (plchan->at) {
+               spin_unlock_irqrestore(&plchan->lock, flags);
+               return;
+       }
+
+       /* Didn't get a physical channel so waiting for it ... */
+       if (plchan->state == PL08X_CHAN_WAITING) {
+               spin_unlock_irqrestore(&plchan->lock, flags);
+               return;
+       }
+
+       /* Take the first element in the queue and execute it */
+       if (!list_empty(&plchan->desc_list)) {
+               struct pl08x_txd *next;
+
+               next = list_first_entry(&plchan->desc_list,
+                                       struct pl08x_txd,
+                                       node);
+               list_del(&next->node);
+               plchan->at = next;
+               plchan->state = PL08X_CHAN_RUNNING;
+
+               /* Configure the physical channel for the active txd */
+               pl08x_config_phychan_for_txd(plchan);
+               pl08x_set_cregs(pl08x, plchan->phychan);
+               pl08x_enable_phy_chan(pl08x, plchan->phychan);
+       }
+
+       spin_unlock_irqrestore(&plchan->lock, flags);
+}
+
+static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
+                                       struct pl08x_txd *txd)
+{
+       int num_llis;
+       struct pl08x_driver_data *pl08x = plchan->host;
+       int ret;
+
+       num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
+
+       if (!num_llis)
+               return -EINVAL;
+
+       spin_lock_irqsave(&plchan->lock, plchan->lockflags);
+
+       /*
+        * If this device is not using a circular buffer then
+        * queue this new descriptor for transfer.
+        * The descriptor for a circular buffer continues
+        * to be used until the channel is freed.
+        */
+       if (txd->cd->circular_buffer)
+               dev_err(&pl08x->adev->dev,
+                       "%s attempting to queue a circular buffer\n",
+                       __func__);
+       else
+               list_add_tail(&txd->node,
+                             &plchan->desc_list);
+
+       /*
+        * See if we already have a physical channel allocated,
+        * else this is the time to try to get one.
+        */
+       ret = prep_phy_channel(plchan, txd);
+       if (ret) {
+               /*
+                * No physical channel available: we will
+                * stack up the memcpy channels until there is a channel
+                * available to handle it, whereas slave transfers may
+                * have been denied due to platform channel muxing restrictions.
+                * Since there is no guarantee that this will ever be
+                * resolved, and since the signal must be acquired AFTER
+                * acquiring the physical channel, we will let them be NACKed
+                * with -EBUSY here. The drivers can always retry the prep()
+                * call if they are eager to do this using DMA.
+                */
+               if (plchan->slave) {
+                       pl08x_free_txd_list(pl08x, plchan);
+                       spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);
+                       return -EBUSY;
+               }
+               /* Do this memcpy whenever there is a channel ready */
+               plchan->state = PL08X_CHAN_WAITING;
+               plchan->waiting = txd;
+       } else
+               /*
+                * Else we're all set, paused and ready to roll,
+                * status will switch to PL08X_CHAN_RUNNING when
+                * we call issue_pending(). If there is something
+                * running on the channel already we don't change
+                * its state.
+                */
+               if (plchan->state == PL08X_CHAN_IDLE)
+                       plchan->state = PL08X_CHAN_PAUSED;
+
+       /*
+        * Notice that we leave plchan->lock locked on purpose:
+        * it will be unlocked in the subsequent tx_submit()
+        * call. This is a consequence of the current API.
+        */
+
+       return 0;
+}
+
+/*
+ * Initialize a descriptor to be used by memcpy submit
+ */
+static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
+               struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+               size_t len, unsigned long flags)
+{
+       struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+       struct pl08x_driver_data *pl08x = plchan->host;
+       struct pl08x_txd *txd;
+       int ret;
+
+       txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
+       if (!txd) {
+               dev_err(&pl08x->adev->dev,
+                       "%s no memory for descriptor\n", __func__);
+               return NULL;
+       }
+
+       dma_async_tx_descriptor_init(&txd->tx, chan);
+       txd->direction = DMA_NONE;
+       txd->srcbus.addr = src;
+       txd->dstbus.addr = dest;
+
+       /* Set platform data for m2m */
+       txd->cd = &pl08x->pd->memcpy_channel;
+       /* Both to be incremented or the code will break */
+       txd->cd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;
+       txd->tx.tx_submit = pl08x_tx_submit;
+       txd->tx.callback = NULL;
+       txd->tx.callback_param = NULL;
+       txd->len = len;
+
+       INIT_LIST_HEAD(&txd->node);
+       ret = pl08x_prep_channel_resources(plchan, txd);
+       if (ret)
+               return NULL;
+       /*
+        * NB: the channel lock is held at this point so tx_submit()
+        * must be called in direct succession.
+        */
+
+       return &txd->tx;
+}
+
+struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
+               struct dma_chan *chan, struct scatterlist *sgl,
+               unsigned int sg_len, enum dma_data_direction direction,
+               unsigned long flags)
+{
+       struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+       struct pl08x_driver_data *pl08x = plchan->host;
+       struct pl08x_txd *txd;
+       int ret;
+
+       /*
+        * Current implementation ASSUMES only one sg
+        */
+       if (sg_len != 1) {
+               dev_err(&pl08x->adev->dev, "%s prepared too long sglist\n",
+                       __func__);
+               BUG();
+       }
+
+       dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
+               __func__, sgl->length, plchan->name);
+
+       txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
+       if (!txd) {
+               dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
+               return NULL;
+       }
+
+       dma_async_tx_descriptor_init(&txd->tx, chan);
+
+       if (direction != plchan->runtime_direction)
+               dev_err(&pl08x->adev->dev, "%s DMA setup does not match "
+                       "the direction configured for the PrimeCell\n",
+                       __func__);
+
+       /*
+        * Set up addresses, the PrimeCell configured address
+        * will take precedence since this may configure the
+        * channel target address dynamically at runtime.
+        */
+       txd->direction = direction;
+       if (direction == DMA_TO_DEVICE) {
+               txd->srcbus.addr = sgl->dma_address;
+               if (plchan->runtime_addr)
+                       txd->dstbus.addr = plchan->runtime_addr;
+               else
+                       txd->dstbus.addr = plchan->cd->addr;
+       } else if (direction == DMA_FROM_DEVICE) {
+               if (plchan->runtime_addr)
+                       txd->srcbus.addr = plchan->runtime_addr;
+               else
+                       txd->srcbus.addr = plchan->cd->addr;
+               txd->dstbus.addr = sgl->dma_address;
+       } else {
+               dev_err(&pl08x->adev->dev,
+                       "%s direction unsupported\n", __func__);
+               return NULL;
+       }
+       txd->cd = plchan->cd;
+       txd->tx.tx_submit = pl08x_tx_submit;
+       txd->tx.callback = NULL;
+       txd->tx.callback_param = NULL;
+       txd->len = sgl->length;
+       INIT_LIST_HEAD(&txd->node);
+
+       ret = pl08x_prep_channel_resources(plchan, txd);
+       if (ret)
+               return NULL;
+       /*
+        * NB: the channel lock is held at this point so tx_submit()
+        * must be called in direct succession.
+        */
+
+       return &txd->tx;
+}
+
+static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+                        unsigned long arg)
+{
+       struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+       struct pl08x_driver_data *pl08x = plchan->host;
+       unsigned long flags;
+       int ret = 0;
+
+       /* Controls applicable to inactive channels */
+       if (cmd == DMA_SLAVE_CONFIG) {
+               dma_set_runtime_config(chan,
+                                      (struct dma_slave_config *)
+                                      arg);
+               return 0;
+       }
+
+       /*
+        * Anything succeeds on channels with no physical allocation and
+        * no queued transfers.
+        */
+       spin_lock_irqsave(&plchan->lock, flags);
+       if (!plchan->phychan && !plchan->at) {
+               spin_unlock_irqrestore(&plchan->lock, flags);
+               return 0;
+       }
+
+       switch (cmd) {
+       case DMA_TERMINATE_ALL:
+               plchan->state = PL08X_CHAN_IDLE;
+
+               if (plchan->phychan) {
+                       pl08x_stop_phy_chan(plchan->phychan);
+
+                       /*
+                        * Mark physical channel as free and free any slave
+                        * signal
+                        */
+                       if ((plchan->phychan->signal >= 0) &&
+                           pl08x->pd->put_signal) {
+                               pl08x->pd->put_signal(plchan);
+                               plchan->phychan->signal = -1;
+                       }
+                       pl08x_put_phy_channel(pl08x, plchan->phychan);
+                       plchan->phychan = NULL;
+               }
+               /* Stop any pending tasklet */
+               tasklet_disable(&plchan->tasklet);
+               /* Dequeue jobs and free LLIs */
+               if (plchan->at) {
+                       pl08x_free_txd(pl08x, plchan->at);
+                       plchan->at = NULL;
+               }
+               /* Dequeue jobs not yet fired as well */
+               pl08x_free_txd_list(pl08x, plchan);
+               break;
+       case DMA_PAUSE:
+               pl08x_pause_phy_chan(plchan->phychan);
+               plchan->state = PL08X_CHAN_PAUSED;
+               break;
+       case DMA_RESUME:
+               pl08x_resume_phy_chan(plchan->phychan);
+               plchan->state = PL08X_CHAN_RUNNING;
+               break;
+       default:
+               /* Unknown command */
+               ret = -ENXIO;
+               break;
+       }
+
+       spin_unlock_irqrestore(&plchan->lock, flags);
+
+       return ret;
+}
+
+bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
+{
+       struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+       char *name = chan_id;
+
+       /* Match on the channel name set up in the platform data */
+       if (!strcmp(plchan->name, name))
+               return true;
+
+       return false;
+}
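+
+/*
+ * Sketch of how a client driver might use this filter with the
+ * dmaengine API (the channel name "uart0_tx" is a made-up example;
+ * real names come from the platform data):
+ *
+ *     dma_cap_mask_t mask;
+ *
+ *     dma_cap_zero(mask);
+ *     dma_cap_set(DMA_SLAVE, mask);
+ *     chan = dma_request_channel(mask, pl08x_filter_id, "uart0_tx");
+ */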
+
+/*
+ * Just check that the device is there and active.
+ * TODO: turn this bit on/off depending on the number of physical
+ * channels actually used; if it is zero, shut it off to save some
+ * power, and cut the clock at the same time.
+ */
+static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
+{
+       u32 val;
+
+       val = readl(pl08x->base + PL080_CONFIG);
+       val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE);
+       /* We implicitly clear bit 1 and that means little-endian mode */
+       val |= PL080_CONFIG_ENABLE;
+       writel(val, pl08x->base + PL080_CONFIG);
+}
+
+static void pl08x_tasklet(unsigned long data)
+{
+       struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
+       struct pl08x_phy_chan *phychan;
+       struct pl08x_driver_data *pl08x;
+
+       /* Check the channel before dereferencing it */
+       BUG_ON(!plchan);
+
+       phychan = plchan->phychan;
+       pl08x = plchan->host;
+
+       spin_lock(&plchan->lock);
+
+       if (plchan->at) {
+               dma_async_tx_callback callback =
+                       plchan->at->tx.callback;
+               void *callback_param =
+                       plchan->at->tx.callback_param;
+
+               /*
+                * Update last completed
+                */
+               plchan->lc =
+                       (plchan->at->tx.cookie);
+
+               /*
+                * Callback to signal completion
+                */
+               if (callback)
+                       callback(callback_param);
+
+               /*
+                * Device callbacks should NOT clear the current
+                * transaction on the channel (though arguably they
+                * sometimes should?)
+                */
+               BUG_ON(!plchan->at);
+
+               /*
+                * Free the descriptor if it's not for a device using a
+                * circular buffer - circular buffer descriptors are
+                * only freed when the client has disabled DMA
+                */
+               if (!plchan->at->cd->circular_buffer) {
+                       pl08x_free_txd(pl08x, plchan->at);
+                       plchan->at = NULL;
+               }
+       }
+       /*
+        * If a new descriptor is queued, set it up;
+        * plchan->at is NULL at this point
+        */
+       if (!list_empty(&plchan->desc_list)) {
+               struct pl08x_txd *next;
+
+               next = list_first_entry(&plchan->desc_list,
+                                       struct pl08x_txd,
+                                       node);
+               list_del(&next->node);
+               plchan->at = next;
+               /* Configure the physical channel for the next txd */
+               pl08x_config_phychan_for_txd(plchan);
+               pl08x_set_cregs(pl08x, plchan->phychan);
+               pl08x_enable_phy_chan(pl08x, plchan->phychan);
+       } else {
+               struct pl08x_dma_chan *waiting = NULL;
+
+               /*
+                * No more jobs, so free up the physical channel
+                * Free any allocated signal on slave transfers too
+                */
+               if ((phychan->signal >= 0) && pl08x->pd->put_signal) {
+                       pl08x->pd->put_signal(plchan);
+                       phychan->signal = -1;
+               }
+               pl08x_put_phy_channel(pl08x, phychan);
+               plchan->phychan = NULL;
+               plchan->state = PL08X_CHAN_IDLE;
+
+               /*
+                * And NOW, before anyone else can grab that freed-up
+                * physical channel, see if there is some memcpy pending
+                * that seriously needs to start because it was stacked
+                * up while we were choking the physical channels with data.
+                */
+               list_for_each_entry(waiting, &pl08x->memcpy.channels,
+                                   chan.device_node) {
+                       if (waiting->state == PL08X_CHAN_WAITING &&
+                           waiting->waiting != NULL) {
+                               int ret;
+
+                               /* This should REALLY not fail now */
+                               ret = prep_phy_channel(waiting,
+                                                      waiting->waiting);
+                               BUG_ON(ret);
+                               waiting->state = PL08X_CHAN_RUNNING;
+                               waiting->waiting = NULL;
+                               pl08x_issue_pending(&waiting->chan);
+                               break;
+                       }
+               }
+       }
+
+       spin_unlock(&plchan->lock);
+}
+
+static irqreturn_t pl08x_irq(int irq, void *dev)
+{
+       struct pl08x_driver_data *pl08x = dev;
+       u32 mask = 0;
+       u32 val;
+       int i;
+
+       val = readl(pl08x->base + PL080_ERR_STATUS);
+       if (val) {
+               /*
+                * An error interrupt (on one or more channels)
+                */
+               dev_err(&pl08x->adev->dev,
+                       "%s error interrupt, register value 0x%08x\n",
+                               __func__, val);
+               /*
+                * Simply clear ALL PL08X error interrupts,
+                * regardless of channel and cause
+                * FIXME: should be 0x00000003 on PL081 really.
+                */
+               writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
+       }
+       val = readl(pl08x->base + PL080_INT_STATUS);
+       for (i = 0; i < pl08x->vd->channels; i++) {
+               if ((1 << i) & val) {
+                       /* Locate physical channel */
+                       struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
+                       struct pl08x_dma_chan *plchan = phychan->serving;
+
+                       /* Schedule tasklet on this channel */
+                       tasklet_schedule(&plchan->tasklet);
+
+                       mask |= (1 << i);
+               }
+       }
+       /*
+        * Clear only the terminal interrupts on channels we processed
+        */
+       writel(mask, pl08x->base + PL080_TC_CLEAR);
+
+       return mask ? IRQ_HANDLED : IRQ_NONE;
+}
+
+/*
+ * Initialise the DMAC memcpy/slave channels.
+ * Make a local wrapper to hold required data
+ */
+static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
+                                          struct dma_device *dmadev,
+                                          unsigned int channels,
+                                          bool slave)
+{
+       struct pl08x_dma_chan *chan;
+       int i;
+
+       INIT_LIST_HEAD(&dmadev->channels);
+       /*
+        * Register as many memcpy channels as we have physical channels:
+        * we won't always be able to use all of them, but the code has
+        * to cope with that situation.
+        */
+       for (i = 0; i < channels; i++) {
+               chan = kzalloc(sizeof(struct pl08x_dma_chan), GFP_KERNEL);
+               if (!chan) {
+                       dev_err(&pl08x->adev->dev,
+                               "%s no memory for channel\n", __func__);
+                       return -ENOMEM;
+               }
+
+               chan->host = pl08x;
+               chan->state = PL08X_CHAN_IDLE;
+
+               if (slave) {
+                       chan->slave = true;
+                       chan->name = pl08x->pd->slave_channels[i].bus_id;
+                       chan->cd = &pl08x->pd->slave_channels[i];
+               } else {
+                       chan->cd = &pl08x->pd->memcpy_channel;
+                       chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
+                       if (!chan->name) {
+                               kfree(chan);
+                               return -ENOMEM;
+                       }
+               }
+               dev_info(&pl08x->adev->dev,
+                        "initialize virtual channel \"%s\"\n",
+                        chan->name);
+
+               chan->chan.device = dmadev;
+               atomic_set(&chan->last_issued, 0);
+               chan->lc = atomic_read(&chan->last_issued);
+
+               spin_lock_init(&chan->lock);
+               INIT_LIST_HEAD(&chan->desc_list);
+               tasklet_init(&chan->tasklet, pl08x_tasklet,
+                            (unsigned long) chan);
+
+               list_add_tail(&chan->chan.device_node, &dmadev->channels);
+       }
+       dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
+                i, slave ? "slave" : "memcpy");
+       return i;
+}
+
+static void pl08x_free_virtual_channels(struct dma_device *dmadev)
+{
+       struct pl08x_dma_chan *chan = NULL;
+       struct pl08x_dma_chan *next;
+
+       list_for_each_entry_safe(chan,
+                                next, &dmadev->channels, chan.device_node) {
+               list_del(&chan->chan.device_node);
+               kfree(chan);
+       }
+}
+
+#ifdef CONFIG_DEBUG_FS
+static const char *pl08x_state_str(enum pl08x_dma_chan_state state)
+{
+       switch (state) {
+       case PL08X_CHAN_IDLE:
+               return "idle";
+       case PL08X_CHAN_RUNNING:
+               return "running";
+       case PL08X_CHAN_PAUSED:
+               return "paused";
+       case PL08X_CHAN_WAITING:
+               return "waiting";
+       default:
+               break;
+       }
+       return "UNKNOWN STATE";
+}
+
+static int pl08x_debugfs_show(struct seq_file *s, void *data)
+{
+       struct pl08x_driver_data *pl08x = s->private;
+       struct pl08x_dma_chan *chan;
+       struct pl08x_phy_chan *ch;
+       unsigned long flags;
+       int i;
+
+       seq_printf(s, "PL08x physical channels:\n");
+       seq_printf(s, "CHANNEL:\tUSER:\n");
+       seq_printf(s, "--------\t-----\n");
+       for (i = 0; i < pl08x->vd->channels; i++) {
+               struct pl08x_dma_chan *virt_chan;
+
+               ch = &pl08x->phy_chans[i];
+
+               spin_lock_irqsave(&ch->lock, flags);
+               virt_chan = ch->serving;
+
+               seq_printf(s, "%d\t\t%s\n",
+                          ch->id, virt_chan ? virt_chan->name : "(none)");
+
+               spin_unlock_irqrestore(&ch->lock, flags);
+       }
+
+       seq_printf(s, "\nPL08x virtual memcpy channels:\n");
+       seq_printf(s, "CHANNEL:\tSTATE:\n");
+       seq_printf(s, "--------\t------\n");
+       list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) {
+               seq_printf(s, "%s\t\t\%s\n", chan->name,
+                          pl08x_state_str(chan->state));
+       }
+
+       seq_printf(s, "\nPL08x virtual slave channels:\n");
+       seq_printf(s, "CHANNEL:\tSTATE:\n");
+       seq_printf(s, "--------\t------\n");
+       list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) {
+               seq_printf(s, "%s\t\t\%s\n", chan->name,
+                          pl08x_state_str(chan->state));
+       }
+
+       return 0;
+}
+
+static int pl08x_debugfs_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, pl08x_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations pl08x_debugfs_operations = {
+       .open           = pl08x_debugfs_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
+{
+       /* Expose a simple debugfs interface to view the channel state */
+       (void) debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO,
+                                  NULL, pl08x,
+                                  &pl08x_debugfs_operations);
+}
+
+#else
+static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
+{
+}
+#endif
+
+static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
+{
+       struct pl08x_driver_data *pl08x;
+       struct vendor_data *vd = id->data;
+       int ret = 0;
+       int i;
+
+       ret = amba_request_regions(adev, NULL);
+       if (ret)
+               return ret;
+
+       /* Create the driver state holder */
+       pl08x = kzalloc(sizeof(struct pl08x_driver_data), GFP_KERNEL);
+       if (!pl08x) {
+               ret = -ENOMEM;
+               goto out_no_pl08x;
+       }
+
+       /* Initialize memcpy engine */
+       dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
+       pl08x->memcpy.dev = &adev->dev;
+       pl08x->memcpy.device_alloc_chan_resources = pl08x_alloc_chan_resources;
+       pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
+       pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
+       pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
+       pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
+       pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
+       pl08x->memcpy.device_control = pl08x_control;
+
+       /* Initialize slave engine */
+       dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
+       pl08x->slave.dev = &adev->dev;
+       pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources;
+       pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
+       pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
+       pl08x->slave.device_tx_status = pl08x_dma_tx_status;
+       pl08x->slave.device_issue_pending = pl08x_issue_pending;
+       pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
+       pl08x->slave.device_control = pl08x_control;
+
+       /* Get the platform data */
+       pl08x->pd = dev_get_platdata(&adev->dev);
+       if (!pl08x->pd) {
+               dev_err(&adev->dev, "no platform data supplied\n");
+               ret = -ENODEV;
+               goto out_no_platdata;
+       }
+
+       /* Assign useful pointers to the driver state */
+       pl08x->adev = adev;
+       pl08x->vd = vd;
+
+       /* A DMA memory pool for LLIs, align on 1-byte boundary */
+       pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
+                       PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);
+       if (!pl08x->pool) {
+               ret = -ENOMEM;
+               goto out_no_lli_pool;
+       }
+
+       spin_lock_init(&pl08x->lock);
+
+       pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
+       if (!pl08x->base) {
+               ret = -ENOMEM;
+               goto out_no_ioremap;
+       }
+
+       /* Turn on the PL08x */
+       pl08x_ensure_on(pl08x);
+
+       /*
+        * Attach the interrupt handler
+        */
+       writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
+       writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);
+
+       ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
+                         vd->name, pl08x);
+       if (ret) {
+               dev_err(&adev->dev, "%s failed to request interrupt %d\n",
+                       __func__, adev->irq[0]);
+               goto out_no_irq;
+       }
+
+       /* Initialize physical channels */
+       pl08x->phy_chans = kmalloc((vd->channels * sizeof(struct pl08x_phy_chan)),
+                       GFP_KERNEL);
+       if (!pl08x->phy_chans) {
+               dev_err(&adev->dev, "%s failed to allocate "
+                       "physical channel holders\n",
+                       __func__);
+               ret = -ENOMEM;
+               goto out_no_phychans;
+       }
+
+       for (i = 0; i < vd->channels; i++) {
+               struct pl08x_phy_chan *ch = &pl08x->phy_chans[i];
+
+               ch->id = i;
+               ch->base = pl08x->base + PL080_Cx_BASE(i);
+               spin_lock_init(&ch->lock);
+               ch->serving = NULL;
+               ch->signal = -1;
+               dev_info(&adev->dev,
+                        "physical channel %d is %s\n", i,
+                        pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
+       }
+
+       /* Register as many memcpy channels as there are physical channels */
+       ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy,
+                                             pl08x->vd->channels, false);
+       if (ret <= 0) {
+               dev_warn(&pl08x->adev->dev,
+                        "%s failed to enumerate memcpy channels - %d\n",
+                        __func__, ret);
+               goto out_no_memcpy;
+       }
+       pl08x->memcpy.chancnt = ret;
+
+       /* Register slave channels */
+       ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
+                                             pl08x->pd->num_slave_channels,
+                                             true);
+       if (ret <= 0) {
+               dev_warn(&pl08x->adev->dev,
+                       "%s failed to enumerate slave channels - %d\n",
+                               __func__, ret);
+               goto out_no_slave;
+       }
+       pl08x->slave.chancnt = ret;
+
+       ret = dma_async_device_register(&pl08x->memcpy);
+       if (ret) {
+               dev_warn(&pl08x->adev->dev,
+                       "%s failed to register memcpy as an async device - %d\n",
+                       __func__, ret);
+               goto out_no_memcpy_reg;
+       }
+
+       ret = dma_async_device_register(&pl08x->slave);
+       if (ret) {
+               dev_warn(&pl08x->adev->dev,
+                       "%s failed to register slave as an async device - %d\n",
+                       __func__, ret);
+               goto out_no_slave_reg;
+       }
+
+       amba_set_drvdata(adev, pl08x);
+       init_pl08x_debugfs(pl08x);
+       dev_info(&pl08x->adev->dev, "ARM(R) %s DMA block initialized @%08x\n",
+               vd->name, adev->res.start);
+       return 0;
+
+out_no_slave_reg:
+       dma_async_device_unregister(&pl08x->memcpy);
+out_no_memcpy_reg:
+       pl08x_free_virtual_channels(&pl08x->slave);
+out_no_slave:
+       pl08x_free_virtual_channels(&pl08x->memcpy);
+out_no_memcpy:
+       kfree(pl08x->phy_chans);
+out_no_phychans:
+       free_irq(adev->irq[0], pl08x);
+out_no_irq:
+       iounmap(pl08x->base);
+out_no_ioremap:
+       dma_pool_destroy(pl08x->pool);
+out_no_lli_pool:
+out_no_platdata:
+       kfree(pl08x);
+out_no_pl08x:
+       amba_release_regions(adev);
+       return ret;
+}
+
+/* PL080 has 8 channels and the PL081 has just 2 */
+static struct vendor_data vendor_pl080 = {
+       .name = "PL080",
+       .channels = 8,
+       .dualmaster = true,
+};
+
+static struct vendor_data vendor_pl081 = {
+       .name = "PL081",
+       .channels = 2,
+       .dualmaster = false,
+};
+
+static struct amba_id pl08x_ids[] = {
+       /* PL080 */
+       {
+               .id     = 0x00041080,
+               .mask   = 0x000fffff,
+               .data   = &vendor_pl080,
+       },
+       /* PL081 */
+       {
+               .id     = 0x00041081,
+               .mask   = 0x000fffff,
+               .data   = &vendor_pl081,
+       },
+       /* Nomadik 8815 PL080 variant */
+       {
+               .id     = 0x00280880,
+               .mask   = 0x00ffffff,
+               .data   = &vendor_pl080,
+       },
+       { 0, 0 },
+};
+
+static struct amba_driver pl08x_amba_driver = {
+       .drv.name       = DRIVER_NAME,
+       .id_table       = pl08x_ids,
+       .probe          = pl08x_probe,
+};
+
+static int __init pl08x_init(void)
+{
+       int retval;
+       retval = amba_driver_register(&pl08x_amba_driver);
+       if (retval)
+               printk(KERN_WARNING DRIVER_NAME
+                      ": failed to register as an amba device (%d)\n",
+                      retval);
+       return retval;
+}
+subsys_initcall(pl08x_init);
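A client claims one of these virtual channels through the standard dmaengine filter API, passing the exported pl08x_filter_id together with the bus_id string that the board registered in its pl08x_platform_data. A minimal sketch, not part of the patch: "uart0_tx" is a hypothetical slave channel name, and the header path is assumed to be the one added by this series.

    #include <linux/dmaengine.h>
    #include <linux/amba/pl08x.h>   /* declares pl08x_filter_id (assumed) */

    static struct dma_chan *example_request_pl08x_chan(void)
    {
            dma_cap_mask_t mask;

            dma_cap_zero(mask);
            dma_cap_set(DMA_SLAVE, mask);

            /* returns NULL if no free channel matches the name */
            return dma_request_channel(mask, pl08x_filter_id, "uart0_tx");
    }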
index ae2b8714d19058cfc3371d7b7cc587b56fb2afbc..a6656834f0ff2f3d4241c204b424dbf4fcc40737 100644 (file)
@@ -1610,7 +1610,7 @@ int __init coh901318_init(void)
 {
        return platform_driver_probe(&coh901318_driver, coh901318_probe);
 }
-subsys_initcall(coh901318_init);
+arch_initcall(coh901318_init);
 
 void __exit coh901318_exit(void)
 {
index 9d31d5eb95c1ea47618b01ed61206eaf315a7925..8bcb15fb959d1e3de64e79a5e90300c105d62ad3 100644 (file)
@@ -690,8 +690,12 @@ int dma_async_device_register(struct dma_device *device)
                !device->device_prep_dma_memset);
        BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
                !device->device_prep_dma_interrupt);
+       BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
+               !device->device_prep_dma_sg);
        BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
                !device->device_prep_slave_sg);
+       BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
+               !device->device_prep_dma_cyclic);
        BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
                !device->device_control);
 
@@ -702,7 +706,7 @@ int dma_async_device_register(struct dma_device *device)
        BUG_ON(!device->dev);
 
        /* note: this only matters in the
-        * CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH=y case
+        * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
         */
        if (device_has_all_tx_types(device))
                dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
@@ -976,7 +980,7 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
        struct dma_chan *chan)
 {
        tx->chan = chan;
-       #ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
+       #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
        spin_lock_init(&tx->lock);
        #endif
 }
index cea08bed9cf96f10ec9072c9c3a6aa6cf92fe506..286c3ac6bdcc236d020229ce0397ee9e78e07100 100644 (file)
 #include <linux/dmapool.h>
 #include <linux/of_platform.h>
 
-#include <asm/fsldma.h>
 #include "fsldma.h"
 
+static const char msg_ld_oom[] = "No free memory for link descriptor\n";
+
 static void dma_init(struct fsldma_chan *chan)
 {
        /* Reset the channel */
@@ -499,7 +500,7 @@ fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
 
        new = fsl_dma_alloc_descriptor(chan);
        if (!new) {
-               dev_err(chan->dev, "No free memory for link descriptor\n");
+               dev_err(chan->dev, msg_ld_oom);
                return NULL;
        }
 
@@ -536,8 +537,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
                /* Allocate the link descriptor from DMA pool */
                new = fsl_dma_alloc_descriptor(chan);
                if (!new) {
-                       dev_err(chan->dev,
-                                       "No free memory for link descriptor\n");
+                       dev_err(chan->dev, msg_ld_oom);
                        goto fail;
                }
 #ifdef FSL_DMA_LD_DEBUG
@@ -583,223 +583,205 @@ fail:
        return NULL;
 }
 
-/**
- * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
- * @chan: DMA channel
- * @sgl: scatterlist to transfer to/from
- * @sg_len: number of entries in @scatterlist
- * @direction: DMA direction
- * @flags: DMAEngine flags
- *
- * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
- * DMA_SLAVE API, this gets the device-specific information from the
- * chan->private variable.
- */
-static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
-       struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
-       enum dma_data_direction direction, unsigned long flags)
+static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
+       struct scatterlist *dst_sg, unsigned int dst_nents,
+       struct scatterlist *src_sg, unsigned int src_nents,
+       unsigned long flags)
 {
-       struct fsldma_chan *chan;
        struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
-       struct fsl_dma_slave *slave;
-       size_t copy;
-
-       int i;
-       struct scatterlist *sg;
-       size_t sg_used;
-       size_t hw_used;
-       struct fsl_dma_hw_addr *hw;
-       dma_addr_t dma_dst, dma_src;
+       struct fsldma_chan *chan = to_fsl_chan(dchan);
+       size_t dst_avail, src_avail;
+       dma_addr_t dst, src;
+       size_t len;
 
-       if (!dchan)
+       /* basic sanity checks */
+       if (dst_nents == 0 || src_nents == 0)
                return NULL;
 
-       if (!dchan->private)
+       if (dst_sg == NULL || src_sg == NULL)
                return NULL;
 
-       chan = to_fsl_chan(dchan);
-       slave = dchan->private;
+       /*
+        * TODO: should we check that both scatterlists have the same
+        * number of bytes in total? Is that really an error?
+        */
 
-       if (list_empty(&slave->addresses))
-               return NULL;
+       /* get prepared for the loop */
+       dst_avail = sg_dma_len(dst_sg);
+       src_avail = sg_dma_len(src_sg);
 
-       hw = list_first_entry(&slave->addresses, struct fsl_dma_hw_addr, entry);
-       hw_used = 0;
+       /* run until we are out of scatterlist entries */
+       while (true) {
 
-       /*
-        * Build the hardware transaction to copy from the scatterlist to
-        * the hardware, or from the hardware to the scatterlist
-        *
-        * If you are copying from the hardware to the scatterlist and it
-        * takes two hardware entries to fill an entire page, then both
-        * hardware entries will be coalesced into the same page
-        *
-        * If you are copying from the scatterlist to the hardware and a
-        * single page can fill two hardware entries, then the data will
-        * be read out of the page into the first hardware entry, and so on
-        */
-       for_each_sg(sgl, sg, sg_len, i) {
-               sg_used = 0;
-
-               /* Loop until the entire scatterlist entry is used */
-               while (sg_used < sg_dma_len(sg)) {
-
-                       /*
-                        * If we've used up the current hardware address/length
-                        * pair, we need to load a new one
-                        *
-                        * This is done in a while loop so that descriptors with
-                        * length == 0 will be skipped
-                        */
-                       while (hw_used >= hw->length) {
-
-                               /*
-                                * If the current hardware entry is the last
-                                * entry in the list, we're finished
-                                */
-                               if (list_is_last(&hw->entry, &slave->addresses))
-                                       goto finished;
-
-                               /* Get the next hardware address/length pair */
-                               hw = list_entry(hw->entry.next,
-                                               struct fsl_dma_hw_addr, entry);
-                               hw_used = 0;
-                       }
-
-                       /* Allocate the link descriptor from DMA pool */
-                       new = fsl_dma_alloc_descriptor(chan);
-                       if (!new) {
-                               dev_err(chan->dev, "No free memory for "
-                                                      "link descriptor\n");
-                               goto fail;
-                       }
+               /* create the largest transaction possible */
+               len = min_t(size_t, src_avail, dst_avail);
+               len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT);
+               if (len == 0)
+                       goto fetch;
+
+               dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
+               src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;
+
+               /* allocate and populate the descriptor */
+               new = fsl_dma_alloc_descriptor(chan);
+               if (!new) {
+                       dev_err(chan->dev, msg_ld_oom);
+                       goto fail;
+               }
 #ifdef FSL_DMA_LD_DEBUG
-                       dev_dbg(chan->dev, "new link desc alloc %p\n", new);
+               dev_dbg(chan->dev, "new link desc alloc %p\n", new);
 #endif
 
-                       /*
-                        * Calculate the maximum number of bytes to transfer,
-                        * making sure it is less than the DMA controller limit
-                        */
-                       copy = min_t(size_t, sg_dma_len(sg) - sg_used,
-                                            hw->length - hw_used);
-                       copy = min_t(size_t, copy, FSL_DMA_BCR_MAX_CNT);
-
-                       /*
-                        * DMA_FROM_DEVICE
-                        * from the hardware to the scatterlist
-                        *
-                        * DMA_TO_DEVICE
-                        * from the scatterlist to the hardware
-                        */
-                       if (direction == DMA_FROM_DEVICE) {
-                               dma_src = hw->address + hw_used;
-                               dma_dst = sg_dma_address(sg) + sg_used;
-                       } else {
-                               dma_src = sg_dma_address(sg) + sg_used;
-                               dma_dst = hw->address + hw_used;
-                       }
-
-                       /* Fill in the descriptor */
-                       set_desc_cnt(chan, &new->hw, copy);
-                       set_desc_src(chan, &new->hw, dma_src);
-                       set_desc_dst(chan, &new->hw, dma_dst);
-
-                       /*
-                        * If this is not the first descriptor, chain the
-                        * current descriptor after the previous descriptor
-                        */
-                       if (!first) {
-                               first = new;
-                       } else {
-                               set_desc_next(chan, &prev->hw,
-                                             new->async_tx.phys);
-                       }
-
-                       new->async_tx.cookie = 0;
-                       async_tx_ack(&new->async_tx);
-
-                       prev = new;
-                       sg_used += copy;
-                       hw_used += copy;
-
-                       /* Insert the link descriptor into the LD ring */
-                       list_add_tail(&new->node, &first->tx_list);
-               }
-       }
+               set_desc_cnt(chan, &new->hw, len);
+               set_desc_src(chan, &new->hw, src);
+               set_desc_dst(chan, &new->hw, dst);
 
-finished:
+               if (!first)
+                       first = new;
+               else
+                       set_desc_next(chan, &prev->hw, new->async_tx.phys);
 
-       /* All of the hardware address/length pairs had length == 0 */
-       if (!first || !new)
-               return NULL;
+               new->async_tx.cookie = 0;
+               async_tx_ack(&new->async_tx);
+               prev = new;
 
-       new->async_tx.flags = flags;
-       new->async_tx.cookie = -EBUSY;
+               /* Insert the link descriptor to the LD ring */
+               list_add_tail(&new->node, &first->tx_list);
 
-       /* Set End-of-link to the last link descriptor of new list */
-       set_ld_eol(chan, new);
+               /* update metadata */
+               dst_avail -= len;
+               src_avail -= len;
+
+fetch:
+               /* fetch the next dst scatterlist entry */
+               if (dst_avail == 0) {
+
+                       /* no more entries: we're done */
+                       if (dst_nents == 0)
+                               break;
+
+                       /* fetch the next entry: if there are no more: done */
+                       dst_sg = sg_next(dst_sg);
+                       if (dst_sg == NULL)
+                               break;
+
+                       dst_nents--;
+                       dst_avail = sg_dma_len(dst_sg);
+               }
 
-       /* Enable extra controller features */
-       if (chan->set_src_loop_size)
-               chan->set_src_loop_size(chan, slave->src_loop_size);
+               /* fetch the next src scatterlist entry */
+               if (src_avail == 0) {
 
-       if (chan->set_dst_loop_size)
-               chan->set_dst_loop_size(chan, slave->dst_loop_size);
+                       /* no more entries: we're done */
+                       if (src_nents == 0)
+                               break;
 
-       if (chan->toggle_ext_start)
-               chan->toggle_ext_start(chan, slave->external_start);
+                       /* fetch the next entry: if there are no more: done */
+                       src_sg = sg_next(src_sg);
+                       if (src_sg == NULL)
+                               break;
 
-       if (chan->toggle_ext_pause)
-               chan->toggle_ext_pause(chan, slave->external_pause);
+                       src_nents--;
+                       src_avail = sg_dma_len(src_sg);
+               }
+       }
 
-       if (chan->set_request_count)
-               chan->set_request_count(chan, slave->request_count);
+       /* if every entry was zero-length we never allocated a descriptor */
+       if (!new)
+               return NULL;
+
+       new->async_tx.flags = flags; /* client is in control of this ack */
+       new->async_tx.cookie = -EBUSY;
+
+       /* Set End-of-link to the last link descriptor of new list */
+       set_ld_eol(chan, new);
 
        return &first->async_tx;
 
 fail:
-       /* If first was not set, then we failed to allocate the very first
-        * descriptor, and we're done */
        if (!first)
                return NULL;
 
+       fsldma_free_desc_list_reverse(chan, &first->tx_list);
+       return NULL;
+}
+
+/**
+ * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
+ * @chan: DMA channel
+ * @sgl: scatterlist to transfer to/from
+ * @sg_len: number of entries in @sgl
+ * @direction: DMA direction
+ * @flags: DMAEngine flags
+ *
+ * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
+ * DMA_SLAVE API, this gets the device-specific information from the
+ * chan->private variable.
+ */
+static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
+       struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
+       enum dma_data_direction direction, unsigned long flags)
+{
        /*
-        * First is set, so all of the descriptors we allocated have been added
-        * to first->tx_list, INCLUDING "first" itself. Therefore we
-        * must traverse the list backwards freeing each descriptor in turn
+        * This operation is not supported on the Freescale DMA controller
         *
-        * We're re-using variables for the loop, oh well
+        * However, we need to provide the function pointer to allow the
+        * device_control() method to work.
         */
-       fsldma_free_desc_list_reverse(chan, &first->tx_list);
        return NULL;
 }
 
 static int fsl_dma_device_control(struct dma_chan *dchan,
                                  enum dma_ctrl_cmd cmd, unsigned long arg)
 {
+       struct dma_slave_config *config;
        struct fsldma_chan *chan;
        unsigned long flags;
-
-       /* Only supports DMA_TERMINATE_ALL */
-       if (cmd != DMA_TERMINATE_ALL)
-               return -ENXIO;
+       int size;
 
        if (!dchan)
                return -EINVAL;
 
        chan = to_fsl_chan(dchan);
 
-       /* Halt the DMA engine */
-       dma_halt(chan);
+       switch (cmd) {
+       case DMA_TERMINATE_ALL:
+               /* Halt the DMA engine */
+               dma_halt(chan);
 
-       spin_lock_irqsave(&chan->desc_lock, flags);
+               spin_lock_irqsave(&chan->desc_lock, flags);
 
-       /* Remove and free all of the descriptors in the LD queue */
-       fsldma_free_desc_list(chan, &chan->ld_pending);
-       fsldma_free_desc_list(chan, &chan->ld_running);
+               /* Remove and free all of the descriptors in the LD queue */
+               fsldma_free_desc_list(chan, &chan->ld_pending);
+               fsldma_free_desc_list(chan, &chan->ld_running);
 
-       spin_unlock_irqrestore(&chan->desc_lock, flags);
+               spin_unlock_irqrestore(&chan->desc_lock, flags);
+               return 0;
+
+       case DMA_SLAVE_CONFIG:
+               config = (struct dma_slave_config *)arg;
+
+               /* make sure the channel supports setting burst size */
+               if (!chan->set_request_count)
+                       return -ENXIO;
+
+               /* we set the controller burst size depending on direction */
+               if (config->direction == DMA_TO_DEVICE)
+                       size = config->dst_addr_width * config->dst_maxburst;
+               else
+                       size = config->src_addr_width * config->src_maxburst;
+
+               chan->set_request_count(chan, size);
+               return 0;
+
+       case FSLDMA_EXTERNAL_START:
+
+               /* make sure the channel supports external start */
+               if (!chan->toggle_ext_start)
+                       return -ENXIO;
+
+               chan->toggle_ext_start(chan, arg);
+               return 0;
+
+       default:
+               return -ENXIO;
+       }
 
        return 0;
 }
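The DMA_SLAVE_CONFIG case above derives the controller request count as the configured address width times maxburst for the transfer direction. A hedged client-side sketch, with illustrative width and burst values that are not taken from this patch:

    #include <linux/dmaengine.h>

    /* program a 32-byte request size: 4-byte bus width x 8-beat burst */
    static int example_fsldma_set_burst(struct dma_chan *chan)
    {
            struct dma_slave_config cfg = {
                    .direction      = DMA_TO_DEVICE,
                    .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                    .dst_maxburst   = 8,    /* driver computes 4 * 8 = 32 */
            };

            /* returns -ENXIO if the channel lacks set_request_count */
            return chan->device->device_control(chan, DMA_SLAVE_CONFIG,
                                                (unsigned long)&cfg);
    }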
@@ -1327,11 +1309,13 @@ static int __devinit fsldma_of_probe(struct platform_device *op,
 
        dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
        dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
+       dma_cap_set(DMA_SG, fdev->common.cap_mask);
        dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
        fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
        fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
        fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
        fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
+       fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
        fdev->common.device_tx_status = fsl_tx_status;
        fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
        fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
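With DMA_SG now advertised, a client may hand the driver two scatterlists of different layout and let fsl_dma_prep_sg() carve the copy into controller-sized link descriptors. A minimal sketch, assuming both lists are already DMA-mapped:

    #include <linux/dmaengine.h>
    #include <linux/scatterlist.h>

    static dma_cookie_t example_sg_copy(struct dma_chan *chan,
                    struct scatterlist *dst, unsigned int dst_nents,
                    struct scatterlist *src, unsigned int src_nents)
    {
            struct dma_async_tx_descriptor *tx;

            if (!dma_has_cap(DMA_SG, chan->device->cap_mask))
                    return -ENOSYS;

            tx = chan->device->device_prep_dma_sg(chan, dst, dst_nents,
                            src, src_nents, DMA_PREP_INTERRUPT);
            if (!tx)
                    return -ENOMEM;

            /* the returned cookie is polled via device_tx_status() */
            return tx->tx_submit(tx);
    }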
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
new file mode 100644 (file)
index 0000000..f629e49
--- /dev/null
@@ -0,0 +1,424 @@
+/*
+ * drivers/dma/imx-dma.c
+ *
+ * This file contains a driver for the Freescale i.MX DMA engine
+ * found on i.MX1/21/27
+ *
+ * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/dmaengine.h>
+
+#include <asm/irq.h>
+#include <mach/dma-v1.h>
+#include <mach/hardware.h>
+
+struct imxdma_channel {
+       struct imxdma_engine            *imxdma;
+       unsigned int                    channel;
+       unsigned int                    imxdma_channel;
+
+       enum dma_slave_buswidth         word_size;
+       dma_addr_t                      per_address;
+       u32                             watermark_level;
+       struct dma_chan                 chan;
+       spinlock_t                      lock;
+       struct dma_async_tx_descriptor  desc;
+       dma_cookie_t                    last_completed;
+       enum dma_status                 status;
+       int                             dma_request;
+       struct scatterlist              *sg_list;
+};
+
+#define MAX_DMA_CHANNELS 8
+
+struct imxdma_engine {
+       struct device                   *dev;
+       struct dma_device               dma_device;
+       struct imxdma_channel           channel[MAX_DMA_CHANNELS];
+};
+
+static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
+{
+       return container_of(chan, struct imxdma_channel, chan);
+}
+
+static void imxdma_handle(struct imxdma_channel *imxdmac)
+{
+       if (imxdmac->desc.callback)
+               imxdmac->desc.callback(imxdmac->desc.callback_param);
+       imxdmac->last_completed = imxdmac->desc.cookie;
+}
+
+static void imxdma_irq_handler(int channel, void *data)
+{
+       struct imxdma_channel *imxdmac = data;
+
+       imxdmac->status = DMA_SUCCESS;
+       imxdma_handle(imxdmac);
+}
+
+static void imxdma_err_handler(int channel, void *data, int error)
+{
+       struct imxdma_channel *imxdmac = data;
+
+       imxdmac->status = DMA_ERROR;
+       imxdma_handle(imxdmac);
+}
+
+static void imxdma_progression(int channel, void *data,
+               struct scatterlist *sg)
+{
+       struct imxdma_channel *imxdmac = data;
+
+       imxdmac->status = DMA_SUCCESS;
+       imxdma_handle(imxdmac);
+}
+
+static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+               unsigned long arg)
+{
+       struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
+       struct dma_slave_config *dmaengine_cfg = (void *)arg;
+       int ret;
+       unsigned int mode = 0;
+
+       switch (cmd) {
+       case DMA_TERMINATE_ALL:
+               imxdmac->status = DMA_ERROR;
+               imx_dma_disable(imxdmac->imxdma_channel);
+               return 0;
+       case DMA_SLAVE_CONFIG:
+               if (dmaengine_cfg->direction == DMA_FROM_DEVICE) {
+                       imxdmac->per_address = dmaengine_cfg->src_addr;
+                       imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
+                       imxdmac->word_size = dmaengine_cfg->src_addr_width;
+               } else {
+                       imxdmac->per_address = dmaengine_cfg->dst_addr;
+                       imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
+                       imxdmac->word_size = dmaengine_cfg->dst_addr_width;
+               }
+
+               switch (imxdmac->word_size) {
+               case DMA_SLAVE_BUSWIDTH_1_BYTE:
+                       mode = IMX_DMA_MEMSIZE_8;
+                       break;
+               case DMA_SLAVE_BUSWIDTH_2_BYTES:
+                       mode = IMX_DMA_MEMSIZE_16;
+                       break;
+               default:
+               case DMA_SLAVE_BUSWIDTH_4_BYTES:
+                       mode = IMX_DMA_MEMSIZE_32;
+                       break;
+               }
+               ret = imx_dma_config_channel(imxdmac->imxdma_channel,
+                               mode | IMX_DMA_TYPE_FIFO,
+                               IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
+                               imxdmac->dma_request, 1);
+
+               if (ret)
+                       return ret;
+
+               imx_dma_config_burstlen(imxdmac->imxdma_channel, imxdmac->watermark_level);
+
+               return 0;
+       default:
+               return -ENOSYS;
+       }
+
+       return -EINVAL;
+}
+
+static enum dma_status imxdma_tx_status(struct dma_chan *chan,
+                                           dma_cookie_t cookie,
+                                           struct dma_tx_state *txstate)
+{
+       struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
+       dma_cookie_t last_used;
+       enum dma_status ret;
+
+       last_used = chan->cookie;
+
+       ret = dma_async_is_complete(cookie, imxdmac->last_completed, last_used);
+       dma_set_tx_state(txstate, imxdmac->last_completed, last_used, 0);
+
+       return ret;
+}
+
+static dma_cookie_t imxdma_assign_cookie(struct imxdma_channel *imxdma)
+{
+       dma_cookie_t cookie = imxdma->chan.cookie;
+
+       if (++cookie < 0)
+               cookie = 1;
+
+       imxdma->chan.cookie = cookie;
+       imxdma->desc.cookie = cookie;
+
+       return cookie;
+}
+
+static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+       struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
+       dma_cookie_t cookie;
+
+       spin_lock_irq(&imxdmac->lock);
+
+       cookie = imxdma_assign_cookie(imxdmac);
+
+       imx_dma_enable(imxdmac->imxdma_channel);
+
+       spin_unlock_irq(&imxdmac->lock);
+
+       return cookie;
+}
+
+static int imxdma_alloc_chan_resources(struct dma_chan *chan)
+{
+       struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
+       struct imx_dma_data *data = chan->private;
+
+       imxdmac->dma_request = data->dma_request;
+
+       dma_async_tx_descriptor_init(&imxdmac->desc, chan);
+       imxdmac->desc.tx_submit = imxdma_tx_submit;
+       /* txd.flags will be overwritten in prep funcs */
+       imxdmac->desc.flags = DMA_CTRL_ACK;
+
+       imxdmac->status = DMA_SUCCESS;
+
+       return 0;
+}
+
+static void imxdma_free_chan_resources(struct dma_chan *chan)
+{
+       struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
+
+       imx_dma_disable(imxdmac->imxdma_channel);
+
+       /* kfree(NULL) is a no-op, so no check is needed */
+       kfree(imxdmac->sg_list);
+       imxdmac->sg_list = NULL;
+}
+
+static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
+               struct dma_chan *chan, struct scatterlist *sgl,
+               unsigned int sg_len, enum dma_data_direction direction,
+               unsigned long flags)
+{
+       struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
+       struct scatterlist *sg;
+       int i, ret, dma_length = 0;
+       unsigned int dmamode;
+
+       if (imxdmac->status == DMA_IN_PROGRESS)
+               return NULL;
+
+       imxdmac->status = DMA_IN_PROGRESS;
+
+       for_each_sg(sgl, sg, sg_len, i) {
+               dma_length += sg->length;
+       }
+
+       if (direction == DMA_FROM_DEVICE)
+               dmamode = DMA_MODE_READ;
+       else
+               dmamode = DMA_MODE_WRITE;
+
+       ret = imx_dma_setup_sg(imxdmac->imxdma_channel, sgl, sg_len,
+                dma_length, imxdmac->per_address, dmamode);
+       if (ret)
+               return NULL;
+
+       return &imxdmac->desc;
+}
+
+static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
+               struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
+               size_t period_len, enum dma_data_direction direction)
+{
+       struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
+       struct imxdma_engine *imxdma = imxdmac->imxdma;
+       int i, ret;
+       unsigned int periods = buf_len / period_len;
+       unsigned int dmamode;
+
+       dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
+                       __func__, imxdmac->channel, buf_len, period_len);
+
+       if (imxdmac->status == DMA_IN_PROGRESS)
+               return NULL;
+       imxdmac->status = DMA_IN_PROGRESS;
+
+       ret = imx_dma_setup_progression_handler(imxdmac->imxdma_channel,
+                       imxdma_progression);
+       if (ret) {
+               dev_err(imxdma->dev, "Failed to setup the DMA handler\n");
+               return NULL;
+       }
+
+       kfree(imxdmac->sg_list);
+
+       imxdmac->sg_list = kcalloc(periods + 1,
+                       sizeof(struct scatterlist), GFP_KERNEL);
+       if (!imxdmac->sg_list)
+               return NULL;
+
+       sg_init_table(imxdmac->sg_list, periods);
+
+       for (i = 0; i < periods; i++) {
+               imxdmac->sg_list[i].page_link = 0;
+               imxdmac->sg_list[i].offset = 0;
+               imxdmac->sg_list[i].dma_address = dma_addr;
+               imxdmac->sg_list[i].length = period_len;
+               dma_addr += period_len;
+       }
+
+       /* close the loop */
+       imxdmac->sg_list[periods].offset = 0;
+       imxdmac->sg_list[periods].length = 0;
+       imxdmac->sg_list[periods].page_link =
+               ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;
+
+       if (direction == DMA_FROM_DEVICE)
+               dmamode = DMA_MODE_READ;
+       else
+               dmamode = DMA_MODE_WRITE;
+
+       ret = imx_dma_setup_sg(imxdmac->imxdma_channel, imxdmac->sg_list, periods,
+                IMX_DMA_LENGTH_LOOP, imxdmac->per_address, dmamode);
+       if (ret)
+               return NULL;
+
+       return &imxdmac->desc;
+}
+
+static void imxdma_issue_pending(struct dma_chan *chan)
+{
+       /*
+        * Nothing to do. We only have a single descriptor
+        */
+}
+
+static int __init imxdma_probe(struct platform_device *pdev)
+{
+       struct imxdma_engine *imxdma;
+       int ret, i;
+
+       imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL);
+       if (!imxdma)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&imxdma->dma_device.channels);
+
+       /* Initialize channel parameters */
+       for (i = 0; i < MAX_DMA_CHANNELS; i++) {
+               struct imxdma_channel *imxdmac = &imxdma->channel[i];
+
+               imxdmac->imxdma_channel = imx_dma_request_by_prio("dmaengine",
+                               DMA_PRIO_MEDIUM);
+               if ((int)imxdmac->imxdma_channel < 0) {
+                       ret = -ENODEV;
+                       goto err_init;
+               }
+
+               imx_dma_setup_handlers(imxdmac->imxdma_channel,
+                      imxdma_irq_handler, imxdma_err_handler, imxdmac);
+
+               imxdmac->imxdma = imxdma;
+               spin_lock_init(&imxdmac->lock);
+
+               dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
+               dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
+
+               imxdmac->chan.device = &imxdma->dma_device;
+               imxdmac->chan.chan_id = i;
+               imxdmac->channel = i;
+
+               /* Add the channel to the DMAC list */
+               list_add_tail(&imxdmac->chan.device_node, &imxdma->dma_device.channels);
+       }
+
+       imxdma->dev = &pdev->dev;
+       imxdma->dma_device.dev = &pdev->dev;
+
+       imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
+       imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
+       imxdma->dma_device.device_tx_status = imxdma_tx_status;
+       imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
+       imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
+       imxdma->dma_device.device_control = imxdma_control;
+       imxdma->dma_device.device_issue_pending = imxdma_issue_pending;
+
+       platform_set_drvdata(pdev, imxdma);
+
+       ret = dma_async_device_register(&imxdma->dma_device);
+       if (ret) {
+               dev_err(&pdev->dev, "unable to register\n");
+               goto err_init;
+       }
+
+       return 0;
+
+err_init:
+       while (--i >= 0) {
+               struct imxdma_channel *imxdmac = &imxdma->channel[i];
+               imx_dma_free(imxdmac->imxdma_channel);
+       }
+
+       kfree(imxdma);
+       return ret;
+}
+
+static int __exit imxdma_remove(struct platform_device *pdev)
+{
+       struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
+       int i;
+
+       dma_async_device_unregister(&imxdma->dma_device);
+
+       for (i = 0; i < MAX_DMA_CHANNELS; i++) {
+               struct imxdma_channel *imxdmac = &imxdma->channel[i];
+
+               imx_dma_free(imxdmac->imxdma_channel);
+       }
+
+       kfree(imxdma);
+
+       return 0;
+}
+
+static struct platform_driver imxdma_driver = {
+       .driver         = {
+               .name   = "imx-dma",
+       },
+       .remove         = __exit_p(imxdma_remove),
+};
+
+static int __init imxdma_module_init(void)
+{
+       return platform_driver_probe(&imxdma_driver, imxdma_probe);
+}
+subsys_initcall(imxdma_module_init);
+
+MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
+MODULE_DESCRIPTION("i.MX dma driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
new file mode 100644 (file)
index 0000000..0834323
--- /dev/null
@@ -0,0 +1,1392 @@
+/*
+ * drivers/dma/imx-sdma.c
+ *
+ * This file contains a driver for the Freescale Smart DMA engine
+ *
+ * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ *
+ * Based on code from Freescale:
+ *
+ * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/semaphore.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/dmaengine.h>
+
+#include <asm/irq.h>
+#include <mach/sdma.h>
+#include <mach/dma.h>
+#include <mach/hardware.h>
+
+/* SDMA registers */
+#define SDMA_H_C0PTR           0x000
+#define SDMA_H_INTR            0x004
+#define SDMA_H_STATSTOP                0x008
+#define SDMA_H_START           0x00c
+#define SDMA_H_EVTOVR          0x010
+#define SDMA_H_DSPOVR          0x014
+#define SDMA_H_HOSTOVR         0x018
+#define SDMA_H_EVTPEND         0x01c
+#define SDMA_H_DSPENBL         0x020
+#define SDMA_H_RESET           0x024
+#define SDMA_H_EVTERR          0x028
+#define SDMA_H_INTRMSK         0x02c
+#define SDMA_H_PSW             0x030
+#define SDMA_H_EVTERRDBG       0x034
+#define SDMA_H_CONFIG          0x038
+#define SDMA_ONCE_ENB          0x040
+#define SDMA_ONCE_DATA         0x044
+#define SDMA_ONCE_INSTR                0x048
+#define SDMA_ONCE_STAT         0x04c
+#define SDMA_ONCE_CMD          0x050
+#define SDMA_EVT_MIRROR                0x054
+#define SDMA_ILLINSTADDR       0x058
+#define SDMA_CHN0ADDR          0x05c
+#define SDMA_ONCE_RTB          0x060
+#define SDMA_XTRIG_CONF1       0x070
+#define SDMA_XTRIG_CONF2       0x074
+#define SDMA_CHNENBL0_V2       0x200
+#define SDMA_CHNENBL0_V1       0x080
+#define SDMA_CHNPRI_0          0x100
+
+/*
+ * Buffer descriptor status values.
+ */
+#define BD_DONE  0x01
+#define BD_WRAP  0x02
+#define BD_CONT  0x04
+#define BD_INTR  0x08
+#define BD_RROR  0x10
+#define BD_LAST  0x20
+#define BD_EXTD  0x80
+
+/*
+ * Data Node descriptor status values.
+ */
+#define DND_END_OF_FRAME  0x80
+#define DND_END_OF_XFER   0x40
+#define DND_DONE          0x20
+#define DND_UNUSED        0x01
+
+/*
+ * IPCV2 descriptor status values.
+ */
+#define BD_IPCV2_END_OF_FRAME  0x40
+
+#define IPCV2_MAX_NODES        50
+/*
+ * Error bit set in the CCB status field by the SDMA,
+ * in setbd routine, in case of a transfer error
+ */
+#define DATA_ERROR  0x10000000
+
+/*
+ * Buffer descriptor commands.
+ */
+#define C0_ADDR             0x01
+#define C0_LOAD             0x02
+#define C0_DUMP             0x03
+#define C0_SETCTX           0x07
+#define C0_GETCTX           0x03
+#define C0_SETDM            0x01
+#define C0_SETPM            0x04
+#define C0_GETDM            0x02
+#define C0_GETPM            0x08
+/*
+ * Change endianness indicator in the BD command field
+ */
+#define CHANGE_ENDIANNESS   0x80
+
+/*
+ * Mode/Count of data node descriptors - IPCv2
+ */
+struct sdma_mode_count {
+       u32 count   : 16; /* size of the buffer pointed to by this BD */
+       u32 status  :  8; /* E,R,I,C,W,D status bits stored here */
+       u32 command :  8; /* command mostly used for channel 0 */
+};
+
+/*
+ * Buffer descriptor
+ */
+struct sdma_buffer_descriptor {
+       struct sdma_mode_count  mode;
+       u32 buffer_addr;        /* address of the buffer described */
+       u32 ext_buffer_addr;    /* extended buffer address */
+} __attribute__ ((packed));
+
+/**
+ * struct sdma_channel_control - Channel control Block
+ *
+ * @current_bd_ptr:    current buffer descriptor processed
+ * @base_bd_ptr:       first element of buffer descriptor array
+ * @unused:            padding. The SDMA engine expects an array of 128 byte
+ *                     control blocks
+ */
+struct sdma_channel_control {
+       u32 current_bd_ptr;
+       u32 base_bd_ptr;
+       u32 unused[2];
+} __attribute__ ((packed));
+
+/**
+ * struct sdma_state_registers - SDMA context for a channel
+ *
+ * @pc:                program counter
+ * @t:         test bit: status of arithmetic & test instruction
+ * @rpc:       return program counter
+ * @sf:                source fault while loading data
+ * @spc:       loop start program counter
+ * @df:                destination fault while storing data
+ * @epc:       loop end program counter
+ * @lm:                loop mode
+ */
+struct sdma_state_registers {
+       u32 pc     :14;
+       u32 unused1: 1;
+       u32 t      : 1;
+       u32 rpc    :14;
+       u32 unused0: 1;
+       u32 sf     : 1;
+       u32 spc    :14;
+       u32 unused2: 1;
+       u32 df     : 1;
+       u32 epc    :14;
+       u32 lm     : 2;
+} __attribute__ ((packed));
+
+/**
+ * struct sdma_context_data - sdma context specific to a channel
+ *
+ * @channel_state:     channel state bits
+ * @gReg:              general registers
+ * @mda:               burst dma destination address register
+ * @msa:               burst dma source address register
+ * @ms:                        burst dma status register
+ * @md:                        burst dma data register
+ * @pda:               peripheral dma destination address register
+ * @psa:               peripheral dma source address register
+ * @ps:                        peripheral dma status register
+ * @pd:                        peripheral dma data register
+ * @ca:                        CRC polynomial register
+ * @cs:                        CRC accumulator register
+ * @dda:               dedicated core destination address register
+ * @dsa:               dedicated core source address register
+ * @ds:                        dedicated core status register
+ * @dd:                        dedicated core data register
+ */
+struct sdma_context_data {
+       struct sdma_state_registers  channel_state;
+       u32  gReg[8];
+       u32  mda;
+       u32  msa;
+       u32  ms;
+       u32  md;
+       u32  pda;
+       u32  psa;
+       u32  ps;
+       u32  pd;
+       u32  ca;
+       u32  cs;
+       u32  dda;
+       u32  dsa;
+       u32  ds;
+       u32  dd;
+       u32  scratch0;
+       u32  scratch1;
+       u32  scratch2;
+       u32  scratch3;
+       u32  scratch4;
+       u32  scratch5;
+       u32  scratch6;
+       u32  scratch7;
+} __attribute__ ((packed));
+
+#define NUM_BD (int)(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor))
+
+struct sdma_engine;
+
+/**
+ * struct sdma_channel - housekeeping for a SDMA channel
+ *
+ * @sdma:              pointer to the SDMA engine for this channel
+ * @channel:           the channel number, matches dmaengine chan_id
+ * @direction:         transfer type. Needed for setting SDMA script
+ * @peripheral_type:   Peripheral type. Needed for setting SDMA script
+ * @event_id0:         aka dma request line
+ * @event_id1:         for channels that use 2 events
+ * @word_size:         peripheral access size
+ * @buf_tail:          ID of the buffer that was processed
+ * @done:              channel completion
+ * @num_bd:            max NUM_BD. Number of descriptors currently being handled
+ */
+struct sdma_channel {
+       struct sdma_engine              *sdma;
+       unsigned int                    channel;
+       enum dma_data_direction         direction;
+       enum sdma_peripheral_type       peripheral_type;
+       unsigned int                    event_id0;
+       unsigned int                    event_id1;
+       enum dma_slave_buswidth         word_size;
+       unsigned int                    buf_tail;
+       struct completion               done;
+       unsigned int                    num_bd;
+       struct sdma_buffer_descriptor   *bd;
+       dma_addr_t                      bd_phys;
+       unsigned int                    pc_from_device, pc_to_device;
+       unsigned long                   flags;
+       dma_addr_t                      per_address;
+       u32                             event_mask0, event_mask1;
+       u32                             watermark_level;
+       u32                             shp_addr, per_addr;
+       struct dma_chan                 chan;
+       spinlock_t                      lock;
+       struct dma_async_tx_descriptor  desc;
+       dma_cookie_t                    last_completed;
+       enum dma_status                 status;
+};
+
+#define IMX_DMA_SG_LOOP                (1 << 0)
+
+#define MAX_DMA_CHANNELS 32
+#define MXC_SDMA_DEFAULT_PRIORITY 1
+#define MXC_SDMA_MIN_PRIORITY 1
+#define MXC_SDMA_MAX_PRIORITY 7
+
+/**
+ * struct sdma_script_start_addrs - SDMA script start pointers
+ *
+ * start addresses of the different functions in the physical
+ * address space of the SDMA engine.
+ */
+struct sdma_script_start_addrs {
+       u32 ap_2_ap_addr;
+       u32 ap_2_bp_addr;
+       u32 ap_2_ap_fixed_addr;
+       u32 bp_2_ap_addr;
+       u32 loopback_on_dsp_side_addr;
+       u32 mcu_interrupt_only_addr;
+       u32 firi_2_per_addr;
+       u32 firi_2_mcu_addr;
+       u32 per_2_firi_addr;
+       u32 mcu_2_firi_addr;
+       u32 uart_2_per_addr;
+       u32 uart_2_mcu_addr;
+       u32 per_2_app_addr;
+       u32 mcu_2_app_addr;
+       u32 per_2_per_addr;
+       u32 uartsh_2_per_addr;
+       u32 uartsh_2_mcu_addr;
+       u32 per_2_shp_addr;
+       u32 mcu_2_shp_addr;
+       u32 ata_2_mcu_addr;
+       u32 mcu_2_ata_addr;
+       u32 app_2_per_addr;
+       u32 app_2_mcu_addr;
+       u32 shp_2_per_addr;
+       u32 shp_2_mcu_addr;
+       u32 mshc_2_mcu_addr;
+       u32 mcu_2_mshc_addr;
+       u32 spdif_2_mcu_addr;
+       u32 mcu_2_spdif_addr;
+       u32 asrc_2_mcu_addr;
+       u32 ext_mem_2_ipu_addr;
+       u32 descrambler_addr;
+       u32 dptc_dvfs_addr;
+       u32 utra_addr;
+       u32 ram_code_start_addr;
+};
+
+#define SDMA_FIRMWARE_MAGIC 0x414d4453
+
+/**
+ * struct sdma_firmware_header - layout of the firmware image
+ *
+ * @magic:             "SDMA"
+ * @version_major:     bumped whenever the layout of struct
+ *                     sdma_script_start_addrs changes
+ * @version_minor:     firmware minor version (for binary compatible changes)
+ * @script_addrs_start:        offset of struct sdma_script_start_addrs in this
+ *                     image; it stores the start addresses of the SDMA
+ *                     scripts (in SDMA memory space)
+ * @num_script_addrs:  number of script addresses in this image
+ * @ram_code_start:    offset of the SDMA RAM image in this firmware image
+ * @ram_code_size:     size of the SDMA RAM image
+ */
+struct sdma_firmware_header {
+       u32     magic;
+       u32     version_major;
+       u32     version_minor;
+       u32     script_addrs_start;
+       u32     num_script_addrs;
+       u32     ram_code_start;
+       u32     ram_code_size;
+};
+
+struct sdma_engine {
+       struct device                   *dev;
+       struct sdma_channel             channel[MAX_DMA_CHANNELS];
+       struct sdma_channel_control     *channel_control;
+       void __iomem                    *regs;
+       unsigned int                    version;
+       unsigned int                    num_events;
+       struct sdma_context_data        *context;
+       dma_addr_t                      context_phys;
+       struct dma_device               dma_device;
+       struct clk                      *clk;
+       struct sdma_script_start_addrs  *script_addrs;
+};
+
+#define SDMA_H_CONFIG_DSPDMA   (1 << 12) /* indicates if the DSPDMA is used */
+#define SDMA_H_CONFIG_RTD_PINS (1 << 11) /* indicates if Real-Time Debug pins are enabled */
+#define SDMA_H_CONFIG_ACR      (1 << 4)  /* indicates if AHB freq /core freq = 2 or 1 */
+#define SDMA_H_CONFIG_CSM      (3)       /* indicates which context switch mode is selected*/
+
+static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
+{
+       u32 chnenbl0 = (sdma->version == 2 ? SDMA_CHNENBL0_V2 : SDMA_CHNENBL0_V1);
+
+       return chnenbl0 + event * 4;
+}
+
+static int sdma_config_ownership(struct sdma_channel *sdmac,
+               bool event_override, bool mcu_override, bool dsp_override)
+{
+       struct sdma_engine *sdma = sdmac->sdma;
+       int channel = sdmac->channel;
+       u32 evt, mcu, dsp;
+
+       if (event_override && mcu_override && dsp_override)
+               return -EINVAL;
+
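+       /*
+        * The override registers carry one bit per channel; a requested
+        * override clears the channel's bit, otherwise the bit is set
+        */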
+       evt = __raw_readl(sdma->regs + SDMA_H_EVTOVR);
+       mcu = __raw_readl(sdma->regs + SDMA_H_HOSTOVR);
+       dsp = __raw_readl(sdma->regs + SDMA_H_DSPOVR);
+
+       if (dsp_override)
+               dsp &= ~(1 << channel);
+       else
+               dsp |= (1 << channel);
+
+       if (event_override)
+               evt &= ~(1 << channel);
+       else
+               evt |= (1 << channel);
+
+       if (mcu_override)
+               mcu &= ~(1 << channel);
+       else
+               mcu |= (1 << channel);
+
+       __raw_writel(evt, sdma->regs + SDMA_H_EVTOVR);
+       __raw_writel(mcu, sdma->regs + SDMA_H_HOSTOVR);
+       __raw_writel(dsp, sdma->regs + SDMA_H_DSPOVR);
+
+       return 0;
+}
+
+/*
+ * sdma_run_channel - run a channel and wait till it's done
+ */
+static int sdma_run_channel(struct sdma_channel *sdmac)
+{
+       struct sdma_engine *sdma = sdmac->sdma;
+       int channel = sdmac->channel;
+       int ret;
+
+       init_completion(&sdmac->done);
+
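+       /* Kick the channel; the interrupt handler completes &sdmac->done */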
+       __raw_writel(1 << channel, sdma->regs + SDMA_H_START);
+
+       ret = wait_for_completion_timeout(&sdmac->done, HZ);
+
+       return ret ? 0 : -ETIMEDOUT;
+}
+
+static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
+               u32 address)
+{
+       struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
+       void *buf_virt;
+       dma_addr_t buf_phys;
+       int ret;
+
+       buf_virt = dma_alloc_coherent(NULL,
+                       size,
+                       &buf_phys, GFP_KERNEL);
+       if (!buf_virt)
+               return -ENOMEM;
+
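+       /*
+        * Build the command descriptor on channel 0: C0_SETPM copies the
+        * buffer into SDMA program memory. mode.count is in 16-bit SDMA
+        * words, hence size / 2
+        */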
+       bd0->mode.command = C0_SETPM;
+       bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
+       bd0->mode.count = size / 2;
+       bd0->buffer_addr = buf_phys;
+       bd0->ext_buffer_addr = address;
+
+       memcpy(buf_virt, buf, size);
+
+       ret = sdma_run_channel(&sdma->channel[0]);
+
+       dma_free_coherent(NULL, size, buf_virt, buf_phys);
+
+       return ret;
+}
+
+static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
+{
+       struct sdma_engine *sdma = sdmac->sdma;
+       int channel = sdmac->channel;
+       u32 val;
+       u32 chnenbl = chnenbl_ofs(sdma, event);
+
+       val = __raw_readl(sdma->regs + chnenbl);
+       val |= (1 << channel);
+       __raw_writel(val, sdma->regs + chnenbl);
+}
+
+static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
+{
+       struct sdma_engine *sdma = sdmac->sdma;
+       int channel = sdmac->channel;
+       u32 chnenbl = chnenbl_ofs(sdma, event);
+       u32 val;
+
+       val = __raw_readl(sdma->regs + chnenbl);
+       val &= ~(1 << channel);
+       __raw_writel(val, sdma->regs + chnenbl);
+}
+
+static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
+{
+       struct sdma_buffer_descriptor *bd;
+
+       /*
+        * Loop mode: iterate over the descriptors the engine has
+        * completed, hand each one back by setting BD_DONE again and
+        * call the callback, so the ring keeps running.
+        */
+       while (1) {
+               bd = &sdmac->bd[sdmac->buf_tail];
+
+               if (bd->mode.status & BD_DONE)
+                       break;
+
+               if (bd->mode.status & BD_RROR)
+                       sdmac->status = DMA_ERROR;
+               else
+                       sdmac->status = DMA_SUCCESS;
+
+               bd->mode.status |= BD_DONE;
+               sdmac->buf_tail++;
+               sdmac->buf_tail %= sdmac->num_bd;
+
+               if (sdmac->desc.callback)
+                       sdmac->desc.callback(sdmac->desc.callback_param);
+       }
+}
+
+static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
+{
+       struct sdma_buffer_descriptor *bd;
+       int i, error = 0;
+
+       /*
+        * Non-loop mode: iterate over all descriptors, collect
+        * errors and call the callback function.
+        */
+       for (i = 0; i < sdmac->num_bd; i++) {
+               bd = &sdmac->bd[i];
+
+               if (bd->mode.status & (BD_DONE | BD_RROR))
+                       error = -EIO;
+       }
+
+       if (error)
+               sdmac->status = DMA_ERROR;
+       else
+               sdmac->status = DMA_SUCCESS;
+
+       if (sdmac->desc.callback)
+               sdmac->desc.callback(sdmac->desc.callback_param);
+       sdmac->last_completed = sdmac->desc.cookie;
+}
+
+static void mxc_sdma_handle_channel(struct sdma_channel *sdmac)
+{
+       complete(&sdmac->done);
+
+       /* not interested in channel 0 interrupts */
+       if (sdmac->channel == 0)
+               return;
+
+       if (sdmac->flags & IMX_DMA_SG_LOOP)
+               sdma_handle_channel_loop(sdmac);
+       else
+               mxc_sdma_handle_channel_normal(sdmac);
+}
+
+static irqreturn_t sdma_int_handler(int irq, void *dev_id)
+{
+       struct sdma_engine *sdma = dev_id;
+       u32 stat;
+
+       stat = __raw_readl(sdma->regs + SDMA_H_INTR);
+       __raw_writel(stat, sdma->regs + SDMA_H_INTR);
+
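+       /* Interrupts were acked above; service pending channels MSB first */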
+       while (stat) {
+               int channel = fls(stat) - 1;
+               struct sdma_channel *sdmac = &sdma->channel[channel];
+
+               mxc_sdma_handle_channel(sdmac);
+
+               stat &= ~(1 << channel);
+       }
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * Set the PC of the SDMA script according to the peripheral type
+ */
+static void sdma_get_pc(struct sdma_channel *sdmac,
+               enum sdma_peripheral_type peripheral_type)
+{
+       struct sdma_engine *sdma = sdmac->sdma;
+       int per_2_emi = 0, emi_2_per = 0;
+       /*
+        * These are needed once we start to support transfers between
+        * two peripherals or memory-to-memory transfers
+        */
+       int per_2_per = 0, emi_2_emi = 0;
+
+       sdmac->pc_from_device = 0;
+       sdmac->pc_to_device = 0;
+
+       switch (peripheral_type) {
+       case IMX_DMATYPE_MEMORY:
+               emi_2_emi = sdma->script_addrs->ap_2_ap_addr;
+               break;
+       case IMX_DMATYPE_DSP:
+               emi_2_per = sdma->script_addrs->bp_2_ap_addr;
+               per_2_emi = sdma->script_addrs->ap_2_bp_addr;
+               break;
+       case IMX_DMATYPE_FIRI:
+               per_2_emi = sdma->script_addrs->firi_2_mcu_addr;
+               emi_2_per = sdma->script_addrs->mcu_2_firi_addr;
+               break;
+       case IMX_DMATYPE_UART:
+               per_2_emi = sdma->script_addrs->uart_2_mcu_addr;
+               emi_2_per = sdma->script_addrs->mcu_2_app_addr;
+               break;
+       case IMX_DMATYPE_UART_SP:
+               per_2_emi = sdma->script_addrs->uartsh_2_mcu_addr;
+               emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
+               break;
+       case IMX_DMATYPE_ATA:
+               per_2_emi = sdma->script_addrs->ata_2_mcu_addr;
+               emi_2_per = sdma->script_addrs->mcu_2_ata_addr;
+               break;
+       case IMX_DMATYPE_CSPI:
+       case IMX_DMATYPE_EXT:
+       case IMX_DMATYPE_SSI:
+               per_2_emi = sdma->script_addrs->app_2_mcu_addr;
+               emi_2_per = sdma->script_addrs->mcu_2_app_addr;
+               break;
+       case IMX_DMATYPE_SSI_SP:
+       case IMX_DMATYPE_MMC:
+       case IMX_DMATYPE_SDHC:
+       case IMX_DMATYPE_CSPI_SP:
+       case IMX_DMATYPE_ESAI:
+       case IMX_DMATYPE_MSHC_SP:
+               per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
+               emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
+               break;
+       case IMX_DMATYPE_ASRC:
+               per_2_emi = sdma->script_addrs->asrc_2_mcu_addr;
+               emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
+               per_2_per = sdma->script_addrs->per_2_per_addr;
+               break;
+       case IMX_DMATYPE_MSHC:
+               per_2_emi = sdma->script_addrs->mshc_2_mcu_addr;
+               emi_2_per = sdma->script_addrs->mcu_2_mshc_addr;
+               break;
+       case IMX_DMATYPE_CCM:
+               per_2_emi = sdma->script_addrs->dptc_dvfs_addr;
+               break;
+       case IMX_DMATYPE_SPDIF:
+               per_2_emi = sdma->script_addrs->spdif_2_mcu_addr;
+               emi_2_per = sdma->script_addrs->mcu_2_spdif_addr;
+               break;
+       case IMX_DMATYPE_IPU_MEMORY:
+               emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr;
+               break;
+       default:
+               break;
+       }
+
+       sdmac->pc_from_device = per_2_emi;
+       sdmac->pc_to_device = emi_2_per;
+}
+
+static int sdma_load_context(struct sdma_channel *sdmac)
+{
+       struct sdma_engine *sdma = sdmac->sdma;
+       int channel = sdmac->channel;
+       int load_address;
+       struct sdma_context_data *context = sdma->context;
+       struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
+       int ret;
+
+       if (sdmac->direction == DMA_FROM_DEVICE)
+               load_address = sdmac->pc_from_device;
+       else
+               load_address = sdmac->pc_to_device;
+
+       if (load_address < 0)
+               return load_address;
+
+       dev_dbg(sdma->dev, "load_address = %d\n", load_address);
+       dev_dbg(sdma->dev, "wml = 0x%08x\n", sdmac->watermark_level);
+       dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr);
+       dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr);
+       dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", sdmac->event_mask0);
+       dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", sdmac->event_mask1);
+
+       memset(context, 0, sizeof(*context));
+       context->channel_state.pc = load_address;
+
+       /*
+        * Pass the event masks, the peripheral base address and the
+        * watermark level to the script via the channel context
+        */
+       context->gReg[0] = sdmac->event_mask1;
+       context->gReg[1] = sdmac->event_mask0;
+       context->gReg[2] = sdmac->per_addr;
+       context->gReg[6] = sdmac->shp_addr;
+       context->gReg[7] = sdmac->watermark_level;
+
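+       /*
+        * Load the context through channel 0: C0_SETDM copies it into
+        * SDMA data memory, where per-channel contexts start at word
+        * offset 2048, one sizeof(*context) / 4 slot per channel
+        */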
+       bd0->mode.command = C0_SETDM;
+       bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
+       bd0->mode.count = sizeof(*context) / 4;
+       bd0->buffer_addr = sdma->context_phys;
+       bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
+
+       ret = sdma_run_channel(&sdma->channel[0]);
+
+       return ret;
+}
+
+static void sdma_disable_channel(struct sdma_channel *sdmac)
+{
+       struct sdma_engine *sdma = sdmac->sdma;
+       int channel = sdmac->channel;
+
+       __raw_writel(1 << channel, sdma->regs + SDMA_H_STATSTOP);
+       sdmac->status = DMA_ERROR;
+}
+
+static int sdma_config_channel(struct sdma_channel *sdmac)
+{
+       int ret;
+
+       sdma_disable_channel(sdmac);
+
+       sdmac->event_mask0 = 0;
+       sdmac->event_mask1 = 0;
+       sdmac->shp_addr = 0;
+       sdmac->per_addr = 0;
+
+       if (sdmac->event_id0) {
+               if (sdmac->event_id0 > 32)
+                       return -EINVAL;
+               sdma_event_enable(sdmac, sdmac->event_id0);
+       }
+
+       switch (sdmac->peripheral_type) {
+       case IMX_DMATYPE_DSP:
+               sdma_config_ownership(sdmac, false, true, true);
+               break;
+       case IMX_DMATYPE_MEMORY:
+               sdma_config_ownership(sdmac, false, true, false);
+               break;
+       default:
+               sdma_config_ownership(sdmac, true, true, false);
+               break;
+       }
+
+       sdma_get_pc(sdmac, sdmac->peripheral_type);
+
+       if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) &&
+                       (sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
+               /* Handle multiple event channels differently */
+               if (sdmac->event_id1) {
+                       sdmac->event_mask1 = 1 << (sdmac->event_id1 % 32);
+                       if (sdmac->event_id1 > 31)
+                               sdmac->watermark_level |= 1 << 31;
+                       sdmac->event_mask0 = 1 << (sdmac->event_id0 % 32);
+                       if (sdmac->event_id0 > 31)
+                               sdmac->watermark_level |= 1 << 30;
+               } else {
+                       /* avoid an undefined shift by a negative count */
+                       if (sdmac->event_id0 > 31)
+                               sdmac->event_mask1 = 1 << (sdmac->event_id0 - 32);
+                       else
+                               sdmac->event_mask0 = 1 << sdmac->event_id0;
+               }
+               /* The watermark level is taken from the slave config as-is */
+               /* Address */
+               sdmac->shp_addr = sdmac->per_address;
+       } else {
+               sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
+       }
+
+       ret = sdma_load_context(sdmac);
+
+       return ret;
+}
+
+static int sdma_set_channel_priority(struct sdma_channel *sdmac,
+               unsigned int priority)
+{
+       struct sdma_engine *sdma = sdmac->sdma;
+       int channel = sdmac->channel;
+
+       if (priority < MXC_SDMA_MIN_PRIORITY
+           || priority > MXC_SDMA_MAX_PRIORITY) {
+               return -EINVAL;
+       }
+
+       __raw_writel(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel);
+
+       return 0;
+}
+
+static int sdma_request_channel(struct sdma_channel *sdmac)
+{
+       struct sdma_engine *sdma = sdmac->sdma;
+       int channel = sdmac->channel;
+       int ret;
+
+       sdmac->bd = dma_alloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys, GFP_KERNEL);
+       if (!sdmac->bd) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       memset(sdmac->bd, 0, PAGE_SIZE);
+
+       sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys;
+       sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
+
+       clk_enable(sdma->clk);
+
+       sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);
+
+       init_completion(&sdmac->done);
+
+       sdmac->buf_tail = 0;
+
+       return 0;
+out:
+       return ret;
+}
+
+static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
+{
+       __raw_writel(1 << channel, sdma->regs + SDMA_H_START);
+}
+
+static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdma)
+{
+       dma_cookie_t cookie = sdma->chan.cookie;
+
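+       /* dma_cookie_t is signed; wrap back to 1 instead of going negative */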
+       if (++cookie < 0)
+               cookie = 1;
+
+       sdma->chan.cookie = cookie;
+       sdma->desc.cookie = cookie;
+
+       return cookie;
+}
+
+static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
+{
+       return container_of(chan, struct sdma_channel, chan);
+}
+
+static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+       struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
+       struct sdma_engine *sdma = sdmac->sdma;
+       dma_cookie_t cookie;
+
+       spin_lock_irq(&sdmac->lock);
+
+       cookie = sdma_assign_cookie(sdmac);
+
+       sdma_enable_channel(sdma, tx->chan->chan_id);
+
+       spin_unlock_irq(&sdmac->lock);
+
+       return cookie;
+}
+
+static int sdma_alloc_chan_resources(struct dma_chan *chan)
+{
+       struct sdma_channel *sdmac = to_sdma_chan(chan);
+       struct imx_dma_data *data = chan->private;
+       int prio, ret;
+
+       /* No need to execute this for internal channel 0 */
+       if (chan->chan_id == 0)
+               return 0;
+
+       if (!data)
+               return -EINVAL;
+
+       switch (data->priority) {
+       case DMA_PRIO_HIGH:
+               prio = 3;
+               break;
+       case DMA_PRIO_MEDIUM:
+               prio = 2;
+               break;
+       case DMA_PRIO_LOW:
+       default:
+               prio = 1;
+               break;
+       }
+
+       sdmac->peripheral_type = data->peripheral_type;
+       sdmac->event_id0 = data->dma_request;
+       ret = sdma_set_channel_priority(sdmac, prio);
+       if (ret)
+               return ret;
+
+       ret = sdma_request_channel(sdmac);
+       if (ret)
+               return ret;
+
+       dma_async_tx_descriptor_init(&sdmac->desc, chan);
+       sdmac->desc.tx_submit = sdma_tx_submit;
+       /* txd.flags will be overwritten in prep funcs */
+       sdmac->desc.flags = DMA_CTRL_ACK;
+
+       return 0;
+}
+
+static void sdma_free_chan_resources(struct dma_chan *chan)
+{
+       struct sdma_channel *sdmac = to_sdma_chan(chan);
+       struct sdma_engine *sdma = sdmac->sdma;
+
+       sdma_disable_channel(sdmac);
+
+       if (sdmac->event_id0)
+               sdma_event_disable(sdmac, sdmac->event_id0);
+       if (sdmac->event_id1)
+               sdma_event_disable(sdmac, sdmac->event_id1);
+
+       sdmac->event_id0 = 0;
+       sdmac->event_id1 = 0;
+
+       sdma_set_channel_priority(sdmac, 0);
+
+       dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);
+
+       clk_disable(sdma->clk);
+}
+
+static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
+               struct dma_chan *chan, struct scatterlist *sgl,
+               unsigned int sg_len, enum dma_data_direction direction,
+               unsigned long flags)
+{
+       struct sdma_channel *sdmac = to_sdma_chan(chan);
+       struct sdma_engine *sdma = sdmac->sdma;
+       int ret, i, count;
+       int channel = chan->chan_id;
+       struct scatterlist *sg;
+
+       if (sdmac->status == DMA_IN_PROGRESS)
+               return NULL;
+       sdmac->status = DMA_IN_PROGRESS;
+
+       sdmac->flags = 0;
+
+       dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
+                       sg_len, channel);
+
+       sdmac->direction = direction;
+       ret = sdma_load_context(sdmac);
+       if (ret)
+               goto err_out;
+
+       if (sg_len > NUM_BD) {
+               dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
+                               channel, sg_len, NUM_BD);
+               ret = -EINVAL;
+               goto err_out;
+       }
+
+       for_each_sg(sgl, sg, sg_len, i) {
+               struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
+               int param;
+
+               bd->buffer_addr = sg->dma_address;
+
+               count = sg->length;
+
+               if (count > 0xffff) {
+                       dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
+                                       channel, count, 0xffff);
+                       ret = -EINVAL;
+                       goto err_out;
+               }
+
+               bd->mode.count = count;
+
+               if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
+                       ret =  -EINVAL;
+                       goto err_out;
+               }
+               if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
+                       bd->mode.command = 0;
+               else
+                       bd->mode.command = sdmac->word_size;
+
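+               /*
+                * BD_DONE hands this descriptor to the engine; the last
+                * entry also raises an interrupt and, in loop mode, wraps
+                * back to the first descriptor.
+                */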
+               param = BD_DONE | BD_EXTD | BD_CONT;
+
+               if (sdmac->flags & IMX_DMA_SG_LOOP) {
+                       param |= BD_INTR;
+                       if (i + 1 == sg_len)
+                               param |= BD_WRAP;
+               }
+
+               if (i + 1 == sg_len)
+                       param |= BD_INTR;
+
+               dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
+                               i, count, sg->dma_address,
+                               param & BD_WRAP ? "wrap" : "",
+                               param & BD_INTR ? " intr" : "");
+
+               bd->mode.status = param;
+       }
+
+       sdmac->num_bd = sg_len;
+       sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
+
+       return &sdmac->desc;
+err_out:
+       return NULL;
+}
+
+static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
+               struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
+               size_t period_len, enum dma_data_direction direction)
+{
+       struct sdma_channel *sdmac = to_sdma_chan(chan);
+       struct sdma_engine *sdma = sdmac->sdma;
+       int num_periods = buf_len / period_len;
+       int channel = chan->chan_id;
+       int ret, i = 0, buf = 0;
+
+       dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
+
+       if (sdmac->status == DMA_IN_PROGRESS)
+               return NULL;
+
+       sdmac->status = DMA_IN_PROGRESS;
+
+       sdmac->flags |= IMX_DMA_SG_LOOP;
+       sdmac->direction = direction;
+       ret = sdma_load_context(sdmac);
+       if (ret)
+               goto err_out;
+
+       if (num_periods > NUM_BD) {
+               dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
+                               channel, num_periods, NUM_BD);
+               goto err_out;
+       }
+
+       if (period_len > 0xffff) {
+               dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n",
+                               channel, period_len, 0xffff);
+               goto err_out;
+       }
+
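+       /* One BD per period; BD_WRAP on the last entry closes the ring */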
+       while (buf < buf_len) {
+               struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
+               int param;
+
+               bd->buffer_addr = dma_addr;
+
+               bd->mode.count = period_len;
+
+               if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
+                       goto err_out;
+               if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
+                       bd->mode.command = 0;
+               else
+                       bd->mode.command = sdmac->word_size;
+
+               param = BD_DONE | BD_EXTD | BD_CONT | BD_INTR;
+               if (i + 1 == num_periods)
+                       param |= BD_WRAP;
+
+               dev_dbg(sdma->dev, "entry %d: count: %zu dma: 0x%08x %s%s\n",
+                               i, period_len, dma_addr,
+                               param & BD_WRAP ? "wrap" : "",
+                               param & BD_INTR ? " intr" : "");
+
+               bd->mode.status = param;
+
+               dma_addr += period_len;
+               buf += period_len;
+
+               i++;
+       }
+
+       sdmac->num_bd = num_periods;
+       sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
+
+       return &sdmac->desc;
+err_out:
+       sdmac->status = DMA_ERROR;
+       return NULL;
+}
+
+static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+               unsigned long arg)
+{
+       struct sdma_channel *sdmac = to_sdma_chan(chan);
+       struct dma_slave_config *dmaengine_cfg = (void *)arg;
+
+       switch (cmd) {
+       case DMA_TERMINATE_ALL:
+               sdma_disable_channel(sdmac);
+               return 0;
+       case DMA_SLAVE_CONFIG:
+               if (dmaengine_cfg->direction == DMA_FROM_DEVICE) {
+                       sdmac->per_address = dmaengine_cfg->src_addr;
+                       sdmac->watermark_level = dmaengine_cfg->src_maxburst;
+                       sdmac->word_size = dmaengine_cfg->src_addr_width;
+               } else {
+                       sdmac->per_address = dmaengine_cfg->dst_addr;
+                       sdmac->watermark_level = dmaengine_cfg->dst_maxburst;
+                       sdmac->word_size = dmaengine_cfg->dst_addr_width;
+               }
+               return sdma_config_channel(sdmac);
+       default:
+               return -ENOSYS;
+       }
+
+       return -EINVAL;
+}
+
+static enum dma_status sdma_tx_status(struct dma_chan *chan,
+                                           dma_cookie_t cookie,
+                                           struct dma_tx_state *txstate)
+{
+       struct sdma_channel *sdmac = to_sdma_chan(chan);
+       dma_cookie_t last_used;
+       enum dma_status ret;
+
+       last_used = chan->cookie;
+
+       ret = dma_async_is_complete(cookie, sdmac->last_completed, last_used);
+       dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0);
+
+       return ret;
+}
+
+static void sdma_issue_pending(struct dma_chan *chan)
+{
+       /*
+        * Nothing to do. We only have a single descriptor
+        */
+}
+
+static int __init sdma_init(struct sdma_engine *sdma,
+               void *ram_code, int ram_code_size)
+{
+       int i, ret;
+       dma_addr_t ccb_phys;
+
+       switch (sdma->version) {
+       case 1:
+               sdma->num_events = 32;
+               break;
+       case 2:
+               sdma->num_events = 48;
+               break;
+       default:
+               dev_err(sdma->dev, "Unknown version %d. aborting\n", sdma->version);
+               return -ENODEV;
+       }
+
+       clk_enable(sdma->clk);
+
+       /* Be sure SDMA has not started yet */
+       __raw_writel(0, sdma->regs + SDMA_H_C0PTR);
+
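+       /*
+        * A single coherent allocation holds the channel control blocks
+        * followed by the scratch context used by sdma_load_context()
+        */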
+       sdma->channel_control = dma_alloc_coherent(NULL,
+                       MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) +
+                       sizeof(struct sdma_context_data),
+                       &ccb_phys, GFP_KERNEL);
+
+       if (!sdma->channel_control) {
+               ret = -ENOMEM;
+               goto err_dma_alloc;
+       }
+
+       sdma->context = (void *)sdma->channel_control +
+               MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
+       sdma->context_phys = ccb_phys +
+               MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
+
+       /* Zero-out the CCB structures array just allocated */
+       memset(sdma->channel_control, 0,
+                       MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control));
+
+       /* disable all channels */
+       for (i = 0; i < sdma->num_events; i++)
+               __raw_writel(0, sdma->regs + chnenbl_ofs(sdma, i));
+
+       /* All channels have priority 0 */
+       for (i = 0; i < MAX_DMA_CHANNELS; i++)
+               __raw_writel(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
+
+       ret = sdma_request_channel(&sdma->channel[0]);
+       if (ret)
+               goto err_dma_alloc;
+
+       sdma_config_ownership(&sdma->channel[0], false, true, false);
+
+       /* Set Command Channel (Channel Zero) */
+       __raw_writel(0x4050, sdma->regs + SDMA_CHN0ADDR);
+
+       /* Set bits of CONFIG register but with static context switching */
+       /* FIXME: Check whether to set ACR bit depending on clock ratios */
+       __raw_writel(0, sdma->regs + SDMA_H_CONFIG);
+
+       __raw_writel(ccb_phys, sdma->regs + SDMA_H_C0PTR);
+
+       /* download the RAM image for SDMA */
+       sdma_load_script(sdma, ram_code,
+                       ram_code_size,
+                       sdma->script_addrs->ram_code_start_addr);
+
+       /* Set bits of CONFIG register with given context switching mode */
+       __raw_writel(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
+
+       /* Initializes channel's priorities */
+       sdma_set_channel_priority(&sdma->channel[0], 7);
+
+       clk_disable(sdma->clk);
+
+       return 0;
+
+err_dma_alloc:
+       clk_disable(sdma->clk);
+       dev_err(sdma->dev, "initialisation failed with %d\n", ret);
+       return ret;
+}
+
+static int __init sdma_probe(struct platform_device *pdev)
+{
+       int ret;
+       const struct firmware *fw;
+       const struct sdma_firmware_header *header;
+       const struct sdma_script_start_addrs *addr;
+       int irq;
+       unsigned short *ram_code;
+       struct resource *iores;
+       struct sdma_platform_data *pdata = pdev->dev.platform_data;
+       char *fwname;
+       int i;
+       dma_cap_mask_t mask;
+       struct sdma_engine *sdma;
+
+       sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
+       if (!sdma)
+               return -ENOMEM;
+
+       sdma->dev = &pdev->dev;
+
+       iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       irq = platform_get_irq(pdev, 0);
+       if (!iores || irq < 0 || !pdata) {
+               ret = -EINVAL;
+               goto err_irq;
+       }
+
+       if (!request_mem_region(iores->start, resource_size(iores), pdev->name)) {
+               ret = -EBUSY;
+               goto err_request_region;
+       }
+
+       sdma->clk = clk_get(&pdev->dev, NULL);
+       if (IS_ERR(sdma->clk)) {
+               ret = PTR_ERR(sdma->clk);
+               goto err_clk;
+       }
+
+       sdma->regs = ioremap(iores->start, resource_size(iores));
+       if (!sdma->regs) {
+               ret = -ENOMEM;
+               goto err_ioremap;
+       }
+
+       ret = request_irq(irq, sdma_int_handler, 0, "sdma", sdma);
+       if (ret)
+               goto err_request_irq;
+
+       fwname = kasprintf(GFP_KERNEL, "sdma-%s-to%d.bin",
+                       pdata->cpu_name, pdata->to_version);
+       if (!fwname) {
+               ret = -ENOMEM;
+               goto err_cputype;
+       }
+
+       ret = request_firmware(&fw, fwname, &pdev->dev);
+       if (ret) {
+               dev_err(&pdev->dev, "request firmware \"%s\" failed with %d\n",
+                               fwname, ret);
+               kfree(fwname);
+               goto err_cputype;
+       }
+       kfree(fwname);
+
+       /* make sure the error paths below return an error, not 0 */
+       ret = -EINVAL;
+       if (fw->size < sizeof(*header))
+               goto err_firmware;
+
+       header = (struct sdma_firmware_header *)fw->data;
+
+       if (header->magic != SDMA_FIRMWARE_MAGIC)
+               goto err_firmware;
+       if (header->ram_code_start + header->ram_code_size > fw->size)
+               goto err_firmware;
+
+       addr = (void *)header + header->script_addrs_start;
+       ram_code = (void *)header + header->ram_code_start;
+       sdma->script_addrs = kmalloc(sizeof(*addr), GFP_KERNEL);
+       if (!sdma->script_addrs) {
+               ret = -ENOMEM;
+               goto err_firmware;
+       }
+       memcpy(sdma->script_addrs, addr, sizeof(*addr));
+
+       sdma->version = pdata->sdma_version;
+
+       INIT_LIST_HEAD(&sdma->dma_device.channels);
+       /* Initialize channel parameters */
+       for (i = 0; i < MAX_DMA_CHANNELS; i++) {
+               struct sdma_channel *sdmac = &sdma->channel[i];
+
+               sdmac->sdma = sdma;
+               spin_lock_init(&sdmac->lock);
+
+               dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
+               dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
+
+               sdmac->chan.device = &sdma->dma_device;
+               sdmac->chan.chan_id = i;
+               sdmac->channel = i;
+
+               /* Add the channel to the DMAC list */
+               list_add_tail(&sdmac->chan.device_node, &sdma->dma_device.channels);
+       }
+
+       ret = sdma_init(sdma, ram_code, header->ram_code_size);
+       if (ret)
+               goto err_init;
+
+       sdma->dma_device.dev = &pdev->dev;
+
+       sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
+       sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources;
+       sdma->dma_device.device_tx_status = sdma_tx_status;
+       sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
+       sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
+       sdma->dma_device.device_control = sdma_control;
+       sdma->dma_device.device_issue_pending = sdma_issue_pending;
+
+       ret = dma_async_device_register(&sdma->dma_device);
+       if (ret) {
+               dev_err(&pdev->dev, "unable to register\n");
+               goto err_init;
+       }
+
+       dev_info(&pdev->dev, "initialized (firmware %d.%d)\n",
+                       header->version_major,
+                       header->version_minor);
+
+       /*
+        * Request channel 0. This is an internal control channel of the
+        * SDMA engine and not available to clients.
+        */
+       dma_cap_zero(mask);
+       dma_cap_set(DMA_SLAVE, mask);
+       dma_request_channel(mask, NULL, NULL);
+
+       release_firmware(fw);
+
+       return 0;
+
+err_init:
+       kfree(sdma->script_addrs);
+err_firmware:
+       release_firmware(fw);
+err_cputype:
+       free_irq(irq, sdma);
+err_request_irq:
+       iounmap(sdma->regs);
+err_ioremap:
+       clk_put(sdma->clk);
+err_clk:
+       release_mem_region(iores->start, resource_size(iores));
+err_request_region:
+err_irq:
+       kfree(sdma);
+       return ret;
+}
+
+static int __exit sdma_remove(struct platform_device *pdev)
+{
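+       /* Channel 0 and the firmware are never released, so refuse removal */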
+       return -EBUSY;
+}
+
+static struct platform_driver sdma_driver = {
+       .driver         = {
+               .name   = "imx-sdma",
+       },
+       .remove         = __exit_p(sdma_remove),
+};
+
+static int __init sdma_module_init(void)
+{
+       return platform_driver_probe(&sdma_driver, sdma_probe);
+}
+subsys_initcall(sdma_module_init);
+
+MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
+MODULE_DESCRIPTION("i.MX SDMA driver");
+MODULE_LICENSE("GPL");
index c2591e8d9b6e0ede604953e59ae9182b47097c58..338bc4eed1f3f55b4de66c623a135289862c783f 100644 (file)
@@ -25,6 +25,7 @@
  */
 #include <linux/pci.h>
 #include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
 #include <linux/intel_mid_dma.h>
 
 #define MAX_CHAN       4 /*max ch across controllers*/
@@ -91,13 +92,13 @@ static int get_block_ts(int len, int tx_width, int block_size)
        int byte_width = 0, block_ts = 0;
 
        switch (tx_width) {
-       case LNW_DMA_WIDTH_8BIT:
+       case DMA_SLAVE_BUSWIDTH_1_BYTE:
                byte_width = 1;
                break;
-       case LNW_DMA_WIDTH_16BIT:
+       case DMA_SLAVE_BUSWIDTH_2_BYTES:
                byte_width = 2;
                break;
-       case LNW_DMA_WIDTH_32BIT:
+       case DMA_SLAVE_BUSWIDTH_4_BYTES:
        default:
                byte_width = 4;
                break;
@@ -247,16 +248,17 @@ static void midc_dostart(struct intel_mid_dma_chan *midc,
        struct middma_device *mid = to_middma_device(midc->chan.device);
 
        /*  channel is idle */
-       if (midc->in_use && test_ch_en(midc->dma_base, midc->ch_id)) {
+       if (midc->busy && test_ch_en(midc->dma_base, midc->ch_id)) {
                /*error*/
                pr_err("ERR_MDMA: channel is busy in start\n");
                /* The tasklet will hopefully advance the queue... */
                return;
        }
-
+       midc->busy = true;
        /*write registers and en*/
        iowrite32(first->sar, midc->ch_regs + SAR);
        iowrite32(first->dar, midc->ch_regs + DAR);
+       iowrite32(first->lli_phys, midc->ch_regs + LLP);
        iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH);
        iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW);
        iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW);
@@ -264,9 +266,9 @@ static void midc_dostart(struct intel_mid_dma_chan *midc,
        pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n",
                (int)first->sar, (int)first->dar, first->cfg_hi,
                first->cfg_lo, first->ctl_hi, first->ctl_lo);
+       first->status = DMA_IN_PROGRESS;
 
        iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
-       first->status = DMA_IN_PROGRESS;
 }
 
 /**
@@ -283,20 +285,36 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
 {
        struct dma_async_tx_descriptor  *txd = &desc->txd;
        dma_async_tx_callback callback_txd = NULL;
+       struct intel_mid_dma_lli        *llitem;
        void *param_txd = NULL;
 
        midc->completed = txd->cookie;
        callback_txd = txd->callback;
        param_txd = txd->callback_param;
 
-       list_move(&desc->desc_node, &midc->free_list);
-
+       if (desc->lli != NULL) {
+               /*clear the DONE bit of completed LLI in memory*/
+               llitem = desc->lli + desc->current_lli;
+               llitem->ctl_hi &= CLEAR_DONE;
+               if (desc->current_lli < desc->lli_length - 1)
+                       (desc->current_lli)++;
+               else
+                       desc->current_lli = 0;
+       }
        spin_unlock_bh(&midc->lock);
        if (callback_txd) {
                pr_debug("MDMA: TXD callback set ... calling\n");
                callback_txd(param_txd);
-               spin_lock_bh(&midc->lock);
-               return;
+       }
+       if (midc->raw_tfr) {
+               desc->status = DMA_SUCCESS;
+               if (desc->lli != NULL) {
+                       pci_pool_free(desc->lli_pool, desc->lli,
+                                               desc->lli_phys);
+                       pci_pool_destroy(desc->lli_pool);
+               }
+               list_move(&desc->desc_node, &midc->free_list);
+               midc->busy = false;
        }
        spin_lock_bh(&midc->lock);
 
@@ -317,14 +335,89 @@ static void midc_scan_descriptors(struct middma_device *mid,
 
        /*tx is complete*/
        list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
-               if (desc->status == DMA_IN_PROGRESS)  {
-                       desc->status = DMA_SUCCESS;
+               if (desc->status == DMA_IN_PROGRESS)
                        midc_descriptor_complete(midc, desc);
-               }
        }
        return;
 }
+/**
+ * midc_lli_fill_sg -          Helper function to convert
+ *                             SG list to Linked List Items.
+ *@midc: Channel
+ *@desc: DMA descriptor
+ *@sglist: Pointer to SG list
+ *@sglen: SG list length
+ *@flags: DMA transaction flags
+ *
+ * Walk through the SG list and convert the SG list into Linked
+ * List Items (LLI).
+ */
+static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
+                               struct intel_mid_dma_desc *desc,
+                               struct scatterlist *sglist,
+                               unsigned int sglen,
+                               unsigned int flags)
+{
+       struct intel_mid_dma_slave *mids;
+       struct scatterlist  *sg;
+       dma_addr_t lli_next, sg_phy_addr;
+       struct intel_mid_dma_lli *lli_bloc_desc;
+       union intel_mid_dma_ctl_lo ctl_lo;
+       union intel_mid_dma_ctl_hi ctl_hi;
+       int i;
 
+       pr_debug("MDMA: Entered midc_lli_fill_sg\n");
+       mids = midc->mid_slave;
+
+       lli_bloc_desc = desc->lli;
+       lli_next = desc->lli_phys;
+
+       ctl_lo.ctl_lo = desc->ctl_lo;
+       ctl_hi.ctl_hi = desc->ctl_hi;
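+       /*Chain the LLIs via llp; the tail wraps (circular) or ends in zero*/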
+       for_each_sg(sglist, sg, sglen, i) {
+               /*Populate CTL_LOW and LLI values*/
+               if (i != sglen - 1) {
+                       lli_next = lli_next +
+                               sizeof(struct intel_mid_dma_lli);
+               } else {
+                       /*Check for circular list, otherwise terminate LLI to ZERO*/
+                       if (flags & DMA_PREP_CIRCULAR_LIST) {
+                               pr_debug("MDMA: LLI is configured in circular mode\n");
+                               lli_next = desc->lli_phys;
+                       } else {
+                               lli_next = 0;
+                               ctl_lo.ctlx.llp_dst_en = 0;
+                               ctl_lo.ctlx.llp_src_en = 0;
+                       }
+               }
+               /*Populate CTL_HI values*/
+               ctl_hi.ctlx.block_ts = get_block_ts(sg->length,
+                                                       desc->width,
+                                                       midc->dma->block_size);
+               /*Populate SAR and DAR values*/
+               sg_phy_addr = sg_phys(sg);
+               if (desc->dirn == DMA_TO_DEVICE) {
+                       lli_bloc_desc->sar  = sg_phy_addr;
+                       lli_bloc_desc->dar  = mids->dma_slave.dst_addr;
+               } else if (desc->dirn == DMA_FROM_DEVICE) {
+                       lli_bloc_desc->sar  = mids->dma_slave.src_addr;
+                       lli_bloc_desc->dar  = sg_phy_addr;
+               }
+               /*Copy values into block descriptor in system memory*/
+               lli_bloc_desc->llp = lli_next;
+               lli_bloc_desc->ctl_lo = ctl_lo.ctl_lo;
+               lli_bloc_desc->ctl_hi = ctl_hi.ctl_hi;
+
+               lli_bloc_desc++;
+       }
+       /*Copy very first LLI values to descriptor*/
+       desc->ctl_lo = desc->lli->ctl_lo;
+       desc->ctl_hi = desc->lli->ctl_hi;
+       desc->sar = desc->lli->sar;
+       desc->dar = desc->lli->dar;
+
+       return 0;
+}
 /*****************************************************************************
 DMA engine callback Functions*/
 /**
@@ -349,12 +442,12 @@ static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx)
        desc->txd.cookie = cookie;
 
 
-       if (list_empty(&midc->active_list)) {
-               midc_dostart(midc, desc);
+       if (list_empty(&midc->active_list))
                list_add_tail(&desc->desc_node, &midc->active_list);
-       } else {
+       else
                list_add_tail(&desc->desc_node, &midc->queue);
-       }
+
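+       /*midc_dostart backs off if the channel is already busy*/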
+       midc_dostart(midc, desc);
        spin_unlock_bh(&midc->lock);
 
        return cookie;
@@ -414,6 +507,23 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
        return ret;
 }
 
+static int dma_slave_control(struct dma_chan *chan, unsigned long arg)
+{
+       struct intel_mid_dma_chan       *midc = to_intel_mid_dma_chan(chan);
+       struct dma_slave_config  *slave = (struct dma_slave_config *)arg;
+       struct intel_mid_dma_slave *mid_slave;
+
+       BUG_ON(!midc);
+       BUG_ON(!slave);
+       pr_debug("MDMA: slave control called\n");
+
+       mid_slave = to_intel_mid_dma_slave(slave);
+
+       BUG_ON(!mid_slave);
+
+       midc->mid_slave = mid_slave;
+       return 0;
+}
 /**
  * intel_mid_dma_device_control -      DMA device control
  * @chan: chan for DMA control
@@ -428,49 +538,41 @@ static int intel_mid_dma_device_control(struct dma_chan *chan,
        struct intel_mid_dma_chan       *midc = to_intel_mid_dma_chan(chan);
        struct middma_device    *mid = to_middma_device(chan->device);
        struct intel_mid_dma_desc       *desc, *_desc;
-       LIST_HEAD(list);
+       union intel_mid_dma_cfg_lo cfg_lo;
+
+       if (cmd == DMA_SLAVE_CONFIG)
+               return dma_slave_control(chan, arg);
 
        if (cmd != DMA_TERMINATE_ALL)
                return -ENXIO;
 
        spin_lock_bh(&midc->lock);
-       if (midc->in_use == false) {
+       if (midc->busy == false) {
                spin_unlock_bh(&midc->lock);
                return 0;
        }
-       list_splice_init(&midc->free_list, &list);
-       midc->descs_allocated = 0;
-       midc->slave = NULL;
-
+       /*Suspend and disable the channel*/
+       cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW);
+       cfg_lo.cfgx.ch_susp = 1;
+       iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW);
+       iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
+       midc->busy = false;
        /* Disable interrupts */
        disable_dma_interrupt(midc);
+       midc->descs_allocated = 0;
 
        spin_unlock_bh(&midc->lock);
-       list_for_each_entry_safe(desc, _desc, &list, desc_node) {
-               pr_debug("MDMA: freeing descriptor %p\n", desc);
-               pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
+       list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
+               if (desc->lli != NULL) {
+                       pci_pool_free(desc->lli_pool, desc->lli,
+                                               desc->lli_phys);
+                       pci_pool_destroy(desc->lli_pool);
+               }
+               list_move(&desc->desc_node, &midc->free_list);
        }
        return 0;
 }
 
-/**
- * intel_mid_dma_prep_slave_sg -       Prep slave sg txn
- * @chan: chan for DMA transfer
- * @sgl: scatter gather list
- * @sg_len: length of sg txn
- * @direction: DMA transfer dirtn
- * @flags: DMA flags
- *
- * Do DMA sg txn: NOT supported now
- */
-static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
-                       struct dma_chan *chan, struct scatterlist *sgl,
-                       unsigned int sg_len, enum dma_data_direction direction,
-                       unsigned long flags)
-{
-       /*not supported now*/
-       return NULL;
-}
 
 /**
  * intel_mid_dma_prep_memcpy - Prep memcpy txn
@@ -495,23 +597,24 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
        union intel_mid_dma_ctl_hi ctl_hi;
        union intel_mid_dma_cfg_lo cfg_lo;
        union intel_mid_dma_cfg_hi cfg_hi;
-       enum intel_mid_dma_width width = 0;
+       enum dma_slave_buswidth width;
 
        pr_debug("MDMA: Prep for memcpy\n");
-       WARN_ON(!chan);
+       BUG_ON(!chan);
        if (!len)
                return NULL;
 
-       mids = chan->private;
-       WARN_ON(!mids);
-
        midc = to_intel_mid_dma_chan(chan);
-       WARN_ON(!midc);
+       BUG_ON(!midc);
+
+       mids = midc->mid_slave;
+       BUG_ON(!mids);
 
        pr_debug("MDMA:called for DMA %x CH %d Length %zu\n",
                                midc->dma->pci_id, midc->ch_id, len);
        pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n",
-               mids->cfg_mode, mids->dirn, mids->hs_mode, mids->src_width);
+                       mids->cfg_mode, mids->dma_slave.direction,
+                       mids->hs_mode, mids->dma_slave.src_addr_width);
 
        /*calculate CFG_LO*/
        if (mids->hs_mode == LNW_DMA_SW_HS) {
@@ -530,13 +633,13 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
                if (midc->dma->pimr_mask) {
                        cfg_hi.cfgx.protctl = 0x0; /*default value*/
                        cfg_hi.cfgx.fifo_mode = 1;
-                       if (mids->dirn == DMA_TO_DEVICE) {
+                       if (mids->dma_slave.direction == DMA_TO_DEVICE) {
                                cfg_hi.cfgx.src_per = 0;
                                if (mids->device_instance == 0)
                                        cfg_hi.cfgx.dst_per = 3;
                                if (mids->device_instance == 1)
                                        cfg_hi.cfgx.dst_per = 1;
-                       } else if (mids->dirn == DMA_FROM_DEVICE) {
+                       } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
                                if (mids->device_instance == 0)
                                        cfg_hi.cfgx.src_per = 2;
                                if (mids->device_instance == 1)
@@ -552,7 +655,8 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
 
        /*calculate CTL_HI*/
        ctl_hi.ctlx.reser = 0;
-       width = mids->src_width;
+       ctl_hi.ctlx.done  = 0;
+       width = mids->dma_slave.src_addr_width;
 
        ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size);
        pr_debug("MDMA:calc len %d for block size %d\n",
@@ -560,21 +664,21 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
        /*calculate CTL_LO*/
        ctl_lo.ctl_lo = 0;
        ctl_lo.ctlx.int_en = 1;
-       ctl_lo.ctlx.dst_tr_width = mids->dst_width;
-       ctl_lo.ctlx.src_tr_width = mids->src_width;
-       ctl_lo.ctlx.dst_msize = mids->src_msize;
-       ctl_lo.ctlx.src_msize = mids->dst_msize;
+       ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width;
+       ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width;
+       ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst;
+       ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst;
 
        if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
                ctl_lo.ctlx.tt_fc = 0;
                ctl_lo.ctlx.sinc = 0;
                ctl_lo.ctlx.dinc = 0;
        } else {
-               if (mids->dirn == DMA_TO_DEVICE) {
+               if (mids->dma_slave.direction == DMA_TO_DEVICE) {
                        ctl_lo.ctlx.sinc = 0;
                        ctl_lo.ctlx.dinc = 2;
                        ctl_lo.ctlx.tt_fc = 1;
-               } else if (mids->dirn == DMA_FROM_DEVICE) {
+               } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
                        ctl_lo.ctlx.sinc = 2;
                        ctl_lo.ctlx.dinc = 0;
                        ctl_lo.ctlx.tt_fc = 2;
@@ -597,7 +701,10 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
        desc->ctl_lo = ctl_lo.ctl_lo;
        desc->ctl_hi = ctl_hi.ctl_hi;
        desc->width = width;
-       desc->dirn = mids->dirn;
+       desc->dirn = mids->dma_slave.direction;
+       desc->lli_phys = 0;
+       desc->lli = NULL;
+       desc->lli_pool = NULL;
        return &desc->txd;
 
 err_desc_get:
@@ -605,6 +712,85 @@ err_desc_get:
        midc_desc_put(midc, desc);
        return NULL;
 }
+/**
+ * intel_mid_dma_prep_slave_sg -       Prep slave sg txn
+ * @chan: chan for DMA transfer
+ * @sgl: scatter gather list
+ * @sg_len: length of sg txn
+ * @direction: DMA transfer dirtn
+ * @flags: DMA flags
+ *
+ * Prepares an LLI based peripheral transfer
+ */
+static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
+                       struct dma_chan *chan, struct scatterlist *sgl,
+                       unsigned int sg_len, enum dma_data_direction direction,
+                       unsigned long flags)
+{
+       struct intel_mid_dma_chan *midc = NULL;
+       struct intel_mid_dma_slave *mids = NULL;
+       struct intel_mid_dma_desc *desc = NULL;
+       struct dma_async_tx_descriptor *txd = NULL;
+       union intel_mid_dma_ctl_lo ctl_lo;
+
+       pr_debug("MDMA: Prep for slave SG\n");
+
+       if (!sg_len) {
+               pr_err("MDMA: Invalid SG length\n");
+               return NULL;
+       }
+       midc = to_intel_mid_dma_chan(chan);
+       BUG_ON(!midc);
+
+       mids = midc->mid_slave;
+       BUG_ON(!mids);
+
+       if (!midc->dma->pimr_mask) {
+               pr_debug("MDMA: SG list is not supported by this controller\n");
+               return  NULL;
+       }
+
+       pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
+                       sg_len, direction, flags);
+
+       txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sgl->length, flags);
+       if (NULL == txd) {
+               pr_err("MDMA: Prep memcpy failed\n");
+               return NULL;
+       }
+       desc = to_intel_mid_dma_desc(txd);
+       desc->dirn = direction;
+       ctl_lo.ctl_lo = desc->ctl_lo;
+       ctl_lo.ctlx.llp_dst_en = 1;
+       ctl_lo.ctlx.llp_src_en = 1;
+       desc->ctl_lo = ctl_lo.ctl_lo;
+       desc->lli_length = sg_len;
+       desc->current_lli = 0;
+       /* DMA coherent memory pool for LLI descriptors*/
+       desc->lli_pool = pci_pool_create("intel_mid_dma_lli_pool",
+                               midc->dma->pdev,
+                               (sizeof(struct intel_mid_dma_lli)*sg_len),
+                               32, 0);
+       if (NULL == desc->lli_pool) {
+               pr_err("MID_DMA:LLI pool create failed\n");
+               return NULL;
+       }
+
+       desc->lli = pci_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys);
+       if (!desc->lli) {
+               pr_err("MID_DMA: LLI alloc failed\n");
+               pci_pool_destroy(desc->lli_pool);
+               return NULL;
+       }
+
+       midc_lli_fill_sg(midc, desc, sgl, sg_len, flags);
+       if (flags & DMA_PREP_INTERRUPT) {
+               iowrite32(UNMASK_INTR_REG(midc->ch_id),
+                               midc->dma_base + MASK_BLOCK);
+               pr_debug("MDMA:Enabled Block interrupt\n");
+       }
+       return &desc->txd;
+}
 
 /**
  * intel_mid_dma_free_chan_resources - Frees dma resources
@@ -618,11 +804,11 @@ static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
        struct middma_device    *mid = to_middma_device(chan->device);
        struct intel_mid_dma_desc       *desc, *_desc;
 
-       if (true == midc->in_use) {
+       if (true == midc->busy) {
                /*trying to free ch in use!!!!!*/
                pr_err("ERR_MDMA: trying to free ch in use\n");
        }
-
+       pm_runtime_put(&mid->pdev->dev);
        spin_lock_bh(&midc->lock);
        midc->descs_allocated = 0;
        list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
@@ -639,6 +825,7 @@ static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
        }
        spin_unlock_bh(&midc->lock);
        midc->in_use = false;
+       midc->busy = false;
        /* Disable CH interrupts */
        iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK);
        iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR);
@@ -659,11 +846,20 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
        dma_addr_t              phys;
        int     i = 0;
 
+       pm_runtime_get_sync(&mid->pdev->dev);
+
+       if (mid->state == SUSPENDED) {
+               if (dma_resume(mid->pdev)) {
+                       pr_err("ERR_MDMA: resume failed\n");
+                       return -EFAULT;
+               }
+       }
 
        /* ASSERT:  channel is idle */
        if (test_ch_en(mid->dma_base, midc->ch_id)) {
                /*ch is not idle*/
                pr_err("ERR_MDMA: ch not idle\n");
+               pm_runtime_put(&mid->pdev->dev);
                return -EIO;
        }
        midc->completed = chan->cookie = 1;
@@ -674,6 +870,7 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
                desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys);
                if (!desc) {
                        pr_err("ERR_MDMA: desc failed\n");
+                       pm_runtime_put(&mid->pdev->dev);
                        return -ENOMEM;
                        /*check*/
                }
@@ -686,7 +883,8 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
                list_add_tail(&desc->desc_node, &midc->free_list);
        }
        spin_unlock_bh(&midc->lock);
-       midc->in_use = false;
+       midc->in_use = true;
+       midc->busy = false;
        pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i);
        return i;
 }
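Worth noting the pairing discipline these hunks establish: alloc_chan_resources() takes a runtime-PM reference with pm_runtime_get_sync() and every error path drops it again, while free_chan_resources() drops the reference on the normal path. A condensed sketch of the pattern (illustrative names, assuming pm_runtime_enable() was called in probe):

#include <linux/pm_runtime.h>

static int example_alloc(struct device *dev)
{
	int err = 0;

	pm_runtime_get_sync(dev);	/* wake the device, hold a reference */

	/* ... allocate descriptors, setting err on failure ... */
	if (err) {
		pm_runtime_put(dev);	/* every error path must drop the ref */
		return err;
	}
	return 0;
}

static void example_free(struct device *dev)
{
	/* ... release channel resources ... */
	pm_runtime_put(dev);		/* the device may now autosuspend */
}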
@@ -715,7 +913,7 @@ static void dma_tasklet(unsigned long data)
 {
        struct middma_device *mid = NULL;
        struct intel_mid_dma_chan *midc = NULL;
-       u32 status;
+       u32 status, raw_tfr, raw_block;
        int i;
 
        mid = (struct middma_device *)data;
@@ -724,8 +922,9 @@ static void dma_tasklet(unsigned long data)
                return;
        }
        pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id);
-       status = ioread32(mid->dma_base + RAW_TFR);
-       pr_debug("MDMA:RAW_TFR %x\n", status);
+       raw_tfr = ioread32(mid->dma_base + RAW_TFR);
+       raw_block = ioread32(mid->dma_base + RAW_BLOCK);
+       status = raw_tfr | raw_block;
        status &= mid->intr_mask;
        while (status) {
                /*txn interrupt*/
@@ -741,15 +940,23 @@ static void dma_tasklet(unsigned long data)
                }
                pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
                                status, midc->ch_id, i);
+               midc->raw_tfr = raw_tfr;
+               midc->raw_block = raw_block;
+               spin_lock_bh(&midc->lock);
                /* clear these interrupts first */
                iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR);
-               iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_BLOCK);
-
-               spin_lock_bh(&midc->lock);
+               if (raw_block) {
+                       iowrite32((1 << midc->ch_id),
+                               mid->dma_base + CLEAR_BLOCK);
+               }
                midc_scan_descriptors(mid, midc);
                pr_debug("MDMA:Scan of desc... complete, unmasking\n");
                iowrite32(UNMASK_INTR_REG(midc->ch_id),
                                mid->dma_base + MASK_TFR);
+               if (raw_block) {
+                       iowrite32(UNMASK_INTR_REG(midc->ch_id),
+                               mid->dma_base + MASK_BLOCK);
+               }
                spin_unlock_bh(&midc->lock);
        }
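The tasklet now latches RAW_TFR and RAW_BLOCK once, clears only what was actually raised, and unmasks under the channel lock; together with the interrupt-handler hunk below this is the classic top-half/bottom-half split. A reduced sketch (illustrative types; the register macros are the ones defined in intel_mid_dma_regs.h later in this patch):

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/types.h>

struct example_dev {
	void __iomem *base;
	int ch_id;
	struct tasklet_struct tasklet;
};

static irqreturn_t example_irq(int irq, void *data)	/* top half */
{
	struct example_dev *d = data;
	u32 raw = ioread32(d->base + RAW_TFR);

	if (!raw)
		return IRQ_NONE;	/* shared line: not our interrupt */

	/* mask (do not clear) the source, then defer the real work */
	iowrite32(raw << INT_MASK_WE, d->base + MASK_TFR);
	tasklet_schedule(&d->tasklet);
	return IRQ_HANDLED;
}

static void example_tasklet(unsigned long arg)		/* bottom half */
{
	struct example_dev *d = (struct example_dev *)arg;

	iowrite32(1 << d->ch_id, d->base + CLEAR_TFR);	/* ack */
	/* ... scan descriptor lists, run completions ... */
	iowrite32(UNMASK_INTR_REG(d->ch_id), d->base + MASK_TFR);
}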
 
@@ -804,9 +1011,14 @@ static void dma_tasklet2(unsigned long data)
 static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
 {
        struct middma_device *mid = data;
-       u32 status;
+       u32 tfr_status, err_status;
        int call_tasklet = 0;
 
+       tfr_status = ioread32(mid->dma_base + RAW_TFR);
+       err_status = ioread32(mid->dma_base + RAW_ERR);
+       if (!tfr_status && !err_status)
+               return IRQ_NONE;
+
        /*DMA Interrupt*/
        pr_debug("MDMA:Got an interrupt on irq %d\n", irq);
        if (!mid) {
@@ -814,19 +1026,18 @@ static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
                return -EINVAL;
        }
 
-       status = ioread32(mid->dma_base + RAW_TFR);
-       pr_debug("MDMA: Status %x, Mask %x\n", status, mid->intr_mask);
-       status &= mid->intr_mask;
-       if (status) {
+       pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask);
+       tfr_status &= mid->intr_mask;
+       if (tfr_status) {
                /*need to disable intr*/
-               iowrite32((status << 8), mid->dma_base + MASK_TFR);
-               pr_debug("MDMA: Calling tasklet %x\n", status);
+               iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_TFR);
+               iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_BLOCK);
+               pr_debug("MDMA: Calling tasklet %x\n", tfr_status);
                call_tasklet = 1;
        }
-       status = ioread32(mid->dma_base + RAW_ERR);
-       status &= mid->intr_mask;
-       if (status) {
-               iowrite32(MASK_INTR_REG(status), mid->dma_base + MASK_ERR);
+       err_status &= mid->intr_mask;
+       if (err_status) {
+               iowrite32(MASK_INTR_REG(err_status), mid->dma_base + MASK_ERR);
                call_tasklet = 1;
        }
        if (call_tasklet)
@@ -856,7 +1067,6 @@ static int mid_setup_dma(struct pci_dev *pdev)
 {
        struct middma_device *dma = pci_get_drvdata(pdev);
        int err, i;
-       unsigned int irq_level;
 
        /* DMA coherent memory pool for DMA descriptor allocations */
        dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev,
@@ -884,6 +1094,7 @@ static int mid_setup_dma(struct pci_dev *pdev)
        pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan);
        /*init CH structures*/
        dma->intr_mask = 0;
+       dma->state = RUNNING;
        for (i = 0; i < dma->max_chan; i++) {
                struct intel_mid_dma_chan *midch = &dma->ch[i];
 
@@ -943,7 +1154,6 @@ static int mid_setup_dma(struct pci_dev *pdev)
 
        /*register irq */
        if (dma->pimr_mask) {
-               irq_level = IRQF_SHARED;
                pr_debug("MDMA:Requesting irq shared for DMAC1\n");
                err = request_irq(pdev->irq, intel_mid_dma_interrupt1,
                        IRQF_SHARED, "INTEL_MID_DMAC1", dma);
@@ -951,10 +1161,9 @@ static int mid_setup_dma(struct pci_dev *pdev)
                        goto err_irq;
        } else {
                dma->intr_mask = 0x03;
-               irq_level = 0;
                pr_debug("MDMA:Requesting irq for DMAC2\n");
                err = request_irq(pdev->irq, intel_mid_dma_interrupt2,
-                       0, "INTEL_MID_DMAC2", dma);
+                       IRQF_SHARED, "INTEL_MID_DMAC2", dma);
                if (0 != err)
                        goto err_irq;
        }
@@ -1070,6 +1279,9 @@ static int __devinit intel_mid_dma_probe(struct pci_dev *pdev,
        if (err)
                goto err_dma;
 
+       pm_runtime_set_active(&pdev->dev);
+       pm_runtime_enable(&pdev->dev);
+       pm_runtime_allow(&pdev->dev);
        return 0;
 
 err_dma:
@@ -1104,6 +1316,85 @@ static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
        pci_disable_device(pdev);
 }
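The probe hunk above brackets the controller for runtime PM: set_active records that the device is already powered, enable hands it to the PM core, and allow permits user-space autosuspend policy. A sketch of the usual probe/remove pairing (the remove-side calls are the conventional counterpart, not part of this hunk):

#include <linux/pci.h>
#include <linux/pm_runtime.h>

static int example_probe(struct pci_dev *pdev)
{
	/* ... controller setup ... */
	pm_runtime_set_active(&pdev->dev);	/* device is powered right now */
	pm_runtime_enable(&pdev->dev);		/* let the PM core manage it */
	pm_runtime_allow(&pdev->dev);		/* permit autosuspend by policy */
	return 0;
}

static void example_remove(struct pci_dev *pdev)
{
	pm_runtime_forbid(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	/* ... teardown ... */
}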
 
+/* Power Management */
+/**
+* dma_suspend - PCI suspend function
+*
+* @pci: PCI device structure
+* @state: PM message
+*
+* This function is called by the OS when a power event occurs
+*/
+int dma_suspend(struct pci_dev *pci, pm_message_t state)
+{
+       int i;
+       struct middma_device *device = pci_get_drvdata(pci);
+       pr_debug("MDMA: dma_suspend called\n");
+
+       for (i = 0; i < device->max_chan; i++) {
+               if (device->ch[i].in_use)
+                       return -EAGAIN;
+       }
+       device->state = SUSPENDED;
+       pci_set_drvdata(pci, device);
+       pci_save_state(pci);
+       pci_disable_device(pci);
+       pci_set_power_state(pci, PCI_D3hot);
+       return 0;
+}
+
+/**
+* dma_resume - PCI resume function
+*
+* @pci:        PCI device structure
+*
+* This function is called by the OS when a power event occurs
+*/
+int dma_resume(struct pci_dev *pci)
+{
+       int ret;
+       struct middma_device *device = pci_get_drvdata(pci);
+
+       pr_debug("MDMA: dma_resume called\n");
+       pci_set_power_state(pci, PCI_D0);
+       pci_restore_state(pci);
+       ret = pci_enable_device(pci);
+       if (ret) {
+               pr_err("MDMA: device can't be enabled for %x\n", pci->device);
+               return ret;
+       }
+       device->state = RUNNING;
+       iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
+       pci_set_drvdata(pci, device);
+       return 0;
+}
+
+static int dma_runtime_suspend(struct device *dev)
+{
+       struct pci_dev *pci_dev = to_pci_dev(dev);
+       return dma_suspend(pci_dev, PMSG_SUSPEND);
+}
+
+static int dma_runtime_resume(struct device *dev)
+{
+       struct pci_dev *pci_dev = to_pci_dev(dev);
+       return dma_resume(pci_dev);
+}
+
+static int dma_runtime_idle(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct middma_device *device = pci_get_drvdata(pdev);
+       int i;
+
+       for (i = 0; i < device->max_chan; i++) {
+               if (device->ch[i].in_use)
+                       return -EAGAIN;
+       }
+
+       return pm_schedule_suspend(dev, 0);
+}
+
 /******************************************************************************
 * PCI stuff
 */
@@ -1116,11 +1407,24 @@ static struct pci_device_id intel_mid_dma_ids[] = {
 };
 MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids);
 
+static const struct dev_pm_ops intel_mid_dma_pm = {
+       .runtime_suspend = dma_runtime_suspend,
+       .runtime_resume = dma_runtime_resume,
+       .runtime_idle = dma_runtime_idle,
+};
+
 static struct pci_driver intel_mid_dma_pci = {
        .name           =       "Intel MID DMA",
        .id_table       =       intel_mid_dma_ids,
        .probe          =       intel_mid_dma_probe,
        .remove         =       __devexit_p(intel_mid_dma_remove),
+#ifdef CONFIG_PM
+       .suspend = dma_suspend,
+       .resume = dma_resume,
+       .driver = {
+               .pm = &intel_mid_dma_pm,
+       },
+#endif
 };
 
 static int __init intel_mid_dma_init(void)
index d81aa658ab092968cf66a7f28487298fd2f649b2..709fecbdde7951543392324b8e628853453d39b1 100644 (file)
 #include <linux/dmapool.h>
 #include <linux/pci_ids.h>
 
-#define INTEL_MID_DMA_DRIVER_VERSION "1.0.5"
+#define INTEL_MID_DMA_DRIVER_VERSION "1.1.0"
 
 #define        REG_BIT0                0x00000001
 #define        REG_BIT8                0x00000100
-
+#define INT_MASK_WE            0x8
+#define CLEAR_DONE             0xFFFFEFFF
 #define UNMASK_INTR_REG(chan_num) \
        ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num))
 #define MASK_INTR_REG(chan_num) (REG_BIT8 << chan_num)
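These follow the DesignWare convention of a split mask register: the low byte carries per-channel mask values and the high byte the matching write-enable bits, which is what the INT_MASK_WE shift of 8 encodes. A worked example for channel 2 (assuming that layout):

/*
 *   UNMASK_INTR_REG(2) = (REG_BIT0 << 2) | (REG_BIT8 << 2) = 0x0404
 *                        write-enable set, value 1 -> interrupt unmasked
 *   MASK_INTR_REG(2)   = (REG_BIT8 << 2)                   = 0x0400
 *                        write-enable set, value 0 -> interrupt masked
 *
 * INT_MASK_WE turns a raw status byte into its write-enable half, as in
 * the interrupt handler earlier in this patch:
 *
 *   iowrite32(tfr_status << INT_MASK_WE, mid->dma_base + MASK_TFR);
 */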
@@ -41,6 +42,9 @@
 #define ENABLE_CHANNEL(chan_num) \
        ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num))
 
+#define DISABLE_CHANNEL(chan_num) \
+       (REG_BIT8 << chan_num)
+
 #define DESCS_PER_CHANNEL      16
 /*DMA Registers*/
 /*registers associated with channel programming*/
@@ -50,6 +54,7 @@
 /*CH X REG = (DMA_CH_SIZE)*CH_NO + REG*/
 #define SAR                    0x00 /* Source Address Register*/
 #define DAR                    0x08 /* Destination Address Register*/
+#define LLP                    0x10 /* Linked List Pointer Register*/
 #define CTL_LOW                        0x18 /* Control Register*/
 #define CTL_HIGH               0x1C /* Control Register*/
 #define CFG_LOW                        0x40 /* Configuration Register Low*/
@@ -112,8 +117,8 @@ union intel_mid_dma_ctl_lo {
 union intel_mid_dma_ctl_hi {
        struct {
                u32     block_ts:12;    /*block transfer size*/
-                                       /*configured by DMAC*/
-               u32     reser:20;
+               u32     done:1;         /*Done - updated by DMAC*/
+               u32     reser:19;       /*configured by DMAC*/
        } ctlx;
        u32     ctl_hi;
 
@@ -152,6 +157,7 @@ union intel_mid_dma_cfg_hi {
        u32     cfg_hi;
 };
 
+
 /**
  * struct intel_mid_dma_chan - internal mid representation of a DMA channel
  * @chan: dma_chan structure representation for mid chan
@@ -166,7 +172,10 @@ union intel_mid_dma_cfg_hi {
  * @slave: dma slave structure
  * @descs_allocated: total number of descriptors allocated
  * @dma: dma device structure pointer
+ * @busy: bool representing if ch is busy (active txn) or not
  * @in_use: bool representing if ch is in use or not
+ * @raw_tfr: raw tfr interrupt received
+ * @raw_block: raw block interrupt received
  */
 struct intel_mid_dma_chan {
        struct dma_chan         chan;
@@ -178,10 +187,13 @@ struct intel_mid_dma_chan {
        struct list_head        active_list;
        struct list_head        queue;
        struct list_head        free_list;
-       struct intel_mid_dma_slave      *slave;
        unsigned int            descs_allocated;
        struct middma_device    *dma;
+       bool                    busy;
        bool                    in_use;
+       u32                     raw_tfr;
+       u32                     raw_block;
+       struct intel_mid_dma_slave *mid_slave;
 };
 
 static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan(
@@ -190,6 +202,10 @@ static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan(
        return container_of(chan, struct intel_mid_dma_chan, chan);
 }
 
+enum intel_mid_dma_state {
+       RUNNING = 0,
+       SUSPENDED,
+};
 /**
  * struct middma_device - internal representation of a DMA device
  * @pdev: PCI device
@@ -205,6 +221,7 @@ static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan(
  * @max_chan: max number of chs supported (from drv_data)
  * @block_size: Block size of DMA transfer supported (from drv_data)
  * @pimr_mask: MMIO register addr for peripheral interrupt (from drv_data)
+ * @state: dma PM device state
  */
 struct middma_device {
        struct pci_dev          *pdev;
@@ -220,6 +237,7 @@ struct middma_device {
        int                     max_chan;
        int                     block_size;
        unsigned int            pimr_mask;
+       enum intel_mid_dma_state state;
 };
 
 static inline struct middma_device *to_middma_device(struct dma_device *common)
@@ -238,14 +256,27 @@ struct intel_mid_dma_desc {
        u32                             cfg_lo;
        u32                             ctl_lo;
        u32                             ctl_hi;
+       struct pci_pool                 *lli_pool;
+       struct intel_mid_dma_lli        *lli;
+       dma_addr_t                      lli_phys;
+       unsigned int                    lli_length;
+       unsigned int                    current_lli;
        dma_addr_t                      next;
        enum dma_data_direction         dirn;
        enum dma_status                 status;
-       enum intel_mid_dma_width        width; /*width of DMA txn*/
+       enum dma_slave_buswidth         width; /*width of DMA txn*/
        enum intel_mid_dma_mode         cfg_mode; /*mode configuration*/
 
 };
 
+struct intel_mid_dma_lli {
+       dma_addr_t                      sar;
+       dma_addr_t                      dar;
+       dma_addr_t                      llp;
+       u32                             ctl_lo;
+       u32                             ctl_hi;
+} __attribute__ ((packed));
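struct intel_mid_dma_lli mirrors the hardware descriptor: source and destination addresses, a link pointer, and the two control words. A hypothetical sketch of how such entries chain by bus address (the real fill is midc_lli_fill_sg() elsewhere in this patch; the null-terminated llp convention is an assumption from the DesignWare-style layout):

static void example_chain_lli(struct intel_mid_dma_lli *lli,
			      dma_addr_t lli_phys, int nents)
{
	int i;

	for (i = 0; i < nents; i++) {
		if (i < nents - 1)
			/* each entry points at the bus address of the next */
			lli[i].llp = lli_phys +
				(i + 1) * sizeof(struct intel_mid_dma_lli);
		else
			lli[i].llp = 0;	/* null link terminates the chain */
	}
}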
+
 static inline int test_ch_en(void __iomem *dma, u32 ch_no)
 {
        u32 en_reg = ioread32(dma + DMA_CHAN_EN);
@@ -257,4 +288,14 @@ static inline struct intel_mid_dma_desc *to_intel_mid_dma_desc
 {
        return container_of(txd, struct intel_mid_dma_desc, txd);
 }
+
+static inline struct intel_mid_dma_slave *to_intel_mid_dma_slave
+               (struct dma_slave_config *slave)
+{
+       return container_of(slave, struct intel_mid_dma_slave, dma_slave);
+}
+
+
+int dma_resume(struct pci_dev *pci);
+
 #endif /*__INTEL_MID_DMAC_REGS_H__*/
index 3533948b88ba919de7a1ba3c464b3a49108fc5bc..92b679024fedd9e033e9cbae311f7f8d7f221142 100644 (file)
@@ -926,6 +926,7 @@ static void __devexit pch_dma_remove(struct pci_dev *pdev)
 static const struct pci_device_id pch_dma_id_table[] = {
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_8CH), 8 },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_4CH), 4 },
+       { 0, },
 };
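The one-line fix adds the all-zero sentinel that the PCI core's table walkers (pci_match_id() and the modalias machinery behind MODULE_DEVICE_TABLE) use to find the end of the array; without it the match loop walks off the table. The general shape, with a hypothetical device ID:

static const struct pci_device_id example_ids[] = {
	{ PCI_VDEVICE(INTEL, 0x1234), 0 },	/* hypothetical ID + driver_data */
	{ 0, },					/* sentinel terminates the table */
};
MODULE_DEVICE_TABLE(pci, example_ids);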
 
 static struct pci_driver pch_dma_driver = {
index 17e2600a00cf5c292bb1c99c35ffe96eaa4cbc57..fab68a5532054650f979143d2508827a458da9e9 100644 (file)
@@ -1,11 +1,8 @@
 /*
- * driver/dma/ste_dma40.c
- *
- * Copyright (C) ST-Ericsson 2007-2010
+ * Copyright (C) ST-Ericsson SA 2007-2010
+ * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
  * License terms: GNU General Public License (GPL) version 2
- * Author: Per Friden <per.friden@stericsson.com>
- * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
- *
  */
 
 #include <linux/kernel.h>
@@ -14,6 +11,7 @@
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
+#include <linux/err.h>
 
 #include <plat/ste_dma40.h>
 
 
 /* Hardware requirement on LCLA alignment */
 #define LCLA_ALIGNMENT 0x40000
+
+/* Max number of links per event group */
+#define D40_LCLA_LINK_PER_EVENT_GRP 128
+#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP
+
 /* Attempts before giving up to trying to get pages that are aligned */
 #define MAX_LCLA_ALLOC_ATTEMPTS 256
 
@@ -41,7 +44,7 @@
 #define D40_ALLOC_LOG_FREE     0
 
 /* Hardware designer of the block */
-#define D40_PERIPHID2_DESIGNER 0x8
+#define D40_HW_DESIGNER 0x8
 
 /**
  * enum d40_command - The different commands and/or statuses.
@@ -84,18 +87,17 @@ struct d40_lli_pool {
  * @lli_log: Same as above but for logical channels.
  * @lli_pool: The pool with two entries pre-allocated.
  * @lli_len: Number of llis of current descriptor.
- * @lli_count: Number of transfered llis.
- * @lli_tx_len: Max number of LLIs per transfer, there can be
- * many transfer for one descriptor.
+ * @lli_current: Number of transferred llis.
+ * @lcla_alloc: Number of LCLA entries allocated.
  * @txd: DMA engine struct. Used, among other things, for communication
  * during a transfer.
  * @node: List entry.
- * @dir: The transfer direction of this job.
  * @is_in_client_list: true if the client owns this descriptor.
+ * @is_hw_linked: true if this job is automatically continued after
+ * the previous one by the hardware.
  *
  * This descriptor is used for both logical and physical transfers.
  */
-
 struct d40_desc {
        /* LLI physical */
        struct d40_phy_lli_bidir         lli_phy;
@@ -104,14 +106,14 @@ struct d40_desc {
 
        struct d40_lli_pool              lli_pool;
        int                              lli_len;
-       int                              lli_count;
-       u32                              lli_tx_len;
+       int                              lli_current;
+       int                              lcla_alloc;
 
        struct dma_async_tx_descriptor   txd;
        struct list_head                 node;
 
-       enum dma_data_direction          dir;
        bool                             is_in_client_list;
+       bool                             is_hw_linked;
 };
 
 /**
@@ -123,17 +125,14 @@ struct d40_desc {
  * @pages: The number of pages needed for all physical channels.
  * Only used later for clean-up on error
  * @lock: Lock to protect the content in this struct.
- * @alloc_map: Bitmap mapping between physical channel and LCLA entries.
- * @num_blocks: The number of entries of alloc_map. Equals to the
- * number of physical channels.
+ * @alloc_map: map of which LCLA entry is owned by which job.
  */
 struct d40_lcla_pool {
        void            *base;
        void            *base_unaligned;
        int              pages;
        spinlock_t       lock;
-       u32             *alloc_map;
-       int              num_blocks;
+       struct d40_desc **alloc_map;
 };
 
 /**
@@ -146,9 +145,7 @@ struct d40_lcla_pool {
  * this physical channel. Can also be free or physically allocated.
  * @allocated_dst: Same as for src but is dst.
  * allocated_dst and allocated_src uses the D40_ALLOC* defines as well as
- * event line number. Both allocated_src and allocated_dst can not be
- * allocated to a physical channel, since the interrupt handler has then
- * no way of figure out which one the interrupt belongs to.
+ * event line number.
  */
 struct d40_phy_res {
        spinlock_t lock;
@@ -178,6 +175,7 @@ struct d40_base;
  * @active: Active descriptor.
  * @queue: Queued jobs.
  * @dma_cfg: The client configuration of this dma channel.
+ * @configured: whether the dma_cfg configuration is valid
  * @base: Pointer to the device instance struct.
  * @src_def_cfg: Default cfg register setting for src.
  * @dst_def_cfg: Default cfg register setting for dst.
@@ -201,12 +199,12 @@ struct d40_chan {
        struct list_head                 active;
        struct list_head                 queue;
        struct stedma40_chan_cfg         dma_cfg;
+       bool                             configured;
        struct d40_base                 *base;
        /* Default register configurations */
        u32                              src_def_cfg;
        u32                              dst_def_cfg;
        struct d40_def_lcsp              log_def;
-       struct d40_lcla_elem             lcla;
        struct d40_log_lli_full         *lcpa;
        /* Runtime reconfiguration */
        dma_addr_t                      runtime_addr;
@@ -234,7 +232,6 @@ struct d40_chan {
  * @dma_both: dma_device channels that can do both memcpy and slave transfers.
  * @dma_slave: dma_device channels that can only do slave transfers.
  * @dma_memcpy: dma_device channels that can only do memcpy transfers.
- * @phy_chans: Room for all possible physical channels in system.
  * @log_chans: Room for all possible logical channels in system.
  * @lookup_log_chans: Used to map interrupt number to logical channel. Points
  * to log_chans entries.
@@ -340,9 +337,6 @@ static int d40_pool_lli_alloc(struct d40_desc *d40d,
                                              align);
                d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
                                              align);
-
-               d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src);
-               d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst);
        }
 
        return 0;
@@ -357,22 +351,67 @@ static void d40_pool_lli_free(struct d40_desc *d40d)
        d40d->lli_log.dst = NULL;
        d40d->lli_phy.src = NULL;
        d40d->lli_phy.dst = NULL;
-       d40d->lli_phy.src_addr = 0;
-       d40d->lli_phy.dst_addr = 0;
 }
 
-static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c,
-                                     struct d40_desc *desc)
+static int d40_lcla_alloc_one(struct d40_chan *d40c,
+                             struct d40_desc *d40d)
 {
-       dma_cookie_t cookie = d40c->chan.cookie;
+       unsigned long flags;
+       int i;
+       int ret = -EINVAL;
+       int p;
 
-       if (++cookie < 0)
-               cookie = 1;
+       spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
+
+       p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP;
 
-       d40c->chan.cookie = cookie;
-       desc->txd.cookie = cookie;
+       /*
+        * Allocate both src and dst at the same time, so only half of the
+        * event group's entries are scanned; the scan starts at 1, since
+        * entry 0 is reserved as the end-of-chain marker.
+        */
+       for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
+               if (!d40c->base->lcla_pool.alloc_map[p + i]) {
+                       d40c->base->lcla_pool.alloc_map[p + i] = d40d;
+                       d40d->lcla_alloc++;
+                       ret = i;
+                       break;
+               }
+       }
+
+       spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
+
+       return ret;
+}
+
+static int d40_lcla_free_all(struct d40_chan *d40c,
+                            struct d40_desc *d40d)
+{
+       unsigned long flags;
+       int i;
+       int ret = -EINVAL;
+
+       if (d40c->log_num == D40_PHY_CHAN)
+               return 0;
+
+       spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
+
+       for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
+               if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
+                                                   D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) {
+                       d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
+                                                       D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL;
+                       d40d->lcla_alloc--;
+                       if (d40d->lcla_alloc == 0) {
+                               ret = 0;
+                               break;
+                       }
+               }
+       }
+
+       spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
+
+       return ret;
 
-       return cookie;
 }
 
 static void d40_desc_remove(struct d40_desc *d40d)
@@ -382,28 +421,35 @@ static void d40_desc_remove(struct d40_desc *d40d)
 
 static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
 {
-       struct d40_desc *d;
-       struct d40_desc *_d;
+       struct d40_desc *desc = NULL;
 
        if (!list_empty(&d40c->client)) {
+               struct d40_desc *d;
+               struct d40_desc *_d;
+
                list_for_each_entry_safe(d, _d, &d40c->client, node)
                        if (async_tx_test_ack(&d->txd)) {
                                d40_pool_lli_free(d);
                                d40_desc_remove(d);
+                               desc = d;
+                               memset(desc, 0, sizeof(*desc));
                                break;
                        }
-       } else {
-               d = kmem_cache_alloc(d40c->base->desc_slab, GFP_NOWAIT);
-               if (d != NULL) {
-                       memset(d, 0, sizeof(struct d40_desc));
-                       INIT_LIST_HEAD(&d->node);
-               }
        }
-       return d;
+
+       if (!desc)
+               desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);
+
+       if (desc)
+               INIT_LIST_HEAD(&desc->node);
+
+       return desc;
 }
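d40_desc_get() now prefers recycling a descriptor the client has released and only falls back to the slab, with kmem_cache_zalloc() replacing the old alloc-plus-memset pair. The shape of the pattern, reduced (illustrative; it drops the async_tx_test_ack() gate the real code applies before recycling):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/string.h>

struct example_desc {
	struct list_head node;
	/* ... payload ... */
};

static struct example_desc *example_desc_get(struct list_head *recycled,
					     struct kmem_cache *slab)
{
	struct example_desc *desc = NULL;

	if (!list_empty(recycled)) {
		desc = list_first_entry(recycled, struct example_desc, node);
		list_del(&desc->node);
		memset(desc, 0, sizeof(*desc));	/* as good as freshly allocated */
	}

	if (!desc)
		desc = kmem_cache_zalloc(slab, GFP_NOWAIT);

	if (desc)
		INIT_LIST_HEAD(&desc->node);

	return desc;
}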
 
 static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
 {
+
+       d40_lcla_free_all(d40c, d40d);
        kmem_cache_free(d40c->base->desc_slab, d40d);
 }
 
@@ -412,6 +458,59 @@ static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
        list_add_tail(&desc->node, &d40c->active);
 }
 
+static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
+{
+       int curr_lcla = -EINVAL, next_lcla;
+
+       if (d40c->log_num == D40_PHY_CHAN) {
+               d40_phy_lli_write(d40c->base->virtbase,
+                                 d40c->phy_chan->num,
+                                 d40d->lli_phy.dst,
+                                 d40d->lli_phy.src);
+               d40d->lli_current = d40d->lli_len;
+       } else {
+
+               if ((d40d->lli_len - d40d->lli_current) > 1)
+                       curr_lcla = d40_lcla_alloc_one(d40c, d40d);
+
+               d40_log_lli_lcpa_write(d40c->lcpa,
+                                      &d40d->lli_log.dst[d40d->lli_current],
+                                      &d40d->lli_log.src[d40d->lli_current],
+                                      curr_lcla);
+
+               d40d->lli_current++;
+               for (; d40d->lli_current < d40d->lli_len; d40d->lli_current++) {
+                       struct d40_log_lli *lcla;
+
+                       if (d40d->lli_current + 1 < d40d->lli_len)
+                               next_lcla = d40_lcla_alloc_one(d40c, d40d);
+                       else
+                               next_lcla = -EINVAL;
+
+                       lcla = d40c->base->lcla_pool.base +
+                               d40c->phy_chan->num * 1024 +
+                               8 * curr_lcla * 2;
+
+                       d40_log_lli_lcla_write(lcla,
+                                              &d40d->lli_log.dst[d40d->lli_current],
+                                              &d40d->lli_log.src[d40d->lli_current],
+                                              next_lcla);
+
+                       (void) dma_map_single(d40c->base->dev, lcla,
+                                             2 * sizeof(struct d40_log_lli),
+                                             DMA_TO_DEVICE);
+
+                       curr_lcla = next_lcla;
+
+                       if (curr_lcla == -EINVAL) {
+                               d40d->lli_current++;
+                               break;
+                       }
+
+               }
+       }
+}
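The LCLA address arithmetic above is worth unpacking; the factor of 8 implies sizeof(struct d40_log_lli) == 8 (an assumption read off this expression, the type itself lives in the driver's ll header):

/*
 *   lcla = lcla_pool.base + phy_num * 1024 + curr_lcla * 2 * 8;
 *
 * One LCLA entry is a src plus a dst d40_log_lli, 2 * 8 = 16 bytes, and
 * each physical channel owns a 1024-byte region, so a channel has
 * 1024 / 16 = 64 entries.  That matches d40_lcla_alloc_one() above,
 * which scans i = 1 .. D40_LCLA_LINK_PER_EVENT_GRP / 2 - 1 (1..63),
 * entry 0 being reserved as the end-of-chain marker.
 */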
+
 static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
 {
        struct d40_desc *d;
@@ -443,68 +542,26 @@ static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
        return d;
 }
 
-/* Support functions for logical channels */
-
-static int d40_lcla_id_get(struct d40_chan *d40c)
+static struct d40_desc *d40_last_queued(struct d40_chan *d40c)
 {
-       int src_id = 0;
-       int dst_id = 0;
-       struct d40_log_lli *lcla_lidx_base =
-               d40c->base->lcla_pool.base + d40c->phy_chan->num * 1024;
-       int i;
-       int lli_per_log = d40c->base->plat_data->llis_per_log;
-       unsigned long flags;
-
-       if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)
-               return 0;
-
-       if (d40c->base->lcla_pool.num_blocks > 32)
-               return -EINVAL;
-
-       spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
-
-       for (i = 0; i < d40c->base->lcla_pool.num_blocks; i++) {
-               if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &
-                     (0x1 << i))) {
-                       d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |=
-                               (0x1 << i);
-                       break;
-               }
-       }
-       src_id = i;
-       if (src_id >= d40c->base->lcla_pool.num_blocks)
-               goto err;
+       struct d40_desc *d;
 
-       for (; i < d40c->base->lcla_pool.num_blocks; i++) {
-               if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &
-                     (0x1 << i))) {
-                       d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |=
-                               (0x1 << i);
+       if (list_empty(&d40c->queue))
+               return NULL;
+       list_for_each_entry(d, &d40c->queue, node)
+               if (list_is_last(&d->node, &d40c->queue))
                        break;
-               }
-       }
-
-       dst_id = i;
-       if (dst_id == src_id)
-               goto err;
-
-       d40c->lcla.src_id = src_id;
-       d40c->lcla.dst_id = dst_id;
-       d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
-       d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;
-
-       spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
-       return 0;
-err:
-       spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
-       return -EINVAL;
+       return d;
 }
 
+/* Support functions for logical channels */
+
 
 static int d40_channel_execute_command(struct d40_chan *d40c,
                                       enum d40_command command)
 {
-       int status, i;
+       u32 status;
+       int i;
        void __iomem *active_reg;
        int ret = 0;
        unsigned long flags;
@@ -567,35 +624,19 @@ done:
 static void d40_term_all(struct d40_chan *d40c)
 {
        struct d40_desc *d40d;
-       unsigned long flags;
 
        /* Release active descriptors */
        while ((d40d = d40_first_active_get(d40c))) {
                d40_desc_remove(d40d);
-
-               /* Return desc to free-list */
                d40_desc_free(d40c, d40d);
        }
 
        /* Release queued descriptors waiting for transfer */
        while ((d40d = d40_first_queued(d40c))) {
                d40_desc_remove(d40d);
-
-               /* Return desc to free-list */
                d40_desc_free(d40c, d40d);
        }
 
-       spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
-
-       d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &=
-               (~(0x1 << d40c->lcla.dst_id));
-       d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &=
-               (~(0x1 << d40c->lcla.src_id));
-
-       d40c->lcla.src_id = -1;
-       d40c->lcla.dst_id = -1;
-
-       spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
 
        d40c->pending_tx = 0;
        d40c->busy = false;
@@ -640,45 +681,47 @@ static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
 
 static u32 d40_chan_has_events(struct d40_chan *d40c)
 {
-       u32 val = 0;
+       u32 val;
 
-       /* If SSLNK or SDLNK is zero all events are disabled */
-       if ((d40c->dma_cfg.dir ==  STEDMA40_PERIPH_TO_MEM) ||
-           (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
-               val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
-                           d40c->phy_chan->num * D40_DREG_PCDELTA +
-                           D40_CHAN_REG_SSLNK);
-
-       if (d40c->dma_cfg.dir !=  STEDMA40_PERIPH_TO_MEM)
-               val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
-                           d40c->phy_chan->num * D40_DREG_PCDELTA +
-                           D40_CHAN_REG_SDLNK);
+       val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
+                   d40c->phy_chan->num * D40_DREG_PCDELTA +
+                   D40_CHAN_REG_SSLNK);
+
+       val |= readl(d40c->base->virtbase + D40_DREG_PCBASE +
+                    d40c->phy_chan->num * D40_DREG_PCDELTA +
+                    D40_CHAN_REG_SDLNK);
        return val;
 }
 
-static void d40_config_enable_lidx(struct d40_chan *d40c)
+static u32 d40_get_prmo(struct d40_chan *d40c)
 {
-       /* Set LIDX for lcla */
-       writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
-              D40_SREG_ELEM_LOG_LIDX_MASK,
-              d40c->base->virtbase + D40_DREG_PCBASE +
-              d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);
-
-       writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
-              D40_SREG_ELEM_LOG_LIDX_MASK,
-              d40c->base->virtbase + D40_DREG_PCBASE +
-              d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
+       static const unsigned int phy_map[] = {
+               [STEDMA40_PCHAN_BASIC_MODE]
+                       = D40_DREG_PRMO_PCHAN_BASIC,
+               [STEDMA40_PCHAN_MODULO_MODE]
+                       = D40_DREG_PRMO_PCHAN_MODULO,
+               [STEDMA40_PCHAN_DOUBLE_DST_MODE]
+                       = D40_DREG_PRMO_PCHAN_DOUBLE_DST,
+       };
+       static const unsigned int log_map[] = {
+               [STEDMA40_LCHAN_SRC_PHY_DST_LOG]
+                       = D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
+               [STEDMA40_LCHAN_SRC_LOG_DST_PHY]
+                       = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
+               [STEDMA40_LCHAN_SRC_LOG_DST_LOG]
+                       = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
+       };
+
+       if (d40c->log_num == D40_PHY_CHAN)
+               return phy_map[d40c->dma_cfg.mode_opt];
+       else
+               return log_map[d40c->dma_cfg.mode_opt];
 }
 
-static int d40_config_write(struct d40_chan *d40c)
+static void d40_config_write(struct d40_chan *d40c)
 {
        u32 addr_base;
        u32 var;
-       int res;
-
-       res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
-       if (res)
-               return res;
 
        /* Odd addresses are even addresses + 4 */
        addr_base = (d40c->phy_chan->num % 2) * 4;
@@ -688,8 +731,7 @@ static int d40_config_write(struct d40_chan *d40c)
        writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
 
        /* Setup operational mode option register */
-       var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
-              0x3) << D40_CHAN_POS(d40c->phy_chan->num);
+       var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);
 
        writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
 
@@ -704,41 +746,181 @@ static int d40_config_write(struct d40_chan *d40c)
                       d40c->phy_chan->num * D40_DREG_PCDELTA +
                       D40_CHAN_REG_SDCFG);
 
-               d40_config_enable_lidx(d40c);
+               /* Set LIDX for lcla */
+               writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
+                      D40_SREG_ELEM_LOG_LIDX_MASK,
+                      d40c->base->virtbase + D40_DREG_PCBASE +
+                      d40c->phy_chan->num * D40_DREG_PCDELTA +
+                      D40_CHAN_REG_SDELT);
+
+               writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
+                      D40_SREG_ELEM_LOG_LIDX_MASK,
+                      d40c->base->virtbase + D40_DREG_PCBASE +
+                      d40c->phy_chan->num * D40_DREG_PCDELTA +
+                      D40_CHAN_REG_SSELT);
+
+       }
+}
+
+static u32 d40_residue(struct d40_chan *d40c)
+{
+       u32 num_elt;
+
+       if (d40c->log_num != D40_PHY_CHAN)
+               num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
+                       >> D40_MEM_LCSP2_ECNT_POS;
+       else
+               num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
+                                d40c->phy_chan->num * D40_DREG_PCDELTA +
+                                D40_CHAN_REG_SDELT) &
+                          D40_SREG_ELEM_PHY_ECNT_MASK) >>
+                       D40_SREG_ELEM_PHY_ECNT_POS;
+       return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
+}
+
+static bool d40_tx_is_linked(struct d40_chan *d40c)
+{
+       bool is_link;
+
+       if (d40c->log_num != D40_PHY_CHAN)
+               is_link = readl(&d40c->lcpa->lcsp3) &  D40_MEM_LCSP3_DLOS_MASK;
+       else
+               is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
+                               d40c->phy_chan->num * D40_DREG_PCDELTA +
+                               D40_CHAN_REG_SDLNK) &
+                       D40_SREG_LNK_PHYS_LNK_MASK;
+       return is_link;
+}
+
+static int d40_pause(struct dma_chan *chan)
+{
+       struct d40_chan *d40c =
+               container_of(chan, struct d40_chan, chan);
+       int res = 0;
+       unsigned long flags;
+
+       if (!d40c->busy)
+               return 0;
+
+       spin_lock_irqsave(&d40c->lock, flags);
+
+       res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
+       if (res == 0) {
+               if (d40c->log_num != D40_PHY_CHAN) {
+                       d40_config_set_event(d40c, false);
+                       /* Resume the other logical channels if any */
+                       if (d40_chan_has_events(d40c))
+                               res = d40_channel_execute_command(d40c,
+                                                                 D40_DMA_RUN);
+               }
        }
+
+       spin_unlock_irqrestore(&d40c->lock, flags);
        return res;
 }
 
-static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
+static int d40_resume(struct dma_chan *chan)
 {
-       if (d40d->lli_phy.dst && d40d->lli_phy.src) {
-               d40_phy_lli_write(d40c->base->virtbase,
-                                 d40c->phy_chan->num,
-                                 d40d->lli_phy.dst,
-                                 d40d->lli_phy.src);
-       } else if (d40d->lli_log.dst && d40d->lli_log.src) {
-               struct d40_log_lli *src = d40d->lli_log.src;
-               struct d40_log_lli *dst = d40d->lli_log.dst;
-               int s;
-
-               src += d40d->lli_count;
-               dst += d40d->lli_count;
-               s = d40_log_lli_write(d40c->lcpa,
-                                     d40c->lcla.src, d40c->lcla.dst,
-                                     dst, src,
-                                     d40c->base->plat_data->llis_per_log);
-
-               /* If s equals to zero, the job is not linked */
-               if (s > 0) {
-                       (void) dma_map_single(d40c->base->dev, d40c->lcla.src,
-                                             s * sizeof(struct d40_log_lli),
-                                             DMA_TO_DEVICE);
-                       (void) dma_map_single(d40c->base->dev, d40c->lcla.dst,
-                                             s * sizeof(struct d40_log_lli),
-                                             DMA_TO_DEVICE);
+       struct d40_chan *d40c =
+               container_of(chan, struct d40_chan, chan);
+       int res = 0;
+       unsigned long flags;
+
+       if (!d40c->busy)
+               return 0;
+
+       spin_lock_irqsave(&d40c->lock, flags);
+
+       if (d40c->base->rev == 0)
+               if (d40c->log_num != D40_PHY_CHAN) {
+                       res = d40_channel_execute_command(d40c,
+                                                         D40_DMA_SUSPEND_REQ);
+                       goto no_suspend;
                }
+
+       /* If bytes are left to transfer or the tx is linked, resume the job */
+       if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
+
+               if (d40c->log_num != D40_PHY_CHAN)
+                       d40_config_set_event(d40c, true);
+
+               res = d40_channel_execute_command(d40c, D40_DMA_RUN);
+       }
+
+no_suspend:
+       spin_unlock_irqrestore(&d40c->lock, flags);
+       return res;
+}
+
+static void d40_tx_submit_log(struct d40_chan *d40c, struct d40_desc *d40d)
+{
+       /* TODO: Write */
+}
+
+static void d40_tx_submit_phy(struct d40_chan *d40c, struct d40_desc *d40d)
+{
+       struct d40_desc *d40d_prev = NULL;
+       int i;
+       u32 val;
+
+       if (!list_empty(&d40c->queue))
+               d40d_prev = d40_last_queued(d40c);
+       else if (!list_empty(&d40c->active))
+               d40d_prev = d40_first_active_get(d40c);
+
+       if (!d40d_prev)
+               return;
+
+       /* Here we try to join this job with previous jobs */
+       val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
+                   d40c->phy_chan->num * D40_DREG_PCDELTA +
+                   D40_CHAN_REG_SSLNK);
+
+       /* Figure out which link we're currently transmitting */
+       for (i = 0; i < d40d_prev->lli_len; i++)
+               if (val == d40d_prev->lli_phy.src[i].reg_lnk)
+                       break;
+
+       val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
+                   d40c->phy_chan->num * D40_DREG_PCDELTA +
+                   D40_CHAN_REG_SSELT) >> D40_SREG_ELEM_LOG_ECNT_POS;
+
+       if (i == (d40d_prev->lli_len - 1) && val > 0) {
+               /* Change the current one */
+               writel(virt_to_phys(d40d->lli_phy.src),
+                      d40c->base->virtbase + D40_DREG_PCBASE +
+                      d40c->phy_chan->num * D40_DREG_PCDELTA +
+                      D40_CHAN_REG_SSLNK);
+               writel(virt_to_phys(d40d->lli_phy.dst),
+                      d40c->base->virtbase + D40_DREG_PCBASE +
+                      d40c->phy_chan->num * D40_DREG_PCDELTA +
+                      D40_CHAN_REG_SDLNK);
+
+               d40d->is_hw_linked = true;
+
+       } else if (i < d40d_prev->lli_len) {
+               (void) dma_unmap_single(d40c->base->dev,
+                                       virt_to_phys(d40d_prev->lli_phy.src),
+                                       d40d_prev->lli_pool.size,
+                                       DMA_TO_DEVICE);
+
+               /* Keep the settings */
+               val = d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk &
+                       ~D40_SREG_LNK_PHYS_LNK_MASK;
+               d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk =
+                       val | virt_to_phys(d40d->lli_phy.src);
+
+               val = d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk &
+                       ~D40_SREG_LNK_PHYS_LNK_MASK;
+               d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk =
+                       val | virt_to_phys(d40d->lli_phy.dst);
+
+               (void) dma_map_single(d40c->base->dev,
+                                     d40d_prev->lli_phy.src,
+                                     d40d_prev->lli_pool.size,
+                                     DMA_TO_DEVICE);
+               d40d->is_hw_linked = true;
        }
-       d40d->lli_count += d40d->lli_tx_len;
 }
 
 static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
@@ -749,14 +931,28 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
        struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
        unsigned long flags;
 
+       (void) d40_pause(&d40c->chan);
+
        spin_lock_irqsave(&d40c->lock, flags);
 
-       tx->cookie = d40_assign_cookie(d40c, d40d);
+       d40c->chan.cookie++;
+
+       if (d40c->chan.cookie < 0)
+               d40c->chan.cookie = 1;
+
+       d40d->txd.cookie = d40c->chan.cookie;
+
+       if (d40c->log_num == D40_PHY_CHAN)
+               d40_tx_submit_phy(d40c, d40d);
+       else
+               d40_tx_submit_log(d40c, d40d);
 
        d40_desc_queue(d40c, d40d);
 
        spin_unlock_irqrestore(&d40c->lock, flags);
 
+       (void) d40_resume(&d40c->chan);
+
        return tx->cookie;
 }
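d40_tx_submit() now open-codes what d40_assign_cookie() used to do: cookies grow monotonically and skip non-positive values on wrap, because zero and negatives are reserved for error reporting. As a standalone sketch (in this kernel generation struct dma_chan still carries the cookie directly):

#include <linux/dmaengine.h>

static dma_cookie_t example_assign_cookie(struct dma_chan *chan,
					  struct dma_async_tx_descriptor *txd)
{
	if (++chan->cookie < 0)
		chan->cookie = 1;	/* 0 and negatives mean error */
	txd->cookie = chan->cookie;
	return txd->cookie;
}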
 
@@ -796,14 +992,21 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
                /* Add to active queue */
                d40_desc_submit(d40c, d40d);
 
-               /* Initiate DMA job */
-               d40_desc_load(d40c, d40d);
+               /*
+                * If this job is already linked in hw,
+                * do not submit it.
+                */
 
-               /* Start dma job */
-               err = d40_start(d40c);
+               if (!d40d->is_hw_linked) {
+                       /* Initiate DMA job */
+                       d40_desc_load(d40c, d40d);
 
-               if (err)
-                       return NULL;
+                       /* Start dma job */
+                       err = d40_start(d40c);
+
+                       if (err)
+                               return NULL;
+               }
        }
 
        return d40d;
@@ -814,17 +1017,15 @@ static void dma_tc_handle(struct d40_chan *d40c)
 {
        struct d40_desc *d40d;
 
-       if (!d40c->phy_chan)
-               return;
-
        /* Get first active entry from list */
        d40d = d40_first_active_get(d40c);
 
        if (d40d == NULL)
                return;
 
-       if (d40d->lli_count < d40d->lli_len) {
+       d40_lcla_free_all(d40c, d40d);
 
+       if (d40d->lli_current < d40d->lli_len) {
                d40_desc_load(d40c, d40d);
                /* Start dma job */
                (void) d40_start(d40c);
@@ -842,7 +1043,7 @@ static void dma_tc_handle(struct d40_chan *d40c)
 static void dma_tasklet(unsigned long data)
 {
        struct d40_chan *d40c = (struct d40_chan *) data;
-       struct d40_desc *d40d_fin;
+       struct d40_desc *d40d;
        unsigned long flags;
        dma_async_tx_callback callback;
        void *callback_param;
@@ -850,12 +1051,12 @@ static void dma_tasklet(unsigned long data)
        spin_lock_irqsave(&d40c->lock, flags);
 
        /* Get first active entry from list */
-       d40d_fin = d40_first_active_get(d40c);
+       d40d = d40_first_active_get(d40c);
 
-       if (d40d_fin == NULL)
+       if (d40d == NULL)
                goto err;
 
-       d40c->completed = d40d_fin->txd.cookie;
+       d40c->completed = d40d->txd.cookie;
 
        /*
         * If terminating a channel pending_tx is set to zero.
@@ -867,19 +1068,19 @@ static void dma_tasklet(unsigned long data)
        }
 
        /* Callback to client */
-       callback = d40d_fin->txd.callback;
-       callback_param = d40d_fin->txd.callback_param;
-
-       if (async_tx_test_ack(&d40d_fin->txd)) {
-               d40_pool_lli_free(d40d_fin);
-               d40_desc_remove(d40d_fin);
-               /* Return desc to free-list */
-               d40_desc_free(d40c, d40d_fin);
+       callback = d40d->txd.callback;
+       callback_param = d40d->txd.callback_param;
+
+       if (async_tx_test_ack(&d40d->txd)) {
+               d40_pool_lli_free(d40d);
+               d40_desc_remove(d40d);
+               d40_desc_free(d40c, d40d);
        } else {
-               if (!d40d_fin->is_in_client_list) {
-                       d40_desc_remove(d40d_fin);
-                       list_add_tail(&d40d_fin->node, &d40c->client);
-                       d40d_fin->is_in_client_list = true;
+               if (!d40d->is_in_client_list) {
+                       d40_desc_remove(d40d);
+                       d40_lcla_free_all(d40c, d40d);
+                       list_add_tail(&d40d->node, &d40c->client);
+                       d40d->is_in_client_list = true;
                }
        }
 
@@ -890,7 +1091,7 @@ static void dma_tasklet(unsigned long data)
 
        spin_unlock_irqrestore(&d40c->lock, flags);
 
-       if (callback)
+       if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
                callback(callback_param);
 
        return;
@@ -919,7 +1120,6 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data)
 
        int i;
        u32 regs[ARRAY_SIZE(il)];
-       u32 tmp;
        u32 idx;
        u32 row;
        long chan = -1;
@@ -946,9 +1146,7 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data)
                idx = chan & (BITS_PER_LONG - 1);
 
                /* ACK interrupt */
-               tmp = readl(base->virtbase + il[row].clr);
-               tmp |= 1 << idx;
-               writel(tmp, base->virtbase + il[row].clr);
+               writel(1 << idx, base->virtbase + il[row].clr);
 
                if (il[row].offset == D40_PHY_CHAN)
                        d40c = base->lookup_phy_chans[idx];
@@ -971,24 +1169,47 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data)
        return IRQ_HANDLED;
 }
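The ACK above went from read-modify-write to a plain write: these clear registers acknowledge on write, so reading back other channels' pending bits and writing them out again would ack events that were never handled. Minimal form (illustrative):

#include <linux/bitops.h>
#include <linux/io.h>

/* acknowledge exactly one event line, nothing else */
static void example_ack_one(void __iomem *clr_reg, unsigned int idx)
{
	writel(BIT(idx), clr_reg);
}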
 
-
 static int d40_validate_conf(struct d40_chan *d40c,
                             struct stedma40_chan_cfg *conf)
 {
        int res = 0;
        u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
        u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
-       bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
-               == STEDMA40_CHANNEL_IN_LOG_MODE;
+       bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;
+
+       if (!conf->dir) {
+               dev_err(&d40c->chan.dev->device, "[%s] Invalid direction.\n",
+                       __func__);
+               res = -EINVAL;
+       }
+
+       if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY &&
+           d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
+           d40c->runtime_addr == 0) {
+
+               dev_err(&d40c->chan.dev->device,
+                       "[%s] Invalid TX channel address (%d)\n",
+                       __func__, conf->dst_dev_type);
+               res = -EINVAL;
+       }
+
+       if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
+           d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
+           d40c->runtime_addr == 0) {
+               dev_err(&d40c->chan.dev->device,
+                       "[%s] Invalid RX channel address (%d)\n",
+                       __func__, conf->src_dev_type);
+               res = -EINVAL;
+       }
 
-       if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH &&
+       if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
            dst_event_group == STEDMA40_DEV_DST_MEMORY) {
                dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
                        __func__);
                res = -EINVAL;
        }
 
-       if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM &&
+       if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
            src_event_group == STEDMA40_DEV_SRC_MEMORY) {
                dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
                        __func__);
@@ -1082,7 +1303,6 @@ static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
 
        spin_lock_irqsave(&phy->lock, flags);
        if (!log_event_line) {
-               /* Physical interrupts are masked per physical full channel */
                phy->allocated_dst = D40_ALLOC_FREE;
                phy->allocated_src = D40_ALLOC_FREE;
                is_free = true;
@@ -1119,10 +1339,7 @@ static int d40_allocate_channel(struct d40_chan *d40c)
        int j;
        int log_num;
        bool is_src;
-       bool is_log = (d40c->dma_cfg.channel_type &
-                      STEDMA40_CHANNEL_IN_OPER_MODE)
-               == STEDMA40_CHANNEL_IN_LOG_MODE;
-
+       bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;
 
        phys = d40c->base->phy_res;
 
@@ -1251,7 +1468,6 @@ static int d40_free_dma(struct d40_chan *d40c)
                list_for_each_entry_safe(d, _d, &d40c->client, node) {
                        d40_pool_lli_free(d);
                        d40_desc_remove(d);
-                       /* Return desc to free-list */
                        d40_desc_free(d40c, d);
                }
 
@@ -1324,37 +1540,12 @@ static int d40_free_dma(struct d40_chan *d40c)
                return res;
        }
        d40c->phy_chan = NULL;
-       /* Invalidate channel type */
-       d40c->dma_cfg.channel_type = 0;
+       d40c->configured = false;
        d40c->base->lookup_phy_chans[phy->num] = NULL;
 
        return 0;
 }
 
-static int d40_pause(struct dma_chan *chan)
-{
-       struct d40_chan *d40c =
-               container_of(chan, struct d40_chan, chan);
-       int res;
-       unsigned long flags;
-
-       spin_lock_irqsave(&d40c->lock, flags);
-
-       res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
-       if (res == 0) {
-               if (d40c->log_num != D40_PHY_CHAN) {
-                       d40_config_set_event(d40c, false);
-                       /* Resume the other logical channels if any */
-                       if (d40_chan_has_events(d40c))
-                               res = d40_channel_execute_command(d40c,
-                                                                 D40_DMA_RUN);
-               }
-       }
-
-       spin_unlock_irqrestore(&d40c->lock, flags);
-       return res;
-}
-
 static bool d40_is_paused(struct d40_chan *d40c)
 {
        bool is_paused = false;
@@ -1381,16 +1572,22 @@ static bool d40_is_paused(struct d40_chan *d40c)
        }
 
        if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
-           d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM)
+           d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
                event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
-       else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
+               status = readl(d40c->base->virtbase + D40_DREG_PCBASE +
+                              d40c->phy_chan->num * D40_DREG_PCDELTA +
+                              D40_CHAN_REG_SDLNK);
+       } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
                event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
-       else {
+               status = readl(d40c->base->virtbase + D40_DREG_PCBASE +
+                              d40c->phy_chan->num * D40_DREG_PCDELTA +
+                              D40_CHAN_REG_SSLNK);
+       } else {
                dev_err(&d40c->chan.dev->device,
                        "[%s] Unknown direction\n", __func__);
                goto _exit;
        }
-       status = d40_chan_has_events(d40c);
+
        status = (status & D40_EVENTLINE_MASK(event)) >>
                D40_EVENTLINE_POS(event);
 
@@ -1403,64 +1600,6 @@ _exit:
 }
 
 
-static bool d40_tx_is_linked(struct d40_chan *d40c)
-{
-       bool is_link;
-
-       if (d40c->log_num != D40_PHY_CHAN)
-               is_link = readl(&d40c->lcpa->lcsp3) &  D40_MEM_LCSP3_DLOS_MASK;
-       else
-               is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
-                               d40c->phy_chan->num * D40_DREG_PCDELTA +
-                               D40_CHAN_REG_SDLNK) &
-                       D40_SREG_LNK_PHYS_LNK_MASK;
-       return is_link;
-}
-
-static u32 d40_residue(struct d40_chan *d40c)
-{
-       u32 num_elt;
-
-       if (d40c->log_num != D40_PHY_CHAN)
-               num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
-                       >> D40_MEM_LCSP2_ECNT_POS;
-       else
-               num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
-                                d40c->phy_chan->num * D40_DREG_PCDELTA +
-                                D40_CHAN_REG_SDELT) &
-                          D40_SREG_ELEM_PHY_ECNT_MASK) >>
-                       D40_SREG_ELEM_PHY_ECNT_POS;
-       return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
-}
-
-static int d40_resume(struct dma_chan *chan)
-{
-       struct d40_chan *d40c =
-               container_of(chan, struct d40_chan, chan);
-       int res = 0;
-       unsigned long flags;
-
-       spin_lock_irqsave(&d40c->lock, flags);
-
-       if (d40c->base->rev == 0)
-               if (d40c->log_num != D40_PHY_CHAN) {
-                       res = d40_channel_execute_command(d40c,
-                                                         D40_DMA_SUSPEND_REQ);
-                       goto no_suspend;
-               }
-
-       /* If bytes left to transfer or linked tx resume job */
-       if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
-               if (d40c->log_num != D40_PHY_CHAN)
-                       d40_config_set_event(d40c, true);
-               res = d40_channel_execute_command(d40c, D40_DMA_RUN);
-       }
-
-no_suspend:
-       spin_unlock_irqrestore(&d40c->lock, flags);
-       return res;
-}
-
 static u32 stedma40_residue(struct dma_chan *chan)
 {
        struct d40_chan *d40c =
@@ -1475,51 +1614,6 @@ static u32 stedma40_residue(struct dma_chan *chan)
        return bytes_left;
 }
 
-/* Public DMA functions in addition to the DMA engine framework */
-
-int stedma40_set_psize(struct dma_chan *chan,
-                      int src_psize,
-                      int dst_psize)
-{
-       struct d40_chan *d40c =
-               container_of(chan, struct d40_chan, chan);
-       unsigned long flags;
-
-       spin_lock_irqsave(&d40c->lock, flags);
-
-       if (d40c->log_num != D40_PHY_CHAN) {
-               d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
-               d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
-               d40c->log_def.lcsp1 |= src_psize <<
-                       D40_MEM_LCSP1_SCFG_PSIZE_POS;
-               d40c->log_def.lcsp3 |= dst_psize <<
-                       D40_MEM_LCSP1_SCFG_PSIZE_POS;
-               goto out;
-       }
-
-       if (src_psize == STEDMA40_PSIZE_PHY_1)
-               d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
-       else {
-               d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
-               d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
-                                      D40_SREG_CFG_PSIZE_POS);
-               d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS;
-       }
-
-       if (dst_psize == STEDMA40_PSIZE_PHY_1)
-               d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
-       else {
-               d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
-               d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
-                                      D40_SREG_CFG_PSIZE_POS);
-               d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS;
-       }
-out:
-       spin_unlock_irqrestore(&d40c->lock, flags);
-       return 0;
-}
-EXPORT_SYMBOL(stedma40_set_psize);
-
 struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
                                                   struct scatterlist *sgl_dst,
                                                   struct scatterlist *sgl_src,
@@ -1545,21 +1639,10 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
                goto err;
 
        d40d->lli_len = sgl_len;
-       d40d->lli_tx_len = d40d->lli_len;
+       d40d->lli_current = 0;
        d40d->txd.flags = dma_flags;
 
        if (d40c->log_num != D40_PHY_CHAN) {
-               if (d40d->lli_len > d40c->base->plat_data->llis_per_log)
-                       d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
-
-               if (sgl_len > 1)
-                       /*
-                        * Check if there is space available in lcla. If not,
-                        * split list into 1-length and run only in lcpa
-                        * space.
-                        */
-                       if (d40_lcla_id_get(d40c) != 0)
-                               d40d->lli_tx_len = 1;
 
                if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
                        dev_err(&d40c->chan.dev->device,
@@ -1567,27 +1650,17 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
                        goto err;
                }
 
-               (void) d40_log_sg_to_lli(d40c->lcla.src_id,
-                                        sgl_src,
+               (void) d40_log_sg_to_lli(sgl_src,
                                         sgl_len,
                                         d40d->lli_log.src,
                                         d40c->log_def.lcsp1,
-                                        d40c->dma_cfg.src_info.data_width,
-                                        dma_flags & DMA_PREP_INTERRUPT,
-                                        d40d->lli_tx_len,
-                                        d40c->base->plat_data->llis_per_log);
+                                        d40c->dma_cfg.src_info.data_width);
 
-               (void) d40_log_sg_to_lli(d40c->lcla.dst_id,
-                                        sgl_dst,
+               (void) d40_log_sg_to_lli(sgl_dst,
                                         sgl_len,
                                         d40d->lli_log.dst,
                                         d40c->log_def.lcsp3,
-                                        d40c->dma_cfg.dst_info.data_width,
-                                        dma_flags & DMA_PREP_INTERRUPT,
-                                        d40d->lli_tx_len,
-                                        d40c->base->plat_data->llis_per_log);
-
-
+                                        d40c->dma_cfg.dst_info.data_width);
        } else {
                if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
                        dev_err(&d40c->chan.dev->device,
@@ -1599,11 +1672,10 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
                                        sgl_len,
                                        0,
                                        d40d->lli_phy.src,
-                                       d40d->lli_phy.src_addr,
+                                       virt_to_phys(d40d->lli_phy.src),
                                        d40c->src_def_cfg,
                                        d40c->dma_cfg.src_info.data_width,
-                                       d40c->dma_cfg.src_info.psize,
-                                       true);
+                                       d40c->dma_cfg.src_info.psize);
 
                if (res < 0)
                        goto err;
@@ -1612,11 +1684,10 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
                                        sgl_len,
                                        0,
                                        d40d->lli_phy.dst,
-                                       d40d->lli_phy.dst_addr,
+                                       virt_to_phys(d40d->lli_phy.dst),
                                        d40c->dst_def_cfg,
                                        d40c->dma_cfg.dst_info.data_width,
-                                       d40c->dma_cfg.dst_info.psize,
-                                       true);
+                                       d40c->dma_cfg.dst_info.psize);
 
                if (res < 0)
                        goto err;
@@ -1633,6 +1704,8 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
 
        return &d40d->txd;
 err:
+       if (d40d)
+               d40_desc_free(d40c, d40d);
        spin_unlock_irqrestore(&d40c->lock, flags);
        return NULL;
 }
@@ -1652,6 +1725,9 @@ bool stedma40_filter(struct dma_chan *chan, void *data)
        } else
                err = d40_config_memcpy(d40c);
 
+       if (!err)
+               d40c->configured = true;
+
        return err == 0;
 }
 EXPORT_SYMBOL(stedma40_filter);
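
For reference, the intended client-side use of this filter, as a hedged sketch with a hypothetical helper name (the stedma40_chan_cfg is filled in by the client beforehand):

    #include <linux/dmaengine.h>
    #include <plat/ste_dma40.h>

    static struct dma_chan *request_d40_chan(struct stedma40_chan_cfg *cfg)
    {
            dma_cap_mask_t mask;

            dma_cap_zero(mask);
            dma_cap_set(DMA_SLAVE, mask);

            /* stedma40_filter() applies *cfg and, with this change,
             * marks the channel as configured on success */
            return dma_request_channel(mask, stedma40_filter, cfg);
    }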
@@ -1668,11 +1744,8 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
 
        d40c->completed = chan->cookie = 1;
 
-       /*
-        * If no dma configuration is set (channel_type == 0)
-        * use default configuration (memcpy)
-        */
-       if (d40c->dma_cfg.channel_type == 0) {
+       /* If no DMA configuration is set, use the default (memcpy) */
+       if (!d40c->configured) {
                err = d40_config_memcpy(d40c);
                if (err) {
                        dev_err(&d40c->chan.dev->device,
@@ -1712,14 +1785,8 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
         * resource is free. In case of multiple logical channels
         * on the same physical resource, only the first write is necessary.
         */
-       if (is_free_phy) {
-               err = d40_config_write(d40c);
-               if (err) {
-                       dev_err(&d40c->chan.dev->device,
-                               "[%s] Failed to configure channel\n",
-                               __func__);
-               }
-       }
+       if (is_free_phy)
+               d40_config_write(d40c);
 fail:
        spin_unlock_irqrestore(&d40c->lock, flags);
        return err;
@@ -1790,23 +1857,21 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
                        goto err;
                }
                d40d->lli_len = 1;
-               d40d->lli_tx_len = 1;
+               d40d->lli_current = 0;
 
                d40_log_fill_lli(d40d->lli_log.src,
                                 src,
                                 size,
-                                0,
                                 d40c->log_def.lcsp1,
                                 d40c->dma_cfg.src_info.data_width,
-                                false, true);
+                                true);
 
                d40_log_fill_lli(d40d->lli_log.dst,
                                 dst,
                                 size,
-                                0,
                                 d40c->log_def.lcsp3,
                                 d40c->dma_cfg.dst_info.data_width,
-                                true, true);
+                                true);
 
        } else {
 
@@ -1851,12 +1916,25 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
 err_fill_lli:
        dev_err(&d40c->chan.dev->device,
                "[%s] Failed filling in PHY LLI\n", __func__);
-       d40_pool_lli_free(d40d);
 err:
+       if (d40d)
+               d40_desc_free(d40c, d40d);
        spin_unlock_irqrestore(&d40c->lock, flags);
        return NULL;
 }
 
+static struct dma_async_tx_descriptor *
+d40_prep_sg(struct dma_chan *chan,
+           struct scatterlist *dst_sg, unsigned int dst_nents,
+           struct scatterlist *src_sg, unsigned int src_nents,
+           unsigned long dma_flags)
+{
+       if (dst_nents != src_nents)
+               return NULL;
+
+       return stedma40_memcpy_sg(chan, dst_sg, src_sg, dst_nents, dma_flags);
+}
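
A hedged sketch of what a client-side call through the new capability looks like (the helper name is illustrative, not part of this patch):

    #include <linux/dmaengine.h>

    struct dma_async_tx_descriptor *
    submit_sg_copy(struct dma_chan *chan,
                   struct scatterlist *dst, unsigned int dst_nents,
                   struct scatterlist *src, unsigned int src_nents)
    {
            /* d40_prep_sg() rejects mismatched nents by returning NULL */
            return chan->device->device_prep_dma_sg(chan,
                                                    dst, dst_nents,
                                                    src, src_nents,
                                                    DMA_PREP_INTERRUPT);
    }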
+
 static int d40_prep_slave_sg_log(struct d40_desc *d40d,
                                 struct d40_chan *d40c,
                                 struct scatterlist *sgl,
@@ -1874,19 +1952,7 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d,
        }
 
        d40d->lli_len = sg_len;
-       if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
-               d40d->lli_tx_len = d40d->lli_len;
-       else
-               d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
-
-       if (sg_len > 1)
-               /*
-                * Check if there is space available in lcla.
-                * If not, split list into 1-length and run only
-                * in lcpa space.
-                */
-               if (d40_lcla_id_get(d40c) != 0)
-                       d40d->lli_tx_len = 1;
+       d40d->lli_current = 0;
 
        if (direction == DMA_FROM_DEVICE)
                if (d40c->runtime_addr)
@@ -1902,16 +1968,13 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d,
        else
                return -EINVAL;
 
-       total_size = d40_log_sg_to_dev(&d40c->lcla,
-                                      sgl, sg_len,
+       total_size = d40_log_sg_to_dev(sgl, sg_len,
                                       &d40d->lli_log,
                                       &d40c->log_def,
                                       d40c->dma_cfg.src_info.data_width,
                                       d40c->dma_cfg.dst_info.data_width,
                                       direction,
-                                      dma_flags & DMA_PREP_INTERRUPT,
-                                      dev_addr, d40d->lli_tx_len,
-                                      d40c->base->plat_data->llis_per_log);
+                                      dev_addr);
 
        if (total_size < 0)
                return -EINVAL;
@@ -1937,7 +2000,7 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
        }
 
        d40d->lli_len = sgl_len;
-       d40d->lli_tx_len = sgl_len;
+       d40d->lli_current = 0;
 
        if (direction == DMA_FROM_DEVICE) {
                dst_dev_addr = 0;
@@ -1958,11 +2021,10 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
                                sgl_len,
                                src_dev_addr,
                                d40d->lli_phy.src,
-                               d40d->lli_phy.src_addr,
+                               virt_to_phys(d40d->lli_phy.src),
                                d40c->src_def_cfg,
                                d40c->dma_cfg.src_info.data_width,
-                               d40c->dma_cfg.src_info.psize,
-                               true);
+                               d40c->dma_cfg.src_info.psize);
        if (res < 0)
                return res;
 
@@ -1970,11 +2032,10 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
                                sgl_len,
                                dst_dev_addr,
                                d40d->lli_phy.dst,
-                               d40d->lli_phy.dst_addr,
+                               virt_to_phys(d40d->lli_phy.dst),
                                d40c->dst_def_cfg,
                                d40c->dma_cfg.dst_info.data_width,
-                               d40c->dma_cfg.dst_info.psize,
-                                true);
+                               d40c->dma_cfg.dst_info.psize);
        if (res < 0)
                return res;
 
@@ -2001,17 +2062,11 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
                return ERR_PTR(-EINVAL);
        }
 
-       if (d40c->dma_cfg.pre_transfer)
-               d40c->dma_cfg.pre_transfer(chan,
-                                          d40c->dma_cfg.pre_transfer_data,
-                                          sg_dma_len(sgl));
-
        spin_lock_irqsave(&d40c->lock, flags);
        d40d = d40_desc_get(d40c);
-       spin_unlock_irqrestore(&d40c->lock, flags);
 
        if (d40d == NULL)
-               return NULL;
+               goto err;
 
        if (d40c->log_num != D40_PHY_CHAN)
                err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
@@ -2024,7 +2079,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
                        "[%s] Failed to prepare %s slave sg job: %d\n",
                        __func__,
                        d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
-               return NULL;
+               goto err;
        }
 
        d40d->txd.flags = dma_flags;
@@ -2033,7 +2088,14 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
 
        d40d->txd.tx_submit = d40_tx_submit;
 
+       spin_unlock_irqrestore(&d40c->lock, flags);
        return &d40d->txd;
+
+err:
+       if (d40d)
+               d40_desc_free(d40c, d40d);
+       spin_unlock_irqrestore(&d40c->lock, flags);
+       return NULL;
 }
 
 static enum dma_status d40_tx_status(struct dma_chan *chan,
@@ -2166,25 +2228,43 @@ static void d40_set_runtime_config(struct dma_chan *chan,
                return;
        }
 
-       if (config_maxburst >= 16)
-               psize = STEDMA40_PSIZE_LOG_16;
-       else if (config_maxburst >= 8)
-               psize = STEDMA40_PSIZE_LOG_8;
-       else if (config_maxburst >= 4)
-               psize = STEDMA40_PSIZE_LOG_4;
-       else
-               psize = STEDMA40_PSIZE_LOG_1;
+       if (d40c->log_num != D40_PHY_CHAN) {
+               if (config_maxburst >= 16)
+                       psize = STEDMA40_PSIZE_LOG_16;
+               else if (config_maxburst >= 8)
+                       psize = STEDMA40_PSIZE_LOG_8;
+               else if (config_maxburst >= 4)
+                       psize = STEDMA40_PSIZE_LOG_4;
+               else
+                       psize = STEDMA40_PSIZE_LOG_1;
+       } else {
+               if (config_maxburst >= 16)
+                       psize = STEDMA40_PSIZE_PHY_16;
+               else if (config_maxburst >= 8)
+                       psize = STEDMA40_PSIZE_PHY_8;
+               else if (config_maxburst >= 4)
+                       psize = STEDMA40_PSIZE_PHY_4;
+               else
+                       psize = STEDMA40_PSIZE_PHY_1;
+       }
 
        /* Set up all the endpoint configs */
        cfg->src_info.data_width = addr_width;
        cfg->src_info.psize = psize;
-       cfg->src_info.endianess = STEDMA40_LITTLE_ENDIAN;
+       cfg->src_info.big_endian = false;
        cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
        cfg->dst_info.data_width = addr_width;
        cfg->dst_info.psize = psize;
-       cfg->dst_info.endianess = STEDMA40_LITTLE_ENDIAN;
+       cfg->dst_info.big_endian = false;
        cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
 
+       /* Fill in register values */
+       if (d40c->log_num != D40_PHY_CHAN)
+               d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
+       else
+               d40_phy_cfg(cfg, &d40c->src_def_cfg,
+                           &d40c->dst_def_cfg, false);
+
        /* These settings will take precedence later */
        d40c->runtime_addr = config_addr;
        d40c->runtime_direction = config->direction;
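
A slave driver reaches d40_set_runtime_config() through the generic DMA_SLAVE_CONFIG control of this kernel generation. A hedged client-side sketch, where the helper name, FIFO address, width and burst are illustrative:

    #include <linux/dmaengine.h>

    static int configure_rx(struct dma_chan *chan, dma_addr_t fifo)
    {
            struct dma_slave_config cfg = {
                    .direction      = DMA_FROM_DEVICE,
                    .src_addr       = fifo,
                    .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                    .src_maxburst   = 8,  /* maps to STEDMA40_PSIZE_*_8 */
            };

            return chan->device->device_control(chan, DMA_SLAVE_CONFIG,
                                                (unsigned long)&cfg);
    }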
@@ -2247,10 +2327,6 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
                d40c->base = base;
                d40c->chan.device = dma;
 
-               /* Invalidate lcla element */
-               d40c->lcla.src_id = -1;
-               d40c->lcla.dst_id = -1;
-
                spin_lock_init(&d40c->lock);
 
                d40c->log_num = D40_PHY_CHAN;
@@ -2281,6 +2357,7 @@ static int __init d40_dmaengine_init(struct d40_base *base,
        base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
        base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
        base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
+       base->dma_slave.device_prep_dma_sg = d40_prep_sg;
        base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
        base->dma_slave.device_tx_status = d40_tx_status;
        base->dma_slave.device_issue_pending = d40_issue_pending;
@@ -2301,10 +2378,12 @@ static int __init d40_dmaengine_init(struct d40_base *base,
 
        dma_cap_zero(base->dma_memcpy.cap_mask);
        dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
+       dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);
 
        base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
        base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
        base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
+       base->dma_memcpy.device_prep_dma_sg = d40_prep_sg;
        base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
        base->dma_memcpy.device_tx_status = d40_tx_status;
        base->dma_memcpy.device_issue_pending = d40_issue_pending;
@@ -2331,10 +2410,12 @@ static int __init d40_dmaengine_init(struct d40_base *base,
        dma_cap_zero(base->dma_both.cap_mask);
        dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
        dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
+       dma_cap_set(DMA_SG, base->dma_both.cap_mask);
 
        base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
        base->dma_both.device_free_chan_resources = d40_free_chan_resources;
        base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
+       base->dma_both.device_prep_dma_sg = d40_prep_sg;
        base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
        base->dma_both.device_tx_status = d40_tx_status;
        base->dma_both.device_issue_pending = d40_issue_pending;
@@ -2387,9 +2468,11 @@ static int __init d40_phy_res_init(struct d40_base *base)
 
        /* Mark disabled channels as occupied */
        for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
-                       base->phy_res[i].allocated_src = D40_ALLOC_PHY;
-                       base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
-                       num_phy_chans_avail--;
+               int chan = base->plat_data->disabled_channels[i];
+
+               base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
+               base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
+               num_phy_chans_avail--;
        }
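
With, for instance, plat_data->disabled_channels = { 1, 4, -1 } (values illustrative), the rewritten loop marks physical channels 1 and 4 as D40_ALLOC_PHY; the old body indexed phy_res[] with the loop counter and would wrongly have reserved channels 0 and 1 instead.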
 
        dev_info(base->dev, "%d of %d physical DMA channels available\n",
@@ -2441,6 +2524,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
        int num_phy_chans;
        int i;
        u32 val;
+       u32 rev;
 
        clk = clk_get(&pdev->dev, NULL);
 
@@ -2479,21 +2563,26 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
                }
        }
 
-       /* Get silicon revision */
+       /* Get silicon revision and designer */
        val = readl(virtbase + D40_DREG_PERIPHID2);
 
-       if ((val & 0xf) != D40_PERIPHID2_DESIGNER) {
+       if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) !=
+           D40_HW_DESIGNER) {
                dev_err(&pdev->dev,
                        "[%s] Unknown designer! Got %x wanted %x\n",
-                       __func__, val & 0xf, D40_PERIPHID2_DESIGNER);
+                       __func__, val & D40_DREG_PERIPHID2_DESIGNER_MASK,
+                       D40_HW_DESIGNER);
                goto failure;
        }
 
+       rev = (val & D40_DREG_PERIPHID2_REV_MASK) >>
+               D40_DREG_PERIPHID2_REV_POS;
+
        /* The number of physical channels on this HW */
        num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
 
        dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
-                (val >> 4) & 0xf, res->start);
+                rev, res->start);
 
        plat_data = pdev->dev.platform_data;
 
@@ -2515,7 +2604,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
                goto failure;
        }
 
-       base->rev = (val >> 4) & 0xf;
+       base->rev = rev;
        base->clk = clk;
        base->num_phy_chans = num_phy_chans;
        base->num_log_chans = num_log_chans;
@@ -2549,7 +2638,10 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
                if (!base->lookup_log_chans)
                        goto failure;
        }
-       base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32),
+
+       base->lcla_pool.alloc_map = kzalloc(num_phy_chans *
+                                           sizeof(struct d40_desc *) *
+                                           D40_LCLA_LINK_PER_EVENT_GRP,
                                            GFP_KERNEL);
        if (!base->lcla_pool.alloc_map)
                goto failure;
@@ -2563,7 +2655,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
        return base;
 
 failure:
-       if (clk) {
+       if (!IS_ERR(clk)) {
                clk_disable(clk);
                clk_put(clk);
        }
@@ -2700,8 +2792,10 @@ static int __init d40_lcla_allocate(struct d40_base *base)
        if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
                base->lcla_pool.base = (void *)page_list[i];
        } else {
-               /* After many attempts, no succees with finding the correct
-                * alignment try with allocating a big buffer */
+               /*
+                * After many attempts and no success in finding the correct
+                * alignment, try allocating a big buffer instead.
+                */
                dev_warn(base->dev,
                         "[%s] Failed to get %d pages @ 18 bit align.\n",
                         __func__, base->lcla_pool.pages);
@@ -2794,8 +2888,6 @@ static int __init d40_probe(struct platform_device *pdev)
 
        spin_lock_init(&base->lcla_pool.lock);
 
-       base->lcla_pool.num_blocks = base->num_phy_chans;
-
        base->irq = platform_get_irq(pdev, 0);
 
        ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
@@ -2823,8 +2915,9 @@ failure:
                if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
                        free_pages((unsigned long)base->lcla_pool.base,
                                   base->lcla_pool.pages);
-               if (base->lcla_pool.base_unaligned)
-                       kfree(base->lcla_pool.base_unaligned);
+
+               kfree(base->lcla_pool.base_unaligned);
+
                if (base->phy_lcpa)
                        release_mem_region(base->phy_lcpa,
                                           base->lcpa_size);
index d937f76d6e2e67a20a49838ed1a494c05edd6375..8557cb88b255858efe98dbe0811c05ac4bf1c4a8 100644 (file)
@@ -1,10 +1,8 @@
 /*
- * driver/dma/ste_dma40_ll.c
- *
- * Copyright (C) ST-Ericsson 2007-2010
+ * Copyright (C) ST-Ericsson SA 2007-2010
+ * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
  * License terms: GNU General Public License (GPL) version 2
- * Author: Per Friden <per.friden@stericsson.com>
- * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
  */
 
 #include <linux/kernel.h>
@@ -39,16 +37,13 @@ void d40_log_cfg(struct stedma40_chan_cfg *cfg,
            cfg->dir ==  STEDMA40_PERIPH_TO_PERIPH)
                l3 |= 1 << D40_MEM_LCSP3_DCFG_MST_POS;
 
-       l3 |= 1 << D40_MEM_LCSP3_DCFG_TIM_POS;
        l3 |= 1 << D40_MEM_LCSP3_DCFG_EIM_POS;
        l3 |= cfg->dst_info.psize << D40_MEM_LCSP3_DCFG_PSIZE_POS;
        l3 |= cfg->dst_info.data_width << D40_MEM_LCSP3_DCFG_ESIZE_POS;
-       l3 |= 1 << D40_MEM_LCSP3_DTCP_POS;
 
        l1 |= 1 << D40_MEM_LCSP1_SCFG_EIM_POS;
        l1 |= cfg->src_info.psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
        l1 |= cfg->src_info.data_width << D40_MEM_LCSP1_SCFG_ESIZE_POS;
-       l1 |= 1 << D40_MEM_LCSP1_STCP_POS;
 
        *lcsp1 = l1;
        *lcsp3 = l3;
@@ -113,13 +108,15 @@ void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
                src |= 1 << D40_SREG_CFG_LOG_GIM_POS;
        }
 
-       if (cfg->channel_type & STEDMA40_HIGH_PRIORITY_CHANNEL) {
+       if (cfg->high_priority) {
                src |= 1 << D40_SREG_CFG_PRI_POS;
                dst |= 1 << D40_SREG_CFG_PRI_POS;
        }
 
-       src |= cfg->src_info.endianess << D40_SREG_CFG_LBE_POS;
-       dst |= cfg->dst_info.endianess << D40_SREG_CFG_LBE_POS;
+       if (cfg->src_info.big_endian)
+               src |= 1 << D40_SREG_CFG_LBE_POS;
+       if (cfg->dst_info.big_endian)
+               dst |= 1 << D40_SREG_CFG_LBE_POS;
 
        *src_cfg = src;
        *dst_cfg = dst;
@@ -197,8 +194,7 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
                      dma_addr_t lli_phys,
                      u32 reg_cfg,
                      u32 data_width,
-                     int psize,
-                     bool term_int)
+                     int psize)
 {
        int total_size = 0;
        int i;
@@ -238,7 +234,7 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
        }
 
        return total_size;
- err:
+err:
        return err;
 }
 
@@ -271,11 +267,59 @@ void d40_phy_lli_write(void __iomem *virtbase,
 
 /* DMA logical lli operations */
 
+static void d40_log_lli_link(struct d40_log_lli *lli_dst,
+                            struct d40_log_lli *lli_src,
+                            int next)
+{
+       u32 slos = 0;
+       u32 dlos = 0;
+
+       if (next != -EINVAL) {
+               slos = next * 2;
+               dlos = next * 2 + 1;
+       } else {
+               lli_dst->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK;
+               lli_dst->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK;
+       }
+
+       lli_src->lcsp13 = (lli_src->lcsp13 & ~D40_MEM_LCSP1_SLOS_MASK) |
+               (slos << D40_MEM_LCSP1_SLOS_POS);
+
+       lli_dst->lcsp13 = (lli_dst->lcsp13 & ~D40_MEM_LCSP1_SLOS_MASK) |
+               (dlos << D40_MEM_LCSP1_SLOS_POS);
+}
+
+void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
+                          struct d40_log_lli *lli_dst,
+                          struct d40_log_lli *lli_src,
+                          int next)
+{
+       d40_log_lli_link(lli_dst, lli_src, next);
+
+       writel(lli_src->lcsp02, &lcpa[0].lcsp0);
+       writel(lli_src->lcsp13, &lcpa[0].lcsp1);
+       writel(lli_dst->lcsp02, &lcpa[0].lcsp2);
+       writel(lli_dst->lcsp13, &lcpa[0].lcsp3);
+}
+
+void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
+                          struct d40_log_lli *lli_dst,
+                          struct d40_log_lli *lli_src,
+                          int next)
+{
+       d40_log_lli_link(lli_dst, lli_src, next);
+
+       writel(lli_src->lcsp02, &lcla[0].lcsp02);
+       writel(lli_src->lcsp13, &lcla[0].lcsp13);
+       writel(lli_dst->lcsp02, &lcla[1].lcsp02);
+       writel(lli_dst->lcsp13, &lcla[1].lcsp13);
+}
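
A minimal sketch of how the two writers above combine into a chain, assuming the declarations in this file and leaving out the driver's LCLA slot allocation and the dma_map_single() of the written entries (the helper name is hypothetical):

    /* entry n of the chain occupies LCLA array slots 2n (src) and
     * 2n + 1 (dst); entry 0 lives in the channel's LCPA slot */
    static void write_log_chain(struct d40_log_lli_full *lcpa,
                                struct d40_log_lli *lcla_base,
                                struct d40_log_lli *src,
                                struct d40_log_lli *dst,
                                int len)
    {
            int i;

            /* link entry 0 to LCLA index 0 if more follow, else
             * terminate it (TIM/DTCP set via next == -EINVAL) */
            d40_log_lli_lcpa_write(lcpa, &dst[0], &src[0],
                                   len > 1 ? 0 : -EINVAL);

            /* entry i sits at LCLA index i - 1 and links to index i,
             * or terminates when it is the last one */
            for (i = 1; i < len; i++)
                    d40_log_lli_lcla_write(&lcla_base[2 * (i - 1)],
                                           &dst[i], &src[i],
                                           i + 1 < len ? i : -EINVAL);
    }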
+
 void d40_log_fill_lli(struct d40_log_lli *lli,
                      dma_addr_t data, u32 data_size,
-                     u32 lli_next_off, u32 reg_cfg,
+                     u32 reg_cfg,
                      u32 data_width,
-                     bool term_int, bool addr_inc)
+                     bool addr_inc)
 {
        lli->lcsp13 = reg_cfg;
 
@@ -290,165 +334,69 @@ void d40_log_fill_lli(struct d40_log_lli *lli,
        if (addr_inc)
                lli->lcsp13 |= D40_MEM_LCSP1_SCFG_INCR_MASK;
 
-       lli->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK;
-       /* If this scatter list entry is the last one, no next link */
-       lli->lcsp13 |= (lli_next_off << D40_MEM_LCSP1_SLOS_POS) &
-               D40_MEM_LCSP1_SLOS_MASK;
-
-       if (term_int)
-               lli->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK;
-       else
-               lli->lcsp13 &= ~D40_MEM_LCSP1_SCFG_TIM_MASK;
 }
 
-int d40_log_sg_to_dev(struct d40_lcla_elem *lcla,
-                     struct scatterlist *sg,
+int d40_log_sg_to_dev(struct scatterlist *sg,
                      int sg_len,
                      struct d40_log_lli_bidir *lli,
                      struct d40_def_lcsp *lcsp,
                      u32 src_data_width,
                      u32 dst_data_width,
                      enum dma_data_direction direction,
-                     bool term_int, dma_addr_t dev_addr, int max_len,
-                     int llis_per_log)
+                     dma_addr_t dev_addr)
 {
        int total_size = 0;
        struct scatterlist *current_sg = sg;
        int i;
-       u32 next_lli_off_dst = 0;
-       u32 next_lli_off_src = 0;
 
        for_each_sg(sg, current_sg, sg_len, i) {
                total_size += sg_dma_len(current_sg);
 
-               /*
-                * If this scatter list entry is the last one or
-                * max length, terminate link.
-                */
-               if (sg_len - 1 == i || ((i+1) % max_len == 0)) {
-                       next_lli_off_src = 0;
-                       next_lli_off_dst = 0;
-               } else {
-                       if (next_lli_off_dst == 0 &&
-                           next_lli_off_src == 0) {
-                               /* The first lli will be at next_lli_off */
-                               next_lli_off_dst = (lcla->dst_id *
-                                                   llis_per_log + 1);
-                               next_lli_off_src = (lcla->src_id *
-                                                   llis_per_log + 1);
-                       } else {
-                               next_lli_off_dst++;
-                               next_lli_off_src++;
-                       }
-               }
-
                if (direction == DMA_TO_DEVICE) {
                        d40_log_fill_lli(&lli->src[i],
                                         sg_phys(current_sg),
                                         sg_dma_len(current_sg),
-                                        next_lli_off_src,
                                         lcsp->lcsp1, src_data_width,
-                                        false,
                                         true);
                        d40_log_fill_lli(&lli->dst[i],
                                         dev_addr,
                                         sg_dma_len(current_sg),
-                                        next_lli_off_dst,
                                         lcsp->lcsp3, dst_data_width,
-                                        /* No next == terminal interrupt */
-                                        term_int && !next_lli_off_dst,
                                         false);
                } else {
                        d40_log_fill_lli(&lli->dst[i],
                                         sg_phys(current_sg),
                                         sg_dma_len(current_sg),
-                                        next_lli_off_dst,
                                         lcsp->lcsp3, dst_data_width,
-                                        /* No next == terminal interrupt */
-                                        term_int && !next_lli_off_dst,
                                         true);
                        d40_log_fill_lli(&lli->src[i],
                                         dev_addr,
                                         sg_dma_len(current_sg),
-                                        next_lli_off_src,
                                         lcsp->lcsp1, src_data_width,
-                                        false,
                                         false);
                }
        }
        return total_size;
 }
 
-int d40_log_sg_to_lli(int lcla_id,
-                     struct scatterlist *sg,
+int d40_log_sg_to_lli(struct scatterlist *sg,
                      int sg_len,
                      struct d40_log_lli *lli_sg,
                      u32 lcsp13, /* src or dst*/
-                     u32 data_width,
-                     bool term_int, int max_len, int llis_per_log)
+                     u32 data_width)
 {
        int total_size = 0;
        struct scatterlist *current_sg = sg;
        int i;
-       u32 next_lli_off = 0;
 
        for_each_sg(sg, current_sg, sg_len, i) {
                total_size += sg_dma_len(current_sg);
 
-               /*
-                * If this scatter list entry is the last one or
-                * max length, terminate link.
-                */
-               if (sg_len - 1 == i || ((i+1) % max_len == 0))
-                       next_lli_off = 0;
-               else {
-                       if (next_lli_off == 0)
-                               /* The first lli will be at next_lli_off */
-                               next_lli_off = lcla_id * llis_per_log + 1;
-                       else
-                               next_lli_off++;
-               }
-
                d40_log_fill_lli(&lli_sg[i],
                                 sg_phys(current_sg),
                                 sg_dma_len(current_sg),
-                                next_lli_off,
                                 lcsp13, data_width,
-                                term_int && !next_lli_off,
                                 true);
        }
        return total_size;
 }
-
-int d40_log_lli_write(struct d40_log_lli_full *lcpa,
-                      struct d40_log_lli *lcla_src,
-                      struct d40_log_lli *lcla_dst,
-                      struct d40_log_lli *lli_dst,
-                      struct d40_log_lli *lli_src,
-                      int llis_per_log)
-{
-       u32 slos;
-       u32 dlos;
-       int i;
-
-       writel(lli_src->lcsp02, &lcpa->lcsp0);
-       writel(lli_src->lcsp13, &lcpa->lcsp1);
-       writel(lli_dst->lcsp02, &lcpa->lcsp2);
-       writel(lli_dst->lcsp13, &lcpa->lcsp3);
-
-       slos = lli_src->lcsp13 & D40_MEM_LCSP1_SLOS_MASK;
-       dlos = lli_dst->lcsp13 & D40_MEM_LCSP3_DLOS_MASK;
-
-       for (i = 0; (i < llis_per_log) && slos && dlos; i++) {
-               writel(lli_src[i + 1].lcsp02, &lcla_src[i].lcsp02);
-               writel(lli_src[i + 1].lcsp13, &lcla_src[i].lcsp13);
-               writel(lli_dst[i + 1].lcsp02, &lcla_dst[i].lcsp02);
-               writel(lli_dst[i + 1].lcsp13, &lcla_dst[i].lcsp13);
-
-               slos = lli_src[i + 1].lcsp13 & D40_MEM_LCSP1_SLOS_MASK;
-               dlos = lli_dst[i + 1].lcsp13 & D40_MEM_LCSP3_DLOS_MASK;
-       }
-
-       return i;
-
-}
index 9c0fa2f5fe570697768c23b703d2e7faf69e56f3..9e419b907544bc3b6e1ba7bcfb7ffccbf22600cb 100644 (file)
@@ -1,10 +1,8 @@
 /*
- * driver/dma/ste_dma40_ll.h
- *
- * Copyright (C) ST-Ericsson 2007-2010
+ * Copyright (C) ST-Ericsson SA 2007-2010
+ * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson SA
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson SA
  * License terms: GNU General Public License (GPL) version 2
- * Author: Per Friden <per.friden@stericsson.com>
- * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
  */
 #ifndef STE_DMA40_LL_H
 #define STE_DMA40_LL_H
 #define D40_DREG_PRMSO         0x014
 #define D40_DREG_PRMOE         0x018
 #define D40_DREG_PRMOO         0x01C
+#define D40_DREG_PRMO_PCHAN_BASIC              0x1
+#define D40_DREG_PRMO_PCHAN_MODULO             0x2
+#define D40_DREG_PRMO_PCHAN_DOUBLE_DST         0x3
+#define D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG    0x1
+#define D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY    0x2
+#define D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG    0x3
+
 #define D40_DREG_LCPA          0x020
 #define D40_DREG_LCLA          0x024
 #define D40_DREG_ACTIVE                0x050
 #define D40_DREG_PERIPHID0     0xFE0
 #define D40_DREG_PERIPHID1     0xFE4
 #define D40_DREG_PERIPHID2     0xFE8
+#define D40_DREG_PERIPHID2_REV_POS 4
+#define D40_DREG_PERIPHID2_REV_MASK (0xf << D40_DREG_PERIPHID2_REV_POS)
+#define D40_DREG_PERIPHID2_DESIGNER_MASK 0xf
 #define D40_DREG_PERIPHID3     0xFEC
 #define D40_DREG_CELLID0       0xFF0
 #define D40_DREG_CELLID1       0xFF4
@@ -199,8 +207,6 @@ struct d40_phy_lli {
  *
  * @src: Register settings for src channel.
  * @dst: Register settings for dst channel.
- * @dst_addr: Physical destination address.
- * @src_addr: Physical source address.
  *
  * All DMA transfers have a source and a destination.
  */
@@ -208,8 +214,6 @@ struct d40_phy_lli {
 struct d40_phy_lli_bidir {
        struct d40_phy_lli      *src;
        struct d40_phy_lli      *dst;
-       dma_addr_t               dst_addr;
-       dma_addr_t               src_addr;
 };
 
 
@@ -271,29 +275,16 @@ struct d40_def_lcsp {
        u32 lcsp1;
 };
 
-/**
- * struct d40_lcla_elem - Info for one LCA element.
- *
- * @src_id: logical channel src id
- * @dst_id: logical channel dst id
- * @src: LCPA formated src parameters
- * @dst: LCPA formated dst parameters
- *
- */
-struct d40_lcla_elem {
-       int                     src_id;
-       int                     dst_id;
-       struct d40_log_lli     *src;
-       struct d40_log_lli     *dst;
-};
-
 /* Physical channels */
 
 void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
-                u32 *src_cfg, u32 *dst_cfg, bool is_log);
+                u32 *src_cfg,
+                u32 *dst_cfg,
+                bool is_log);
 
 void d40_log_cfg(struct stedma40_chan_cfg *cfg,
-                u32 *lcsp1, u32 *lcsp2);
+                u32 *lcsp1,
+                u32 *lcsp2);
 
 int d40_phy_sg_to_lli(struct scatterlist *sg,
                      int sg_len,
@@ -302,8 +293,7 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
                      dma_addr_t lli_phys,
                      u32 reg_cfg,
                      u32 data_width,
-                     int psize,
-                     bool term_int);
+                     int psize);
 
 int d40_phy_fill_lli(struct d40_phy_lli *lli,
                     dma_addr_t data,
@@ -323,35 +313,35 @@ void d40_phy_lli_write(void __iomem *virtbase,
 /* Logical channels */
 
 void d40_log_fill_lli(struct d40_log_lli *lli,
-                     dma_addr_t data, u32 data_size,
-                     u32 lli_next_off, u32 reg_cfg,
+                     dma_addr_t data,
+                     u32 data_size,
+                     u32 reg_cfg,
                      u32 data_width,
-                     bool term_int, bool addr_inc);
+                     bool addr_inc);
 
-int d40_log_sg_to_dev(struct d40_lcla_elem *lcla,
-                     struct scatterlist *sg,
+int d40_log_sg_to_dev(struct scatterlist *sg,
                      int sg_len,
                      struct d40_log_lli_bidir *lli,
                      struct d40_def_lcsp *lcsp,
                      u32 src_data_width,
                      u32 dst_data_width,
                      enum dma_data_direction direction,
-                     bool term_int, dma_addr_t dev_addr, int max_len,
-                     int llis_per_log);
-
-int d40_log_lli_write(struct d40_log_lli_full *lcpa,
-                     struct d40_log_lli *lcla_src,
-                     struct d40_log_lli *lcla_dst,
-                     struct d40_log_lli *lli_dst,
-                     struct d40_log_lli *lli_src,
-                     int llis_per_log);
-
-int d40_log_sg_to_lli(int lcla_id,
-                     struct scatterlist *sg,
+                     dma_addr_t dev_addr);
+
+int d40_log_sg_to_lli(struct scatterlist *sg,
                      int sg_len,
                      struct d40_log_lli *lli_sg,
                      u32 lcsp13, /* src or dst*/
-                     u32 data_width,
-                     bool term_int, int max_len, int llis_per_log);
+                     u32 data_width);
+
+void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
+                           struct d40_log_lli *lli_dst,
+                           struct d40_log_lli *lli_src,
+                           int next);
+
+void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
+                           struct d40_log_lli *lli_dst,
+                           struct d40_log_lli *lli_src,
+                           int next);
 
 #endif /* STE_DMA40_LLI_H */
index 2ec1ed56f20436d5e5d3df9e57f1f09f81297997..3b88a4e7c98a02e9f1640d3bd39272b9034beb62 100644 (file)
@@ -759,7 +759,7 @@ static int __devinit td_probe(struct platform_device *pdev)
                        pdata->channels + i;
 
                /* even channels are RX, odd are TX */
-               if (((i % 2) && pchan->rx) || (!(i % 2) && !pchan->rx)) {
+               if ((i % 2) == pchan->rx) {
                        dev_err(&pdev->dev, "Wrong channel configuration\n");
                        err = -EINVAL;
                        goto err_tasklet_kill;
index b3d22d6599901756a694d8eac124cf0326eba399..e28e41668177c009e5a39f8007636ab9c0ce960b 100644 (file)
@@ -2,6 +2,7 @@
 #include <linux/string.h>
 #include <linux/init.h>
 #include <linux/module.h>
+#include <linux/ctype.h>
 #include <linux/dmi.h>
 #include <linux/efi.h>
 #include <linux/bootmem.h>
@@ -361,6 +362,33 @@ static void __init dmi_decode(const struct dmi_header *dm, void *dummy)
        }
 }
 
+static void __init print_filtered(const char *info)
+{
+       const char *p;
+
+       if (!info)
+               return;
+
+       for (p = info; *p; p++)
+               if (isprint(*p))
+                       printk(KERN_CONT "%c", *p);
+               else
+                       printk(KERN_CONT "\\x%02x", *p & 0xff);
+}
+
+static void __init dmi_dump_ids(void)
+{
+       printk(KERN_DEBUG "DMI: ");
+       print_filtered(dmi_get_system_info(DMI_BOARD_NAME));
+       printk(KERN_CONT "/");
+       print_filtered(dmi_get_system_info(DMI_PRODUCT_NAME));
+       printk(KERN_CONT ", BIOS ");
+       print_filtered(dmi_get_system_info(DMI_BIOS_VERSION));
+       printk(KERN_CONT " ");
+       print_filtered(dmi_get_system_info(DMI_BIOS_DATE));
+       printk(KERN_CONT "\n");
+}
+
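With this hooked into dmi_present(), boot logs gain one identification line of the form (values illustrative): DMI: ExampleVendor Board-X1/Product-Y2, BIOS 1.0.4 06/24/2010, with any non-printable bytes in the DMI strings escaped as \xNN by print_filtered() above.
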
 static int __init dmi_present(const char __iomem *p)
 {
        u8 buf[15];
@@ -381,8 +409,10 @@ static int __init dmi_present(const char __iomem *p)
                               buf[14] >> 4, buf[14] & 0xF);
                else
                        printk(KERN_INFO "DMI present.\n");
-               if (dmi_walk_early(dmi_decode) == 0)
+               if (dmi_walk_early(dmi_decode) == 0) {
+                       dmi_dump_ids();
                        return 0;
+               }
        }
        return 1;
 }
diff --git a/drivers/gpio/74x164.c b/drivers/gpio/74x164.c
new file mode 100644 (file)
index 0000000..d91ff4c
--- /dev/null
@@ -0,0 +1,182 @@
+/*
+ *  74Hx164 - Generic serial-in/parallel-out 8-bit shift register GPIO driver
+ *
+ *  Copyright (C) 2010 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2010 Miguel Gaio <miguel.gaio@efixo.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/74x164.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+
+#define GEN_74X164_GPIO_COUNT  8
+
+
+struct gen_74x164_chip {
+       struct spi_device       *spi;
+       struct gpio_chip        gpio_chip;
+       struct mutex            lock;
+       u8                      port_config;
+};
+
+static void gen_74x164_set_value(struct gpio_chip *, unsigned, int);
+
+static struct gen_74x164_chip *gpio_to_chip(struct gpio_chip *gc)
+{
+       return container_of(gc, struct gen_74x164_chip, gpio_chip);
+}
+
+static int __gen_74x164_write_config(struct gen_74x164_chip *chip)
+{
+       return spi_write(chip->spi,
+                        &chip->port_config, sizeof(chip->port_config));
+}
+
+static int gen_74x164_direction_output(struct gpio_chip *gc,
+               unsigned offset, int val)
+{
+       gen_74x164_set_value(gc, offset, val);
+       return 0;
+}
+
+static int gen_74x164_get_value(struct gpio_chip *gc, unsigned offset)
+{
+       struct gen_74x164_chip *chip = gpio_to_chip(gc);
+       int ret;
+
+       mutex_lock(&chip->lock);
+       ret = (chip->port_config >> offset) & 0x1;
+       mutex_unlock(&chip->lock);
+
+       return ret;
+}
+
+static void gen_74x164_set_value(struct gpio_chip *gc,
+               unsigned offset, int val)
+{
+       struct gen_74x164_chip *chip = gpio_to_chip(gc);
+
+       mutex_lock(&chip->lock);
+       if (val)
+               chip->port_config |= (1 << offset);
+       else
+               chip->port_config &= ~(1 << offset);
+
+       __gen_74x164_write_config(chip);
+       mutex_unlock(&chip->lock);
+}
+
+static int __devinit gen_74x164_probe(struct spi_device *spi)
+{
+       struct gen_74x164_chip *chip;
+       struct gen_74x164_chip_platform_data *pdata;
+       int ret;
+
+       pdata = spi->dev.platform_data;
+       if (!pdata || !pdata->base) {
+               dev_dbg(&spi->dev, "incorrect or missing platform data\n");
+               return -EINVAL;
+       }
+
+       /*
+        * bits_per_word cannot be configured in platform data
+        */
+       spi->bits_per_word = 8;
+
+       ret = spi_setup(spi);
+       if (ret < 0)
+               return ret;
+
+       chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+       if (!chip)
+               return -ENOMEM;
+
+       mutex_init(&chip->lock);
+
+       dev_set_drvdata(&spi->dev, chip);
+
+       chip->spi = spi;
+
+       chip->gpio_chip.label = GEN_74X164_DRIVER_NAME;
+       chip->gpio_chip.direction_output = gen_74x164_direction_output;
+       chip->gpio_chip.get = gen_74x164_get_value;
+       chip->gpio_chip.set = gen_74x164_set_value;
+       chip->gpio_chip.base = pdata->base;
+       chip->gpio_chip.ngpio = GEN_74X164_GPIO_COUNT;
+       chip->gpio_chip.can_sleep = 1;
+       chip->gpio_chip.dev = &spi->dev;
+       chip->gpio_chip.owner = THIS_MODULE;
+
+       ret = __gen_74x164_write_config(chip);
+       if (ret) {
+               dev_err(&spi->dev, "Failed writing: %d\n", ret);
+               goto exit_destroy;
+       }
+
+       ret = gpiochip_add(&chip->gpio_chip);
+       if (ret)
+               goto exit_destroy;
+
+       return ret;
+
+exit_destroy:
+       dev_set_drvdata(&spi->dev, NULL);
+       mutex_destroy(&chip->lock);
+       kfree(chip);
+       return ret;
+}
+
+static int gen_74x164_remove(struct spi_device *spi)
+{
+       struct gen_74x164_chip *chip;
+       int ret;
+
+       chip = dev_get_drvdata(&spi->dev);
+       if (chip == NULL)
+               return -ENODEV;
+
+       dev_set_drvdata(&spi->dev, NULL);
+
+       ret = gpiochip_remove(&chip->gpio_chip);
+       if (!ret) {
+               mutex_destroy(&chip->lock);
+               kfree(chip);
+       } else {
+               dev_err(&spi->dev, "Failed to remove the GPIO controller: %d\n",
+                               ret);
+       }
+
+       return ret;
+}
+
+static struct spi_driver gen_74x164_driver = {
+       .driver = {
+               .name           = GEN_74X164_DRIVER_NAME,
+               .owner          = THIS_MODULE,
+       },
+       .probe          = gen_74x164_probe,
+       .remove         = __devexit_p(gen_74x164_remove),
+};
+
+static int __init gen_74x164_init(void)
+{
+       return spi_register_driver(&gen_74x164_driver);
+}
+subsys_initcall(gen_74x164_init);
+
+static void __exit gen_74x164_exit(void)
+{
+       spi_unregister_driver(&gen_74x164_driver);
+}
+module_exit(gen_74x164_exit);
+
+MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
+MODULE_AUTHOR("Miguel Gaio <miguel.gaio@efixo.com>");
+MODULE_DESCRIPTION("GPIO expander driver for 74X164 8-bit shift register");
+MODULE_LICENSE("GPL v2");
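
A hypothetical board-file hookup for this driver, assuming <linux/spi/74x164.h> (added elsewhere in this series) defines GEN_74X164_DRIVER_NAME and the platform-data struct with its .base field, as the probe above relies on; bus number, chip select and speed are illustrative:

    #include <linux/spi/spi.h>
    #include <linux/spi/74x164.h>

    static struct gen_74x164_chip_platform_data board_74x164_pdata = {
            .base   = 200,  /* first GPIO number handed to gpiochip_add() */
    };

    static struct spi_board_info board_spi_devices[] __initdata = {
            {
                    .modalias       = GEN_74X164_DRIVER_NAME,
                    .max_speed_hz   = 1000000,
                    .bus_num        = 1,
                    .chip_select    = 0,
                    .platform_data  = &board_74x164_pdata,
            },
    };

    /* registered from the board's init code:
     * spi_register_board_info(board_spi_devices,
     *                         ARRAY_SIZE(board_spi_devices));
     */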
index 510aa2054544e57b1722a4eed03e432d0c103d57..dd9b4ba8d32d6af7f2dfb0e9733bdf947505e7eb 100644 (file)
@@ -70,6 +70,11 @@ config GPIO_MAX730X
 
 comment "Memory mapped GPIO expanders:"
 
+config GPIO_BASIC_MMIO
+       tristate "Basic memory-mapped GPIO controllers support"
+       help
+         Say yes here to support basic memory-mapped GPIO controllers.
+
 config GPIO_IT8761E
        tristate "IT8761E GPIO support"
        depends on GPIOLIB
@@ -267,6 +272,13 @@ config GPIO_ADP5588
          To compile this driver as a module, choose M here: the module will be
          called adp5588-gpio.
 
+config GPIO_ADP5588_IRQ
+       bool "Interrupt controller support for ADP5588"
+       depends on GPIO_ADP5588=y
+       help
+         Say yes here to enable the adp5588 to be used as an interrupt
+         controller. It requires the driver to be built into the kernel.
+
 comment "PCI GPIO expanders:"
 
 config GPIO_CS5535
@@ -301,6 +313,14 @@ config GPIO_LANGWELL
        help
          Say Y here to support Intel Langwell/Penwell GPIO.
 
+config GPIO_PCH
+       tristate "PCH GPIO of Intel Topcliff"
+       depends on PCI
+       help
+         This driver is for the PCH (Platform Controller Hub) GPIO of Intel
+         Topcliff, which is an IOH (Input/Output Hub) for x86 embedded
+         processors. It provides access to the PCH GPIO device.
+
 config GPIO_TIMBERDALE
        bool "Support for timberdale GPIO IP"
        depends on MFD_TIMBERDALE && GPIOLIB && HAS_IOMEM
@@ -339,6 +359,14 @@ config GPIO_MC33880
          SPI driver for Freescale MC33880 high-side/low-side switch.
          This provides GPIO interface supporting inputs and outputs.
 
+config GPIO_74X164
+       tristate "74x164 serial-in/parallel-out 8-bits shift register"
+       depends on SPI_MASTER
+       help
+         Platform driver for 74x164-compatible serial-in/parallel-out
+         8-output shift registers. This driver can be used to provide
+         access to additional GPIO outputs.
+
 comment "AC97 GPIO expanders:"
 
 config GPIO_UCB1400
index fc6019d9372019a0577daeafcb4dd9ea7cbfb35b..da2ecde5abdd5b70d90cd70f80621a6a0e8873cd 100644 (file)
@@ -10,6 +10,7 @@ obj-$(CONFIG_GPIOLIB)         += gpiolib.o
 
 obj-$(CONFIG_GPIO_ADP5520)     += adp5520-gpio.o
 obj-$(CONFIG_GPIO_ADP5588)     += adp5588-gpio.o
+obj-$(CONFIG_GPIO_BASIC_MMIO)  += basic_mmio_gpio.o
 obj-$(CONFIG_GPIO_LANGWELL)    += langwell_gpio.o
 obj-$(CONFIG_GPIO_MAX730X)     += max730x.o
 obj-$(CONFIG_GPIO_MAX7300)     += max7300.o
@@ -17,8 +18,10 @@ obj-$(CONFIG_GPIO_MAX7301)   += max7301.o
 obj-$(CONFIG_GPIO_MAX732X)     += max732x.o
 obj-$(CONFIG_GPIO_MC33880)     += mc33880.o
 obj-$(CONFIG_GPIO_MCP23S08)    += mcp23s08.o
+obj-$(CONFIG_GPIO_74X164)      += 74x164.o
 obj-$(CONFIG_GPIO_PCA953X)     += pca953x.o
 obj-$(CONFIG_GPIO_PCF857X)     += pcf857x.o
+obj-$(CONFIG_GPIO_PCH)         += pch_gpio.o
 obj-$(CONFIG_GPIO_PL061)       += pl061.o
 obj-$(CONFIG_GPIO_STMPE)       += stmpe-gpio.o
 obj-$(CONFIG_GPIO_TC35892)     += tc35892-gpio.o
index 2e8e9e24f887ff790e7866d2ac1e1f27b906ac89..0871f78af5933a83445a19b2bf956b925d3f34ba 100644 (file)
@@ -1,8 +1,8 @@
 /*
  * GPIO Chip driver for Analog Devices
- * ADP5588 I/O Expander and QWERTY Keypad Controller
+ * ADP5588/ADP5587 I/O Expander and QWERTY Keypad Controller
  *
- * Copyright 2009 Analog Devices Inc.
+ * Copyright 2009-2010 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  */
 #include <linux/init.h>
 #include <linux/i2c.h>
 #include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
 
 #include <linux/i2c/adp5588.h>
 
-#define DRV_NAME               "adp5588-gpio"
-#define MAXGPIO                        18
-#define ADP_BANK(offs)         ((offs) >> 3)
-#define ADP_BIT(offs)          (1u << ((offs) & 0x7))
+#define DRV_NAME       "adp5588-gpio"
+
+/*
+ * Early, pre-4.0 silicon requires the readout to be delayed by at least
+ * 25 ms, since the Event Counter Register is updated 25 ms after the
+ * interrupt is asserted.
+ */
+#define WA_DELAYED_READOUT_REVID(rev)  ((rev) < 4)
 
 struct adp5588_gpio {
        struct i2c_client *client;
        struct gpio_chip gpio_chip;
        struct mutex lock;      /* protect cached dir, dat_out */
+       /* protect serialized access to the interrupt controller bus */
+       struct mutex irq_lock;
        unsigned gpio_start;
+       unsigned irq_base;
        uint8_t dat_out[3];
        uint8_t dir[3];
+       uint8_t int_lvl[3];
+       uint8_t int_en[3];
+       uint8_t irq_mask[3];
+       uint8_t irq_stat[3];
 };
 
 static int adp5588_gpio_read(struct i2c_client *client, u8 reg)
@@ -55,8 +68,8 @@ static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off)
        struct adp5588_gpio *dev =
            container_of(chip, struct adp5588_gpio, gpio_chip);
 
-       return !!(adp5588_gpio_read(dev->client, GPIO_DAT_STAT1 + ADP_BANK(off))
-                 & ADP_BIT(off));
+       return !!(adp5588_gpio_read(dev->client,
+                 GPIO_DAT_STAT1 + ADP5588_BANK(off)) & ADP5588_BIT(off));
 }
 
 static void adp5588_gpio_set_value(struct gpio_chip *chip,
@@ -66,8 +79,8 @@ static void adp5588_gpio_set_value(struct gpio_chip *chip,
        struct adp5588_gpio *dev =
            container_of(chip, struct adp5588_gpio, gpio_chip);
 
-       bank = ADP_BANK(off);
-       bit = ADP_BIT(off);
+       bank = ADP5588_BANK(off);
+       bit = ADP5588_BIT(off);
 
        mutex_lock(&dev->lock);
        if (val)
@@ -87,10 +100,10 @@ static int adp5588_gpio_direction_input(struct gpio_chip *chip, unsigned off)
        struct adp5588_gpio *dev =
            container_of(chip, struct adp5588_gpio, gpio_chip);
 
-       bank = ADP_BANK(off);
+       bank = ADP5588_BANK(off);
 
        mutex_lock(&dev->lock);
-       dev->dir[bank] &= ~ADP_BIT(off);
+       dev->dir[bank] &= ~ADP5588_BIT(off);
        ret = adp5588_gpio_write(dev->client, GPIO_DIR1 + bank, dev->dir[bank]);
        mutex_unlock(&dev->lock);
 
@@ -105,8 +118,8 @@ static int adp5588_gpio_direction_output(struct gpio_chip *chip,
        struct adp5588_gpio *dev =
            container_of(chip, struct adp5588_gpio, gpio_chip);
 
-       bank = ADP_BANK(off);
-       bit = ADP_BIT(off);
+       bank = ADP5588_BANK(off);
+       bit = ADP5588_BIT(off);
 
        mutex_lock(&dev->lock);
        dev->dir[bank] |= bit;
@@ -125,6 +138,213 @@ static int adp5588_gpio_direction_output(struct gpio_chip *chip,
        return ret;
 }
 
+#ifdef CONFIG_GPIO_ADP5588_IRQ
+static int adp5588_gpio_to_irq(struct gpio_chip *chip, unsigned off)
+{
+       struct adp5588_gpio *dev =
+               container_of(chip, struct adp5588_gpio, gpio_chip);
+       return dev->irq_base + off;
+}
+
+static void adp5588_irq_bus_lock(unsigned int irq)
+{
+       struct adp5588_gpio *dev = get_irq_chip_data(irq);
+       mutex_lock(&dev->irq_lock);
+}
+
+/*
+ * genirq core code can issue chip->mask/unmask from atomic context.
+ * This doesn't work for slow busses where an access needs to sleep.
+ * bus_sync_unlock() is therefore called outside the atomic context;
+ * it syncs the current irq mask state with the slow external
+ * controller and unlocks the bus.
+ */
+
+static void adp5588_irq_bus_sync_unlock(unsigned int irq)
+{
+       struct adp5588_gpio *dev = get_irq_chip_data(irq);
+       int i;
+
+       for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++)
+               if (dev->int_en[i] ^ dev->irq_mask[i]) {
+                       dev->int_en[i] = dev->irq_mask[i];
+                       adp5588_gpio_write(dev->client, GPIO_INT_EN1 + i,
+                                          dev->int_en[i]);
+               }
+
+       mutex_unlock(&dev->irq_lock);
+}
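
A minimal sketch of the call order genirq uses with such sleeping-bus
chips; the sequence is illustrative, not code from this patch:

	chip->bus_lock(irq);		/* may sleep: takes dev->irq_lock     */
	chip->mask(irq);		/* atomic-safe: edits cached irq_mask */
	chip->bus_sync_unlock(irq);	/* writes GPIO_INT_EN* over I2C and   */
					/* then drops dev->irq_lock           */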
+
+static void adp5588_irq_mask(unsigned int irq)
+{
+       struct adp5588_gpio *dev = get_irq_chip_data(irq);
+       unsigned gpio = irq - dev->irq_base;
+
+       dev->irq_mask[ADP5588_BANK(gpio)] &= ~ADP5588_BIT(gpio);
+}
+
+static void adp5588_irq_unmask(unsigned int irq)
+{
+       struct adp5588_gpio *dev = get_irq_chip_data(irq);
+       unsigned gpio = irq - dev->irq_base;
+
+       dev->irq_mask[ADP5588_BANK(gpio)] |= ADP5588_BIT(gpio);
+}
+
+static int adp5588_irq_set_type(unsigned int irq, unsigned int type)
+{
+       struct adp5588_gpio *dev = get_irq_chip_data(irq);
+       uint16_t gpio = irq - dev->irq_base;
+       unsigned bank, bit;
+
+       if (type & IRQ_TYPE_EDGE_BOTH) {
+               dev_err(&dev->client->dev, "irq %d: unsupported type %d\n",
+                       irq, type);
+               return -EINVAL;
+       }
+
+       bank = ADP5588_BANK(gpio);
+       bit = ADP5588_BIT(gpio);
+
+       if (type & IRQ_TYPE_LEVEL_HIGH)
+               dev->int_lvl[bank] |= bit;
+       else if (type & IRQ_TYPE_LEVEL_LOW)
+               dev->int_lvl[bank] &= ~bit;
+       else
+               return -EINVAL;
+
+       adp5588_gpio_direction_input(&dev->gpio_chip, gpio);
+       adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + bank,
+                          dev->int_lvl[bank]);
+
+       return 0;
+}
+
+static struct irq_chip adp5588_irq_chip = {
+       .name                   = "adp5588",
+       .mask                   = adp5588_irq_mask,
+       .unmask                 = adp5588_irq_unmask,
+       .bus_lock               = adp5588_irq_bus_lock,
+       .bus_sync_unlock        = adp5588_irq_bus_sync_unlock,
+       .set_type               = adp5588_irq_set_type,
+};
+
+static int adp5588_gpio_read_intstat(struct i2c_client *client, u8 *buf)
+{
+       int ret = i2c_smbus_read_i2c_block_data(client, GPIO_INT_STAT1, 3, buf);
+
+       if (ret < 0)
+               dev_err(&client->dev, "Read INT_STAT Error\n");
+
+       return ret;
+}
+
+static irqreturn_t adp5588_irq_handler(int irq, void *devid)
+{
+       struct adp5588_gpio *dev = devid;
+       unsigned status, bank, bit, pending;
+       int ret;
+       status = adp5588_gpio_read(dev->client, INT_STAT);
+
+       if (status & ADP5588_GPI_INT) {
+               ret = adp5588_gpio_read_intstat(dev->client, dev->irq_stat);
+               if (ret < 0)
+                       memset(dev->irq_stat, 0, sizeof(dev->irq_stat));
+
+               for (bank = 0, bit = 0; bank <= ADP5588_BANK(ADP5588_MAXGPIO);
+                       bank++, bit = 0) {
+                       pending = dev->irq_stat[bank] & dev->irq_mask[bank];
+
+                       while (pending) {
+                               if (pending & (1 << bit)) {
+                                       handle_nested_irq(dev->irq_base +
+                                                         (bank << 3) + bit);
+                                       pending &= ~(1 << bit);
+                               }
+                               bit++;
+                       }
+               }
+       }
+
+       adp5588_gpio_write(dev->client, INT_STAT, status); /* Status is W1C */
+
+       return IRQ_HANDLED;
+}
+
+static int adp5588_irq_setup(struct adp5588_gpio *dev)
+{
+       struct i2c_client *client = dev->client;
+       struct adp5588_gpio_platform_data *pdata = client->dev.platform_data;
+       unsigned gpio;
+       int ret;
+
+       adp5588_gpio_write(client, CFG, ADP5588_AUTO_INC);
+       adp5588_gpio_write(client, INT_STAT, -1); /* status is W1C */
+       adp5588_gpio_read_intstat(client, dev->irq_stat); /* read to clear */
+
+       dev->irq_base = pdata->irq_base;
+       mutex_init(&dev->irq_lock);
+
+       for (gpio = 0; gpio < dev->gpio_chip.ngpio; gpio++) {
+               int irq = gpio + dev->irq_base;
+               set_irq_chip_data(irq, dev);
+               set_irq_chip_and_handler(irq, &adp5588_irq_chip,
+                                        handle_level_irq);
+               set_irq_nested_thread(irq, 1);
+#ifdef CONFIG_ARM
+               /*
+                * ARM needs us to explicitly flag the IRQ as VALID;
+                * once we do so, it will also set the noprobe flag.
+                */
+               set_irq_flags(irq, IRQF_VALID);
+#else
+               set_irq_noprobe(irq);
+#endif
+       }
+
+       ret = request_threaded_irq(client->irq,
+                                  NULL,
+                                  adp5588_irq_handler,
+                                  IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+                                  dev_name(&client->dev), dev);
+       if (ret) {
+               dev_err(&client->dev, "failed to request irq %d\n",
+                       client->irq);
+               goto out;
+       }
+
+       dev->gpio_chip.to_irq = adp5588_gpio_to_irq;
+       adp5588_gpio_write(client, CFG,
+               ADP5588_AUTO_INC | ADP5588_INT_CFG | ADP5588_GPI_INT);
+
+       return 0;
+
+out:
+       dev->irq_base = 0;
+       return ret;
+}
+
+static void adp5588_irq_teardown(struct adp5588_gpio *dev)
+{
+       if (dev->irq_base)
+               free_irq(dev->client->irq, dev);
+}
+
+#else
+static int adp5588_irq_setup(struct adp5588_gpio *dev)
+{
+       struct i2c_client *client = dev->client;
+       dev_warn(&client->dev, "interrupt support not compiled in\n");
+
+       return 0;
+}
+
+static void adp5588_irq_teardown(struct adp5588_gpio *dev)
+{
+}
+#endif /* CONFIG_GPIO_ADP5588_IRQ */
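
A sketch of the board-side wiring the new IRQ path expects; the numbers
and the I2C address are made-up platform choices, not part of this patch:

	static struct adp5588_gpio_platform_data board_adp5588_gpio = {
		.gpio_start	 = 64,	/* first gpiolib number to claim */
		.irq_base	 = 160,	/* 18 consecutive IRQs from here */
		.pullup_dis_mask = 0,	/* keep all pull-ups enabled     */
	};

	static struct i2c_board_info board_i2c_devs[] __initdata = {
		{
			I2C_BOARD_INFO("adp5588-gpio", 0x34),
			.irq		= 25,	/* chip INT line, board-specific */
			.platform_data	= &board_adp5588_gpio,
		},
	};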
+
 static int __devinit adp5588_gpio_probe(struct i2c_client *client,
                                        const struct i2c_device_id *id)
 {
@@ -160,37 +380,46 @@ static int __devinit adp5588_gpio_probe(struct i2c_client *client,
        gc->can_sleep = 1;
 
        gc->base = pdata->gpio_start;
-       gc->ngpio = MAXGPIO;
+       gc->ngpio = ADP5588_MAXGPIO;
        gc->label = client->name;
        gc->owner = THIS_MODULE;
 
        mutex_init(&dev->lock);
 
-
        ret = adp5588_gpio_read(dev->client, DEV_ID);
        if (ret < 0)
                goto err;
 
        revid = ret & ADP5588_DEVICE_ID_MASK;
 
-       for (i = 0, ret = 0; i <= ADP_BANK(MAXGPIO); i++) {
+       for (i = 0, ret = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
                dev->dat_out[i] = adp5588_gpio_read(client, GPIO_DAT_OUT1 + i);
                dev->dir[i] = adp5588_gpio_read(client, GPIO_DIR1 + i);
                ret |= adp5588_gpio_write(client, KP_GPIO1 + i, 0);
                ret |= adp5588_gpio_write(client, GPIO_PULL1 + i,
                                (pdata->pullup_dis_mask >> (8 * i)) & 0xFF);
-
+               ret |= adp5588_gpio_write(client, GPIO_INT_EN1 + i, 0);
                if (ret)
                        goto err;
        }
 
+       if (pdata->irq_base) {
+               if (WA_DELAYED_READOUT_REVID(revid)) {
+                       dev_warn(&client->dev, "GPIO int not supported\n");
+               } else {
+                       ret = adp5588_irq_setup(dev);
+                       if (ret)
+                               goto err;
+               }
+       }
+
        ret = gpiochip_add(&dev->gpio_chip);
        if (ret)
-               goto err;
+               goto err_irq;
 
-       dev_info(&client->dev, "gpios %d..%d on a %s Rev. %d\n",
+       dev_info(&client->dev, "gpios %d..%d (IRQ Base %d) on a %s Rev. %d\n",
                        gc->base, gc->base + gc->ngpio - 1,
-                       client->name, revid);
+                       pdata->irq_base, client->name, revid);
 
        if (pdata->setup) {
                ret = pdata->setup(client, gc->base, gc->ngpio, pdata->context);
@@ -199,8 +428,11 @@ static int __devinit adp5588_gpio_probe(struct i2c_client *client,
        }
 
        i2c_set_clientdata(client, dev);
+
        return 0;
 
+err_irq:
+       adp5588_irq_teardown(dev);
 err:
        kfree(dev);
        return ret;
@@ -222,6 +454,9 @@ static int __devexit adp5588_gpio_remove(struct i2c_client *client)
                }
        }
 
+       if (dev->irq_base)
+               free_irq(dev->client->irq, dev);
+
        ret = gpiochip_remove(&dev->gpio_chip);
        if (ret) {
                dev_err(&client->dev, "gpiochip_remove failed %d\n", ret);
diff --git a/drivers/gpio/basic_mmio_gpio.c b/drivers/gpio/basic_mmio_gpio.c
new file mode 100644 (file)
index 0000000..3addea6
--- /dev/null
@@ -0,0 +1,297 @@
+/*
+ * Driver for basic memory-mapped GPIO controllers.
+ *
+ * Copyright 2008 MontaVista Software, Inc.
+ * Copyright 2008,2010 Anton Vorontsov <cbouatmailru@gmail.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * ....``.```~~~~````.`.`.`.`.```````'',,,.........`````......`.......
+ * ...``                                                         ```````..
+ * ..The simplest form of a GPIO controller that the driver supports is``
+ *  `.just a single "data" register, where GPIO state can be read and/or `
+ *    `,..written. ,,..``~~~~ .....``.`.`.~~.```.`.........``````.```````
+ *        `````````
+                                    ___
+_/~~|___/~|   . ```~~~~~~       ___/___\___     ,~.`.`.`.`````.~~...,,,,...
+__________|~$@~~~        %~    /o*o*o*o*o*o\   .. Implementing such a GPIO .
+o        `                     ~~~~\___/~~~~    ` controller in FPGA is ,.`
+                                                 `....trivial..'~`.```.```
+ *                                                    ```````
+ *  .```````~~~~`..`.``.``.
+ * .  The driver supports  `...       ,..```.`~~~```````````````....````.``,,
+ * .   big-endian notation, just`.  .. A bit more sophisticated controllers ,
+ *  . register the device with -be`. .with a pair of set/clear-bit registers ,
+ *   `.. suffix.  ```~~`````....`.`   . affecting the data register and the .`
+ *     ``.`.``...```                  ```.. output pins are also supported.`
+ *                        ^^             `````.`````````.,``~``~``~~``````
+ *                                                   .                  ^^
+ *   ,..`.`.`...````````````......`.`.`.`.`.`..`.`.`..
+ * .. The expectation is that in at least some cases .    ,-~~~-,
+ *  .this will be used with roll-your-own ASIC/FPGA .`     \   /
+ *  .logic in Verilog or VHDL. ~~~`````````..`````~~`       \ /
+ *  ..````````......```````````                             \o_
+ *                                                           |
+ *                              ^^                          / \
+ *
+ *           ...`````~~`.....``.`..........``````.`.``.```........``.
+ *            `  8, 16, 32 and 64 bits registers are supported, and``.
+ *            . the number of GPIOs is determined by the width of   ~
+ *             .. the registers. ,............```.`.`..`.`.~~~.`.`.`~
+ *               `.......````.```
+ */
+
+#include <linux/init.h>
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/log2.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/basic_mmio_gpio.h>
+
+struct bgpio_chip {
+       struct gpio_chip gc;
+       void __iomem *reg_dat;
+       void __iomem *reg_set;
+       void __iomem *reg_clr;
+
+       /* Number of bits (GPIOs): <register width> * 8. */
+       int bits;
+
+       /*
+        * Some GPIO controllers work with the big-endian bits notation,
+        * e.g. in an 8-bit register, GPIO7 is the least significant bit.
+        */
+       int big_endian_bits;
+
+       /*
+        * Used to lock bgpio_chip->data. Also needed to keep the shadowed
+        * and real data register writes together.
+        */
+       spinlock_t lock;
+
+       /* Shadowed data register to clear/set bits safely. */
+       unsigned long data;
+};
+
+static struct bgpio_chip *to_bgpio_chip(struct gpio_chip *gc)
+{
+       return container_of(gc, struct bgpio_chip, gc);
+}
+
+static unsigned long bgpio_in(struct bgpio_chip *bgc)
+{
+       switch (bgc->bits) {
+       case 8:
+               return __raw_readb(bgc->reg_dat);
+       case 16:
+               return __raw_readw(bgc->reg_dat);
+       case 32:
+               return __raw_readl(bgc->reg_dat);
+#if BITS_PER_LONG >= 64
+       case 64:
+               return __raw_readq(bgc->reg_dat);
+#endif
+       }
+       return -EINVAL;
+}
+
+static void bgpio_out(struct bgpio_chip *bgc, void __iomem *reg,
+                     unsigned long data)
+{
+       switch (bgc->bits) {
+       case 8:
+               __raw_writeb(data, reg);
+               return;
+       case 16:
+               __raw_writew(data, reg);
+               return;
+       case 32:
+               __raw_writel(data, reg);
+               return;
+#if BITS_PER_LONG >= 64
+       case 64:
+               __raw_writeq(data, reg);
+               return;
+#endif
+       }
+}
+
+static unsigned long bgpio_pin2mask(struct bgpio_chip *bgc, unsigned int pin)
+{
+       if (bgc->big_endian_bits)
+               return 1 << (bgc->bits - 1 - pin);
+       else
+               return 1 << pin;
+}
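
Worked values for the mapping above, assuming an 8-bit data register
(bgc->bits == 8):

	/* big_endian_bits set:   pin 0 -> 1 << 7 == 0x80, pin 7 -> 0x01 */
	/* big_endian_bits clear: pin 0 -> 1 << 0 == 0x01, pin 7 -> 0x80 */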
+
+static int bgpio_get(struct gpio_chip *gc, unsigned int gpio)
+{
+       struct bgpio_chip *bgc = to_bgpio_chip(gc);
+
+       return bgpio_in(bgc) & bgpio_pin2mask(bgc, gpio);
+}
+
+static void bgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+       struct bgpio_chip *bgc = to_bgpio_chip(gc);
+       unsigned long mask = bgpio_pin2mask(bgc, gpio);
+       unsigned long flags;
+
+       if (bgc->reg_set) {
+               if (val)
+                       bgpio_out(bgc, bgc->reg_set, mask);
+               else
+                       bgpio_out(bgc, bgc->reg_clr, mask);
+               return;
+       }
+
+       spin_lock_irqsave(&bgc->lock, flags);
+
+       if (val)
+               bgc->data |= mask;
+       else
+               bgc->data &= ~mask;
+
+       bgpio_out(bgc, bgc->reg_dat, bgc->data);
+
+       spin_unlock_irqrestore(&bgc->lock, flags);
+}
+
+static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
+{
+       return 0;
+}
+
+static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+       bgpio_set(gc, gpio, val);
+       return 0;
+}
+
+static int __devinit bgpio_probe(struct platform_device *pdev)
+{
+       const struct platform_device_id *platid = platform_get_device_id(pdev);
+       struct device *dev = &pdev->dev;
+       struct bgpio_pdata *pdata = dev_get_platdata(dev);
+       struct bgpio_chip *bgc;
+       struct resource *res_dat;
+       struct resource *res_set;
+       struct resource *res_clr;
+       resource_size_t dat_sz;
+       int bits;
+       int ret;
+
+       res_dat = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dat");
+       if (!res_dat)
+               return -EINVAL;
+
+       dat_sz = resource_size(res_dat);
+       if (!is_power_of_2(dat_sz))
+               return -EINVAL;
+
+       bits = dat_sz * 8;
+       if (bits > BITS_PER_LONG)
+               return -EINVAL;
+
+       bgc = devm_kzalloc(dev, sizeof(*bgc), GFP_KERNEL);
+       if (!bgc)
+               return -ENOMEM;
+
+       bgc->reg_dat = devm_ioremap(dev, res_dat->start, dat_sz);
+       if (!bgc->reg_dat)
+               return -ENOMEM;
+
+       res_set = platform_get_resource_byname(pdev, IORESOURCE_MEM, "set");
+       res_clr = platform_get_resource_byname(pdev, IORESOURCE_MEM, "clr");
+       if (res_set && res_clr) {
+               if (resource_size(res_set) != resource_size(res_clr) ||
+                               resource_size(res_set) != dat_sz)
+                       return -EINVAL;
+
+               bgc->reg_set = devm_ioremap(dev, res_set->start, dat_sz);
+               bgc->reg_clr = devm_ioremap(dev, res_clr->start, dat_sz);
+               if (!bgc->reg_set || !bgc->reg_clr)
+                       return -ENOMEM;
+       } else if (res_set || res_clr) {
+               return -EINVAL;
+       }
+
+       spin_lock_init(&bgc->lock);
+
+       bgc->bits = bits;
+       bgc->big_endian_bits = !strcmp(platid->name, "basic-mmio-gpio-be");
+       bgc->data = bgpio_in(bgc);
+
+       bgc->gc.ngpio = bits;
+       bgc->gc.direction_input = bgpio_dir_in;
+       bgc->gc.direction_output = bgpio_dir_out;
+       bgc->gc.get = bgpio_get;
+       bgc->gc.set = bgpio_set;
+       bgc->gc.dev = dev;
+       bgc->gc.label = dev_name(dev);
+
+       if (pdata)
+               bgc->gc.base = pdata->base;
+       else
+               bgc->gc.base = -1;
+
+       dev_set_drvdata(dev, bgc);
+
+       ret = gpiochip_add(&bgc->gc);
+       if (ret)
+               dev_err(dev, "gpiochip_add() failed: %d\n", ret);
+
+       return ret;
+}
+
+static int __devexit bgpio_remove(struct platform_device *pdev)
+{
+       struct bgpio_chip *bgc = dev_get_drvdata(&pdev->dev);
+
+       return gpiochip_remove(&bgc->gc);
+}
+
+static const struct platform_device_id bgpio_id_table[] = {
+       { "basic-mmio-gpio", },
+       { "basic-mmio-gpio-be", },
+       {},
+};
+MODULE_DEVICE_TABLE(platform, bgpio_id_table);
+
+static struct platform_driver bgpio_driver = {
+       .driver = {
+               .name = "basic-mmio-gpio",
+       },
+       .id_table = bgpio_id_table,
+       .probe = bgpio_probe,
+       .remove = __devexit_p(bgpio_remove),
+};
+
+static int __init bgpio_init(void)
+{
+       return platform_driver_register(&bgpio_driver);
+}
+module_init(bgpio_init);
+
+static void __exit bgpio_exit(void)
+{
+       platform_driver_unregister(&bgpio_driver);
+}
+module_exit(bgpio_exit);
+
+MODULE_DESCRIPTION("Driver for basic memory-mapped GPIO controllers");
+MODULE_AUTHOR("Anton Vorontsov <cbouatmailru@gmail.com>");
+MODULE_LICENSE("GPL");
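
A sketch of board code that would bind to this driver through its named
"dat"/"set"/"clr" resources; the addresses are made up, and struct
bgpio_pdata is assumed (from its use in probe) to carry just the gpiolib
base:

	static struct resource board_gpio_res[] = {
		{
			.name	= "dat",
			.start	= 0xfe002000,	/* made-up address     */
			.end	= 0xfe002003,	/* 4 bytes -> 32 GPIOs */
			.flags	= IORESOURCE_MEM,
		}, {
			.name	= "set",
			.start	= 0xfe002004,
			.end	= 0xfe002007,
			.flags	= IORESOURCE_MEM,
		}, {
			.name	= "clr",
			.start	= 0xfe002008,
			.end	= 0xfe00200b,
			.flags	= IORESOURCE_MEM,
		},
	};

	static struct bgpio_pdata board_gpio_pdata = {
		.base	= -1,		/* let gpiolib pick the base */
	};

	static struct platform_device board_gpio_dev = {
		.name		= "basic-mmio-gpio",	/* "-be" variant for    */
		.id		= -1,			/* big-endian bit order */
		.resource	= board_gpio_res,
		.num_resources	= ARRAY_SIZE(board_gpio_res),
		.dev		= {
			.platform_data	= &board_gpio_pdata,
		},
	};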
index 8383a8d7f9945dee14f15fb6b4ba3c08a3885fe6..64db9dc3a275d94fb2cbbc39c00e8561eb8817cb 100644 (file)
 /* Supports:
  * Moorestown platform Langwell chip.
  * Medfield platform Penwell chip.
+ * Whitney Point.
  */
 
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/platform_device.h>
 #include <linux/kernel.h>
 #include <linux/delay.h>
 #include <linux/stddef.h>
@@ -158,15 +160,15 @@ static int lnw_irq_type(unsigned irq, unsigned type)
        spin_unlock_irqrestore(&lnw->lock, flags);
 
        return 0;
-};
+}
 
 static void lnw_irq_unmask(unsigned irq)
 {
-};
+}
 
 static void lnw_irq_mask(unsigned irq)
 {
-};
+}
 
 static struct irq_chip lnw_irqchip = {
        .name           = "LNW-GPIO",
@@ -300,9 +302,88 @@ static struct pci_driver lnw_gpio_driver = {
        .probe          = lnw_gpio_probe,
 };
 
+
+static int __devinit wp_gpio_probe(struct platform_device *pdev)
+{
+       struct lnw_gpio *lnw;
+       struct gpio_chip *gc;
+       struct resource *rc;
+       int retval = 0;
+
+       rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!rc)
+               return -EINVAL;
+
+       lnw = kzalloc(sizeof(struct lnw_gpio), GFP_KERNEL);
+       if (!lnw) {
+               dev_err(&pdev->dev,
+                       "can't allocate whitneypoint_gpio chip data\n");
+               return -ENOMEM;
+       }
+       lnw->reg_base = ioremap_nocache(rc->start, resource_size(rc));
+       if (lnw->reg_base == NULL) {
+               retval = -EINVAL;
+               goto err_kmalloc;
+       }
+       spin_lock_init(&lnw->lock);
+       gc = &lnw->chip;
+       gc->label = dev_name(&pdev->dev);
+       gc->owner = THIS_MODULE;
+       gc->direction_input = lnw_gpio_direction_input;
+       gc->direction_output = lnw_gpio_direction_output;
+       gc->get = lnw_gpio_get;
+       gc->set = lnw_gpio_set;
+       gc->to_irq = NULL;
+       gc->base = 0;
+       gc->ngpio = 64;
+       gc->can_sleep = 0;
+       retval = gpiochip_add(gc);
+       if (retval) {
+               dev_err(&pdev->dev, "whitneypoint gpiochip_add error %d\n",
+                                                               retval);
+               goto err_ioremap;
+       }
+       platform_set_drvdata(pdev, lnw);
+       return 0;
+err_ioremap:
+       iounmap(lnw->reg_base);
+err_kmalloc:
+       kfree(lnw);
+       return retval;
+}
+
+static int __devexit wp_gpio_remove(struct platform_device *pdev)
+{
+       struct lnw_gpio *lnw = platform_get_drvdata(pdev);
+       int err;
+       err = gpiochip_remove(&lnw->chip);
+       if (err)
+               dev_err(&pdev->dev, "failed to remove gpio_chip.\n");
+       iounmap(lnw->reg_base);
+       kfree(lnw);
+       platform_set_drvdata(pdev, NULL);
+       return 0;
+}
+
+static struct platform_driver wp_gpio_driver = {
+       .probe          = wp_gpio_probe,
+       .remove         = __devexit_p(wp_gpio_remove),
+       .driver         = {
+               .name   = "wp_gpio",
+               .owner  = THIS_MODULE,
+       },
+};
+
 static int __init lnw_gpio_init(void)
 {
-       return pci_register_driver(&lnw_gpio_driver);
+       int ret;
+       ret = pci_register_driver(&lnw_gpio_driver);
+       if (ret < 0)
+               return ret;
+       ret = platform_driver_register(&wp_gpio_driver);
+       if (ret < 0)
+               pci_unregister_driver(&lnw_gpio_driver);
+       return ret;
 }
 
 device_initcall(lnw_gpio_init);
diff --git a/drivers/gpio/pch_gpio.c b/drivers/gpio/pch_gpio.c
new file mode 100644 (file)
index 0000000..0eba0a7
--- /dev/null
@@ -0,0 +1,312 @@
+/*
+ * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/gpio.h>
+
+#define PCH_GPIO_ALL_PINS      0xfff /* Mask for GPIO pins 0 to 11 */
+#define GPIO_NUM_PINS  12      /* Number of GPIO pins (GPIO0-GPIO11) */
+
+struct pch_regs {
+       u32     ien;
+       u32     istatus;
+       u32     idisp;
+       u32     iclr;
+       u32     imask;
+       u32     imaskclr;
+       u32     po;
+       u32     pi;
+       u32     pm;
+       u32     im0;
+       u32     im1;
+       u32     reserved[4];
+       u32     reset;
+};
+
+/**
+ * struct pch_gpio_reg_data - The register store data.
+ * @po_reg:    To store contents of PO register.
+ * @pm_reg:    To store contents of PM register.
+ */
+struct pch_gpio_reg_data {
+       u32 po_reg;
+       u32 pm_reg;
+};
+
+/**
+ * struct pch_gpio - GPIO private data structure.
+ * @base:                      PCI base address of Memory mapped I/O register.
+ * @reg:                       Memory mapped PCH GPIO register list.
+ * @dev:                       Pointer to device structure.
+ * @gpio:                      Data for GPIO infrastructure.
+ * @pch_gpio_reg:              Register state saved here across
+ *                             suspend/resume.
+ * @lock:                      Protects read-modify-write cycles on the
+ *                             PO and PM registers.
+ */
+struct pch_gpio {
+       void __iomem *base;
+       struct pch_regs __iomem *reg;
+       struct device *dev;
+       struct gpio_chip gpio;
+       struct pch_gpio_reg_data pch_gpio_reg;
+       struct mutex lock;
+};
+
+static void pch_gpio_set(struct gpio_chip *gpio, unsigned nr, int val)
+{
+       u32 reg_val;
+       struct pch_gpio *chip = container_of(gpio, struct pch_gpio, gpio);
+
+       mutex_lock(&chip->lock);
+       reg_val = ioread32(&chip->reg->po);
+       if (val)
+               reg_val |= (1 << nr);
+       else
+               reg_val &= ~(1 << nr);
+
+       iowrite32(reg_val, &chip->reg->po);
+       mutex_unlock(&chip->lock);
+}
+
+static int pch_gpio_get(struct gpio_chip *gpio, unsigned nr)
+{
+       struct pch_gpio *chip = container_of(gpio, struct pch_gpio, gpio);
+
+       return ioread32(&chip->reg->pi) & (1 << nr);
+}
+
+static int pch_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
+                                    int val)
+{
+       struct pch_gpio *chip = container_of(gpio, struct pch_gpio, gpio);
+       u32 pm;
+       u32 reg_val;
+
+       mutex_lock(&chip->lock);
+       pm = ioread32(&chip->reg->pm) & PCH_GPIO_ALL_PINS;
+       pm |= (1 << nr);
+       iowrite32(pm, &chip->reg->pm);
+
+       reg_val = ioread32(&chip->reg->po);
+       if (val)
+               reg_val |= (1 << nr);
+       else
+               reg_val &= ~(1 << nr);
+
+       /* the computed output level must actually be written back */
+       iowrite32(reg_val, &chip->reg->po);
+       mutex_unlock(&chip->lock);
+
+       return 0;
+}
+
+static int pch_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
+{
+       struct pch_gpio *chip = container_of(gpio, struct pch_gpio, gpio);
+       u32 pm;
+
+       mutex_lock(&chip->lock);
+       pm = ioread32(&chip->reg->pm) & PCH_GPIO_ALL_PINS; /* bits 0-11 */
+       pm &= ~(1 << nr);
+       iowrite32(pm, &chip->reg->pm);
+       mutex_unlock(&chip->lock);
+
+       return 0;
+}
+
+/*
+ * Save the PO and PM register configuration.
+ */
+static void pch_gpio_save_reg_conf(struct pch_gpio *chip)
+{
+       chip->pch_gpio_reg.po_reg = ioread32(&chip->reg->po);
+       chip->pch_gpio_reg.pm_reg = ioread32(&chip->reg->pm);
+}
+
+/*
+ * This function restores the register configuration of the GPIO device.
+ */
+static void pch_gpio_restore_reg_conf(struct pch_gpio *chip)
+{
+       /* restore contents of PO register */
+       iowrite32(chip->pch_gpio_reg.po_reg, &chip->reg->po);
+       /* restore contents of PM register */
+       iowrite32(chip->pch_gpio_reg.pm_reg, &chip->reg->pm);
+}
+
+static void pch_gpio_setup(struct pch_gpio *chip)
+{
+       struct gpio_chip *gpio = &chip->gpio;
+
+       gpio->label = dev_name(chip->dev);
+       gpio->owner = THIS_MODULE;
+       gpio->direction_input = pch_gpio_direction_input;
+       gpio->get = pch_gpio_get;
+       gpio->direction_output = pch_gpio_direction_output;
+       gpio->set = pch_gpio_set;
+       gpio->dbg_show = NULL;
+       gpio->base = -1;
+       gpio->ngpio = GPIO_NUM_PINS;
+       gpio->can_sleep = 0;
+}
+
+static int __devinit pch_gpio_probe(struct pci_dev *pdev,
+                                   const struct pci_device_id *id)
+{
+       s32 ret;
+       struct pch_gpio *chip;
+
+       chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+       if (chip == NULL)
+               return -ENOMEM;
+
+       chip->dev = &pdev->dev;
+       ret = pci_enable_device(pdev);
+       if (ret) {
+               dev_err(&pdev->dev, "%s : pci_enable_device FAILED", __func__);
+               goto err_pci_enable;
+       }
+
+       ret = pci_request_regions(pdev, KBUILD_MODNAME);
+       if (ret) {
+               dev_err(&pdev->dev, "pci_request_regions FAILED-%d", ret);
+               goto err_request_regions;
+       }
+
+       chip->base = pci_iomap(pdev, 1, 0);
+       if (chip->base == NULL) {
+               dev_err(&pdev->dev, "%s : pci_iomap FAILED", __func__);
+               ret = -ENOMEM;
+               goto err_iomap;
+       }
+
+       chip->reg = chip->base;
+       pci_set_drvdata(pdev, chip);
+       mutex_init(&chip->lock);
+       pch_gpio_setup(chip);
+       ret = gpiochip_add(&chip->gpio);
+       if (ret) {
+               dev_err(&pdev->dev, "PCH gpio: Failed to register GPIO\n");
+               goto err_gpiochip_add;
+       }
+
+       return 0;
+
+err_gpiochip_add:
+       pci_iounmap(pdev, chip->base);
+
+err_iomap:
+       pci_release_regions(pdev);
+
+err_request_regions:
+       pci_disable_device(pdev);
+
+err_pci_enable:
+       kfree(chip);
+       dev_err(&pdev->dev, "%s Failed returns %d\n", __func__, ret);
+       return ret;
+}
+
+static void __devexit pch_gpio_remove(struct pci_dev *pdev)
+{
+       int err;
+       struct pch_gpio *chip = pci_get_drvdata(pdev);
+
+       err = gpiochip_remove(&chip->gpio);
+       if (err)
+               dev_err(&pdev->dev, "Failed gpiochip_remove\n");
+
+       pci_iounmap(pdev, chip->base);
+       pci_release_regions(pdev);
+       pci_disable_device(pdev);
+       kfree(chip);
+}
+
+#ifdef CONFIG_PM
+static int pch_gpio_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+       s32 ret;
+       struct pch_gpio *chip = pci_get_drvdata(pdev);
+
+       pch_gpio_save_reg_conf(chip);
+
+       ret = pci_save_state(pdev);
+       if (ret) {
+               dev_err(&pdev->dev, "pci_save_state Failed-%d\n", ret);
+               return ret;
+       }
+       pci_disable_device(pdev);
+       pci_set_power_state(pdev, PCI_D0);
+       ret = pci_enable_wake(pdev, PCI_D0, 1);
+       if (ret)
+               dev_err(&pdev->dev, "pci_enable_wake Failed -%d\n", ret);
+
+       return 0;
+}
+
+static int pch_gpio_resume(struct pci_dev *pdev)
+{
+       s32 ret;
+       struct pch_gpio *chip = pci_get_drvdata(pdev);
+
+       ret = pci_enable_wake(pdev, PCI_D0, 0);
+
+       pci_set_power_state(pdev, PCI_D0);
+       ret = pci_enable_device(pdev);
+       if (ret) {
+               dev_err(&pdev->dev, "pci_enable_device Failed-%d ", ret);
+               return ret;
+       }
+       pci_restore_state(pdev);
+
+       iowrite32(0x01, &chip->reg->reset);
+       iowrite32(0x00, &chip->reg->reset);
+       pch_gpio_restore_reg_conf(chip);
+
+       return 0;
+}
+#else
+#define pch_gpio_suspend NULL
+#define pch_gpio_resume NULL
+#endif
+
+static DEFINE_PCI_DEVICE_TABLE(pch_gpio_pcidev_id) = {
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8803) },
+       { 0, }
+};
+
+static struct pci_driver pch_gpio_driver = {
+       .name = "pch_gpio",
+       .id_table = pch_gpio_pcidev_id,
+       .probe = pch_gpio_probe,
+       .remove = __devexit_p(pch_gpio_remove),
+       .suspend = pch_gpio_suspend,
+       .resume = pch_gpio_resume
+};
+
+static int __init pch_gpio_pci_init(void)
+{
+       return pci_register_driver(&pch_gpio_driver);
+}
+module_init(pch_gpio_pci_init);
+
+static void __exit pch_gpio_pci_exit(void)
+{
+       pci_unregister_driver(&pch_gpio_driver);
+}
+module_exit(pch_gpio_pci_exit);
+
+MODULE_DESCRIPTION("PCH GPIO PCI Driver");
+MODULE_LICENSE("GPL");
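
Since the chip registers with a dynamic base (gpio->base = -1), a consumer
uses whatever number gpiolib assigned; a sketch with an assumed base of 240:

	#define PCH_GPIO_BASE	240	/* assumed; see /sys/class/gpio */

	static int demo_toggle_pin3(void)
	{
		int gpio = PCH_GPIO_BASE + 3;
		int err = gpio_request(gpio, "pch-demo");

		if (err)
			return err;
		gpio_direction_output(gpio, 1);	/* sets PM bit, drives PO high */
		gpio_set_value(gpio, 0);	/* then drives it low */
		gpio_free(gpio);
		return 0;
	}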
index ddd053108a136ce2a41dd346a438622609c7f480..45293662e95045594c2a9ed12ece640c2590f93f 100644 (file)
@@ -47,6 +47,7 @@ struct timbgpio {
        spinlock_t              lock; /* mutual exclusion */
        struct gpio_chip        gpio;
        int                     irq_base;
+       unsigned long           last_ier;
 };
 
 static int timbgpio_update_bit(struct gpio_chip *gpio, unsigned index,
@@ -112,16 +113,24 @@ static void timbgpio_irq_disable(unsigned irq)
 {
        struct timbgpio *tgpio = get_irq_chip_data(irq);
        int offset = irq - tgpio->irq_base;
+       unsigned long flags;
 
-       timbgpio_update_bit(&tgpio->gpio, offset, TGPIO_IER, 0);
+       spin_lock_irqsave(&tgpio->lock, flags);
+       tgpio->last_ier &= ~(1 << offset);
+       iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER);
+       spin_unlock_irqrestore(&tgpio->lock, flags);
 }
 
 static void timbgpio_irq_enable(unsigned irq)
 {
        struct timbgpio *tgpio = get_irq_chip_data(irq);
        int offset = irq - tgpio->irq_base;
+       unsigned long flags;
 
-       timbgpio_update_bit(&tgpio->gpio, offset, TGPIO_IER, 1);
+       spin_lock_irqsave(&tgpio->lock, flags);
+       tgpio->last_ier |= 1 << offset;
+       iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER);
+       spin_unlock_irqrestore(&tgpio->lock, flags);
 }
 
 static int timbgpio_irq_type(unsigned irq, unsigned trigger)
@@ -194,8 +203,16 @@ static void timbgpio_irq(unsigned int irq, struct irq_desc *desc)
        ipr = ioread32(tgpio->membase + TGPIO_IPR);
        iowrite32(ipr, tgpio->membase + TGPIO_ICR);
 
+       /*
+        * Some versions of the hardware trash the IER register if more than
+        * one interrupt is received simultaneously.
+        */
+       iowrite32(0, tgpio->membase + TGPIO_IER);
+
        for_each_set_bit(offset, &ipr, tgpio->gpio.ngpio)
                generic_handle_irq(timbgpio_to_irq(&tgpio->gpio, offset));
+
+       iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER);
 }
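
An illustrative timeline for the workaround above, with hypothetical
pending bits:

	/*
	 * 1. IPR reads back bits 2 and 5 pending; both are ack'ed via ICR.
	 * 2. IER is zeroed so that a second interrupt arriving while the
	 *    demux loop runs generic_handle_irq() cannot trash the register.
	 * 3. last_ier, the software shadow kept current under tgpio->lock
	 *    in the enable/disable paths, is written back, restoring the
	 *    true mask.
	 */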
 
 static struct irq_chip timbgpio_irqchip = {
index 086b9b0416c481af96ed8e5d4c0af08b564f204b..ac3b6dde23db7ca1fd5f80083436cb49fcce0312 100644 (file)
@@ -495,6 +495,7 @@ done:
                dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
                return r;
        }
+       rdev->mc.active_vram_size = rdev->mc.real_vram_size;
        return 0;
 }
 
@@ -502,6 +503,7 @@ void evergreen_blit_fini(struct radeon_device *rdev)
 {
        int r;
 
+       rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
        if (rdev->r600_blit.shader_obj == NULL)
                return;
        /* If we can't reserve the bo, unref should be enough to destroy
index 6d1540c0bfed1c192e258bb2a4c65b74e559b2a0..0e8f28a689271c88a88d3865ead4890089049dca 100644 (file)
@@ -3180,6 +3180,8 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
        for (u = 0; u < track->num_texture; u++) {
                if (!track->textures[u].enabled)
                        continue;
+               if (track->textures[u].lookup_disable)
+                       continue;
                robj = track->textures[u].robj;
                if (robj == NULL) {
                        DRM_ERROR("No texture bound to unit %u\n", u);
@@ -3414,6 +3416,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
                track->textures[i].robj = NULL;
                /* CS IB emission code makes sure texture unit are disabled */
                track->textures[i].enabled = false;
+               track->textures[i].lookup_disable = false;
                track->textures[i].roundup_w = true;
                track->textures[i].roundup_h = true;
                if (track->separate_cube)
index f47cdca1c004400586d8eafcad3175d509a504fc..af65600e65648eeefbe87deb90c5be3b976a3c6a 100644 (file)
@@ -46,6 +46,7 @@ struct r100_cs_track_texture {
        unsigned                height_11;
        bool                    use_pitch;
        bool                    enabled;
+       bool                    lookup_disable;
        bool                    roundup_w;
        bool                    roundup_h;
        unsigned                compress_format;
index 0266d72e0a4cacaa8e52a79c85e6536e11b5df09..d2408c395619b6bd81b47ad282c3363411b975f8 100644 (file)
@@ -447,6 +447,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
                        track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
                        track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
                }
+               if (idx_value & R200_TXFORMAT_LOOKUP_DISABLE)
+                       track->textures[i].lookup_disable = true;
                switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
                case R200_TXFORMAT_I8:
                case R200_TXFORMAT_RGB332:
index 7b294c127c5ff7c5000174390bb600767870eff5..37cc2aa9f923cbace55e151d33870d1ac910a6a2 100644 (file)
@@ -310,7 +310,7 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
        /* Check depth buffer */
        if (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
                G_028800_Z_ENABLE(track->db_depth_control)) {
-               u32 nviews, bpe, ntiles, pitch, pitch_align, height, size;
+               u32 nviews, bpe, ntiles, pitch, pitch_align, height, size, slice_tile_max;
                if (track->db_bo == NULL) {
                        dev_warn(p->dev, "z/stencil with no depth buffer\n");
                        return -EINVAL;
@@ -354,11 +354,11 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
                } else {
                        size = radeon_bo_size(track->db_bo);
                        pitch = G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1;
-                       height = size / (pitch * 8 * bpe);
-                       height &= ~0x7;
-                       if (!height)
-                               height = 8;
-
+                       slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
+                       slice_tile_max *= 64;
+                       height = slice_tile_max / (pitch * 8);
+                       if (height > 8192)
+                               height = 8192;
                        switch (G_028010_ARRAY_MODE(track->db_depth_info)) {
                        case V_028010_ARRAY_1D_TILED_THIN1:
                                pitch_align = (max((u32)8, (u32)(track->group_size / (8 * bpe))) / 8);
@@ -367,6 +367,8 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
                                                 __func__, __LINE__, pitch);
                                        return -EINVAL;
                                }
+                               /* don't break userspace */
+                               height &= ~0x7;
                                if (!IS_ALIGNED(height, 8)) {
                                        dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
                                                 __func__, __LINE__, height);
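
A worked example of the new height derivation, with made-up register
fields:

	/*
	 * Assume SLICE_TILE_MAX decodes to 0x3ffff and PITCH_TILE_MAX gives
	 * pitch = 1024.  Then slice_tile_max = (0x3ffff + 1) * 64
	 * = 16777216 pixels, and height = 16777216 / (1024 * 8) = 2048
	 * lines, well under the 8192 clamp; the value now comes from what
	 * the hardware was programmed with rather than from the BO size.
	 */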
index c332f46340d5f632122fc82b0c49111b51f11b52..64928814de53f1a4bf6c7c76eb0889ac14161b9a 100644 (file)
 #       define R200_TXFORMAT_ST_ROUTE_STQ5     (5 << 24)
 #       define R200_TXFORMAT_ST_ROUTE_MASK     (7 << 24)
 #       define R200_TXFORMAT_ST_ROUTE_SHIFT    24
+#       define R200_TXFORMAT_LOOKUP_DISABLE    (1 << 27)
 #       define R200_TXFORMAT_ALPHA_MASK_ENABLE (1 << 28)
 #       define R200_TXFORMAT_CHROMA_KEY_ENABLE (1 << 29)
 #       define R200_TXFORMAT_CUBIC_MAP_ENABLE          (1 << 30)
index 4cb4bb009950ca98aec8aedf901629924cadff72..53fab518b3dac3fbf6dba85048163bf35d133e36 100644 (file)
@@ -560,7 +560,8 @@ static const struct pci_device_id scx200_pci[] __initconst = {
        { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA),
          .driver_data = 1 },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA),
-         .driver_data = 2 }
+         .driver_data = 2 },
+       { 0, }
 };
 
 static struct {
index af25e1f3efd4a784daa5aa78128e908e07cef648..e90db8870b6c7a0212a012d2935d91cffdcd52c0 100644 (file)
@@ -563,7 +563,7 @@ reset_inf(struct inf_hw *hw)
                mdelay(10);
                hw->ipac.isac.adf2 = 0x87;
                hw->ipac.hscx[0].slot = 0x1f;
-               hw->ipac.hscx[0].slot = 0x23;
+               hw->ipac.hscx[1].slot = 0x23;
                break;
        case INF_GAZEL_R753:
                val = inl((u32)hw->cfg.start + GAZEL_CNTRL);
index b0554f80bfb3f2f226b9000508cd5b718dd5fbef..ee4dae1382e0958ca831c2fb738953ab0a775fe3 100644 (file)
@@ -164,11 +164,9 @@ l3_1tr6_setup(struct l3_process *pc, u_char pr, void *arg)
        char tmp[80];
        struct sk_buff *skb = arg;
 
-       p = skb->data;
-
        /* Channel Identification */
-       p = skb->data;
-       if ((p = findie(p, skb->len, WE0_chanID, 0))) {
+       p = findie(skb->data, skb->len, WE0_chanID, 0);
+       if (p) {
                if (p[1] != 1) {
                        l3_1tr6_error(pc, "setup wrong chanID len", skb);
                        return;
index 0acf6396e068212d14bb36d240f62304d9087565..202581808bdc63087567e49c1323c08123f80d79 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/fs.h>
 #include <linux/poll.h>
 #include <linux/completion.h>
-#include <linux/errno.h>
 #include <linux/mutex.h>
 #include <linux/wait.h>
 #include <linux/unistd.h>
index 86fe67fd49ba0c1dd8d794de61839766c2efacb5..9334539ebf75bcb01b91b088bedd7aca9199a03d 100644 (file)
@@ -1041,7 +1041,7 @@ config SMC911X
        tristate "SMSC LAN911[5678] support"
        select CRC32
        select MII
-       depends on ARM || SUPERH
+       depends on ARM || SUPERH || MN10300
        help
          This is a driver for SMSC's LAN911x series of Ethernet chipsets
          including the new LAN9115, LAN9116, LAN9117, and LAN9118.
@@ -1055,7 +1055,7 @@ config SMC911X
 
 config SMSC911X
        tristate "SMSC LAN911x/LAN921x families embedded ethernet support"
-       depends on ARM || SUPERH || BLACKFIN || MIPS
+       depends on ARM || SUPERH || BLACKFIN || MIPS || MN10300
        select CRC32
        select MII
        select PHYLIB
@@ -1067,6 +1067,14 @@ config SMSC911X
          <file:Documentation/networking/net-modules.txt>. The module
          will be called smsc911x.
 
+config SMSC911X_ARCH_HOOKS
+       def_bool n
+       depends on SMSC911X
+       help
+         If the arch enables this, it allows the arch to implement various
+         hooks for more comprehensive interrupt control and also to override
+         the source of the MAC address.
+
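
A hypothetical arch-side fragment that would turn the hooks on (the symbol
is def_bool n, so only another Kconfig entry can select it; the unit name
below is illustrative):

	config MACH_EXAMPLE_UNIT
		bool "Example unit with an SMSC911X part"
		select SMSC911X_ARCH_HOOKS if SMSC911X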
 config NET_VENDOR_RACAL
        bool "Racal-Interlan (Micom) NI cards"
        depends on ISA
index ef4115b897bf8d394b85e4123ca7160c73eaf134..9ab58097fa2e794fe46b8e1547d7bd762153a8f7 100644 (file)
@@ -631,8 +631,6 @@ struct atl1c_adapter {
 extern char atl1c_driver_name[];
 extern char atl1c_driver_version[];
 
-extern int atl1c_up(struct atl1c_adapter *adapter);
-extern void atl1c_down(struct atl1c_adapter *adapter);
 extern void atl1c_reinit_locked(struct atl1c_adapter *adapter);
 extern s32 atl1c_reset_hw(struct atl1c_hw *hw);
 extern void atl1c_set_ethtool_ops(struct net_device *netdev);
index 99ffcf667d1f7cc08287aa20a12f117e52a43432..09b099bfab2b1b75c5dc7e3f0b323393e0124f8a 100644 (file)
@@ -66,6 +66,8 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup);
 static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter);
 static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que,
                   int *work_done, int work_to_do);
+static int atl1c_up(struct atl1c_adapter *adapter);
+static void atl1c_down(struct atl1c_adapter *adapter);
 
 static const u16 atl1c_pay_load_size[] = {
        128, 256, 512, 1024, 2048, 4096,
@@ -2309,7 +2311,7 @@ static int atl1c_request_irq(struct atl1c_adapter *adapter)
        return err;
 }
 
-int atl1c_up(struct atl1c_adapter *adapter)
+static int atl1c_up(struct atl1c_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
        int num;
@@ -2351,7 +2353,7 @@ err_alloc_rx:
        return err;
 }
 
-void atl1c_down(struct atl1c_adapter *adapter)
+static void atl1c_down(struct atl1c_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
 
index dbd27b8e66bdac2f324e6b01fd53ac16cf9d41c9..43579b3b24acce4e23caac9e9ce1d2f2f811af44 100644 (file)
@@ -91,6 +91,8 @@ MODULE_VERSION(ATLX_DRIVER_VERSION);
 /* Temporary hack for merging atl1 and atl2 */
 #include "atlx.c"
 
+static const struct ethtool_ops atl1_ethtool_ops;
+
 /*
  * This is the only thing that needs to be changed to adjust the
  * maximum number of ports that the driver can manage.
@@ -353,7 +355,7 @@ static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value)
  * hw - Struct containing variables accessed by shared code
  * reg_addr - address of the PHY register to read
  */
-s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
+static s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
 {
        u32 val;
        int i;
@@ -553,7 +555,7 @@ static s32 atl1_read_mac_addr(struct atl1_hw *hw)
  *          1. calcu 32bit CRC for multicast address
  *          2. reverse crc with MSB to LSB
  */
-u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
+static u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
 {
        u32 crc32, value = 0;
        int i;
@@ -570,7 +572,7 @@ u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
  * hw - Struct containing variables accessed by shared code
  * hash_value - Multicast address hash value
  */
-void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
+static void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
 {
        u32 hash_bit, hash_reg;
        u32 mta;
@@ -914,7 +916,7 @@ static s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex
        return 0;
 }
 
-void atl1_set_mac_addr(struct atl1_hw *hw)
+static void atl1_set_mac_addr(struct atl1_hw *hw)
 {
        u32 value;
        /*
@@ -3658,7 +3660,7 @@ static int atl1_nway_reset(struct net_device *netdev)
        return 0;
 }
 
-const struct ethtool_ops atl1_ethtool_ops = {
+static const struct ethtool_ops atl1_ethtool_ops = {
        .get_settings           = atl1_get_settings,
        .set_settings           = atl1_set_settings,
        .get_drvinfo            = atl1_get_drvinfo,
index 9c0ddb273ac81a38c0de24fa315c4c9d5519cde4..68de8cbfb3ec05da6244f1225ea23b9cef58b19c 100644 (file)
@@ -56,16 +56,13 @@ struct atl1_adapter;
 struct atl1_hw;
 
 /* function prototypes needed by multiple files */
-u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
-void atl1_hash_set(struct atl1_hw *hw, u32 hash_value);
-s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data);
-void atl1_set_mac_addr(struct atl1_hw *hw);
+static u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
+static void atl1_hash_set(struct atl1_hw *hw, u32 hash_value);
+static void atl1_set_mac_addr(struct atl1_hw *hw);
 static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
        int cmd);
 static u32 atl1_check_link(struct atl1_adapter *adapter);
 
-extern const struct ethtool_ops atl1_ethtool_ops;
-
 /* hardware definitions specific to L1 */
 
 /* Block IDLE Status Register */
index f979ea2d6d3cd7cd6c080bfbd57cc8159b0d3279..afb7f7dd1bb133901aab8f71c30bc2d86ef35fd7 100644 (file)
 
 #include "atlx.h"
 
+static s32 atlx_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data);
+static u32 atlx_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
+static void atlx_set_mac_addr(struct atl1_hw *hw);
+
 static struct atlx_spi_flash_dev flash_table[] = {
 /*     MFR_NAME  WRSR  READ  PRGM  WREN  WRDI  RDSR  RDID  SEC_ERS CHIP_ERS */
        {"Atmel", 0x00, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52,   0x62},
index 1e7f305ed00b310ea91aa73b40620f37794b739e..36eca1ce75d4e29a4c41bbf285931ce003a6a92a 100644 (file)
@@ -1471,42 +1471,6 @@ err:
        return status;
 }
 
-/* Uses sync mcc */
-int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
-                               u8 *connector)
-{
-       struct be_mcc_wrb *wrb;
-       struct be_cmd_req_port_type *req;
-       int status;
-
-       spin_lock_bh(&adapter->mcc_lock);
-
-       wrb = wrb_from_mccq(adapter);
-       if (!wrb) {
-               status = -EBUSY;
-               goto err;
-       }
-       req = embedded_payload(wrb);
-
-       be_wrb_hdr_prepare(wrb, sizeof(struct be_cmd_resp_port_type), true, 0,
-                       OPCODE_COMMON_READ_TRANSRECV_DATA);
-
-       be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_READ_TRANSRECV_DATA, sizeof(*req));
-
-       req->port = cpu_to_le32(port);
-       req->page_num = cpu_to_le32(TR_PAGE_A0);
-       status = be_mcc_notify_wait(adapter);
-       if (!status) {
-               struct be_cmd_resp_port_type *resp = embedded_payload(wrb);
-                       *connector = resp->data.connector;
-       }
-
-err:
-       spin_unlock_bh(&adapter->mcc_lock);
-       return status;
-}
-
 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
                        u32 flash_type, u32 flash_opcode, u32 buf_size)
 {
index c7f6cdfe1c73df85bc4faf251e05b19dd6602791..8469ff061f30e092f9ef5386114b4ce2ec595a2b 100644 (file)
@@ -1022,8 +1022,6 @@ extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
                        u8 port_num, u8 beacon, u8 status, u8 state);
 extern int be_cmd_get_beacon_state(struct be_adapter *adapter,
                        u8 port_num, u32 *state);
-extern int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
-                                       u8 *connector);
 extern int be_cmd_write_flashrom(struct be_adapter *adapter,
                        struct be_dma_mem *cmd, u32 flash_oper,
                        u32 flash_opcode, u32 buf_size);
index 45b1f6635282f8cc3bb07098f92d119b4e54f16e..c36cd2ffbadcaa41e15a043a937d1b1ad62c22b4 100644 (file)
@@ -849,20 +849,16 @@ static void be_rx_stats_update(struct be_rx_obj *rxo,
                stats->rx_mcast_pkts++;
 }
 
-static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
+static inline bool csum_passed(struct be_eth_rx_compl *rxcp)
 {
-       u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;
+       u8 l4_cksm, ipv6, ipcksm;
 
        l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
        ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
-       ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
-       if (ip_version) {
-               tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
-               udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
-       }
-       ipv6_chk = (ip_version && (tcpf || udpf));
+       ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
 
-       return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
+       /* Ignore ipcksm for ipv6 pkts */
+       return l4_cksm && (ipcksm || ipv6);
 }
 
 static struct be_rx_page_info *
@@ -1017,10 +1013,10 @@ static void be_rx_compl_process(struct be_adapter *adapter,
 
        skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);
 
-       if (do_pkt_csum(rxcp, adapter->rx_csum))
-               skb_checksum_none_assert(skb);
-       else
+       if (likely(adapter->rx_csum && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
+       else
+               skb_checksum_none_assert(skb);
 
        skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, adapter->netdev);
@@ -1674,7 +1670,7 @@ static inline bool do_gro(struct be_adapter *adapter, struct be_rx_obj *rxo,
        return (tcp_frame && !err) ? true : false;
 }
 
-int be_poll_rx(struct napi_struct *napi, int budget)
+static int be_poll_rx(struct napi_struct *napi, int budget)
 {
        struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
        struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
@@ -1806,6 +1802,20 @@ static void be_worker(struct work_struct *work)
        struct be_rx_obj *rxo;
        int i;
 
+       /* when interrupts are not yet enabled, just reap any pending
+        * mcc completions */
+       if (!netif_running(adapter->netdev)) {
+               int mcc_compl, status = 0;
+
+               mcc_compl = be_process_mcc(adapter, &status);
+
+               if (mcc_compl) {
+                       struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
+                       be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
+               }
+               goto reschedule;
+       }
+
        if (!adapter->stats_ioctl_sent)
                be_cmd_get_stats(adapter, &adapter->stats_cmd);
 
@@ -1824,6 +1834,7 @@ static void be_worker(struct work_struct *work)
        if (!adapter->ue_detected)
                be_detect_dump_ue(adapter);
 
+reschedule:
        schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
 }
 
@@ -2019,8 +2030,6 @@ static int be_close(struct net_device *netdev)
        struct be_eq_obj *tx_eq = &adapter->tx_eq;
        int vec, i;
 
-       cancel_delayed_work_sync(&adapter->work);
-
        be_async_mcc_disable(adapter);
 
        netif_stop_queue(netdev);
@@ -2085,8 +2094,6 @@ static int be_open(struct net_device *netdev)
        /* Now that interrupts are on we can process async mcc */
        be_async_mcc_enable(adapter);
 
-       schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
-
        status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
                        &link_speed);
        if (status)
@@ -2299,9 +2306,6 @@ static int be_clear(struct be_adapter *adapter)
 
 
 #define FW_FILE_HDR_SIGN       "ServerEngines Corp. "
-char flash_cookie[2][16] =     {"*** SE FLAS",
-                               "H DIRECTORY *** "};
-
 static bool be_flash_redboot(struct be_adapter *adapter,
                        const u8 *p, u32 img_start, int image_size,
                        int hdr_size)
@@ -2559,7 +2563,6 @@ static void be_netdev_init(struct net_device *netdev)
        netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
                BE_NAPI_WEIGHT);
 
-       netif_carrier_off(netdev);
        netif_stop_queue(netdev);
 }
 
@@ -2715,6 +2718,8 @@ static void __devexit be_remove(struct pci_dev *pdev)
        if (!adapter)
                return;
 
+       cancel_delayed_work_sync(&adapter->work);
+
        unregister_netdev(adapter->netdev);
 
        be_clear(adapter);
@@ -2868,8 +2873,10 @@ static int __devinit be_probe(struct pci_dev *pdev,
        status = register_netdev(netdev);
        if (status != 0)
                goto unsetup;
+       netif_carrier_off(netdev);
 
        dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
+       schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
        return 0;
 
 unsetup:
index 9571ecf48f35b11a44e6cb6b6262fa8d3c4fb5b7..9eea225decaf724e7d88243fc8c462c62020f81e 100644 (file)
@@ -1288,15 +1288,11 @@ struct bnx2x_func_init_params {
 
 #define WAIT_RAMROD_POLL       0x01
 #define WAIT_RAMROD_COMMON     0x02
-int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
-                            int *state_p, int flags);
 
 /* dmae */
 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32);
-void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
-                              u32 addr, u32 len);
 void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);
 u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type);
 u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode);
@@ -1307,7 +1303,6 @@ int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
-void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
 
 void bnx2x_calc_fc_adv(struct bnx2x *bp);
 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
index bc5837514074d410fb740d57dbe90350376dc11b..459614d2d7bcfff471f08f2866a1cc667a272a76 100644 (file)
@@ -25,6 +25,7 @@
 
 #include "bnx2x_init.h"
 
+static int bnx2x_setup_irqs(struct bnx2x *bp);
 
 /* free skb in the packet ring at pos idx
  * return idx of last bd freed
@@ -2187,7 +2188,7 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
 }
 
 
-int bnx2x_setup_irqs(struct bnx2x *bp)
+static int bnx2x_setup_irqs(struct bnx2x *bp)
 {
        int rc = 0;
        if (bp->flags & USING_MSIX_FLAG) {
index 5bfe0ab1d2d4d71cf1f801149f40de07b5f9ffad..6b28739c53028f60d5b2883a8abee0b878cb19ca 100644 (file)
@@ -116,13 +116,6 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
  */
 void bnx2x_int_enable(struct bnx2x *bp);
 
-/**
- * Disable HW interrupts.
- *
- * @param bp
- */
-void bnx2x_int_disable(struct bnx2x *bp);
-
 /**
  * Disable interrupts. This function ensures that there are no
  * ISRs or SP DPCs (sp_task) are running after it returns.
@@ -191,17 +184,6 @@ void bnx2x_free_mem(struct bnx2x *bp);
 int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                       int is_leading);
 
-/**
- * Bring down an eth client.
- *
- * @param bp
- * @param p
- *
- * @return int
- */
-int bnx2x_stop_fw_client(struct bnx2x *bp,
-                        struct bnx2x_client_ramrod_params *p);
-
 /**
  * Set number of queues according to mode
  *
@@ -250,34 +232,6 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
  */
 void bnx2x_set_eth_mac(struct bnx2x *bp, int set);
 
-#ifdef BCM_CNIC
-/**
- * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
- * MAC(s). The function will wait until the ramrod completion
- * returns.
- *
- * @param bp driver handle
- * @param set set or clear the CAM entry
- *
- * @return 0 if cussess, -ENODEV if ramrod doesn't return.
- */
-int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set);
-#endif
-
-/**
- * Initialize status block in FW and HW
- *
- * @param bp driver handle
- * @param dma_addr_t mapping
- * @param int sb_id
- * @param int vfid
- * @param u8 vf_valid
- * @param int fw_sb_id
- * @param int igu_sb_id
- */
-void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
-                         u8 vf_valid, int fw_sb_id, int igu_sb_id);
-
 /**
  * Set MAC filtering configurations.
  *
@@ -326,7 +280,6 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);
  * @return int
  */
 int bnx2x_func_start(struct bnx2x *bp);
-int bnx2x_func_stop(struct bnx2x *bp);
 
 /**
  * Prepare ILT configurations according to current driver
@@ -395,14 +348,6 @@ int bnx2x_enable_msix(struct bnx2x *bp);
  */
 int bnx2x_enable_msi(struct bnx2x *bp);
 
-/**
- * Request IRQ vectors from OS.
- *
- * @param bp
- *
- * @return int
- */
-int bnx2x_setup_irqs(struct bnx2x *bp);
 /**
  * NAPI callback
  *
index e65de784182c72e9e1395086b08b569ee5823328..a306b0e46b613417c630e79398c5f8250204195d 100644 (file)
@@ -16,7 +16,9 @@
 #define BNX2X_INIT_OPS_H
 
 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len);
-
+static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
+static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
+                                     u32 addr, u32 len);
 
 static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, const u32 *data,
                              u32 len)
@@ -589,7 +591,7 @@ static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num, u8 memop)
        return rc;
 }
 
-int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
+static int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
 {
        int rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_CDU, memop);
        if (!rc)
@@ -635,7 +637,7 @@ static void bnx2x_ilt_line_init_op(struct bnx2x *bp, struct bnx2x_ilt *ilt,
        }
 }
 
-void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
+static void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
                                      struct ilt_client_info *ilt_cli,
                                      u32 ilt_start, u8 initop)
 {
@@ -688,8 +690,10 @@ void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
        }
 }
 
-void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp, struct bnx2x_ilt *ilt,
-                                 struct ilt_client_info *ilt_cli, u8 initop)
+static void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp,
+                                        struct bnx2x_ilt *ilt,
+                                        struct ilt_client_info *ilt_cli,
+                                        u8 initop)
 {
        int i;
 
@@ -703,8 +707,8 @@ void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp, struct bnx2x_ilt *ilt,
        bnx2x_ilt_boundry_init_op(bp, ilt_cli, ilt->start_line, initop);
 }
 
-void bnx2x_ilt_client_init_op(struct bnx2x *bp,
-                             struct ilt_client_info *ilt_cli, u8 initop)
+static void bnx2x_ilt_client_init_op(struct bnx2x *bp,
+                                    struct ilt_client_info *ilt_cli, u8 initop)
 {
        struct bnx2x_ilt *ilt = BP_ILT(bp);
 
@@ -720,7 +724,7 @@ static void bnx2x_ilt_client_id_init_op(struct bnx2x *bp,
        bnx2x_ilt_client_init_op(bp, ilt_cli, initop);
 }
 
-void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
+static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
 {
        bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_CDU, initop);
        bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_QM, initop);
@@ -752,7 +756,7 @@ static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num,
  * called during init common stage; ILT clients should be initialized
  * prior to calling this function
  */
-void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
+static void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
 {
        bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_CDU,
                                  PXP2_REG_RQ_CDU_P_SIZE, initop);
@@ -772,8 +776,8 @@ void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
 #define QM_INIT(cid_cnt)       (cid_cnt > QM_INIT_MIN_CID_COUNT)
 
 /* called during init port stage */
-void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count,
-                            u8 initop)
+static void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count,
+                                   u8 initop)
 {
        int port = BP_PORT(bp);
 
@@ -814,8 +818,8 @@ static void bnx2x_qm_set_ptr_table(struct bnx2x *bp, int qm_cid_count)
 }
 
 /* called during init common stage */
-void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
-                            u8 initop)
+static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
+                                   u8 initop)
 {
        if (!QM_INIT(qm_cid_count))
                return;
@@ -836,8 +840,8 @@ void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
 ****************************************************************************/
 
 /* called during init func stage */
-void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
-                      dma_addr_t t2_mapping, int src_cid_count)
+static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
+                             dma_addr_t t2_mapping, int src_cid_count)
 {
        int i;
        int port = BP_PORT(bp);
index 3e99bf9c42b9a83ae18437538f42322e38413d28..2326774df843841315d783dfa6c53a073a2ef4b5 100644 (file)
                (_bank + (_addr & 0xf)), \
                _val)
 
+static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
+                         u8 devad, u16 reg, u16 *ret_val);
+
+static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
+                          u8 devad, u16 reg, u16 val);
+
 static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
 {
        u32 val = REG_RD(bp, reg);
@@ -594,7 +600,7 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
        return 0;
 }
 
-u8 bnx2x_bmac_enable(struct link_params *params,
+static u8 bnx2x_bmac_enable(struct link_params *params,
                            struct link_vars *vars,
                            u8 is_lb)
 {
@@ -2537,122 +2543,6 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
        }
 }
 
-/*
- *------------------------------------------------------------------------
- * bnx2x_override_led_value -
- *
- * Override the led value of the requested led
- *
- *------------------------------------------------------------------------
- */
-u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port,
-                         u32 led_idx, u32 value)
-{
-       u32 reg_val;
-
-       /* If port 0 then use EMAC0, else use EMAC1*/
-       u32 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
-
-       DP(NETIF_MSG_LINK,
-                "bnx2x_override_led_value() port %x led_idx %d value %d\n",
-                port, led_idx, value);
-
-       switch (led_idx) {
-       case 0: /* 10MB led */
-               /* Read the current value of the LED register in
-               the EMAC block */
-               reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
-               /* Set the OVERRIDE bit to 1 */
-               reg_val |= EMAC_LED_OVERRIDE;
-               /* If value is 1, set the 10M_OVERRIDE bit,
-               otherwise reset it.*/
-               reg_val = (value == 1) ? (reg_val | EMAC_LED_10MB_OVERRIDE) :
-                       (reg_val & ~EMAC_LED_10MB_OVERRIDE);
-               REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
-               break;
-       case 1: /*100MB led    */
-               /*Read the current value of the LED register in
-               the EMAC block */
-               reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
-               /*  Set the OVERRIDE bit to 1 */
-               reg_val |= EMAC_LED_OVERRIDE;
-               /*  If value is 1, set the 100M_OVERRIDE bit,
-               otherwise reset it.*/
-               reg_val = (value == 1) ? (reg_val | EMAC_LED_100MB_OVERRIDE) :
-                       (reg_val & ~EMAC_LED_100MB_OVERRIDE);
-               REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
-               break;
-       case 2: /* 1000MB led */
-               /* Read the current value of the LED register in the
-               EMAC block */
-               reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
-               /* Set the OVERRIDE bit to 1 */
-               reg_val |= EMAC_LED_OVERRIDE;
-               /* If value is 1, set the 1000M_OVERRIDE bit, otherwise
-               reset it. */
-               reg_val = (value == 1) ? (reg_val | EMAC_LED_1000MB_OVERRIDE) :
-                       (reg_val & ~EMAC_LED_1000MB_OVERRIDE);
-               REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
-               break;
-       case 3: /* 2500MB led */
-               /*  Read the current value of the LED register in the
-               EMAC block*/
-               reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
-               /* Set the OVERRIDE bit to 1 */
-               reg_val |= EMAC_LED_OVERRIDE;
-               /*  If value is 1, set the 2500M_OVERRIDE bit, otherwise
-               reset it.*/
-               reg_val = (value == 1) ? (reg_val | EMAC_LED_2500MB_OVERRIDE) :
-                       (reg_val & ~EMAC_LED_2500MB_OVERRIDE);
-               REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
-               break;
-       case 4: /*10G led */
-               if (port == 0) {
-                       REG_WR(bp, NIG_REG_LED_10G_P0,
-                                   value);
-               } else {
-                       REG_WR(bp, NIG_REG_LED_10G_P1,
-                                   value);
-               }
-               break;
-       case 5: /* TRAFFIC led */
-               /* Find if the traffic control is via BMAC or EMAC */
-               if (port == 0)
-                       reg_val = REG_RD(bp, NIG_REG_NIG_EMAC0_EN);
-               else
-                       reg_val = REG_RD(bp, NIG_REG_NIG_EMAC1_EN);
-
-               /*  Override the traffic led in the EMAC:*/
-               if (reg_val == 1) {
-                       /* Read the current value of the LED register in
-                       the EMAC block */
-                       reg_val = REG_RD(bp, emac_base +
-                                            EMAC_REG_EMAC_LED);
-                       /* Set the TRAFFIC_OVERRIDE bit to 1 */
-                       reg_val |= EMAC_LED_OVERRIDE;
-                       /* If value is 1, set the TRAFFIC bit, otherwise
-                       reset it.*/
-                       reg_val = (value == 1) ? (reg_val | EMAC_LED_TRAFFIC) :
-                               (reg_val & ~EMAC_LED_TRAFFIC);
-                       REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
-               } else { /* Override the traffic led in the BMAC: */
-                       REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
-                                  + port*4, 1);
-                       REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 + port*4,
-                                   value);
-               }
-               break;
-       default:
-               DP(NETIF_MSG_LINK,
-                        "bnx2x_override_led_value() unknown led index %d "
-                        "(should be 0-5)\n", led_idx);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-
 u8 bnx2x_set_led(struct link_params *params,
                 struct link_vars *vars, u8 mode, u32 speed)
 {
@@ -4099,9 +3989,9 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
        return -EINVAL;
 }
 
-u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
-                               struct link_params *params, u16 addr,
-                                    u8 byte_cnt, u8 *o_buf)
+static u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+                                      struct link_params *params, u16 addr,
+                                      u8 byte_cnt, u8 *o_buf)
 {
        if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
                return bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
@@ -6819,13 +6709,6 @@ u8 bnx2x_phy_probe(struct link_params *params)
        return 0;
 }
 
-u32 bnx2x_supported_attr(struct link_params *params, u8 phy_idx)
-{
-       if (phy_idx < params->num_phys)
-               return params->phy[phy_idx].supported;
-       return 0;
-}
-
 static void set_phy_vars(struct link_params *params)
 {
        struct bnx2x *bp = params->bp;
index 58a4c719927633a0a525546238b271c8eaf08a85..171abf8097ee3e509ddc8fbe384d8be1087f2562 100644 (file)
@@ -279,12 +279,6 @@ u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr,
 
 u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr,
                   u8 devad, u16 reg, u16 val);
-
-u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
-                  u8 devad, u16 reg, u16 *ret_val);
-
-u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
-                   u8 devad, u16 reg, u16 val);
 /* Reads the link_status from the shmem,
    and updates the link vars accordingly */
 void bnx2x_link_status_update(struct link_params *input,
@@ -304,8 +298,6 @@ u8 bnx2x_set_led(struct link_params *params, struct link_vars *vars,
 #define LED_MODE_OPER                  2
 #define LED_MODE_FRONT_PANEL_OFF       3
 
-u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port, u32 led_idx, u32 value);
-
 /* bnx2x_handle_module_detect_int should be called upon module detection
    interrupt */
 void bnx2x_handle_module_detect_int(struct link_params *params);
@@ -325,19 +317,12 @@ void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
 /* Reset the external PHY of SFX7101 */
 void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
 
-u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
-                               struct link_params *params, u16 addr,
-                             u8 byte_cnt, u8 *o_buf);
-
 void bnx2x_hw_reset_phy(struct link_params *params);
 
 /* Checks if HW lock is required for this phy/board type */
 u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base,
                          u32 shmem2_base);
 
-/* Returns the aggregative supported attributes of the phys on board */
-u32 bnx2x_supported_attr(struct link_params *params, u8 phy_idx);
-
 /* Check swap bit and adjust PHY order */
 u32 bnx2x_phy_selection(struct link_params *params);
 
index ff99a2fc04267b6aef6d926cbc5483d12fbb2635..e9ad16f00b56755847eb887a898c999ef2c08c04 100644 (file)
@@ -403,7 +403,7 @@ static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
 /* used only at init
  * locking is done by mcp
  */
-void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
+static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
 {
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
@@ -429,7 +429,8 @@ static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
 #define DMAE_DP_DST_PCI                "pci dst_addr [%x:%08x]"
 #define DMAE_DP_DST_NONE       "dst_addr [none]"
 
-void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl)
+static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
+                         int msglvl)
 {
        u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
 
@@ -551,8 +552,9 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
        return opcode;
 }
 
-void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
-                              u8 src_type, u8 dst_type)
+static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
+                                     struct dmae_command *dmae,
+                                     u8 src_type, u8 dst_type)
 {
        memset(dmae, 0, sizeof(struct dmae_command));
 
@@ -567,7 +569,8 @@ void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
 }
 
 /* issue a DMAE command over the init channel and wait for completion */
-int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
+static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
+                                     struct dmae_command *dmae)
 {
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
@@ -674,8 +677,8 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
        bnx2x_issue_dmae_with_comp(bp, &dmae);
 }
 
-void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
-                              u32 addr, u32 len)
+static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
+                                     u32 addr, u32 len)
 {
        int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
        int offset = 0;
@@ -1267,7 +1270,7 @@ static void bnx2x_igu_int_disable(struct bnx2x *bp)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
 }
 
-void bnx2x_int_disable(struct bnx2x *bp)
+static void bnx2x_int_disable(struct bnx2x *bp)
 {
        if (bp->common.int_block == INT_BLOCK_HC)
                bnx2x_hc_int_disable(bp);
@@ -2236,7 +2239,7 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
 }
 
 /* must be called under rtnl_lock */
-void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
+static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
 {
        u32 mask = (1 << cl_id);
 
@@ -2303,7 +2306,7 @@ void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
                bp->mac_filters.unmatched_unicast & ~mask;
 }
 
-void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
+static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
 {
        struct tstorm_eth_function_common_config tcfg = {0};
        u16 rss_flgs;
@@ -2460,7 +2463,7 @@ static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
        txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
 }
 
-void bnx2x_pf_init(struct bnx2x *bp)
+static void bnx2x_pf_init(struct bnx2x *bp)
 {
        struct bnx2x_func_init_params func_init = {0};
        struct bnx2x_rss_params rss = {0};
@@ -3928,7 +3931,7 @@ void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
        hc_sm->time_to_expire = 0xFFFFFFFF;
 }
 
-void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
+static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
                          u8 vf_valid, int fw_sb_id, int igu_sb_id)
 {
        int igu_seg_id;
@@ -6021,6 +6024,9 @@ alloc_mem_err:
 /*
  * Init service functions
  */
+static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
+                            int *state_p, int flags);
+
 int bnx2x_func_start(struct bnx2x *bp)
 {
        bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
@@ -6030,7 +6036,7 @@ int bnx2x_func_start(struct bnx2x *bp)
                                 WAIT_RAMROD_COMMON);
 }
 
-int bnx2x_func_stop(struct bnx2x *bp)
+static int bnx2x_func_stop(struct bnx2x *bp)
 {
        bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
 
@@ -6103,8 +6109,8 @@ static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
        bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
 }
 
-int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
-                     int *state_p, int flags)
+static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
+                            int *state_p, int flags)
 {
        /* can take a while if any port is running */
        int cnt = 5000;
@@ -6154,7 +6160,7 @@ int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
        return -EBUSY;
 }
 
-u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
+static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
 {
        if (CHIP_IS_E1H(bp))
                return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
@@ -6273,7 +6279,7 @@ static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
  *
  * @return 0 on success, -ENODEV if the ramrod doesn't return.
  */
-int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
+static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
 {
        u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
                         bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
@@ -6383,11 +6389,11 @@ static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
                                       ETH_CONNECTION_TYPE);
 }
 
-int bnx2x_setup_fw_client(struct bnx2x *bp,
-                         struct bnx2x_client_init_params *params,
-                         u8 activate,
-                         struct client_init_ramrod_data *data,
-                         dma_addr_t data_mapping)
+static int bnx2x_setup_fw_client(struct bnx2x *bp,
+                                struct bnx2x_client_init_params *params,
+                                u8 activate,
+                                struct client_init_ramrod_data *data,
+                                dma_addr_t data_mapping)
 {
        u16 hc_usec;
        int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
@@ -6633,7 +6639,8 @@ int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
        return rc;
 }
 
-int bnx2x_stop_fw_client(struct bnx2x *bp, struct bnx2x_client_ramrod_params *p)
+static int bnx2x_stop_fw_client(struct bnx2x *bp,
+                               struct bnx2x_client_ramrod_params *p)
 {
        int rc;
 
@@ -7440,7 +7447,7 @@ reset_task_exit:
  * Init service functions
  */
 
-u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
+static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
 {
        u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
        u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
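
bnx2x_wait_ramrod(), now static with a forward declaration ahead of bnx2x_func_start(), is a bounded poll: the completion path writes the expected value into *state_p and the waiter rechecks it with short sleeps until a retry budget runs out. A rough sketch of the idea, with illustrative names and the driver's locking and memory-ordering details deliberately elided:

#include <linux/delay.h>
#include <linux/errno.h>

/* Poll *state_p until it reaches 'wanted' or the retry budget runs out. */
static int wait_for_state(int *state_p, int wanted, int max_iter)
{
	while (max_iter--) {
		if (*state_p == wanted)
			return 0;
		msleep(1);	/* completion is posted from another context */
	}
	return -EBUSY;	/* the ramrod never completed */
}
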
index beb3b7cecd52aff51d49d6e2a6cc35ea19e911f5..bdb68a600382bdcfbe8a9e2cf70f8360821e2567 100644 (file)
@@ -493,9 +493,9 @@ static void bond_vlan_rx_register(struct net_device *bond_dev,
        struct slave *slave;
        int i;
 
-       write_lock(&bond->lock);
+       write_lock_bh(&bond->lock);
        bond->vlgrp = grp;
-       write_unlock(&bond->lock);
+       write_unlock_bh(&bond->lock);
 
        bond_for_each_slave(bond, slave, i) {
                struct net_device *slave_dev = slave->dev;
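
The bonding fix above swaps write_lock()/write_unlock() for the _bh variants. bond->lock is also taken from softirq context on the packet paths, so a writer holding the plain lock can be interrupted on its own CPU by a softirq that then spins on the same lock, a classic self-deadlock; disabling bottom halves around the write side closes it. A minimal sketch of the rule, with illustrative names:

#include <linux/spinlock.h>

static DEFINE_RWLOCK(cfg_lock);		/* also taken from softirq context */
static void *cfg;

static void set_cfg(void *new_cfg)	/* process context */
{
	write_lock_bh(&cfg_lock);	/* blocks local softirqs too */
	cfg = new_cfg;
	write_unlock_bh(&cfg_lock);
}

static void *get_cfg(void)		/* e.g. called from NET_RX softirq */
{
	void *c;

	read_lock(&cfg_lock);		/* BHs are already disabled here */
	c = cfg;
	read_unlock(&cfg_lock);
	return c;
}
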
index 75bfc3a9d95f3f118e6eae1cdc649fd0a442baeb..09ed3f42d673b4631366f24a147f21c353ef9f2a 100644 (file)
@@ -31,3 +31,10 @@ config CAIF_SPI_SYNC
        Putting the next command and length in the start of the frame can
        help to synchronize to the next transfer in case of over or under-runs.
        This option also needs to be enabled on the modem.
+
+config CAIF_SHM
+       tristate "CAIF shared memory protocol driver"
+       depends on CAIF && U5500_MBOX
+       default n
+       ---help---
+       The CAIF shared memory protocol driver for the STE UX5500 platform.
index 3a11d619452bd1e64bf2dc22d3def54695667a43..b38d987da67d9c810d069ec9303313aa2978243d 100644 (file)
@@ -8,3 +8,7 @@ obj-$(CONFIG_CAIF_TTY) += caif_serial.o
 # SPI slave physical interfaces module
 cfspi_slave-objs := caif_spi.o caif_spi_slave.o
 obj-$(CONFIG_CAIF_SPI_SLAVE) += cfspi_slave.o
+
+# Shared memory
+caif_shm-objs := caif_shmcore.o caif_shm_u5500.o
+obj-$(CONFIG_CAIF_SHM) += caif_shm.o
diff --git a/drivers/net/caif/caif_shm_u5500.c b/drivers/net/caif/caif_shm_u5500.c
new file mode 100644 (file)
index 0000000..1cd90da
--- /dev/null
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
+ * Author:  Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <mach/mbox.h>
+#include <net/caif/caif_shm.h>
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("CAIF Shared Memory protocol driver");
+
+#define MAX_SHM_INSTANCES      1
+
+enum {
+       MBX_ACC0,
+       MBX_ACC1,
+       MBX_DSP
+};
+
+static struct shmdev_layer shmdev_lyr[MAX_SHM_INSTANCES];
+
+static unsigned int shm_start;
+static unsigned int shm_size;
+
+module_param(shm_size, uint, 0440);
+MODULE_PARM_DESC(shm_size, "Total size of SHM shared memory");
+
+module_param(shm_start, uint, 0440);
+MODULE_PARM_DESC(shm_start, "Start address of SHM shared memory");
+
+static int shmdev_send_msg(u32 dev_id, u32 mbx_msg)
+{
+       /* Always block until msg is written successfully */
+       mbox_send(shmdev_lyr[dev_id].hmbx, mbx_msg, true);
+       return 0;
+}
+
+static int shmdev_mbx_setup(void *pshmdrv_cb, struct shmdev_layer *pshm_dev,
+                                                        void *pshm_drv)
+{
+       /*
+        * For UX5500, we have only 1 SHM instance which uses MBX0
+        * for communication with the peer modem
+        */
+       pshm_dev->hmbx = mbox_setup(MBX_ACC0, pshmdrv_cb, pshm_drv);
+
+       if (!pshm_dev->hmbx)
+               return -ENODEV;
+       else
+               return 0;
+}
+
+static int __init caif_shmdev_init(void)
+{
+       int i, result;
+
+       /* Loop is currently overkill, there is only one instance */
+       for (i = 0; i < MAX_SHM_INSTANCES; i++) {
+
+               shmdev_lyr[i].shm_base_addr = shm_start;
+               shmdev_lyr[i].shm_total_sz = shm_size;
+
+               if (((char *)shmdev_lyr[i].shm_base_addr == NULL)
+                              || (shmdev_lyr[i].shm_total_sz <= 0)) {
+                       pr_warn("ERROR, shared memory address and/or size "
+                               "incorrect, bailing out ...\n");
+                       result = -EINVAL;
+                       goto clean;
+               }
+
+               pr_info("SHM AREA (instance %d) STARTS"
+                       " AT %p\n", i, (char *)shmdev_lyr[i].shm_base_addr);
+
+               shmdev_lyr[i].shm_id = i;
+               shmdev_lyr[i].pshmdev_mbxsend = shmdev_send_msg;
+               shmdev_lyr[i].pshmdev_mbxsetup = shmdev_mbx_setup;
+
+               /*
+                * Finally, CAIF core module is called with details in place:
+                * 1. SHM base address
+                * 2. SHM size
+                * 3. MBX handle
+                */
+               result = caif_shmcore_probe(&shmdev_lyr[i]);
+               if (result) {
+                       pr_warn("ERROR[%d], could not probe SHM core "
+                               "(instance %d), bailing out ...\n", result, i);
+                       goto clean;
+               }
+       }
+
+       return 0;
+
+clean:
+       /*
+        * For now, we assume that even if one instance of SHM fails, we bail
+        * out of the driver support completely. For this, we need to release
+        * any memory allocated and unregister any instance of SHM net device.
+        */
+       for (i = 0; i < MAX_SHM_INSTANCES; i++) {
+               if (shmdev_lyr[i].pshm_netdev)
+                       unregister_netdev(shmdev_lyr[i].pshm_netdev);
+       }
+       return result;
+}
+
+static void __exit caif_shmdev_exit(void)
+{
+       int i;
+
+       for (i = 0; i < MAX_SHM_INSTANCES; i++) {
+               caif_shmcore_remove(shmdev_lyr[i].pshm_netdev);
+               kfree((void *)shmdev_lyr[i].shm_base_addr);
+       }
+
+}
+
+module_init(caif_shmdev_init);
+module_exit(caif_shmdev_exit);
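
The u5500 glue above is deliberately thin: it takes the shared-memory window from the shm_start/shm_size module parameters and hands the generic core a struct shmdev_layer whose two function pointers hide the mailbox API, so caif_shmcore.c never includes mach/mbox.h. A sketch of that wiring; the address and size are made-up illustration values, not platform facts:

/* Sketch: the device layer consumed by caif_shmcore_probe(). */
static struct shmdev_layer demo_dev = {
	.shm_id		  = 0,
	.shm_base_addr	  = 0x08000000,		/* illustrative only */
	.shm_total_sz	  = 0x18000,		/* illustrative only */
	.pshmdev_mbxsend  = shmdev_send_msg,	/* mailbox TX hook */
	.pshmdev_mbxsetup = shmdev_mbx_setup,	/* mailbox init hook */
};

/* caif_shmcore_probe(&demo_dev) then performs all signalling through
 * the two hooks and never sees the underlying mailbox driver. */
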
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c
new file mode 100644 (file)
index 0000000..19f9c06
--- /dev/null
@@ -0,0 +1,744 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
+ * Authors:  Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com,
+ *           Daniel Martensson / daniel.martensson@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+
+#include <net/caif/caif_device.h>
+#include <net/caif/caif_shm.h>
+
+#define NR_TX_BUF              6
+#define NR_RX_BUF              6
+#define TX_BUF_SZ              0x2000
+#define RX_BUF_SZ              0x2000
+
+#define CAIF_NEEDED_HEADROOM   32
+
+#define CAIF_FLOW_ON           1
+#define CAIF_FLOW_OFF          0
+
+#define LOW_WATERMARK          3
+#define HIGH_WATERMARK         4
+
+/* Maximum number of CAIF buffers per shared memory buffer. */
+#define SHM_MAX_FRMS_PER_BUF   10
+
+/*
+ * Size in bytes of the descriptor area
+ * (With end of descriptor signalling)
+ */
+#define SHM_CAIF_DESC_SIZE     ((SHM_MAX_FRMS_PER_BUF + 1) * \
+                                       sizeof(struct shm_pck_desc))
+
+/*
+ * Offset to the first CAIF frame within a shared memory buffer.
+ * Aligned on 32 bytes.
+ */
+#define SHM_CAIF_FRM_OFS       (SHM_CAIF_DESC_SIZE + (SHM_CAIF_DESC_SIZE % 32))
+
+/* Number of bytes for CAIF shared memory header. */
+#define SHM_HDR_LEN            1
+
+/* Number of padding bytes for the complete CAIF frame. */
+#define SHM_FRM_PAD_LEN                4
+
+#define CAIF_MAX_MTU           4096
+
+#define SHM_SET_FULL(x)        (((x+1) & 0x0F) << 0)
+#define SHM_GET_FULL(x)        (((x >> 0) & 0x0F) - 1)
+
+#define SHM_SET_EMPTY(x)       (((x+1) & 0x0F) << 4)
+#define SHM_GET_EMPTY(x)       (((x >> 4) & 0x0F) - 1)
+
+#define SHM_FULL_MASK          (0x0F << 0)
+#define SHM_EMPTY_MASK         (0x0F << 4)
+
+struct shm_pck_desc {
+       /*
+        * Offset from start of shared memory area to start of
+        * shared memory CAIF frame.
+        */
+       u32 frm_ofs;
+       u32 frm_len;
+};
+
+struct buf_list {
+       unsigned char *desc_vptr;
+       u32 phy_addr;
+       u32 index;
+       u32 len;
+       u32 frames;
+       u32 frm_ofs;
+       struct list_head list;
+};
+
+struct shm_caif_frm {
+       /* Number of bytes of padding before the CAIF frame. */
+       u8 hdr_ofs;
+};
+
+struct shmdrv_layer {
+       /* caif_dev_common must always be first in the structure*/
+       struct caif_dev_common cfdev;
+
+       u32 shm_tx_addr;
+       u32 shm_rx_addr;
+       u32 shm_base_addr;
+       u32 tx_empty_available;
+       spinlock_t lock;
+
+       struct list_head tx_empty_list;
+       struct list_head tx_pend_list;
+       struct list_head tx_full_list;
+       struct list_head rx_empty_list;
+       struct list_head rx_pend_list;
+       struct list_head rx_full_list;
+
+       struct workqueue_struct *pshm_tx_workqueue;
+       struct workqueue_struct *pshm_rx_workqueue;
+
+       struct work_struct shm_tx_work;
+       struct work_struct shm_rx_work;
+
+       struct sk_buff_head sk_qhead;
+       struct shmdev_layer *pshm_dev;
+};
+
+static int shm_netdev_open(struct net_device *shm_netdev)
+{
+       netif_wake_queue(shm_netdev);
+       return 0;
+}
+
+static int shm_netdev_close(struct net_device *shm_netdev)
+{
+       netif_stop_queue(shm_netdev);
+       return 0;
+}
+
+int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
+{
+       struct buf_list *pbuf;
+       struct shmdrv_layer *pshm_drv;
+       struct list_head *pos;
+       u32 avail_emptybuff = 0;
+       unsigned long flags = 0;
+
+       pshm_drv = (struct shmdrv_layer *)priv;
+
+       /* Check for received buffers. */
+       if (mbx_msg & SHM_FULL_MASK) {
+               int idx;
+
+               spin_lock_irqsave(&pshm_drv->lock, flags);
+
+               /* Check whether we have any outstanding buffers. */
+               if (list_empty(&pshm_drv->rx_empty_list)) {
+
+                       /* Release spin lock. */
+                       spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+                       /* We print even in IRQ context... */
+                       pr_warn("No empty Rx buffers to fill: "
+                                       "mbx_msg:%x\n", mbx_msg);
+
+                       /* Bail out. */
+                       goto err_sync;
+               }
+
+               pbuf =
+                       list_entry(pshm_drv->rx_empty_list.next,
+                                       struct buf_list, list);
+               idx = pbuf->index;
+
+               /* Check buffer synchronization. */
+               if (idx != SHM_GET_FULL(mbx_msg)) {
+
+                       /* We print even in IRQ context... */
+                       pr_warn("RX full out of sync:"
+                       " idx:%d, msg:%x SHM_GET_FULL(mbx_msg):%x\n",
+                               idx, mbx_msg, SHM_GET_FULL(mbx_msg));
+
+                       spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+                       /* Bail out. */
+                       goto err_sync;
+               }
+
+               list_del_init(&pbuf->list);
+               list_add_tail(&pbuf->list, &pshm_drv->rx_full_list);
+
+               spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+               /* Schedule RX work queue. */
+               if (!work_pending(&pshm_drv->shm_rx_work))
+                       queue_work(pshm_drv->pshm_rx_workqueue,
+                                               &pshm_drv->shm_rx_work);
+       }
+
+       /* Check for emptied buffers. */
+       if (mbx_msg & SHM_EMPTY_MASK) {
+               int idx;
+
+               spin_lock_irqsave(&pshm_drv->lock, flags);
+
+               /* Check whether we have any outstanding buffers. */
+               if (list_empty(&pshm_drv->tx_full_list)) {
+
+                       /* We print even in IRQ context... */
+                       pr_warn("No TX to empty: msg:%x\n", mbx_msg);
+
+                       spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+                       /* Bail out. */
+                       goto err_sync;
+               }
+
+               pbuf =
+                       list_entry(pshm_drv->tx_full_list.next,
+                                       struct buf_list, list);
+               idx = pbuf->index;
+
+               /* Check buffer synchronization. */
+               if (idx != SHM_GET_EMPTY(mbx_msg)) {
+
+                       spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+                       /* We print even in IRQ context... */
+                       pr_warn("TX empty "
+                               "out of sync: idx:%d, msg:%x\n", idx, mbx_msg);
+
+                       /* Bail out. */
+                       goto err_sync;
+               }
+               list_del_init(&pbuf->list);
+
+               /* Reset buffer parameters. */
+               pbuf->frames = 0;
+               pbuf->frm_ofs = SHM_CAIF_FRM_OFS;
+
+               list_add_tail(&pbuf->list, &pshm_drv->tx_empty_list);
+
+               /* Check the available no. of buffers in the empty list */
+               list_for_each(pos, &pshm_drv->tx_empty_list)
+                       avail_emptybuff++;
+
+               /* Check whether we have to wake up the transmitter. */
+               if ((avail_emptybuff > HIGH_WATERMARK) &&
+                                       (!pshm_drv->tx_empty_available)) {
+                       pshm_drv->tx_empty_available = 1;
+                       pshm_drv->cfdev.flowctrl
+                                       (pshm_drv->pshm_dev->pshm_netdev,
+                                                               CAIF_FLOW_ON);
+
+                       spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+                       /* Schedule the work queue if required. */
+                       if (!work_pending(&pshm_drv->shm_tx_work))
+                               queue_work(pshm_drv->pshm_tx_workqueue,
+                                                       &pshm_drv->shm_tx_work);
+               } else
+                       spin_unlock_irqrestore(&pshm_drv->lock, flags);
+       }
+
+       return 0;
+
+err_sync:
+       return -EIO;
+}
+
+static void shm_rx_work_func(struct work_struct *rx_work)
+{
+       struct shmdrv_layer *pshm_drv;
+       struct buf_list *pbuf;
+       unsigned long flags = 0;
+       struct sk_buff *skb;
+       char *p;
+       int ret;
+
+       pshm_drv = container_of(rx_work, struct shmdrv_layer, shm_rx_work);
+
+       while (1) {
+
+               struct shm_pck_desc *pck_desc;
+
+               spin_lock_irqsave(&pshm_drv->lock, flags);
+
+               /* Check for received buffers. */
+               if (list_empty(&pshm_drv->rx_full_list)) {
+                       spin_unlock_irqrestore(&pshm_drv->lock, flags);
+                       break;
+               }
+
+               pbuf =
+                       list_entry(pshm_drv->rx_full_list.next, struct buf_list,
+                                       list);
+               list_del_init(&pbuf->list);
+
+               /* Retrieve pointer to start of the packet descriptor area. */
+               pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr;
+
+               /*
+                * Check whether descriptor contains a CAIF shared memory
+                * frame.
+                */
+               while (pck_desc->frm_ofs) {
+                       unsigned int frm_buf_ofs;
+                       unsigned int frm_pck_ofs;
+                       unsigned int frm_pck_len;
+                       /*
+                        * Check whether offset is within buffer limits
+                        * (lower).
+                        */
+                       if (pck_desc->frm_ofs <
+                               (pbuf->phy_addr - pshm_drv->shm_base_addr))
+                               break;
+                       /*
+                        * Check whether offset is within buffer limits
+                        * (higher).
+                        */
+                       if (pck_desc->frm_ofs >
+                               ((pbuf->phy_addr - pshm_drv->shm_base_addr) +
+                                       pbuf->len))
+                               break;
+
+                       /* Calculate offset from start of buffer. */
+                       frm_buf_ofs =
+                               pck_desc->frm_ofs - (pbuf->phy_addr -
+                                               pshm_drv->shm_base_addr);
+
+                       /*
+                        * Calculate offset and length of CAIF packet while
+                        * taking care of the shared memory header.
+                        */
+                       frm_pck_ofs =
+                               frm_buf_ofs + SHM_HDR_LEN +
+                               (*(pbuf->desc_vptr + frm_buf_ofs));
+                       frm_pck_len =
+                               (pck_desc->frm_len - SHM_HDR_LEN -
+                               (*(pbuf->desc_vptr + frm_buf_ofs)));
+
+                       /* Check whether CAIF packet is within buffer limits */
+                       if ((frm_pck_ofs + pck_desc->frm_len) > pbuf->len)
+                               break;
+
+                       /* Get a suitable CAIF packet and copy in data. */
+                       skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev,
+                                                       frm_pck_len + 1);
+                       if (unlikely(!skb)) {
+                               pr_warn("allocation failed, dropping frame\n");
+                               break;
+                       }
+
+                       p = skb_put(skb, frm_pck_len);
+                       memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len);
+
+                       skb->protocol = htons(ETH_P_CAIF);
+                       skb_reset_mac_header(skb);
+                       skb->dev = pshm_drv->pshm_dev->pshm_netdev;
+
+                       /* Push received packet up the stack. */
+                       ret = netif_rx_ni(skb);
+
+                       if (!ret) {
+                               pshm_drv->pshm_dev->pshm_netdev->stats.
+                                                               rx_packets++;
+                               pshm_drv->pshm_dev->pshm_netdev->stats.
+                                               rx_bytes += pck_desc->frm_len;
+                       } else
+                               ++pshm_drv->pshm_dev->pshm_netdev->stats.
+                                                               rx_dropped;
+                       /* Move to next packet descriptor. */
+                       pck_desc++;
+               }
+
+               list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list);
+
+               spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+       }
+
+       /* Schedule the work queue if required. */
+       if (!work_pending(&pshm_drv->shm_tx_work))
+               queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
+
+}
+
+static void shm_tx_work_func(struct work_struct *tx_work)
+{
+       u32 mbox_msg;
+       unsigned int frmlen, avail_emptybuff, append = 0;
+       unsigned long flags = 0;
+       struct buf_list *pbuf = NULL;
+       struct shmdrv_layer *pshm_drv;
+       struct shm_caif_frm *frm;
+       struct sk_buff *skb;
+       struct shm_pck_desc *pck_desc;
+       struct list_head *pos;
+
+       pshm_drv = container_of(tx_work, struct shmdrv_layer, shm_tx_work);
+
+       do {
+               /* Initialize mailbox message. */
+               mbox_msg = 0x00;
+               avail_emptybuff = 0;
+
+               spin_lock_irqsave(&pshm_drv->lock, flags);
+
+               /* Check for pending receive buffers. */
+               if (!list_empty(&pshm_drv->rx_pend_list)) {
+
+                       pbuf = list_entry(pshm_drv->rx_pend_list.next,
+                                               struct buf_list, list);
+
+                       list_del_init(&pbuf->list);
+                       list_add_tail(&pbuf->list, &pshm_drv->rx_empty_list);
+                       /*
+                        * Value index is never changed,
+                        * so read access should be safe.
+                        */
+                       mbox_msg |= SHM_SET_EMPTY(pbuf->index);
+               }
+
+               skb = skb_peek(&pshm_drv->sk_qhead);
+
+               if (skb == NULL)
+                       goto send_msg;
+
+               /* Check the available no. of buffers in the empty list */
+               list_for_each(pos, &pshm_drv->tx_empty_list)
+                       avail_emptybuff++;
+
+               if ((avail_emptybuff < LOW_WATERMARK) &&
+                                       pshm_drv->tx_empty_available) {
+                       /* Update blocking condition. */
+                       pshm_drv->tx_empty_available = 0;
+                       pshm_drv->cfdev.flowctrl
+                                       (pshm_drv->pshm_dev->pshm_netdev,
+                                       CAIF_FLOW_OFF);
+               }
+               /*
+                * We simply return to the caller if we do not have space
+                * either in Tx pending list or Tx empty list. In this case,
+                * we hold the received skb in the skb list, waiting to
+                * be transmitted once Tx buffers become available
+                */
+               if (list_empty(&pshm_drv->tx_empty_list))
+                       goto send_msg;
+
+               /* Get the first free Tx buffer. */
+               pbuf = list_entry(pshm_drv->tx_empty_list.next,
+                                               struct buf_list, list);
+               do {
+                       if (append) {
+                               skb = skb_peek(&pshm_drv->sk_qhead);
+                               if (skb == NULL)
+                                       break;
+                       }
+
+                       frm = (struct shm_caif_frm *)
+                                       (pbuf->desc_vptr + pbuf->frm_ofs);
+
+                       frm->hdr_ofs = 0;
+                       frmlen = 0;
+                       frmlen += SHM_HDR_LEN + frm->hdr_ofs + skb->len;
+
+                       /* Add tail padding if needed. */
+                       if (frmlen % SHM_FRM_PAD_LEN)
+                               frmlen += SHM_FRM_PAD_LEN -
+                                               (frmlen % SHM_FRM_PAD_LEN);
+
+                       /*
+                        * Verify that packet, header and additional padding
+                        * can fit within the buffer frame area.
+                        */
+                       if (frmlen >= (pbuf->len - pbuf->frm_ofs))
+                               break;
+
+                       if (!append) {
+                               list_del_init(&pbuf->list);
+                               append = 1;
+                       }
+
+                       skb = skb_dequeue(&pshm_drv->sk_qhead);
+                       /* Copy in CAIF frame. */
+                       skb_copy_bits(skb, 0, pbuf->desc_vptr +
+                                       pbuf->frm_ofs + SHM_HDR_LEN +
+                                               frm->hdr_ofs, skb->len);
+
+                       pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++;
+                       pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes +=
+                                                                       frmlen;
+                       dev_kfree_skb(skb);
+
+                       /* Fill in the shared memory packet descriptor area. */
+                       pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr);
+                       /* Forward to current frame. */
+                       pck_desc += pbuf->frames;
+                       pck_desc->frm_ofs = (pbuf->phy_addr -
+                                               pshm_drv->shm_base_addr) +
+                                                               pbuf->frm_ofs;
+                       pck_desc->frm_len = frmlen;
+                       /* Terminate packet descriptor area. */
+                       pck_desc++;
+                       pck_desc->frm_ofs = 0;
+                       /* Update buffer parameters. */
+                       pbuf->frames++;
+                       pbuf->frm_ofs += frmlen + (frmlen % 32);
+
+               } while (pbuf->frames < SHM_MAX_FRMS_PER_BUF);
+
+               /* Assign buffer as full. */
+               list_add_tail(&pbuf->list, &pshm_drv->tx_full_list);
+               append = 0;
+               mbox_msg |= SHM_SET_FULL(pbuf->index);
+send_msg:
+               spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+               if (mbox_msg)
+                       pshm_drv->pshm_dev->pshmdev_mbxsend
+                                       (pshm_drv->pshm_dev->shm_id, mbox_msg);
+       } while (mbox_msg);
+}
+
+static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev)
+{
+       struct shmdrv_layer *pshm_drv;
+       unsigned long flags = 0;
+
+       pshm_drv = netdev_priv(shm_netdev);
+
+       spin_lock_irqsave(&pshm_drv->lock, flags);
+
+       skb_queue_tail(&pshm_drv->sk_qhead, skb);
+
+       spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+       /* Schedule Tx work queue for deferred processing of skbs. */
+       if (!work_pending(&pshm_drv->shm_tx_work))
+               queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
+
+       return 0;
+}
+
+static const struct net_device_ops netdev_ops = {
+       .ndo_open = shm_netdev_open,
+       .ndo_stop = shm_netdev_close,
+       .ndo_start_xmit = shm_netdev_tx,
+};
+
+static void shm_netdev_setup(struct net_device *pshm_netdev)
+{
+       struct shmdrv_layer *pshm_drv;
+       pshm_netdev->netdev_ops = &netdev_ops;
+
+       pshm_netdev->mtu = CAIF_MAX_MTU;
+       pshm_netdev->type = ARPHRD_CAIF;
+       pshm_netdev->hard_header_len = CAIF_NEEDED_HEADROOM;
+       pshm_netdev->tx_queue_len = 0;
+       pshm_netdev->destructor = free_netdev;
+
+       pshm_drv = netdev_priv(pshm_netdev);
+
+       /* Initialize structures in a clean state. */
+       memset(pshm_drv, 0, sizeof(struct shmdrv_layer));
+
+       pshm_drv->cfdev.link_select = CAIF_LINK_LOW_LATENCY;
+}
+
+int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
+{
+       int result, j;
+       struct shmdrv_layer *pshm_drv = NULL;
+
+       pshm_dev->pshm_netdev = alloc_netdev(sizeof(struct shmdrv_layer),
+                                               "cfshm%d", shm_netdev_setup);
+       if (!pshm_dev->pshm_netdev)
+               return -ENOMEM;
+
+       pshm_drv = netdev_priv(pshm_dev->pshm_netdev);
+       pshm_drv->pshm_dev = pshm_dev;
+
+       /*
+        * Initialization starts with the verification of the
+        * availability of MBX driver by calling its setup function.
+        * MBX driver must be available by this time for proper
+        * functioning of SHM driver.
+        */
+       if ((pshm_dev->pshmdev_mbxsetup
+                               (caif_shmdrv_rx_cb, pshm_dev, pshm_drv)) != 0) {
+               pr_warn("Could not configure SHM mailbox, "
+                               "bailing out ...\n");
+               free_netdev(pshm_dev->pshm_netdev);
+               return -ENODEV;
+       }
+
+       skb_queue_head_init(&pshm_drv->sk_qhead);
+
+       pr_info("SHM DEVICE[%d] PROBED BY DRIVER, NEW SHM DRIVER"
+                       " INSTANCE AT pshm_drv=0x%p\n",
+                       pshm_drv->pshm_dev->shm_id, pshm_drv);
+
+       if (pshm_dev->shm_total_sz <
+                       (NR_TX_BUF * TX_BUF_SZ + NR_RX_BUF * RX_BUF_SZ)) {
+
+               pr_warn("ERROR, amount of available"
+                               " phys. SHM cannot accommodate current SHM "
+                               "driver configuration, bailing out ...\n");
+               free_netdev(pshm_dev->pshm_netdev);
+               return -ENOMEM;
+       }
+
+       pshm_drv->shm_base_addr = pshm_dev->shm_base_addr;
+       pshm_drv->shm_tx_addr = pshm_drv->shm_base_addr;
+
+       if (pshm_dev->shm_loopback)
+               pshm_drv->shm_rx_addr = pshm_drv->shm_tx_addr;
+       else
+               pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr +
+                                               (NR_TX_BUF * TX_BUF_SZ);
+
+       INIT_LIST_HEAD(&pshm_drv->tx_empty_list);
+       INIT_LIST_HEAD(&pshm_drv->tx_pend_list);
+       INIT_LIST_HEAD(&pshm_drv->tx_full_list);
+
+       INIT_LIST_HEAD(&pshm_drv->rx_empty_list);
+       INIT_LIST_HEAD(&pshm_drv->rx_pend_list);
+       INIT_LIST_HEAD(&pshm_drv->rx_full_list);
+
+       INIT_WORK(&pshm_drv->shm_tx_work, shm_tx_work_func);
+       INIT_WORK(&pshm_drv->shm_rx_work, shm_rx_work_func);
+
+       pshm_drv->pshm_tx_workqueue =
+                               create_singlethread_workqueue("shm_tx_work");
+       pshm_drv->pshm_rx_workqueue =
+                               create_singlethread_workqueue("shm_rx_work");
+
+       for (j = 0; j < NR_TX_BUF; j++) {
+               struct buf_list *tx_buf =
+                               kmalloc(sizeof(struct buf_list), GFP_KERNEL);
+
+               if (tx_buf == NULL) {
+                       pr_warn("ERROR, could not"
+                                       " allocate dynamic mem. for tx_buf,"
+                                       " bailing out ...\n");
+                       free_netdev(pshm_dev->pshm_netdev);
+                       return -ENOMEM;
+               }
+               tx_buf->index = j;
+               tx_buf->phy_addr = pshm_drv->shm_tx_addr + (TX_BUF_SZ * j);
+               tx_buf->len = TX_BUF_SZ;
+               tx_buf->frames = 0;
+               tx_buf->frm_ofs = SHM_CAIF_FRM_OFS;
+
+               if (pshm_dev->shm_loopback)
+                       tx_buf->desc_vptr = (char *)tx_buf->phy_addr;
+               else
+                       tx_buf->desc_vptr =
+                                       ioremap(tx_buf->phy_addr, TX_BUF_SZ);
+
+               list_add_tail(&tx_buf->list, &pshm_drv->tx_empty_list);
+       }
+
+       for (j = 0; j < NR_RX_BUF; j++) {
+               struct buf_list *rx_buf =
+                               kmalloc(sizeof(struct buf_list), GFP_KERNEL);
+
+               if (rx_buf == NULL) {
+                       pr_warn("ERROR, could not"
+                                       " allocate dynamic mem. for rx_buf,"
+                                       " bailing out ...\n");
+                       free_netdev(pshm_dev->pshm_netdev);
+                       return -ENOMEM;
+               }
+               rx_buf->index = j;
+               rx_buf->phy_addr = pshm_drv->shm_rx_addr + (RX_BUF_SZ * j);
+               rx_buf->len = RX_BUF_SZ;
+
+               if (pshm_dev->shm_loopback)
+                       rx_buf->desc_vptr = (char *)rx_buf->phy_addr;
+               else
+                       rx_buf->desc_vptr =
+                                       ioremap(rx_buf->phy_addr, RX_BUF_SZ);
+               list_add_tail(&rx_buf->list, &pshm_drv->rx_empty_list);
+       }
+
+       pshm_drv->tx_empty_available = 1;
+       result = register_netdev(pshm_dev->pshm_netdev);
+       if (result)
+               pr_warn("ERROR[%d], SHM could not "
+                       "register with NW framework, bailing out ...\n", result);
+
+       return result;
+}
+
+void caif_shmcore_remove(struct net_device *pshm_netdev)
+{
+       struct buf_list *pbuf;
+       struct shmdrv_layer *pshm_drv = NULL;
+
+       pshm_drv = netdev_priv(pshm_netdev);
+
+       while (!(list_empty(&pshm_drv->tx_pend_list))) {
+               pbuf =
+                       list_entry(pshm_drv->tx_pend_list.next,
+                                       struct buf_list, list);
+
+               list_del(&pbuf->list);
+               kfree(pbuf);
+       }
+
+       while (!(list_empty(&pshm_drv->tx_full_list))) {
+               pbuf =
+                       list_entry(pshm_drv->tx_full_list.next,
+                                       struct buf_list, list);
+               list_del(&pbuf->list);
+               kfree(pbuf);
+       }
+
+       while (!(list_empty(&pshm_drv->tx_empty_list))) {
+               pbuf =
+                       list_entry(pshm_drv->tx_empty_list.next,
+                                       struct buf_list, list);
+               list_del(&pbuf->list);
+               kfree(pbuf);
+       }
+
+       while (!(list_empty(&pshm_drv->rx_full_list))) {
+               pbuf =
+                       list_entry(pshm_drv->rx_full_list.next,
+                               struct buf_list, list);
+               list_del(&pbuf->list);
+               kfree(pbuf);
+       }
+
+       while (!(list_empty(&pshm_drv->rx_pend_list))) {
+               pbuf =
+                       list_entry(pshm_drv->rx_pend_list.next,
+                               struct buf_list, list);
+               list_del(&pbuf->list);
+               kfree(pbuf);
+       }
+
+       while (!(list_empty(&pshm_drv->rx_empty_list))) {
+               pbuf =
+                       list_entry(pshm_drv->rx_empty_list.next,
+                               struct buf_list, list);
+               list_del(&pbuf->list);
+               kfree(pbuf);
+       }
+
+       /* Destroy work queues. */
+       destroy_workqueue(pshm_drv->pshm_tx_workqueue);
+       destroy_workqueue(pshm_drv->pshm_rx_workqueue);
+
+       unregister_netdev(pshm_netdev);
+}
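
The mailbox word exchanged by caif_shmdrv_rx_cb() and shm_tx_work_func() packs two 4-bit indexes: bits 0-3 announce "buffer N is now full", bits 4-7 "buffer N is now empty", and a zero nibble means no notification, which is why SHM_SET_*() stores index+1 and SHM_GET_*() subtracts it back. A self-contained round-trip check of the encoding, compilable as plain userspace C with the macros copied from above:

#include <assert.h>
#include <stdio.h>

#define SHM_SET_FULL(x)		(((x + 1) & 0x0F) << 0)
#define SHM_GET_FULL(x)		(((x >> 0) & 0x0F) - 1)
#define SHM_SET_EMPTY(x)	(((x + 1) & 0x0F) << 4)
#define SHM_GET_EMPTY(x)	(((x >> 4) & 0x0F) - 1)
#define SHM_FULL_MASK		(0x0F << 0)
#define SHM_EMPTY_MASK		(0x0F << 4)

int main(void)
{
	/* "Rx buffer 2 is now full, Tx buffer 5 is now empty" in one word. */
	unsigned int msg = SHM_SET_FULL(2) | SHM_SET_EMPTY(5);	/* 0x63 */

	assert(msg & SHM_FULL_MASK);
	assert(SHM_GET_FULL(msg) == 2);
	assert(msg & SHM_EMPTY_MASK);
	assert(SHM_GET_EMPTY(msg) == 5);
	printf("msg=0x%02x full=%d empty=%d\n",
	       msg, SHM_GET_FULL(msg), SHM_GET_EMPTY(msg));
	return 0;
}

Because each nibble stores index+1, only indexes 0-14 are representable per direction; with NR_TX_BUF and NR_RX_BUF both 6, and the LOW_WATERMARK/HIGH_WATERMARK hysteresis throttling the transmitter well before exhaustion, that limit is never approached.
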
index 9d9e45394433f134ef8a344e69fe1d42039e7fcc..080574b0fff0c222510df389641c669d5fad1429 100644 (file)
@@ -82,6 +82,14 @@ config CAN_FLEXCAN
        ---help---
          Say Y here if you want to support for Freescale FlexCAN.
 
+config PCH_CAN
+       tristate "PCH CAN"
+       depends on CAN_DEV && PCI
+       ---help---
+         This driver is for PCH CAN of Topcliff which is an IOH for x86
+         embedded processor.
+         This driver can access CAN bus.
+
 source "drivers/net/can/mscan/Kconfig"
 
 source "drivers/net/can/sja1000/Kconfig"
index 00575373bbd0eed976566419be2ed9c9e34c8cf2..90af15a4f106a7031a8eafce16b6f2b9d3cc0eaf 100644 (file)
@@ -17,5 +17,6 @@ obj-$(CONFIG_CAN_MCP251X)     += mcp251x.o
 obj-$(CONFIG_CAN_BFIN)         += bfin_can.o
 obj-$(CONFIG_CAN_JANZ_ICAN3)   += janz-ican3.o
 obj-$(CONFIG_CAN_FLEXCAN)      += flexcan.o
+obj-$(CONFIG_PCH_CAN)          += pch_can.o
 
 ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
index 2d8bd86bc5e2072bf66d4966d5042869044fb0b9..cee98fa668bd703c9e34356acb9e6350c0ad80f5 100644 (file)
@@ -2,7 +2,7 @@
  * at91_can.c - CAN network driver for AT91 SoC CAN controller
  *
  * (C) 2007 by Hans J. Koch <hjk@linutronix.de>
- * (C) 2008, 2009 by Marc Kleine-Budde <kernel@pengutronix.de>
+ * (C) 2008, 2009, 2010 by Marc Kleine-Budde <kernel@pengutronix.de>
  *
  * This software may be distributed under the terms of the GNU General
  * Public License ("GPL") version 2 as distributed in the 'COPYING'
@@ -40,7 +40,6 @@
 
 #include <mach/board.h>
 
-#define DRV_NAME               "at91_can"
 #define AT91_NAPI_WEIGHT       12
 
 /*
@@ -172,6 +171,7 @@ struct at91_priv {
 };
 
 static struct can_bittiming_const at91_bittiming_const = {
+       .name           = KBUILD_MODNAME,
        .tseg1_min      = 4,
        .tseg1_max      = 16,
        .tseg2_min      = 2,
@@ -199,13 +199,13 @@ static inline int get_tx_echo_mb(const struct at91_priv *priv)
 
 static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg)
 {
-       return readl(priv->reg_base + reg);
+       return __raw_readl(priv->reg_base + reg);
 }
 
 static inline void at91_write(const struct at91_priv *priv, enum at91_reg reg,
                u32 value)
 {
-       writel(value, priv->reg_base + reg);
+       __raw_writel(value, priv->reg_base + reg);
 }
 
 static inline void set_mb_mode_prio(const struct at91_priv *priv,
@@ -243,6 +243,12 @@ static void at91_setup_mailboxes(struct net_device *dev)
                set_mb_mode(priv, i, AT91_MB_MODE_RX);
        set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR);
 
+       /* reset acceptance mask and id register */
+       for (i = AT91_MB_RX_FIRST; i <= AT91_MB_RX_LAST; i++) {
+               at91_write(priv, AT91_MAM(i), 0x0);
+               at91_write(priv, AT91_MID(i), AT91_MID_MIDE);
+       }
+
        /* The last 4 mailboxes are used for transmitting. */
        for (i = AT91_MB_TX_FIRST; i <= AT91_MB_TX_LAST; i++)
                set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);
@@ -257,18 +263,30 @@ static int at91_set_bittiming(struct net_device *dev)
        const struct can_bittiming *bt = &priv->can.bittiming;
        u32 reg_br;
 
-       reg_br = ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) << 24) |
-               ((bt->brp - 1) << 16) | ((bt->sjw - 1) << 12) |
+       reg_br = ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 << 24 : 0) |
+               ((bt->brp - 1) << 16) | ((bt->sjw - 1) << 12) |
                ((bt->prop_seg - 1) << 8) | ((bt->phase_seg1 - 1) << 4) |
                ((bt->phase_seg2 - 1) << 0);
 
-       dev_info(dev->dev.parent, "writing AT91_BR: 0x%08x\n", reg_br);
+       netdev_info(dev, "writing AT91_BR: 0x%08x\n", reg_br);
 
        at91_write(priv, AT91_BR, reg_br);
 
        return 0;
 }
 
+static int at91_get_berr_counter(const struct net_device *dev,
+               struct can_berr_counter *bec)
+{
+       const struct at91_priv *priv = netdev_priv(dev);
+       u32 reg_ecr = at91_read(priv, AT91_ECR);
+
+       bec->rxerr = reg_ecr & 0xff;
+       bec->txerr = reg_ecr >> 16;
+
+       return 0;
+}
+
 static void at91_chip_start(struct net_device *dev)
 {
        struct at91_priv *priv = netdev_priv(dev);
@@ -281,6 +299,7 @@ static void at91_chip_start(struct net_device *dev)
        reg_mr = at91_read(priv, AT91_MR);
        at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);
 
+       at91_set_bittiming(dev);
        at91_setup_mailboxes(dev);
        at91_transceiver_switch(priv, 1);
 
@@ -350,8 +369,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
        if (unlikely(!(at91_read(priv, AT91_MSR(mb)) & AT91_MSR_MRDY))) {
                netif_stop_queue(dev);
 
-               dev_err(dev->dev.parent,
-                       "BUG! TX buffer full when queue awake!\n");
+               netdev_err(dev, "BUG! TX buffer full when queue awake!\n");
                return NETDEV_TX_BUSY;
        }
 
@@ -435,7 +453,7 @@ static void at91_rx_overflow_err(struct net_device *dev)
        struct sk_buff *skb;
        struct can_frame *cf;
 
-       dev_dbg(dev->dev.parent, "RX buffer overflow\n");
+       netdev_dbg(dev, "RX buffer overflow\n");
        stats->rx_over_errors++;
        stats->rx_errors++;
 
@@ -480,6 +498,9 @@ static void at91_read_mb(struct net_device *dev, unsigned int mb,
        *(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb));
        *(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb));
 
+       /* allow RX of extended frames */
+       at91_write(priv, AT91_MID(mb), AT91_MID_MIDE);
+
        if (unlikely(mb == AT91_MB_RX_LAST && reg_msr & AT91_MSR_MMI))
                at91_rx_overflow_err(dev);
 }
@@ -565,8 +586,8 @@ static int at91_poll_rx(struct net_device *dev, int quota)
 
        if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
            reg_sr & AT91_MB_RX_LOW_MASK)
-               dev_info(dev->dev.parent,
-                        "order of incoming frames cannot be guaranteed\n");
+               netdev_info(dev,
+                       "order of incoming frames cannot be guaranteed\n");
 
  again:
        for (mb = find_next_bit(addr, AT91_MB_RX_NUM, priv->rx_next);
@@ -604,7 +625,7 @@ static void at91_poll_err_frame(struct net_device *dev,
 
        /* CRC error */
        if (reg_sr & AT91_IRQ_CERR) {
-               dev_dbg(dev->dev.parent, "CERR irq\n");
+               netdev_dbg(dev, "CERR irq\n");
                dev->stats.rx_errors++;
                priv->can.can_stats.bus_error++;
                cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@@ -612,7 +633,7 @@ static void at91_poll_err_frame(struct net_device *dev,
 
        /* Stuffing Error */
        if (reg_sr & AT91_IRQ_SERR) {
-               dev_dbg(dev->dev.parent, "SERR irq\n");
+               netdev_dbg(dev, "SERR irq\n");
                dev->stats.rx_errors++;
                priv->can.can_stats.bus_error++;
                cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@@ -621,14 +642,14 @@ static void at91_poll_err_frame(struct net_device *dev,
 
        /* Acknowledgement Error */
        if (reg_sr & AT91_IRQ_AERR) {
-               dev_dbg(dev->dev.parent, "AERR irq\n");
+               netdev_dbg(dev, "AERR irq\n");
                dev->stats.tx_errors++;
                cf->can_id |= CAN_ERR_ACK;
        }
 
        /* Form error */
        if (reg_sr & AT91_IRQ_FERR) {
-               dev_dbg(dev->dev.parent, "FERR irq\n");
+               netdev_dbg(dev, "FERR irq\n");
                dev->stats.rx_errors++;
                priv->can.can_stats.bus_error++;
                cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@@ -637,7 +658,7 @@ static void at91_poll_err_frame(struct net_device *dev,
 
        /* Bit Error */
        if (reg_sr & AT91_IRQ_BERR) {
-               dev_dbg(dev->dev.parent, "BERR irq\n");
+               netdev_dbg(dev, "BERR irq\n");
                dev->stats.tx_errors++;
                priv->can.can_stats.bus_error++;
                cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@@ -755,12 +776,10 @@ static void at91_irq_err_state(struct net_device *dev,
                struct can_frame *cf, enum can_state new_state)
 {
        struct at91_priv *priv = netdev_priv(dev);
-       u32 reg_idr, reg_ier, reg_ecr;
-       u8 tec, rec;
+       u32 reg_idr = 0, reg_ier = 0;
+       struct can_berr_counter bec;
 
-       reg_ecr = at91_read(priv, AT91_ECR);
-       rec = reg_ecr & 0xff;
-       tec = reg_ecr >> 16;
+       at91_get_berr_counter(dev, &bec);
 
        switch (priv->can.state) {
        case CAN_STATE_ERROR_ACTIVE:
@@ -771,11 +790,11 @@ static void at91_irq_err_state(struct net_device *dev,
                 */
                if (new_state >= CAN_STATE_ERROR_WARNING &&
                    new_state <= CAN_STATE_BUS_OFF) {
-                       dev_dbg(dev->dev.parent, "Error Warning IRQ\n");
+                       netdev_dbg(dev, "Error Warning IRQ\n");
                        priv->can.can_stats.error_warning++;
 
                        cf->can_id |= CAN_ERR_CRTL;
-                       cf->data[1] = (tec > rec) ?
+                       cf->data[1] = (bec.txerr > bec.rxerr) ?
                                CAN_ERR_CRTL_TX_WARNING :
                                CAN_ERR_CRTL_RX_WARNING;
                }
@@ -787,11 +806,11 @@ static void at91_irq_err_state(struct net_device *dev,
                 */
                if (new_state >= CAN_STATE_ERROR_PASSIVE &&
                    new_state <= CAN_STATE_BUS_OFF) {
-                       dev_dbg(dev->dev.parent, "Error Passive IRQ\n");
+                       netdev_dbg(dev, "Error Passive IRQ\n");
                        priv->can.can_stats.error_passive++;
 
                        cf->can_id |= CAN_ERR_CRTL;
-                       cf->data[1] = (tec > rec) ?
+                       cf->data[1] = (bec.txerr > bec.rxerr) ?
                                CAN_ERR_CRTL_TX_PASSIVE :
                                CAN_ERR_CRTL_RX_PASSIVE;
                }
@@ -804,7 +823,7 @@ static void at91_irq_err_state(struct net_device *dev,
                if (new_state <= CAN_STATE_ERROR_PASSIVE) {
                        cf->can_id |= CAN_ERR_RESTARTED;
 
-                       dev_dbg(dev->dev.parent, "restarted\n");
+                       netdev_dbg(dev, "restarted\n");
                        priv->can.can_stats.restarts++;
 
                        netif_carrier_on(dev);
@@ -825,7 +844,7 @@ static void at91_irq_err_state(struct net_device *dev,
                 * circumstances. so just enable AT91_IRQ_ERRP, thus
                 * the "fallthrough"
                 */
-               dev_dbg(dev->dev.parent, "Error Active\n");
+               netdev_dbg(dev, "Error Active\n");
                cf->can_id |= CAN_ERR_PROT;
                cf->data[2] = CAN_ERR_PROT_ACTIVE;
        case CAN_STATE_ERROR_WARNING:   /* fallthrough */
@@ -843,7 +862,7 @@ static void at91_irq_err_state(struct net_device *dev,
 
                cf->can_id |= CAN_ERR_BUSOFF;
 
-               dev_dbg(dev->dev.parent, "bus-off\n");
+               netdev_dbg(dev, "bus-off\n");
                netif_carrier_off(dev);
                priv->can.can_stats.bus_off++;
 
@@ -881,7 +900,7 @@ static void at91_irq_err(struct net_device *dev)
        else if (likely(reg_sr & AT91_IRQ_ERRA))
                new_state = CAN_STATE_ERROR_ACTIVE;
        else {
-               dev_err(dev->dev.parent, "BUG! hardware in undefined state\n");
+               netdev_err(dev, "BUG! hardware in undefined state\n");
                return;
        }
 
@@ -1018,7 +1037,7 @@ static const struct net_device_ops at91_netdev_ops = {
        .ndo_start_xmit = at91_start_xmit,
 };
 
-static int __init at91_can_probe(struct platform_device *pdev)
+static int __devinit at91_can_probe(struct platform_device *pdev)
 {
        struct net_device *dev;
        struct at91_priv *priv;
@@ -1067,8 +1086,8 @@ static int __init at91_can_probe(struct platform_device *pdev)
        priv = netdev_priv(dev);
        priv->can.clock.freq = clk_get_rate(clk);
        priv->can.bittiming_const = &at91_bittiming_const;
-       priv->can.do_set_bittiming = at91_set_bittiming;
        priv->can.do_set_mode = at91_set_mode;
+       priv->can.do_get_berr_counter = at91_get_berr_counter;
        priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
        priv->reg_base = addr;
        priv->dev = dev;
@@ -1092,7 +1111,7 @@ static int __init at91_can_probe(struct platform_device *pdev)
        return 0;
 
  exit_free:
-       free_netdev(dev);
+       free_candev(dev);
  exit_iounmap:
        iounmap(addr);
  exit_release:
@@ -1113,8 +1132,6 @@ static int __devexit at91_can_remove(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, NULL);
 
-       free_netdev(dev);
-
        iounmap(priv->reg_base);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1122,6 +1139,8 @@ static int __devexit at91_can_remove(struct platform_device *pdev)
 
        clk_put(priv->clk);
 
+       free_candev(dev);
+
        return 0;
 }
 
@@ -1129,21 +1148,19 @@ static struct platform_driver at91_can_driver = {
        .probe          = at91_can_probe,
        .remove         = __devexit_p(at91_can_remove),
        .driver         = {
-               .name   = DRV_NAME,
+               .name   = KBUILD_MODNAME,
                .owner  = THIS_MODULE,
        },
 };
 
 static int __init at91_can_module_init(void)
 {
-       printk(KERN_INFO "%s netdevice driver\n", DRV_NAME);
        return platform_driver_register(&at91_can_driver);
 }
 
 static void __exit at91_can_module_exit(void)
 {
        platform_driver_unregister(&at91_can_driver);
-       printk(KERN_INFO "%s: driver removed\n", DRV_NAME);
 }
 
 module_init(at91_can_module_init);
@@ -1151,4 +1168,4 @@ module_exit(at91_can_module_exit);
 
 MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
 MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION(DRV_NAME " CAN netdevice driver");
+MODULE_DESCRIPTION(KBUILD_MODNAME " CAN netdevice driver");
index ef443a090ba7cc53a8db4989095bfd888712890c..d4990568baee071cd5ceaaac9a91c6bc5d6cf7c9 100644 (file)
@@ -992,7 +992,6 @@ static int __devexit flexcan_remove(struct platform_device *pdev)
 
        unregister_flexcandev(dev);
        platform_set_drvdata(pdev, NULL);
-       free_candev(dev);
        iounmap(priv->base);
 
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1000,6 +999,8 @@ static int __devexit flexcan_remove(struct platform_device *pdev)
 
        clk_put(priv->clk);
 
+       free_candev(dev);
+
        return 0;
 }
 
index 6aadc3e32bd5c8bc31c1d5c231e1e9ff99ae9188..7ab534aee4525ded6c8db75e0dde6cbd542d618d 100644 (file)
 #  define RXBSIDH_SHIFT 3
 #define RXBSIDL(n)  (((n) * 0x10) + 0x60 + RXBSIDL_OFF)
 #  define RXBSIDL_IDE   0x08
+#  define RXBSIDL_SRR   0x10
 #  define RXBSIDL_EID   3
 #  define RXBSIDL_SHIFT 5
 #define RXBEID8(n)  (((n) * 0x10) + 0x60 + RXBEID8_OFF)
@@ -475,6 +476,8 @@ static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
                frame->can_id =
                        (buf[RXBSIDH_OFF] << RXBSIDH_SHIFT) |
                        (buf[RXBSIDL_OFF] >> RXBSIDL_SHIFT);
+               if (buf[RXBSIDL_OFF] & RXBSIDL_SRR)
+                       frame->can_id |= CAN_RTR_FLAG;
        }
        /* Data length */
        frame->can_dlc = get_can_dlc(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK);
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
new file mode 100644 (file)
index 0000000..55ec324
--- /dev/null
@@ -0,0 +1,1463 @@
+/*
+ * Copyright (C) 1999 - 2010 Intel Corporation.
+ * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+
+#define MAX_MSG_OBJ            32
+#define MSG_OBJ_RX             0 /* The receive message object flag. */
+#define MSG_OBJ_TX             1 /* The transmit message object flag. */
+
+#define ENABLE                 1 /* The enable flag */
+#define DISABLE                        0 /* The disable flag */
+#define CAN_CTRL_INIT          0x0001 /* The INIT bit of CANCONT register. */
+#define CAN_CTRL_IE            0x0002 /* The IE bit of CAN control register */
+#define CAN_CTRL_IE_SIE_EIE    0x000e
+#define CAN_CTRL_CCE           0x0040
+#define CAN_CTRL_OPT           0x0080 /* The OPT bit of CANCONT register. */
+#define CAN_OPT_SILENT         0x0008 /* The Silent bit of CANOPT reg. */
+#define CAN_OPT_LBACK          0x0010 /* The LoopBack bit of CANOPT reg. */
+#define CAN_CMASK_RX_TX_SET    0x00f3
+#define CAN_CMASK_RX_TX_GET    0x0073
+#define CAN_CMASK_ALL          0xff
+#define CAN_CMASK_RDWR         0x80
+#define CAN_CMASK_ARB          0x20
+#define CAN_CMASK_CTRL         0x10
+#define CAN_CMASK_MASK         0x40
+#define CAN_CMASK_NEWDAT       0x04
+#define CAN_CMASK_CLRINTPND    0x08
+
+#define CAN_IF_MCONT_NEWDAT    0x8000
+#define CAN_IF_MCONT_INTPND    0x2000
+#define CAN_IF_MCONT_UMASK     0x1000
+#define CAN_IF_MCONT_TXIE      0x0800
+#define CAN_IF_MCONT_RXIE      0x0400
+#define CAN_IF_MCONT_RMTEN     0x0200
+#define CAN_IF_MCONT_TXRQXT    0x0100
+#define CAN_IF_MCONT_EOB       0x0080
+#define CAN_IF_MCONT_DLC       0x000f
+#define CAN_IF_MCONT_MSGLOST   0x4000
+#define CAN_MASK2_MDIR_MXTD    0xc000
+#define CAN_ID2_DIR            0x2000
+#define CAN_ID_MSGVAL          0x8000
+
+#define CAN_STATUS_INT         0x8000
+#define CAN_IF_CREQ_BUSY       0x8000
+#define CAN_ID2_XTD            0x4000
+
+#define CAN_REC                        0x00007f00
+#define CAN_TEC                        0x000000ff
+
+#define PCH_RX_OK              0x00000010
+#define PCH_TX_OK              0x00000008
+#define PCH_BUS_OFF            0x00000080
+#define PCH_EWARN              0x00000040
+#define PCH_EPASSIV            0x00000020
+#define PCH_LEC0               0x00000001
+#define PCH_LEC1               0x00000002
+#define PCH_LEC2               0x00000004
+#define PCH_LEC_ALL            (PCH_LEC0 | PCH_LEC1 | PCH_LEC2)
+#define PCH_STUF_ERR           PCH_LEC0
+#define PCH_FORM_ERR           PCH_LEC1
+#define PCH_ACK_ERR            (PCH_LEC0 | PCH_LEC1)
+#define PCH_BIT1_ERR           PCH_LEC2
+#define PCH_BIT0_ERR           (PCH_LEC0 | PCH_LEC2)
+#define PCH_CRC_ERR            (PCH_LEC1 | PCH_LEC2)
+
+/* Bit positions and masks of certain controller register fields. */
+#define BIT_BITT_BRP           0
+#define BIT_BITT_SJW           6
+#define BIT_BITT_TSEG1         8
+#define BIT_BITT_TSEG2         12
+#define BIT_IF1_MCONT_RXIE     10
+#define BIT_IF2_MCONT_TXIE     11
+#define BIT_BRPE_BRPE          6
+#define BIT_ES_TXERRCNT                0
+#define BIT_ES_RXERRCNT                8
+#define MSK_BITT_BRP           0x3f
+#define MSK_BITT_SJW           0xc0
+#define MSK_BITT_TSEG1         0xf00
+#define MSK_BITT_TSEG2         0x7000
+#define MSK_BRPE_BRPE          0x3c0
+#define MSK_BRPE_GET           0x0f
+#define MSK_CTRL_IE_SIE_EIE    0x07
+#define MSK_MCONT_TXIE         0x08
+#define MSK_MCONT_RXIE         0x10
+#define PCH_CAN_NO_TX_BUFF     1
+#define COUNTER_LIMIT          10
+
+#define PCH_CAN_CLK            50000000        /* 50MHz */
+
+/* Define the number of message objects.
+ * PCH CAN communication is done via the Message RAM,
+ * which consists of 32 message objects. */
+#define PCH_RX_OBJ_NUM         26 /* Objects 1..PCH_RX_OBJ_NUM are Rx */
+#define PCH_TX_OBJ_NUM         6  /* The remaining objects are Tx */
+#define PCH_OBJ_NUM            (PCH_TX_OBJ_NUM + PCH_RX_OBJ_NUM)
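+/* The Rx objects are chained into a single hardware FIFO: every Rx
+ * object except the last has its EOB bit cleared (see
+ * pch_can_config_rx_tx_buffers() below). */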
+
+#define PCH_FIFO_THRESH                16
+
+enum pch_can_mode {
+       PCH_CAN_ENABLE,
+       PCH_CAN_DISABLE,
+       PCH_CAN_ALL,
+       PCH_CAN_NONE,
+       PCH_CAN_STOP,
+       PCH_CAN_RUN
+};
+
+struct pch_can_regs {
+       u32 cont;
+       u32 stat;
+       u32 errc;
+       u32 bitt;
+       u32 intr;
+       u32 opt;
+       u32 brpe;
+       u32 reserve1;
+       u32 if1_creq;
+       u32 if1_cmask;
+       u32 if1_mask1;
+       u32 if1_mask2;
+       u32 if1_id1;
+       u32 if1_id2;
+       u32 if1_mcont;
+       u32 if1_dataa1;
+       u32 if1_dataa2;
+       u32 if1_datab1;
+       u32 if1_datab2;
+       u32 reserve2;
+       u32 reserve3[12];
+       u32 if2_creq;
+       u32 if2_cmask;
+       u32 if2_mask1;
+       u32 if2_mask2;
+       u32 if2_id1;
+       u32 if2_id2;
+       u32 if2_mcont;
+       u32 if2_dataa1;
+       u32 if2_dataa2;
+       u32 if2_datab1;
+       u32 if2_datab2;
+       u32 reserve4;
+       u32 reserve5[20];
+       u32 treq1;
+       u32 treq2;
+       u32 reserve6[2];
+       u32 reserve7[56];
+       u32 reserve8[3];
+       u32 srst;
+};
+
+struct pch_can_priv {
+       struct can_priv can;
+       unsigned int can_num;
+       struct pci_dev *dev;
+       unsigned int tx_enable[MAX_MSG_OBJ];
+       unsigned int rx_enable[MAX_MSG_OBJ];
+       unsigned int rx_link[MAX_MSG_OBJ];
+       unsigned int int_enables;
+       unsigned int int_stat;
+       struct net_device *ndev;
+       spinlock_t msgif_reg_lock; /* Message Interface Registers Access Lock*/
+       unsigned int msg_obj[MAX_MSG_OBJ];
+       struct pch_can_regs __iomem *regs;
+       struct napi_struct napi;
+       unsigned int tx_obj;    /* Point next Tx Obj index */
+       unsigned int use_msi;
+};
+
+static struct can_bittiming_const pch_can_bittiming_const = {
+       .name = KBUILD_MODNAME,
+       .tseg1_min = 1,
+       .tseg1_max = 16,
+       .tseg2_min = 1,
+       .tseg2_max = 8,
+       .sjw_max = 4,
+       .brp_min = 1,
+       .brp_max = 1024, /* 6bit + extended 4bit */
+       .brp_inc = 1,
+};
+
+static DEFINE_PCI_DEVICE_TABLE(pch_pci_tbl) = {
+       {PCI_VENDOR_ID_INTEL, 0x8818, PCI_ANY_ID, PCI_ANY_ID,},
+       {0,}
+};
+MODULE_DEVICE_TABLE(pci, pch_pci_tbl);
+
+static inline void pch_can_bit_set(u32 __iomem *addr, u32 mask)
+{
+       iowrite32(ioread32(addr) | mask, addr);
+}
+
+static inline void pch_can_bit_clear(u32 __iomem *addr, u32 mask)
+{
+       iowrite32(ioread32(addr) & ~mask, addr);
+}
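+
+/* The two helpers above perform read-modify-write accesses to the
+ * memory-mapped registers.  They are not atomic on their own; paths
+ * that touch the message-interface registers serialize through
+ * msgif_reg_lock. */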
+
+static void pch_can_set_run_mode(struct pch_can_priv *priv,
+                                enum pch_can_mode mode)
+{
+       switch (mode) {
+       case PCH_CAN_RUN:
+               pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_INIT);
+               break;
+
+       case PCH_CAN_STOP:
+               pch_can_bit_set(&priv->regs->cont, CAN_CTRL_INIT);
+               break;
+
+       default:
+               dev_err(&priv->ndev->dev, "%s -> Invalid Mode.\n", __func__);
+               break;
+       }
+}
+
+static void pch_can_set_optmode(struct pch_can_priv *priv)
+{
+       u32 reg_val = ioread32(&priv->regs->opt);
+
+       if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+               reg_val |= CAN_OPT_SILENT;
+
+       if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
+               reg_val |= CAN_OPT_LBACK;
+
+       pch_can_bit_set(&priv->regs->cont, CAN_CTRL_OPT);
+       iowrite32(reg_val, &priv->regs->opt);
+}
+
+static void pch_can_set_int_custom(struct pch_can_priv *priv)
+{
+       /* Clearing the IE, SIE and EIE bits of Can control register. */
+       pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE);
+
+       /* Appropriately setting them. */
+       pch_can_bit_set(&priv->regs->cont,
+                       ((priv->int_enables & MSK_CTRL_IE_SIE_EIE) << 1));
+}
+
+/* This function retrieves the interrupt enable settings of the CAN device. */
+static void pch_can_get_int_enables(struct pch_can_priv *priv, u32 *enables)
+{
+       /* Obtaining the status of IE, SIE and EIE interrupt bits. */
+       *enables = ((ioread32(&priv->regs->cont) & CAN_CTRL_IE_SIE_EIE) >> 1);
+}
+
+static void pch_can_set_int_enables(struct pch_can_priv *priv,
+                                   enum pch_can_mode interrupt_no)
+{
+       switch (interrupt_no) {
+       case PCH_CAN_ENABLE:
+               pch_can_bit_set(&priv->regs->cont, CAN_CTRL_IE);
+               break;
+
+       case PCH_CAN_DISABLE:
+               pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE);
+               break;
+
+       case PCH_CAN_ALL:
+               pch_can_bit_set(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE);
+               break;
+
+       case PCH_CAN_NONE:
+               pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE);
+               break;
+
+       default:
+               dev_err(&priv->ndev->dev, "Invalid interrupt number.\n");
+               break;
+       }
+}
+
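+/* Writing a message object number to a CREQ register starts the
+ * transfer between the message RAM and the interface registers; the
+ * BUSY flag stays set while that transfer is in progress, so it is
+ * polled here for at most COUNTER_LIMIT microseconds. */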
+static void pch_can_check_if_busy(u32 __iomem *creq_addr, u32 num)
+{
+       u32 counter = COUNTER_LIMIT;
+       u32 ifx_creq;
+
+       iowrite32(num, creq_addr);
+       while (counter) {
+               ifx_creq = ioread32(creq_addr) & CAN_IF_CREQ_BUSY;
+               if (!ifx_creq)
+                       break;
+               counter--;
+               udelay(1);
+       }
+       if (!counter)
+               pr_err("%s: IF busy flag did not clear.\n", __func__);
+}
+
+static void pch_can_set_rx_enable(struct pch_can_priv *priv, u32 buff_num,
+                                 u32 set)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+       /* Reading the receive buffer data from RAM to Interface1 registers */
+       iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+       pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
+
+       /* Setting the IF1MASK1 register to access MsgVal and RxIE bits */
+       iowrite32(CAN_CMASK_RDWR | CAN_CMASK_ARB | CAN_CMASK_CTRL,
+                 &priv->regs->if1_cmask);
+
+       if (set == ENABLE) {
+               /* Setting the MsgVal and RxIE bits */
+               pch_can_bit_set(&priv->regs->if1_mcont, CAN_IF_MCONT_RXIE);
+               pch_can_bit_set(&priv->regs->if1_id2, CAN_ID_MSGVAL);
+
+       } else if (set == DISABLE) {
+               /* Resetting the MsgVal and RxIE bits */
+               pch_can_bit_clear(&priv->regs->if1_mcont, CAN_IF_MCONT_RXIE);
+               pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID_MSGVAL);
+       }
+
+       pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
+       spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
+static void pch_can_rx_enable_all(struct pch_can_priv *priv)
+{
+       int i;
+
+       /* Traversing the objects to find those configured as receivers. */
+       for (i = 0; i < PCH_OBJ_NUM; i++) {
+               if (priv->msg_obj[i] == MSG_OBJ_RX)
+                       pch_can_set_rx_enable(priv, i + 1, ENABLE);
+       }
+}
+
+static void pch_can_rx_disable_all(struct pch_can_priv *priv)
+{
+       int i;
+
+       /* Traversing the objects to find those configured as receivers. */
+       for (i = 0; i < PCH_OBJ_NUM; i++) {
+               if (priv->msg_obj[i] == MSG_OBJ_RX)
+                       pch_can_set_rx_enable(priv, i + 1, DISABLE);
+       }
+}
+
+static void pch_can_set_tx_enable(struct pch_can_priv *priv, u32 buff_num,
+                                u32 set)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+       /* Reading the Msg buffer from Message RAM to Interface2 registers. */
+       iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
+       pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
+
+       /* Setting the IF2CMASK register for accessing the
+        * MsgVal and TxIE bits */
+       iowrite32(CAN_CMASK_RDWR | CAN_CMASK_ARB | CAN_CMASK_CTRL,
+                &priv->regs->if2_cmask);
+
+       if (set == ENABLE) {
+               /* Setting the MsgVal and TxIE bits */
+               pch_can_bit_set(&priv->regs->if2_mcont, CAN_IF_MCONT_TXIE);
+               pch_can_bit_set(&priv->regs->if2_id2, CAN_ID_MSGVAL);
+       } else if (set == DISABLE) {
+               /* Resetting the MsgVal and TxIE bits. */
+               pch_can_bit_clear(&priv->regs->if2_mcont, CAN_IF_MCONT_TXIE);
+               pch_can_bit_clear(&priv->regs->if2_id2, CAN_ID_MSGVAL);
+       }
+
+       pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
+       spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
+static void pch_can_tx_enable_all(struct pch_can_priv *priv)
+{
+       int i;
+
+       /* Traversing the objects to find those configured as transmitters. */
+       for (i = 0; i < PCH_OBJ_NUM; i++) {
+               if (priv->msg_obj[i] == MSG_OBJ_TX)
+                       pch_can_set_tx_enable(priv, i + 1, ENABLE);
+       }
+}
+
+static void pch_can_tx_disable_all(struct pch_can_priv *priv)
+{
+       int i;
+
+       /* Traversing the objects to find those configured as transmitters. */
+       for (i = 0; i < PCH_OBJ_NUM; i++) {
+               if (priv->msg_obj[i] == MSG_OBJ_TX)
+                       pch_can_set_tx_enable(priv, i + 1, DISABLE);
+       }
+}
+
+static void pch_can_get_rx_enable(struct pch_can_priv *priv, u32 buff_num,
+                                u32 *enable)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+       iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+       pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
+
+       if (((ioread32(&priv->regs->if1_id2)) & CAN_ID_MSGVAL) &&
+                       ((ioread32(&priv->regs->if1_mcont)) &
+                       CAN_IF_MCONT_RXIE))
+               *enable = ENABLE;
+       else
+               *enable = DISABLE;
+       spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
+static void pch_can_get_tx_enable(struct pch_can_priv *priv, u32 buff_num,
+                                u32 *enable)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+       iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
+       pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
+
+       if (((ioread32(&priv->regs->if2_id2)) & CAN_ID_MSGVAL) &&
+                       ((ioread32(&priv->regs->if2_mcont)) &
+                       CAN_IF_MCONT_TXIE)) {
+               *enable = ENABLE;
+       } else {
+               *enable = DISABLE;
+       }
+       spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
+static int pch_can_int_pending(struct pch_can_priv *priv)
+{
+       return ioread32(&priv->regs->intr) & 0xffff;
+}
+
+static void pch_can_set_rx_buffer_link(struct pch_can_priv *priv,
+                                      u32 buffer_num, u32 set)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+       iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+       pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
+       iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL, &priv->regs->if1_cmask);
+       if (set == ENABLE)
+               pch_can_bit_clear(&priv->regs->if1_mcont, CAN_IF_MCONT_EOB);
+       else
+               pch_can_bit_set(&priv->regs->if1_mcont, CAN_IF_MCONT_EOB);
+
+       pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
+       spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
+static void pch_can_get_rx_buffer_link(struct pch_can_priv *priv,
+                                      u32 buffer_num, u32 *link)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+       iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+       pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
+
+       if (ioread32(&priv->regs->if1_mcont) & CAN_IF_MCONT_EOB)
+               *link = DISABLE;
+       else
+               *link = ENABLE;
+       spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
+static void pch_can_clear_buffers(struct pch_can_priv *priv)
+{
+       int i;
+
+       for (i = 0; i < PCH_RX_OBJ_NUM; i++) {
+               iowrite32(CAN_CMASK_RX_TX_SET, &priv->regs->if1_cmask);
+               iowrite32(0xffff, &priv->regs->if1_mask1);
+               iowrite32(0xffff, &priv->regs->if1_mask2);
+               iowrite32(0x0, &priv->regs->if1_id1);
+               iowrite32(0x0, &priv->regs->if1_id2);
+               iowrite32(0x0, &priv->regs->if1_mcont);
+               iowrite32(0x0, &priv->regs->if1_dataa1);
+               iowrite32(0x0, &priv->regs->if1_dataa2);
+               iowrite32(0x0, &priv->regs->if1_datab1);
+               iowrite32(0x0, &priv->regs->if1_datab2);
+               iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
+                         CAN_CMASK_ARB | CAN_CMASK_CTRL,
+                         &priv->regs->if1_cmask);
+               pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
+       }
+
+       for (; i < PCH_OBJ_NUM; i++) {
+               iowrite32(CAN_CMASK_RX_TX_SET, &priv->regs->if2_cmask);
+               iowrite32(0xffff, &priv->regs->if2_mask1);
+               iowrite32(0xffff, &priv->regs->if2_mask2);
+               iowrite32(0x0, &priv->regs->if2_id1);
+               iowrite32(0x0, &priv->regs->if2_id2);
+               iowrite32(0x0, &priv->regs->if2_mcont);
+               iowrite32(0x0, &priv->regs->if2_dataa1);
+               iowrite32(0x0, &priv->regs->if2_dataa2);
+               iowrite32(0x0, &priv->regs->if2_datab1);
+               iowrite32(0x0, &priv->regs->if2_datab2);
+               iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
+                         CAN_CMASK_ARB | CAN_CMASK_CTRL,
+                         &priv->regs->if2_cmask);
+               pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
+       }
+}
+
+static void pch_can_config_rx_tx_buffers(struct pch_can_priv *priv)
+{
+       int i;
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+
+       for (i = 0; i < PCH_OBJ_NUM; i++) {
+               if (priv->msg_obj[i] == MSG_OBJ_RX) {
+                       iowrite32(CAN_CMASK_RX_TX_GET,
+                               &priv->regs->if1_cmask);
+                       pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
+
+                       iowrite32(0x0, &priv->regs->if1_id1);
+                       iowrite32(0x0, &priv->regs->if1_id2);
+
+                       pch_can_bit_set(&priv->regs->if1_mcont,
+                                       CAN_IF_MCONT_UMASK);
+
+                       /* Clear EOB (FIFO mode) on all but the last Rx object */
+                       pch_can_bit_clear(&priv->regs->if1_mcont,
+                                         CAN_IF_MCONT_EOB);
+                       /* In FIFO mode the EOB bit of the last Rx object must be set */
+                       if (i == (PCH_RX_OBJ_NUM - 1))
+                               pch_can_bit_set(&priv->regs->if1_mcont,
+                                                 CAN_IF_MCONT_EOB);
+
+                       iowrite32(0, &priv->regs->if1_mask1);
+                       pch_can_bit_clear(&priv->regs->if1_mask2,
+                                         0x1fff | CAN_MASK2_MDIR_MXTD);
+
+                       /* Setting CMASK for writing */
+                       iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
+                                 CAN_CMASK_ARB | CAN_CMASK_CTRL,
+                                 &priv->regs->if1_cmask);
+
+                       pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
+               } else if (priv->msg_obj[i] == MSG_OBJ_TX) {
+                       iowrite32(CAN_CMASK_RX_TX_GET,
+                               &priv->regs->if2_cmask);
+                       pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
+
+                       /* Resetting DIR bit for reception */
+                       iowrite32(0x0, &priv->regs->if2_id1);
+                       iowrite32(0x0, &priv->regs->if2_id2);
+                       pch_can_bit_set(&priv->regs->if2_id2, CAN_ID2_DIR);
+
+                       /* Setting EOB bit for transmitter */
+                       iowrite32(CAN_IF_MCONT_EOB, &priv->regs->if2_mcont);
+
+                       pch_can_bit_set(&priv->regs->if2_mcont,
+                                       CAN_IF_MCONT_UMASK);
+
+                       iowrite32(0, &priv->regs->if2_mask1);
+                       pch_can_bit_clear(&priv->regs->if2_mask2, 0x1fff);
+
+                       /* Setting CMASK for writing */
+                       iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
+                                 CAN_CMASK_ARB | CAN_CMASK_CTRL,
+                                 &priv->regs->if2_cmask);
+
+                       pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
+               }
+       }
+       spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
+static void pch_can_init(struct pch_can_priv *priv)
+{
+       /* Stopping the CAN device. */
+       pch_can_set_run_mode(priv, PCH_CAN_STOP);
+
+       /* Clearing all the message object buffers. */
+       pch_can_clear_buffers(priv);
+
+       /* Configuring each message object as either an Rx or a Tx object. */
+       pch_can_config_rx_tx_buffers(priv);
+
+       /* Enabling the interrupts. */
+       pch_can_set_int_enables(priv, PCH_CAN_ALL);
+}
+
+static void pch_can_release(struct pch_can_priv *priv)
+{
+       /* Stopping the CAN device. */
+       pch_can_set_run_mode(priv, PCH_CAN_STOP);
+
+       /* Disabling the interrupts. */
+       pch_can_set_int_enables(priv, PCH_CAN_NONE);
+
+       /* Disabling all the receive objects. */
+       pch_can_rx_disable_all(priv);
+
+       /* Disabling all the transmit objects. */
+       pch_can_tx_disable_all(priv);
+}
+
+/* This function clears interrupt(s) from the CAN device. */
+static void pch_can_int_clr(struct pch_can_priv *priv, u32 mask)
+{
+       if (mask == CAN_STATUS_INT) {
+               ioread32(&priv->regs->stat);
+               return;
+       }
+
+       /* Clear interrupt for transmit object */
+       if (priv->msg_obj[mask - 1] == MSG_OBJ_TX) {
+               /* Setting CMASK for clearing interrupts for
+                * frame transmission. */
+               iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL | CAN_CMASK_ARB,
+                         &priv->regs->if2_cmask);
+
+               /* Resetting the ID registers. */
+               pch_can_bit_set(&priv->regs->if2_id2,
+                              CAN_ID2_DIR | (0x7ff << 2));
+               iowrite32(0x0, &priv->regs->if2_id1);
+
+               /* Clearing NewDat, TxRqst & IntPnd */
+               pch_can_bit_clear(&priv->regs->if2_mcont,
+                                 CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND |
+                                 CAN_IF_MCONT_TXRQXT);
+               pch_can_check_if_busy(&priv->regs->if2_creq, mask);
+       } else if (priv->msg_obj[mask - 1] == MSG_OBJ_RX) {
+               /* Setting CMASK for clearing the reception interrupts. */
+               iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL | CAN_CMASK_ARB,
+                         &priv->regs->if1_cmask);
+
+               /* Clearing the Dir bit. */
+               pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID2_DIR);
+
+               /* Clearing NewDat & IntPnd */
+               pch_can_bit_clear(&priv->regs->if1_mcont,
+                                 CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND);
+
+               pch_can_check_if_busy(&priv->regs->if1_creq, mask);
+       }
+}
+
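+/* TREQ1/TREQ2 hold one transmission-request bit per message object;
+ * combined they form a 32-bit bitmask of objects with pending Tx. */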
+static int pch_can_get_buffer_status(struct pch_can_priv *priv)
+{
+       return (ioread32(&priv->regs->treq1) & 0xffff) |
+              ((ioread32(&priv->regs->treq2) & 0xffff) << 16);
+}
+
+static void pch_can_reset(struct pch_can_priv *priv)
+{
+       /* write to sw reset register */
+       iowrite32(1, &priv->regs->srst);
+       iowrite32(0, &priv->regs->srst);
+}
+
+static void pch_can_error(struct net_device *ndev, u32 status)
+{
+       struct sk_buff *skb;
+       struct pch_can_priv *priv = netdev_priv(ndev);
+       struct can_frame *cf;
+       u32 errc;
+       struct net_device_stats *stats = &(priv->ndev->stats);
+       enum can_state state = priv->can.state;
+
+       skb = alloc_can_err_skb(ndev, &cf);
+       if (!skb)
+               return;
+
+       if (status & PCH_BUS_OFF) {
+               pch_can_tx_disable_all(priv);
+               pch_can_rx_disable_all(priv);
+               state = CAN_STATE_BUS_OFF;
+               cf->can_id |= CAN_ERR_BUSOFF;
+               can_bus_off(ndev);
+               pch_can_set_run_mode(priv, PCH_CAN_RUN);
+               dev_err(&ndev->dev, "%s -> Bus Off occurred.\n", __func__);
+       }
+
+       /* Warning interrupt. */
+       if (status & PCH_EWARN) {
+               state = CAN_STATE_ERROR_WARNING;
+               priv->can.can_stats.error_warning++;
+               cf->can_id |= CAN_ERR_CRTL;
+               errc = ioread32(&priv->regs->errc);
+               if (((errc & CAN_REC) >> 8) > 96)
+                       cf->data[1] |= CAN_ERR_CRTL_RX_WARNING;
+               if ((errc & CAN_TEC) > 96)
+                       cf->data[1] |= CAN_ERR_CRTL_TX_WARNING;
+               dev_warn(&ndev->dev,
+                       "%s -> Error Counter is more than 96.\n", __func__);
+       }
+       /* Error passive interrupt. */
+       if (status & PCH_EPASSIV) {
+               priv->can.can_stats.error_passive++;
+               state = CAN_STATE_ERROR_PASSIVE;
+               cf->can_id |= CAN_ERR_CRTL;
+               errc = ioread32(&priv->regs->errc);
+               if (((errc & CAN_REC) >> 8) > 127)
+                       cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
+               if ((errc & CAN_TEC) > 127)
+                       cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
+               dev_err(&ndev->dev,
+                       "%s -> CAN controller is ERROR PASSIVE .\n", __func__);
+       }
+
+       if (status & PCH_LEC_ALL) {
+               priv->can.can_stats.bus_error++;
+               stats->rx_errors++;
+               switch (status & PCH_LEC_ALL) {
+               case PCH_STUF_ERR:
+                       cf->data[2] |= CAN_ERR_PROT_STUFF;
+                       break;
+               case PCH_FORM_ERR:
+                       cf->data[2] |= CAN_ERR_PROT_FORM;
+                       break;
+               case PCH_ACK_ERR:
+                       cf->data[2] |= CAN_ERR_PROT_LOC_ACK |
+                                      CAN_ERR_PROT_LOC_ACK_DEL;
+                       break;
+               case PCH_BIT1_ERR:
+               case PCH_BIT0_ERR:
+                       cf->data[2] |= CAN_ERR_PROT_BIT;
+                       break;
+               case PCH_CRC_ERR:
+                       cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ |
+                                      CAN_ERR_PROT_LOC_CRC_DEL;
+                       break;
+               default:
+                       iowrite32(status | PCH_LEC_ALL, &priv->regs->stat);
+                       break;
+               }
+
+       }
+
+       priv->can.state = state;
+       netif_rx(skb);
+
+       stats->rx_packets++;
+       stats->rx_bytes += cf->can_dlc;
+}
+
+static irqreturn_t pch_can_interrupt(int irq, void *dev_id)
+{
+       struct net_device *ndev = (struct net_device *)dev_id;
+       struct pch_can_priv *priv = netdev_priv(ndev);
+
+       pch_can_set_int_enables(priv, PCH_CAN_NONE);
+
+       napi_schedule(&priv->napi);
+
+       return IRQ_HANDLED;
+}
+
+static int pch_can_rx_normal(struct net_device *ndev, u32 int_stat)
+{
+       u32 reg;
+       canid_t id;
+       u32 ide;
+       u32 rtr;
+       int i, j, k;
+       int rcv_pkts = 0;
+       struct sk_buff *skb;
+       struct can_frame *cf;
+       struct pch_can_priv *priv = netdev_priv(ndev);
+       struct net_device_stats *stats = &(priv->ndev->stats);
+
+       /* Reading the message object from the Message RAM */
+       iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+       pch_can_check_if_busy(&priv->regs->if1_creq, int_stat);
+
+       /* Reading the MCONT register. */
+       reg = ioread32(&priv->regs->if1_mcont);
+       reg &= 0xffff;
+
+       for (k = int_stat; !(reg & CAN_IF_MCONT_EOB); k++) {
+               /* If MsgLost bit set. */
+               if (reg & CAN_IF_MCONT_MSGLOST) {
+                       dev_err(&priv->ndev->dev, "Msg Obj is overwritten.\n");
+                       pch_can_bit_clear(&priv->regs->if1_mcont,
+                                         CAN_IF_MCONT_MSGLOST);
+                       iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL,
+                                 &priv->regs->if1_cmask);
+                       pch_can_check_if_busy(&priv->regs->if1_creq, k);
+
+                       skb = alloc_can_err_skb(ndev, &cf);
+                       if (!skb)
+                               return -ENOMEM;
+
+                       priv->can.can_stats.error_passive++;
+                       priv->can.state = CAN_STATE_ERROR_PASSIVE;
+                       cf->can_id |= CAN_ERR_CRTL;
+                       cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
+                       cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
+                       stats->rx_packets++;
+                       stats->rx_bytes += cf->can_dlc;
+
+                       netif_receive_skb(skb);
+                       rcv_pkts++;
+                       goto RX_NEXT;
+               }
+               if (!(reg & CAN_IF_MCONT_NEWDAT))
+                       goto RX_NEXT;
+
+               skb = alloc_can_skb(priv->ndev, &cf);
+               if (!skb)
+                       return -ENOMEM;
+
+               /* Get Received data */
+               ide = ((ioread32(&priv->regs->if1_id2)) & CAN_ID2_XTD) >> 14;
+               if (ide) {
+                       id = (ioread32(&priv->regs->if1_id1) & 0xffff);
+                       id |= (((ioread32(&priv->regs->if1_id2)) &
+                                           0x1fff) << 16);
+                       cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG;
+               } else {
+                       id = (((ioread32(&priv->regs->if1_id2)) &
+                                         (CAN_SFF_MASK << 2)) >> 2);
+                       cf->can_id = (id & CAN_SFF_MASK);
+               }
+
+               rtr = (ioread32(&priv->regs->if1_id2) &  CAN_ID2_DIR);
+               if (rtr) {
+                       cf->can_dlc = 0;
+                       cf->can_id |= CAN_RTR_FLAG;
+               } else {
+                       cf->can_dlc = ((ioread32(&priv->regs->if1_mcont)) &
+                                                  0x0f);
+               }
+
+               /* Each 32-bit data register holds two payload bytes. */
+               for (i = 0, j = 0; i < cf->can_dlc; j++) {
+                       reg = ioread32(&priv->regs->if1_dataa1 + j);
+                       cf->data[i++] = reg & 0xff;
+                       if (i == cf->can_dlc)
+                               break;
+                       cf->data[i++] = (reg >> 8) & 0xff;
+               }
+
+               netif_receive_skb(skb);
+               rcv_pkts++;
+               stats->rx_packets++;
+               stats->rx_bytes += cf->can_dlc;
+
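+               /* Clear the pending interrupt: per object while below
+                * the FIFO threshold, for the whole first batch when
+                * the threshold is hit, and via pch_can_int_clr()
+                * beyond it. */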
+               if (k < PCH_FIFO_THRESH) {
+                       iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL |
+                                 CAN_CMASK_ARB, &priv->regs->if1_cmask);
+
+                       /* Clearing the Dir bit. */
+                       pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID2_DIR);
+
+                       /* Clearing NewDat & IntPnd */
+                       pch_can_bit_clear(&priv->regs->if1_mcont,
+                                         CAN_IF_MCONT_INTPND);
+                       pch_can_check_if_busy(&priv->regs->if1_creq, k);
+               } else if (k > PCH_FIFO_THRESH) {
+                       pch_can_int_clr(priv, k);
+               } else if (k == PCH_FIFO_THRESH) {
+                       int cnt;
+                       for (cnt = 0; cnt < PCH_FIFO_THRESH; cnt++)
+                               pch_can_int_clr(priv, cnt+1);
+               }
+RX_NEXT:
+               /* Reading the message object from the Message RAM */
+               iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+               pch_can_check_if_busy(&priv->regs->if1_creq, k + 1);
+               reg = ioread32(&priv->regs->if1_mcont);
+       }
+
+       return rcv_pkts;
+}
+
+static int pch_can_rx_poll(struct napi_struct *napi, int quota)
+{
+       struct net_device *ndev = napi->dev;
+       struct pch_can_priv *priv = netdev_priv(ndev);
+       struct net_device_stats *stats = &(priv->ndev->stats);
+       u32 dlc;
+       u32 int_stat;
+       int rcv_pkts = 0;
+       u32 reg_stat;
+       unsigned long flags;
+
+       int_stat = pch_can_int_pending(priv);
+       if (!int_stat)
+               return 0;
+
+INT_STAT:
+       if (int_stat == CAN_STATUS_INT) {
+               reg_stat = ioread32(&priv->regs->stat);
+               if (reg_stat & (PCH_BUS_OFF | PCH_LEC_ALL)) {
+                       if ((reg_stat & PCH_LEC_ALL) != PCH_LEC_ALL)
+                               pch_can_error(ndev, reg_stat);
+               }
+
+               if (reg_stat & PCH_TX_OK) {
+                       spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+                       iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
+                       pch_can_check_if_busy(&priv->regs->if2_creq,
+                                              ioread32(&priv->regs->intr));
+                       spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+                       pch_can_bit_clear(&priv->regs->stat, PCH_TX_OK);
+               }
+
+               if (reg_stat & PCH_RX_OK)
+                       pch_can_bit_clear(&priv->regs->stat, PCH_RX_OK);
+
+               int_stat = pch_can_int_pending(priv);
+               if (int_stat == CAN_STATUS_INT)
+                       goto INT_STAT;
+       }
+
+MSG_OBJ:
+       if ((int_stat >= 1) && (int_stat <= PCH_RX_OBJ_NUM)) {
+               spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+               rcv_pkts = pch_can_rx_normal(ndev, int_stat);
+               spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+               if (rcv_pkts < 0)
+                       return 0;
+       } else if ((int_stat > PCH_RX_OBJ_NUM) && (int_stat <= PCH_OBJ_NUM)) {
+               if (priv->msg_obj[int_stat - 1] == MSG_OBJ_TX) {
+                       /* Handle transmission interrupt */
+                       can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_NUM - 1);
+                       spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+                       iowrite32(CAN_CMASK_RX_TX_GET | CAN_CMASK_CLRINTPND,
+                                 &priv->regs->if2_cmask);
+                       dlc = ioread32(&priv->regs->if2_mcont) &
+                                      CAN_IF_MCONT_DLC;
+                       pch_can_check_if_busy(&priv->regs->if2_creq, int_stat);
+                       spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+                       if (dlc > 8)
+                               dlc = 8;
+                       stats->tx_bytes += dlc;
+                       stats->tx_packets++;
+               }
+       }
+
+       int_stat = pch_can_int_pending(priv);
+       if (int_stat == CAN_STATUS_INT)
+               goto INT_STAT;
+       else if (int_stat >= 1 && int_stat <= PCH_OBJ_NUM)
+               goto MSG_OBJ;
+
+       napi_complete(napi);
+       pch_can_set_int_enables(priv, PCH_CAN_ALL);
+
+       return rcv_pkts;
+}
+
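+/* The bit-timing registers are derived from the 50 MHz CAN clock: one
+ * input tick is 1000000000 / PCH_CAN_CLK = 20 ns, so e.g. a requested
+ * tq of 100 ns gives brp = 100 / 20 - 1 = 4.  The low six bits of the
+ * prescaler go into BITT, the upper four bits into BRPE. */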
+static int pch_set_bittiming(struct net_device *ndev)
+{
+       struct pch_can_priv *priv = netdev_priv(ndev);
+       const struct can_bittiming *bt = &priv->can.bittiming;
+       u32 canbit;
+       u32 bepe;
+       u32 brp;
+
+       /* Setting the CCE bit for accessing the CAN timing register. */
+       pch_can_bit_set(&priv->regs->cont, CAN_CTRL_CCE);
+
+       brp = (bt->tq) / (1000000000/PCH_CAN_CLK) - 1;
+       canbit = brp & MSK_BITT_BRP;
+       canbit |= (bt->sjw - 1) << BIT_BITT_SJW;
+       canbit |= (bt->phase_seg1 + bt->prop_seg - 1) << BIT_BITT_TSEG1;
+       canbit |= (bt->phase_seg2 - 1) << BIT_BITT_TSEG2;
+       bepe = (brp & MSK_BRPE_BRPE) >> BIT_BRPE_BRPE;
+       iowrite32(canbit, &priv->regs->bitt);
+       iowrite32(bepe, &priv->regs->brpe);
+       pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_CCE);
+
+       return 0;
+}
+
+static void pch_can_start(struct net_device *ndev)
+{
+       struct pch_can_priv *priv = netdev_priv(ndev);
+
+       if (priv->can.state != CAN_STATE_STOPPED)
+               pch_can_reset(priv);
+
+       pch_set_bittiming(ndev);
+       pch_can_set_optmode(priv);
+
+       pch_can_tx_enable_all(priv);
+       pch_can_rx_enable_all(priv);
+
+       /* Setting the CAN to run mode. */
+       pch_can_set_run_mode(priv, PCH_CAN_RUN);
+
+       priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+       return;
+}
+
+static int pch_can_do_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+       int ret = 0;
+
+       switch (mode) {
+       case CAN_MODE_START:
+               pch_can_start(ndev);
+               netif_wake_queue(ndev);
+               break;
+       default:
+               ret = -EOPNOTSUPP;
+               break;
+       }
+
+       return ret;
+}
+
+static int pch_can_open(struct net_device *ndev)
+{
+       struct pch_can_priv *priv = netdev_priv(ndev);
+       int retval;
+
+       retval = pci_enable_msi(priv->dev);
+       if (retval) {
+               dev_info(&ndev->dev, "PCH CAN opened without MSI\n");
+               priv->use_msi = 0;
+       } else {
+               dev_info(&ndev->dev, "PCH CAN opened with MSI\n");
+               priv->use_msi = 1;
+       }
+
+       /* Registering the interrupt handler. */
+       retval = request_irq(priv->dev->irq, pch_can_interrupt, IRQF_SHARED,
+                            ndev->name, ndev);
+       if (retval) {
+               dev_err(&ndev->dev, "request_irq failed.\n");
+               goto req_irq_err;
+       }
+
+       /* Open common can device */
+       retval = open_candev(ndev);
+       if (retval) {
+               dev_err(ndev->dev.parent, "open_candev() failed %d\n", retval);
+               goto err_open_candev;
+       }
+
+       pch_can_init(priv);
+       pch_can_start(ndev);
+       napi_enable(&priv->napi);
+       netif_start_queue(ndev);
+
+       return 0;
+
+err_open_candev:
+       free_irq(priv->dev->irq, ndev);
+req_irq_err:
+       if (priv->use_msi)
+               pci_disable_msi(priv->dev);
+
+       pch_can_release(priv);
+
+       return retval;
+}
+
+static int pch_close(struct net_device *ndev)
+{
+       struct pch_can_priv *priv = netdev_priv(ndev);
+
+       netif_stop_queue(ndev);
+       napi_disable(&priv->napi);
+       pch_can_release(priv);
+       free_irq(priv->dev->irq, ndev);
+       if (priv->use_msi)
+               pci_disable_msi(priv->dev);
+       close_candev(ndev);
+       priv->can.state = CAN_STATE_STOPPED;
+       return 0;
+}
+
+static int pch_get_msg_obj_sts(struct net_device *ndev, u32 obj_id)
+{
+       u32 buffer_status = 0;
+       struct pch_can_priv *priv = netdev_priv(ndev);
+
+       /* Getting the message object status. */
+       buffer_status = (u32) pch_can_get_buffer_status(priv);
+
+       return buffer_status & obj_id;
+}
+
+static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+       int i, j;
+       unsigned long flags;
+       struct pch_can_priv *priv = netdev_priv(ndev);
+       struct can_frame *cf = (struct can_frame *)skb->data;
+       int tx_buffer_avail = 0;
+
+       if (can_dropped_invalid_skb(ndev, skb))
+               return NETDEV_TX_OK;
+
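+       /* Tx objects PCH_RX_OBJ_NUM+1..PCH_OBJ_NUM are handed out in
+        * round-robin order; once past the tail, wait for all pending
+        * transmissions to finish before wrapping back to the head. */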
+       if (priv->tx_obj == (PCH_OBJ_NUM + 1)) { /* Point tail Obj */
+               while (pch_get_msg_obj_sts(ndev, (((1 << PCH_TX_OBJ_NUM)-1) <<
+                                          PCH_RX_OBJ_NUM)))
+                       udelay(500);
+
+               priv->tx_obj = PCH_RX_OBJ_NUM + 1; /* Point head of Tx Obj ID */
+               tx_buffer_avail = priv->tx_obj; /* Point Tail of Tx Obj */
+       } else {
+               tx_buffer_avail = priv->tx_obj;
+       }
+       priv->tx_obj++;
+
+       /* Acquiring the lock. */
+       spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+
+       /* Reading the Msg Obj from the Msg RAM to the Interface register. */
+       iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
+       pch_can_check_if_busy(&priv->regs->if2_creq, tx_buffer_avail);
+
+       /* Setting the CMASK register. */
+       pch_can_bit_set(&priv->regs->if2_cmask, CAN_CMASK_ALL);
+
+       /* If ID extended is set. */
+       pch_can_bit_clear(&priv->regs->if2_id1, 0xffff);
+       pch_can_bit_clear(&priv->regs->if2_id2, 0x1fff | CAN_ID2_XTD);
+       if (cf->can_id & CAN_EFF_FLAG) {
+               pch_can_bit_set(&priv->regs->if2_id1, cf->can_id & 0xffff);
+               pch_can_bit_set(&priv->regs->if2_id2,
+                               ((cf->can_id >> 16) & 0x1fff) | CAN_ID2_XTD);
+       } else {
+               pch_can_bit_set(&priv->regs->if2_id1, 0);
+               pch_can_bit_set(&priv->regs->if2_id2,
+                               (cf->can_id & CAN_SFF_MASK) << 2);
+       }
+
+       /* If a remote frame is to be transmitted, clear the DIR bit. */
+       if (cf->can_id & CAN_RTR_FLAG)
+               pch_can_bit_clear(&priv->regs->if2_id2, CAN_ID2_DIR);
+
+       /*
+        * Each data register carries two payload bytes: byte i in bits 7:0
+        * and, when present, byte i + 1 in bits 15:8.  Combining them into
+        * one write keeps the second byte from clobbering the first.
+        */
+       for (i = 0, j = 0; i < cf->can_dlc; i += 2, j++) {
+               u32 data = cf->data[i];
+
+               if (i + 1 < cf->can_dlc)
+                       data |= cf->data[i + 1] << 8;
+               iowrite32(data, (&priv->regs->if2_dataa1) + j*4);
+       }
+
+       can_put_echo_skb(skb, ndev, tx_buffer_avail - PCH_RX_OBJ_NUM - 1);
+
+       /* Update the data length code (DLC). */
+       pch_can_bit_clear(&priv->regs->if2_mcont, 0x0f);
+       pch_can_bit_set(&priv->regs->if2_mcont, cf->can_dlc);
+
+       /* Clearing IntPend, NewDat & TxRqst */
+       pch_can_bit_clear(&priv->regs->if2_mcont,
+                         CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND |
+                         CAN_IF_MCONT_TXRQXT);
+
+       /* Setting NewDat, TxRqst bits */
+       pch_can_bit_set(&priv->regs->if2_mcont,
+                       CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_TXRQXT);
+
+       pch_can_check_if_busy(&priv->regs->if2_creq, tx_buffer_avail);
+
+       spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+
+       return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops pch_can_netdev_ops = {
+       .ndo_open               = pch_can_open,
+       .ndo_stop               = pch_close,
+       .ndo_start_xmit         = pch_xmit,
+};
+
+static void __devexit pch_can_remove(struct pci_dev *pdev)
+{
+       struct net_device *ndev = pci_get_drvdata(pdev);
+       struct pch_can_priv *priv = netdev_priv(ndev);
+
+       unregister_candev(priv->ndev);
+       pch_can_reset(priv);
+       pci_iounmap(pdev, priv->regs);
+       pci_release_regions(pdev);
+       pci_disable_device(pdev);
+       pci_set_drvdata(pdev, NULL);
+       free_candev(priv->ndev);
+}
+
+#ifdef CONFIG_PM
+static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+       int i;
+       int retval;
+       u32 buf_stat;           /* Transmit buffer status. */
+       u32 counter = 0xFFFFFF; /* Bounded wait for Tx to drain. */
+
+       struct net_device *dev = pci_get_drvdata(pdev);
+       struct pch_can_priv *priv = netdev_priv(dev);
+
+       /* Stop the CAN controller */
+       pch_can_set_run_mode(priv, PCH_CAN_STOP);
+
+       /* Indicate that we are about to enter suspend */
+       priv->can.state = CAN_STATE_SLEEPING;
+
+       /* Wait for all pending transmissions to complete. */
+       while (counter) {
+               buf_stat = pch_can_get_buffer_status(priv);
+               if (!buf_stat)
+                       break;
+               counter--;
+               udelay(1);
+       }
+       if (!counter)
+               dev_err(&pdev->dev, "%s -> Transmission time out.\n", __func__);
+
+       /* Save interrupt configuration and then disable them */
+       pch_can_get_int_enables(priv, &(priv->int_enables));
+       pch_can_set_int_enables(priv, PCH_CAN_DISABLE);
+
+       /* Save Tx buffer enable state */
+       for (i = 0; i < PCH_OBJ_NUM; i++) {
+               if (priv->msg_obj[i] == MSG_OBJ_TX)
+                       pch_can_get_tx_enable(priv, i + 1,
+                                             &(priv->tx_enable[i]));
+       }
+
+       /* Disable all Transmit buffers */
+       pch_can_tx_disable_all(priv);
+
+       /* Save Rx buffer enable state */
+       for (i = 0; i < PCH_OBJ_NUM; i++) {
+               if (priv->msg_obj[i] == MSG_OBJ_RX) {
+                       pch_can_get_rx_enable(priv, i + 1,
+                                               &(priv->rx_enable[i]));
+                       pch_can_get_rx_buffer_link(priv, i + 1,
+                                               &(priv->rx_link[i]));
+               }
+       }
+
+       /* Disable all Receive buffers */
+       pch_can_rx_disable_all(priv);
+       retval = pci_save_state(pdev);
+       if (retval) {
+               dev_err(&pdev->dev, "pci_save_state failed.\n");
+       } else {
+               pci_enable_wake(pdev, PCI_D3hot, 0);
+               pci_disable_device(pdev);
+               pci_set_power_state(pdev, pci_choose_state(pdev, state));
+       }
+
+       return retval;
+}
+
+static int pch_can_resume(struct pci_dev *pdev)
+{
+       int i;
+       int retval;
+       struct net_device *dev = pci_get_drvdata(pdev);
+       struct pch_can_priv *priv = netdev_priv(dev);
+
+       pci_set_power_state(pdev, PCI_D0);
+       pci_restore_state(pdev);
+       retval = pci_enable_device(pdev);
+       if (retval) {
+               dev_err(&pdev->dev, "pci_enable_device failed.\n");
+               return retval;
+       }
+
+       pci_enable_wake(pdev, PCI_D3hot, 0);
+
+       priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+       /* Disabling all interrupts. */
+       pch_can_set_int_enables(priv, PCH_CAN_DISABLE);
+
+       /* Setting the CAN device in Stop Mode. */
+       pch_can_set_run_mode(priv, PCH_CAN_STOP);
+
+       /* Configuring the transmit and receive buffers. */
+       pch_can_config_rx_tx_buffers(priv);
+
+       /* Restore the CAN state */
+       pch_set_bittiming(dev);
+
+       /* Listen/Active */
+       pch_can_set_optmode(priv);
+
+       /* Enabling the transmit buffer. */
+       for (i = 0; i < PCH_OBJ_NUM; i++) {
+               if (priv->msg_obj[i] == MSG_OBJ_TX) {
+                       pch_can_set_tx_enable(priv, i + 1,
+                                             priv->tx_enable[i]);
+               }
+       }
+
+       /* Configuring the receive buffer and enabling them. */
+       for (i = 0; i < PCH_OBJ_NUM; i++) {
+               if (priv->msg_obj[i] == MSG_OBJ_RX) {
+                       /* Restore buffer link */
+                       pch_can_set_rx_buffer_link(priv, i + 1,
+                                                  priv->rx_link[i]);
+
+                       /* Restore buffer enables */
+                       pch_can_set_rx_enable(priv, i + 1, priv->rx_enable[i]);
+               }
+       }
+
+       /* Enable CAN Interrupts */
+       pch_can_set_int_custom(priv);
+
+       /* Restore Run Mode */
+       pch_can_set_run_mode(priv, PCH_CAN_RUN);
+
+       return retval;
+}
+#else
+#define pch_can_suspend NULL
+#define pch_can_resume NULL
+#endif
+
+static int pch_can_get_berr_counter(const struct net_device *dev,
+                                   struct can_berr_counter *bec)
+{
+       struct pch_can_priv *priv = netdev_priv(dev);
+
+       bec->txerr = ioread32(&priv->regs->errc) & CAN_TEC;
+       bec->rxerr = (ioread32(&priv->regs->errc) & CAN_REC) >> 8;
+
+       return 0;
+}
+
+static int __devinit pch_can_probe(struct pci_dev *pdev,
+                                  const struct pci_device_id *id)
+{
+       struct net_device *ndev;
+       struct pch_can_priv *priv;
+       int rc;
+       int index;
+       void __iomem *addr;
+
+       rc = pci_enable_device(pdev);
+       if (rc) {
+               dev_err(&pdev->dev, "Failed pci_enable_device %d\n", rc);
+               goto probe_exit_endev;
+       }
+
+       rc = pci_request_regions(pdev, KBUILD_MODNAME);
+       if (rc) {
+               dev_err(&pdev->dev, "Failed pci_request_regions %d\n", rc);
+               goto probe_exit_pcireq;
+       }
+
+       addr = pci_iomap(pdev, 1, 0);
+       if (!addr) {
+               rc = -EIO;
+               dev_err(&pdev->dev, "Failed pci_iomap\n");
+               goto probe_exit_ipmap;
+       }
+
+       ndev = alloc_candev(sizeof(struct pch_can_priv), PCH_TX_OBJ_NUM);
+       if (!ndev) {
+               rc = -ENOMEM;
+               dev_err(&pdev->dev, "Failed alloc_candev\n");
+               goto probe_exit_alloc_candev;
+       }
+
+       priv = netdev_priv(ndev);
+       priv->ndev = ndev;
+       priv->regs = addr;
+       priv->dev = pdev;
+       priv->can.bittiming_const = &pch_can_bittiming_const;
+       priv->can.do_set_mode = pch_can_do_set_mode;
+       priv->can.do_get_berr_counter = pch_can_get_berr_counter;
+       priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
+                                      CAN_CTRLMODE_LOOPBACK;
+       priv->tx_obj = PCH_RX_OBJ_NUM + 1; /* Point head of Tx Obj */
+
+       ndev->irq = pdev->irq;
+       ndev->flags |= IFF_ECHO;
+
+       pci_set_drvdata(pdev, ndev);
+       SET_NETDEV_DEV(ndev, &pdev->dev);
+       ndev->netdev_ops = &pch_can_netdev_ops;
+
+       priv->can.clock.freq = PCH_CAN_CLK; /* Hz */
+       for (index = 0; index < PCH_RX_OBJ_NUM;)
+               priv->msg_obj[index++] = MSG_OBJ_RX;
+
+       for (; index < PCH_OBJ_NUM;)
+               priv->msg_obj[index++] = MSG_OBJ_TX;
+
+       netif_napi_add(ndev, &priv->napi, pch_can_rx_poll, PCH_RX_OBJ_NUM);
+
+       rc = register_candev(ndev);
+       if (rc) {
+               dev_err(&pdev->dev, "Failed register_candev %d\n", rc);
+               goto probe_exit_reg_candev;
+       }
+
+       return 0;
+
+probe_exit_reg_candev:
+       free_candev(ndev);
+probe_exit_alloc_candev:
+       pci_iounmap(pdev, addr);
+probe_exit_ipmap:
+       pci_release_regions(pdev);
+probe_exit_pcireq:
+       pci_disable_device(pdev);
+probe_exit_endev:
+       return rc;
+}
+
+static struct pci_driver pch_can_pcidev = {
+       .name = "pch_can",
+       .id_table = pch_pci_tbl,
+       .probe = pch_can_probe,
+       .remove = __devexit_p(pch_can_remove),
+       .suspend = pch_can_suspend,
+       .resume = pch_can_resume,
+};
+
+static int __init pch_can_pci_init(void)
+{
+       return pci_register_driver(&pch_can_pcidev);
+}
+module_init(pch_can_pci_init);
+
+static void __exit pch_can_pci_exit(void)
+{
+       pci_unregister_driver(&pch_can_pcidev);
+}
+module_exit(pch_can_pci_exit);
+
+MODULE_DESCRIPTION("Controller Area Network Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.94");
index ae3505afd682ccf58a9bd09dc6112ae83eaf96c4..6fdc031daaae18b23397b64e152d5162725d992c 100644 (file)
@@ -58,4 +58,16 @@ config CAN_PLX_PCI
           - esd CAN-PCIe/2000
           - Marathon CAN-bus-PCI card (http://www.marathon.ru/)
           - TEWS TECHNOLOGIES TPMC810 card (http://www.tews.com/)
+
+config CAN_TSCAN1
+       tristate "TS-CAN1 PC104 boards"
+       depends on ISA
+       help
+       This driver is for Technologic Systems' TS-CAN1 PC104 boards.
+       http://www.embeddedarm.com/products/board-detail.php?product=TS-CAN1
+       The driver supports multiple boards and automatically configures them:
+       PLD IO base addresses are read from jumpers JP1 and JP2,
+       IRQ numbers are read from jumpers JP4 and JP5,
+       SJA1000 IO base addresses are chosen heuristically (first that works).
+
 endif
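
As a minimal standalone sketch of the autoconfiguration described in the
help text above (the constants mirror tscan1.c below; the helper names and
the demo program are illustrative, not part of the patch):

#include <stdio.h>

#define TSCAN1_PLD_ADDRESS 0x150        /* first JP1:JP2 PLD IO window */
#define TSCAN1_PLD_SIZE    8
#define TSCAN1_JP4         0x10
#define TSCAN1_JP5         0x20

/* PLD IO base selected by the JP1:JP2 board ID (0..3) */
static unsigned long tscan1_pld_base(unsigned int board_id)
{
        return TSCAN1_PLD_ADDRESS + board_id * TSCAN1_PLD_SIZE;
}

/* IRQ selected by JP4/JP5, as in tscan1_probe(); -1 means no valid IRQ */
static int tscan1_irq(unsigned char jumpers)
{
        switch (jumpers & (TSCAN1_JP4 | TSCAN1_JP5)) {
        case TSCAN1_JP4:
                return 6;
        case TSCAN1_JP5:
                return 7;
        case TSCAN1_JP4 | TSCAN1_JP5:
                return 5;
        default:
                return -1;
        }
}

int main(void)
{
        printf("board 1: PLD at 0x%lx, JP4 only -> irq %d\n",
               tscan1_pld_base(1), tscan1_irq(TSCAN1_JP4));
        return 0;
}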
index ce924553995d5d850c73f4e865a189f685795b18..2c591eb321c7987f0b55f0852727eba7f1b36192 100644 (file)
@@ -9,5 +9,6 @@ obj-$(CONFIG_CAN_SJA1000_OF_PLATFORM) += sja1000_of_platform.o
 obj-$(CONFIG_CAN_EMS_PCI) += ems_pci.o
 obj-$(CONFIG_CAN_KVASER_PCI) += kvaser_pci.o
 obj-$(CONFIG_CAN_PLX_PCI) += plx_pci.o
+obj-$(CONFIG_CAN_TSCAN1) += tscan1.o
 
 ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/sja1000/tscan1.c b/drivers/net/can/sja1000/tscan1.c
new file mode 100644 (file)
index 0000000..9756099
--- /dev/null
@@ -0,0 +1,216 @@
+/*
+ * tscan1.c: driver for Technologic Systems TS-CAN1 PC104 boards
+ *
+ * Copyright 2010 Andre B. Oliveira
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * References:
+ * - Getting started with TS-CAN1, Technologic Systems, Jun 2009
+ *     http://www.embeddedarm.com/documentation/ts-can1-manual.pdf
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/isa.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include "sja1000.h"
+
+MODULE_DESCRIPTION("Driver for Technologic Systems TS-CAN1 PC104 boards");
+MODULE_AUTHOR("Andre B. Oliveira <anbadeol@gmail.com>");
+MODULE_LICENSE("GPL");
+
+/* Maximum number of boards (one for each JP1:JP2 IO address setting) */
+#define TSCAN1_MAXDEV 4
+
+/* PLD registers address offsets */
+#define TSCAN1_ID1     0
+#define TSCAN1_ID2     1
+#define TSCAN1_VERSION 2
+#define TSCAN1_LED     3
+#define TSCAN1_PAGE    4
+#define TSCAN1_MODE    5
+#define TSCAN1_JUMPERS 6
+
+/* PLD board identifier registers magic values */
+#define TSCAN1_ID1_VALUE 0xf6
+#define TSCAN1_ID2_VALUE 0xb9
+
+/* PLD mode register SJA1000 IO enable bit */
+#define TSCAN1_MODE_ENABLE 0x40
+
+/* PLD jumpers register bits */
+#define TSCAN1_JP4 0x10
+#define TSCAN1_JP5 0x20
+
+/* PLD IO base addresses start */
+#define TSCAN1_PLD_ADDRESS 0x150
+
+/* PLD register space size */
+#define TSCAN1_PLD_SIZE 8
+
+/* SJA1000 register space size */
+#define TSCAN1_SJA1000_SIZE 32
+
+/* SJA1000 crystal frequency (16MHz) */
+#define TSCAN1_SJA1000_XTAL 16000000
+
+/* SJA1000 IO base addresses */
+static const unsigned short tscan1_sja1000_addresses[] __devinitconst = {
+       0x100, 0x120, 0x180, 0x1a0, 0x200, 0x240, 0x280, 0x320
+};
+
+/* Read SJA1000 register */
+static u8 tscan1_read(const struct sja1000_priv *priv, int reg)
+{
+       return inb((unsigned long)priv->reg_base + reg);
+}
+
+/* Write SJA1000 register */
+static void tscan1_write(const struct sja1000_priv *priv, int reg, u8 val)
+{
+       outb(val, (unsigned long)priv->reg_base + reg);
+}
+
+/* Probe for a TS-CAN1 board with JP2:JP1 jumper setting ID */
+static int __devinit tscan1_probe(struct device *dev, unsigned id)
+{
+       struct net_device *netdev;
+       struct sja1000_priv *priv;
+       unsigned long pld_base, sja1000_base;
+       int irq, i;
+
+       pld_base = TSCAN1_PLD_ADDRESS + id * TSCAN1_PLD_SIZE;
+       if (!request_region(pld_base, TSCAN1_PLD_SIZE, dev_name(dev)))
+               return -EBUSY;
+
+       if (inb(pld_base + TSCAN1_ID1) != TSCAN1_ID1_VALUE ||
+           inb(pld_base + TSCAN1_ID2) != TSCAN1_ID2_VALUE) {
+               release_region(pld_base, TSCAN1_PLD_SIZE);
+               return -ENODEV;
+       }
+
+       switch (inb(pld_base + TSCAN1_JUMPERS) & (TSCAN1_JP4 | TSCAN1_JP5)) {
+       case TSCAN1_JP4:
+               irq = 6;
+               break;
+       case TSCAN1_JP5:
+               irq = 7;
+               break;
+       case TSCAN1_JP4 | TSCAN1_JP5:
+               irq = 5;
+               break;
+       default:
+               dev_err(dev, "invalid JP4:JP5 setting (no IRQ)\n");
+               release_region(pld_base, TSCAN1_PLD_SIZE);
+               return -EINVAL;
+       }
+
+       netdev = alloc_sja1000dev(0);
+       if (!netdev) {
+               release_region(pld_base, TSCAN1_PLD_SIZE);
+               return -ENOMEM;
+       }
+
+       dev_set_drvdata(dev, netdev);
+       SET_NETDEV_DEV(netdev, dev);
+
+       netdev->base_addr = pld_base;
+       netdev->irq = irq;
+
+       priv = netdev_priv(netdev);
+       priv->read_reg = tscan1_read;
+       priv->write_reg = tscan1_write;
+       priv->can.clock.freq = TSCAN1_SJA1000_XTAL / 2;
+       priv->cdr = CDR_CBP | CDR_CLK_OFF;
+       priv->ocr = OCR_TX0_PUSHPULL;
+
+       /* Select the first SJA1000 IO address that is free and that works */
+       for (i = 0; i < ARRAY_SIZE(tscan1_sja1000_addresses); i++) {
+               sja1000_base = tscan1_sja1000_addresses[i];
+               if (!request_region(sja1000_base, TSCAN1_SJA1000_SIZE,
+                                                               dev_name(dev)))
+                       continue;
+
+               /* Set SJA1000 IO base address and enable it */
+               outb(TSCAN1_MODE_ENABLE | i, pld_base + TSCAN1_MODE);
+
+               priv->reg_base = (void __iomem *)sja1000_base;
+               if (!register_sja1000dev(netdev)) {
+                       /* SJA1000 probe succeeded; turn LED off and return */
+                       outb(0, pld_base + TSCAN1_LED);
+                       netdev_info(netdev, "TS-CAN1 at 0x%lx 0x%lx irq %d\n",
+                                               pld_base, sja1000_base, irq);
+                       return 0;
+               }
+
+               /* SJA1000 probe failed; release and try next address */
+               outb(0, pld_base + TSCAN1_MODE);
+               release_region(sja1000_base, TSCAN1_SJA1000_SIZE);
+       }
+
+       dev_err(dev, "failed to assign SJA1000 IO address\n");
+       dev_set_drvdata(dev, NULL);
+       free_sja1000dev(netdev);
+       release_region(pld_base, TSCAN1_PLD_SIZE);
+       return -ENXIO;
+}
+
+static int __devexit tscan1_remove(struct device *dev, unsigned id /*unused*/)
+{
+       struct net_device *netdev;
+       struct sja1000_priv *priv;
+       unsigned long pld_base, sja1000_base;
+
+       netdev = dev_get_drvdata(dev);
+       unregister_sja1000dev(netdev);
+       dev_set_drvdata(dev, NULL);
+
+       priv = netdev_priv(netdev);
+       pld_base = netdev->base_addr;
+       sja1000_base = (unsigned long)priv->reg_base;
+
+       outb(0, pld_base + TSCAN1_MODE);        /* disable SJA1000 IO space */
+
+       release_region(sja1000_base, TSCAN1_SJA1000_SIZE);
+       release_region(pld_base, TSCAN1_PLD_SIZE);
+
+       free_sja1000dev(netdev);
+
+       return 0;
+}
+
+static struct isa_driver tscan1_isa_driver = {
+       .probe = tscan1_probe,
+       .remove = __devexit_p(tscan1_remove),
+       .driver = {
+               .name = "tscan1",
+       },
+};
+
+static int __init tscan1_init(void)
+{
+       return isa_register_driver(&tscan1_isa_driver, TSCAN1_MAXDEV);
+}
+module_init(tscan1_init);
+
+static void __exit tscan1_exit(void)
+{
+       isa_unregister_driver(&tscan1_isa_driver);
+}
+module_exit(tscan1_exit);
index a04ce6a5f637142a1aba4cb933c03bb8485cd5ec..4e3c12371aaecea6de7e3057b6f8b4713387355f 100644 (file)
@@ -1266,11 +1266,13 @@ static int cxgb_up(struct adapter *adap)
        }
 
        if (!(adap->flags & QUEUES_BOUND)) {
-               err = bind_qsets(adap);
-               if (err) {
-                       CH_ERR(adap, "failed to bind qsets, err %d\n", err);
+               int ret = bind_qsets(adap);
+
+               if (ret < 0) {
+                       CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
                        t3_intr_disable(adap);
                        free_irq_resources(adap);
+                       err = ret;
                        goto out;
                }
                adap->flags |= QUEUES_BOUND;
index eaa49e4119f146c047903a37dfda408086910faa..3d4253d311eb50972c3037fb5e7b52ef2b18395b 100644 (file)
@@ -281,7 +281,6 @@ struct sge_rspq;
 
 struct port_info {
        struct adapter *adapter;
-       struct vlan_group *vlan_grp;
        u16    viid;
        s16    xact_addr_filt;        /* index of exact MAC address filter */
        u16    rss_size;              /* size of VI's RSS table slice */
index 87054e0a57467841be71bcb3e3cfc7517c54bbe8..f17703f410b3673a096aec05f9085329650c5c52 100644 (file)
@@ -403,7 +403,7 @@ static int link_start(struct net_device *dev)
         * that step explicitly.
         */
        ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
-                           pi->vlan_grp != NULL, true);
+                           !!(dev->features & NETIF_F_HW_VLAN_RX), true);
        if (ret == 0) {
                ret = t4_change_mac(pi->adapter, mb, pi->viid,
                                    pi->xact_addr_filt, dev->dev_addr, true,
@@ -1881,7 +1881,24 @@ static int set_tso(struct net_device *dev, u32 value)
 
 static int set_flags(struct net_device *dev, u32 flags)
 {
-       return ethtool_op_set_flags(dev, flags, ETH_FLAG_RXHASH);
+       int err;
+       unsigned long old_feat = dev->features;
+
+       err = ethtool_op_set_flags(dev, flags, ETH_FLAG_RXHASH |
+                                  ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN);
+       if (err)
+               return err;
+
+       if ((old_feat ^ dev->features) & NETIF_F_HW_VLAN_RX) {
+               const struct port_info *pi = netdev_priv(dev);
+
+               err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
+                                   -1, -1, -1, !!(flags & ETH_FLAG_RXVLAN),
+                                   true);
+               if (err)
+                       dev->features = old_feat;
+       }
+       return err;
 }
 
 static int get_rss_table(struct net_device *dev, struct ethtool_rxfh_indir *p)
@@ -2842,15 +2859,6 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
        return 0;
 }
 
-static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
-{
-       struct port_info *pi = netdev_priv(dev);
-
-       pi->vlan_grp = grp;
-       t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1, -1, -1, -1,
-                     grp != NULL, true);
-}
-
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void cxgb_netpoll(struct net_device *dev)
 {
@@ -2878,7 +2886,6 @@ static const struct net_device_ops cxgb4_netdev_ops = {
        .ndo_validate_addr    = eth_validate_addr,
        .ndo_do_ioctl         = cxgb_ioctl,
        .ndo_change_mtu       = cxgb_change_mtu,
-       .ndo_vlan_rx_register = vlan_rx_register,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller  = cxgb_netpoll,
 #endif
@@ -3658,7 +3665,6 @@ static int __devinit init_one(struct pci_dev *pdev,
                pi->rx_offload = RX_CSO;
                pi->port_id = i;
                netif_carrier_off(netdev);
-               netif_tx_stop_all_queues(netdev);
                netdev->irq = pdev->irq;
 
                netdev->features |= NETIF_F_SG | TSO_FLAGS;
@@ -3730,6 +3736,7 @@ static int __devinit init_one(struct pci_dev *pdev,
 
                        __set_bit(i, &adapter->registered_device_map);
                        adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i;
+                       netif_tx_stop_all_queues(adapter->port[i]);
                }
        }
        if (!adapter->registered_device_map) {
index 9967f3debce7e1010bd6ff58df5d6ad2060543d3..17022258ed689101e940ce977c57a4c5540f9e98 100644 (file)
@@ -1530,18 +1530,11 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
                skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
 
        if (unlikely(pkt->vlan_ex)) {
-               struct port_info *pi = netdev_priv(rxq->rspq.netdev);
-               struct vlan_group *grp = pi->vlan_grp;
-
+               __vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
                rxq->stats.vlan_ex++;
-               if (likely(grp)) {
-                       ret = vlan_gro_frags(&rxq->rspq.napi, grp,
-                                            ntohs(pkt->vlan));
-                       goto stats;
-               }
        }
        ret = napi_gro_frags(&rxq->rspq.napi);
-stats: if (ret == GRO_HELD)
+       if (ret == GRO_HELD)
                rxq->stats.lro_pkts++;
        else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
                rxq->stats.lro_merged++;
@@ -1608,16 +1601,10 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
                skb_checksum_none_assert(skb);
 
        if (unlikely(pkt->vlan_ex)) {
-               struct vlan_group *grp = pi->vlan_grp;
-
+               __vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
                rxq->stats.vlan_ex++;
-               if (likely(grp))
-                       vlan_hwaccel_receive_skb(skb, grp, ntohs(pkt->vlan));
-               else
-                       dev_kfree_skb_any(skb);
-       } else
-               netif_receive_skb(skb);
-
+       }
+       netif_receive_skb(skb);
        return 0;
 }
 
index a117f2a0252e8c884f6d890f912556924635cae9..4686c3983fc3044f0e0af47cf75dee71b6dadb02 100644 (file)
@@ -521,7 +521,7 @@ void e1000_down(struct e1000_adapter *adapter)
        e1000_clean_all_rx_rings(adapter);
 }
 
-void e1000_reinit_safe(struct e1000_adapter *adapter)
+static void e1000_reinit_safe(struct e1000_adapter *adapter)
 {
        while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
                msleep(1);
index 1321cb6401cfc9fd2a68ed0ba524a6fc83c17a32..8e745e74828d5e904d9c6aa8844bb4109bd33af7 100644 (file)
@@ -396,7 +396,9 @@ struct ehea_port_res {
        int swqe_ll_count;
        u32 swqe_id_counter;
        u64 tx_packets;
+       u64 tx_bytes;
        u64 rx_packets;
+       u64 rx_bytes;
        u32 poll_counter;
        struct net_lro_mgr lro_mgr;
        struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS];
index bb7d306fb44656c3070fa3c20e2ef7b0759c73fc..182b2a7be8dcd02673bd9512d2760cb61b86b884 100644 (file)
@@ -330,7 +330,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
        struct ehea_port *port = netdev_priv(dev);
        struct net_device_stats *stats = &port->stats;
        struct hcp_ehea_port_cb2 *cb2;
-       u64 hret, rx_packets, tx_packets;
+       u64 hret, rx_packets, tx_packets, rx_bytes = 0, tx_bytes = 0;
        int i;
 
        memset(stats, 0, sizeof(*stats));
@@ -353,18 +353,22 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
                ehea_dump(cb2, sizeof(*cb2), "net_device_stats");
 
        rx_packets = 0;
-       for (i = 0; i < port->num_def_qps; i++)
+       for (i = 0; i < port->num_def_qps; i++) {
                rx_packets += port->port_res[i].rx_packets;
+               rx_bytes   += port->port_res[i].rx_bytes;
+       }
 
        tx_packets = 0;
-       for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
+       for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
                tx_packets += port->port_res[i].tx_packets;
+               tx_bytes   += port->port_res[i].tx_bytes;
+       }
 
        stats->tx_packets = tx_packets;
        stats->multicast = cb2->rxmcp;
        stats->rx_errors = cb2->rxuerr;
-       stats->rx_bytes = cb2->rxo;
-       stats->tx_bytes = cb2->txo;
+       stats->rx_bytes = rx_bytes;
+       stats->tx_bytes = tx_bytes;
        stats->rx_packets = rx_packets;
 
 out_herr:
@@ -703,6 +707,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
        int skb_arr_rq2_len = pr->rq2_skba.len;
        int skb_arr_rq3_len = pr->rq3_skba.len;
        int processed, processed_rq1, processed_rq2, processed_rq3;
+       u64 processed_bytes = 0;
        int wqe_index, last_wqe_index, rq, port_reset;
 
        processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
@@ -760,6 +765,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
                                processed_rq3++;
                        }
 
+                       processed_bytes += skb->len;
                        ehea_proc_skb(pr, cqe, skb);
                } else {
                        pr->p_stats.poll_receive_errors++;
@@ -775,6 +781,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
                lro_flush_all(&pr->lro_mgr);
 
        pr->rx_packets += processed;
+       pr->rx_bytes += processed_bytes;
 
        ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
        ehea_refill_rq2(pr, processed_rq2);
@@ -1509,9 +1516,20 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
        enum ehea_eq_type eq_type = EHEA_EQ;
        struct ehea_qp_init_attr *init_attr = NULL;
        int ret = -EIO;
+       u64 tx_bytes, rx_bytes, tx_packets, rx_packets;
+
+       tx_bytes = pr->tx_bytes;
+       tx_packets = pr->tx_packets;
+       rx_bytes = pr->rx_bytes;
+       rx_packets = pr->rx_packets;
 
        memset(pr, 0, sizeof(struct ehea_port_res));
 
+       pr->tx_bytes = tx_bytes;
+       pr->tx_packets = tx_packets;
+       pr->rx_bytes = rx_bytes;
+       pr->rx_packets = rx_packets;
+
        pr->port = port;
        spin_lock_init(&pr->xmit_lock);
        spin_lock_init(&pr->netif_queue);
@@ -2249,6 +2267,14 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
        memset(swqe, 0, SWQE_HEADER_SIZE);
        atomic_dec(&pr->swqe_avail);
 
+       if (vlan_tx_tag_present(skb)) {
+               swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
+               swqe->vlan_tag = vlan_tx_tag_get(skb);
+       }
+
+       pr->tx_packets++;
+       pr->tx_bytes += skb->len;
+
        if (skb->len <= SWQE3_MAX_IMM) {
                u32 sig_iv = port->sig_comp_iv;
                u32 swqe_num = pr->swqe_id_counter;
@@ -2279,11 +2305,6 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
        }
        pr->swqe_id_counter += 1;
 
-       if (vlan_tx_tag_present(skb)) {
-               swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
-               swqe->vlan_tag = vlan_tx_tag_get(skb);
-       }
-
        if (netif_msg_tx_queued(port)) {
                ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
                ehea_dump(swqe, 512, "swqe");
@@ -2295,7 +2316,6 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        ehea_post_swqe(pr->qp, swqe);
-       pr->tx_packets++;
 
        if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
                spin_lock_irqsave(&pr->netif_queue, flags);
index 4c4cc80ec0a1938f4a543254a7bf763f0a3ebfd4..49e4ce1246a75e01c30a623de0031707fe0273c9 100644 (file)
@@ -2511,7 +2511,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
                                skb_recycle_check(skb, priv->rx_buffer_size +
                                        RXBUF_ALIGNMENT)) {
                        gfar_align_skb(skb);
-                       __skb_queue_head(&priv->rx_recycle, skb);
+                       skb_queue_head(&priv->rx_recycle, skb);
                } else
                        dev_kfree_skb_any(skb);
 
@@ -2594,7 +2594,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev)
        struct gfar_private *priv = netdev_priv(dev);
        struct sk_buff *skb = NULL;
 
-       skb = __skb_dequeue(&priv->rx_recycle);
+       skb = skb_dequeue(&priv->rx_recycle);
        if (!skb)
                skb = gfar_alloc_skb(dev);
 
@@ -2750,7 +2750,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
                        if (unlikely(!newskb))
                                newskb = skb;
                        else if (skb)
-                               __skb_queue_head(&priv->rx_recycle, skb);
+                               skb_queue_head(&priv->rx_recycle, skb);
                } else {
                        /* Increment the number of packets */
                        rx_queue->stats.rx_packets++;
index d7a975ee2add393010e9db6ad38fbd1c770603a4..d85edf3119c2625d999b22a41b6159ba9ebf7a24 100644 (file)
@@ -1623,12 +1623,12 @@ err_out:
        return rc;
 }
 
-#ifdef CONFIG_PM
 static void
 jme_set_100m_half(struct jme_adapter *jme)
 {
        u32 bmcr, tmp;
 
+       jme_phy_on(jme);
        bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
        tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
                       BMCR_SPEED1000 | BMCR_FULLDPLX);
@@ -1656,7 +1656,6 @@ jme_wait_link(struct jme_adapter *jme)
                phylink = jme_linkstat_from_phy(jme);
        }
 }
-#endif
 
 static inline void
 jme_phy_off(struct jme_adapter *jme)
@@ -1664,6 +1663,21 @@ jme_phy_off(struct jme_adapter *jme)
        jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN);
 }
 
+static void
+jme_powersave_phy(struct jme_adapter *jme)
+{
+       if (jme->reg_pmcs) {
+               jme_set_100m_half(jme);
+
+               if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
+                       jme_wait_link(jme);
+
+               jwrite32(jme, JME_PMCS, jme->reg_pmcs);
+       } else {
+               jme_phy_off(jme);
+       }
+}
+
 static int
 jme_close(struct net_device *netdev)
 {
@@ -2991,6 +3005,16 @@ jme_remove_one(struct pci_dev *pdev)
 
 }
 
+static void
+jme_shutdown(struct pci_dev *pdev)
+{
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct jme_adapter *jme = netdev_priv(netdev);
+
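+       /* Force the PHY into its wake-capable 100M/half state (or power it
+        * off when WOL is disabled) and arm PME, so wake-on-LAN also works
+        * across shutdown/poweroff, not just suspend.
+        */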
+       jme_powersave_phy(jme);
+       pci_pme_active(pdev, true);
+}
+
 #ifdef CONFIG_PM
 static int
 jme_suspend(struct pci_dev *pdev, pm_message_t state)
@@ -3028,19 +3052,9 @@ jme_suspend(struct pci_dev *pdev, pm_message_t state)
        tasklet_hi_enable(&jme->rxempty_task);
 
        pci_save_state(pdev);
-       if (jme->reg_pmcs) {
-               jme_set_100m_half(jme);
-
-               if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
-                       jme_wait_link(jme);
-
-               jwrite32(jme, JME_PMCS, jme->reg_pmcs);
-
-               pci_enable_wake(pdev, PCI_D3cold, true);
-       } else {
-               jme_phy_off(jme);
-       }
-       pci_set_power_state(pdev, PCI_D3cold);
+       jme_powersave_phy(jme);
+       pci_enable_wake(jme->pdev, PCI_D3hot, true);
+       pci_set_power_state(pdev, PCI_D3hot);
 
        return 0;
 }
@@ -3087,6 +3101,7 @@ static struct pci_driver jme_driver = {
        .suspend        = jme_suspend,
        .resume         = jme_resume,
 #endif /* CONFIG_PM */
+       .shutdown       = jme_shutdown,
 };
 
 static int __init
index 4297f6e8c4bc0ab571dd571d9a6f08d5493c6e76..f69e73e2191e9e4a54f3f617e14d8ac6c33734ef 100644 (file)
@@ -515,14 +515,15 @@ static int macb_poll(struct napi_struct *napi, int budget)
                (unsigned long)status, budget);
 
        work_done = macb_rx(bp, budget);
-       if (work_done < budget)
+       if (work_done < budget) {
                napi_complete(napi);
 
-       /*
-        * We've done what we can to clean the buffers. Make sure we
-        * get notified when new packets arrive.
-        */
-       macb_writel(bp, IER, MACB_RX_INT_FLAGS);
+               /*
+                * We've done what we can to clean the buffers. Make sure we
+                * get notified when new packets arrive.
+                */
+               macb_writel(bp, IER, MACB_RX_INT_FLAGS);
+       }
 
        /* TODO: Handle errors */
 
@@ -550,12 +551,16 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
                }
 
                if (status & MACB_RX_INT_FLAGS) {
+                       /*
+                        * There's no point taking any more interrupts
+                        * until we have processed the buffers. The
+                        * scheduling call may fail if the poll routine
+                        * is already scheduled, so disable interrupts
+                        * now.
+                        */
+                       macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
+
                        if (napi_schedule_prep(&bp->napi)) {
-                               /*
-                                * There's no point taking any more interrupts
-                                * until we have processed the buffers
-                                */
-                               macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
                                dev_dbg(&bp->pdev->dev,
                                        "scheduling RX softirq\n");
                                __napi_schedule(&bp->napi);
index b07e4dee80aada693e46e0a4cabca678f8500ad8..02393fdf44c17e57cbf0fd775a86a20fc8f75a5a 100644 (file)
@@ -210,38 +210,12 @@ static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
        return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
 }
 
-int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
+static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
 {
        return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
                        MLX4_CMD_TIME_CLASS_B);
 }
 
-int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt)
-{
-       struct mlx4_cmd_mailbox *mailbox;
-       __be64 *inbox;
-       int err;
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox))
-               return PTR_ERR(mailbox);
-       inbox = mailbox->buf;
-
-       inbox[0] = cpu_to_be64(virt);
-       inbox[1] = cpu_to_be64(dma_addr);
-
-       err = mlx4_cmd(dev, mailbox->dma, 1, 0, MLX4_CMD_MAP_ICM,
-                      MLX4_CMD_TIME_CLASS_B);
-
-       mlx4_free_cmd_mailbox(dev, mailbox);
-
-       if (!err)
-               mlx4_dbg(dev, "Mapped page at %llx to %llx for ICM.\n",
-                         (unsigned long long) dma_addr, (unsigned long long) virt);
-
-       return err;
-}
-
 int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
 {
        return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
index ab56a2f89b651eb3c33cba5da21e1686a21bb98e..b10c07a1dc1a09d4508d8b376f5f2622aa0c5d18 100644 (file)
@@ -128,8 +128,6 @@ static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
        return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
 }
 
-int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count);
-int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt);
 int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
 int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);
 
index 56371ef328efebec61d170de0d9cc569dcbd5913..451339559bdc16bd64e07e124850f5280ef02b3d 100644 (file)
@@ -111,6 +111,12 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index)
                        goto out;
                }
        }
+
+       if (free < 0) {
+               err = -ENOMEM;
+               goto out;
+       }
+
        mlx4_dbg(dev, "Free MAC index is %d\n", free);
 
        if (table->total == table->max) {
@@ -224,6 +230,11 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
                }
        }
 
+       if (free < 0) {
+               err = -ENOMEM;
+               goto out;
+       }
+
        if (table->total == table->max) {
                /* No free vlan entries */
                err = -ENOSPC;
index 1bb16cb794331ba975eddcc5327f1fc13ef495c9..7670aac0e93f465c5505a6bb799658299724c3e4 100644 (file)
@@ -65,7 +65,7 @@ EXPORT_SYMBOL(phy_print_status);
  *
  * Returns 0 on success or < 0 on error.
  */
-int phy_clear_interrupt(struct phy_device *phydev)
+static int phy_clear_interrupt(struct phy_device *phydev)
 {
        int err = 0;
 
@@ -82,7 +82,7 @@ int phy_clear_interrupt(struct phy_device *phydev)
  *
  * Returns 0 on success or < 0 on error.
  */
-int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
+static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
 {
        int err = 0;
 
@@ -208,7 +208,7 @@ static inline int phy_find_valid(int idx, u32 features)
  *   duplexes.  Drop down by one in this order:  1000/FULL,
  *   1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
  */
-void phy_sanitize_settings(struct phy_device *phydev)
+static void phy_sanitize_settings(struct phy_device *phydev)
 {
        u32 features = phydev->supported;
        int idx;
@@ -223,7 +223,6 @@ void phy_sanitize_settings(struct phy_device *phydev)
        phydev->speed = settings[idx].speed;
        phydev->duplex = settings[idx].duplex;
 }
-EXPORT_SYMBOL(phy_sanitize_settings);
 
 /**
  * phy_ethtool_sset - generic ethtool sset function, handles all the details
@@ -532,7 +531,7 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
  * phy_enable_interrupts - Enable the interrupts from the PHY side
  * @phydev: target phy_device struct
  */
-int phy_enable_interrupts(struct phy_device *phydev)
+static int phy_enable_interrupts(struct phy_device *phydev)
 {
        int err;
 
@@ -545,13 +544,12 @@ int phy_enable_interrupts(struct phy_device *phydev)
 
        return err;
 }
-EXPORT_SYMBOL(phy_enable_interrupts);
 
 /**
  * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
  * @phydev: target phy_device struct
  */
-int phy_disable_interrupts(struct phy_device *phydev)
+static int phy_disable_interrupts(struct phy_device *phydev)
 {
        int err;
 
@@ -574,7 +572,6 @@ phy_err:
 
        return err;
 }
-EXPORT_SYMBOL(phy_disable_interrupts);
 
 /**
  * phy_start_interrupts - request and enable interrupts for a PHY device
index 16ddc77313cb08348b3cdbdb7016bd5c56fb9304..993c52c82aeb632e49745ddc462303795bfe44bd 100644 (file)
@@ -57,6 +57,9 @@ extern void mdio_bus_exit(void);
 static LIST_HEAD(phy_fixup_list);
 static DEFINE_MUTEX(phy_fixup_lock);
 
+static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
+                            u32 flags, phy_interface_t interface);
+
 /*
  * Creates a new phy_fixup and adds it to the list
  * @bus_id: A string which matches phydev->dev.bus_id (or PHY_ANY_ID)
@@ -146,7 +149,8 @@ int phy_scan_fixups(struct phy_device *phydev)
 }
 EXPORT_SYMBOL(phy_scan_fixups);
 
-struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
+static struct phy_device* phy_device_create(struct mii_bus *bus,
+                                           int addr, int phy_id)
 {
        struct phy_device *dev;
 
@@ -193,7 +197,6 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
 
        return dev;
 }
-EXPORT_SYMBOL(phy_device_create);
 
 /**
  * get_phy_id - reads the specified addr for its ID.
@@ -316,7 +319,7 @@ EXPORT_SYMBOL(phy_find_first);
  *   If you want to monitor your own link state, don't call
  *   this function.
  */
-void phy_prepare_link(struct phy_device *phydev,
+static void phy_prepare_link(struct phy_device *phydev,
                void (*handler)(struct net_device *))
 {
        phydev->adjust_link = handler;
@@ -435,8 +438,8 @@ int phy_init_hw(struct phy_device *phydev)
  *     the attaching device, and given a callback for link status
  *     change.  The phy_device is returned to the attaching driver.
  */
-int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
-                     u32 flags, phy_interface_t interface)
+static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
+                            u32 flags, phy_interface_t interface)
 {
        struct device *d = &phydev->dev;
 
@@ -473,7 +476,6 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
         * (dev_flags and interface) */
        return phy_init_hw(phydev);
 }
-EXPORT_SYMBOL(phy_attach_direct);
 
 /**
  * phy_attach - attach a network device to a particular PHY device
@@ -540,7 +542,7 @@ EXPORT_SYMBOL(phy_detach);
  *   what is supported.  Returns < 0 on error, 0 if the PHY's advertisement
  *   hasn't changed, and > 0 if it has changed.
  */
-int genphy_config_advert(struct phy_device *phydev)
+static int genphy_config_advert(struct phy_device *phydev)
 {
        u32 advertise;
        int oldadv, adv;
@@ -605,7 +607,6 @@ int genphy_config_advert(struct phy_device *phydev)
 
        return changed;
 }
-EXPORT_SYMBOL(genphy_config_advert);
 
 /**
  * genphy_setup_forced - configures/forces speed/duplex from @phydev
@@ -615,7 +616,7 @@ EXPORT_SYMBOL(genphy_config_advert);
  *   to the values in phydev. Assumes that the values are valid.
  *   Please see phy_sanitize_settings().
  */
-int genphy_setup_forced(struct phy_device *phydev)
+static int genphy_setup_forced(struct phy_device *phydev)
 {
        int err;
        int ctl = 0;
index 26c37d3a5868ce2bf7e73ba98da5456006c02c53..8ecc170c9b74f569f2e79be28f70dfc96097d50d 100644 (file)
 #define MAX_CMD_DESCRIPTORS            1024
 #define MAX_RCV_DESCRIPTORS_1G         4096
 #define MAX_RCV_DESCRIPTORS_10G        8192
+#define MAX_RCV_DESCRIPTORS_VF         2048
 #define MAX_JUMBO_RCV_DESCRIPTORS_1G   512
 #define MAX_JUMBO_RCV_DESCRIPTORS_10G  1024
 
 #define DEFAULT_RCV_DESCRIPTORS_1G     2048
 #define DEFAULT_RCV_DESCRIPTORS_10G    4096
+#define DEFAULT_RCV_DESCRIPTORS_VF     1024
 #define MAX_RDS_RINGS                   2
 
 #define get_next_index(index, length)  \
@@ -942,6 +944,7 @@ struct qlcnic_ipaddr {
 #define QLCNIC_LOOPBACK_TEST           2
 
 #define QLCNIC_FILTER_AGE      80
+#define QLCNIC_READD_AGE       20
 #define QLCNIC_LB_MAX_FILTERS  64
 
 struct qlcnic_filter {
@@ -970,6 +973,8 @@ struct qlcnic_adapter {
        u16 num_txd;
        u16 num_rxd;
        u16 num_jumbo_rxd;
+       u16 max_rxd;
+       u16 max_jumbo_rxd;
 
        u8 max_rds_rings;
        u8 max_sds_rings;
@@ -1129,7 +1134,7 @@ struct qlcnic_eswitch {
 #define MAX_RX_QUEUES          4
 #define DEFAULT_MAC_LEARN      1
 
-#define IS_VALID_VLAN(vlan)    (vlan >= MIN_VLAN_ID && vlan <= MAX_VLAN_ID)
+#define IS_VALID_VLAN(vlan)    (vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID)
 #define IS_VALID_BW(bw)                (bw >= MIN_BW && bw <= MAX_BW)
 #define IS_VALID_TX_QUEUES(que)        (que > 0 && que <= MAX_TX_QUEUES)
 #define IS_VALID_RX_QUEUES(que)        (que > 0 && que <= MAX_RX_QUEUES)
index 25e93a53fca0954e41728e9c55e31305ee62cc53..ec21d24015c485a7153e4262026f89d1c73af6c3 100644 (file)
@@ -437,14 +437,8 @@ qlcnic_get_ringparam(struct net_device *dev,
        ring->rx_jumbo_pending = adapter->num_jumbo_rxd;
        ring->tx_pending = adapter->num_txd;
 
-       if (adapter->ahw.port_type == QLCNIC_GBE) {
-               ring->rx_max_pending = MAX_RCV_DESCRIPTORS_1G;
-               ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_1G;
-       } else {
-               ring->rx_max_pending = MAX_RCV_DESCRIPTORS_10G;
-               ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_10G;
-       }
-
+       ring->rx_max_pending = adapter->max_rxd;
+       ring->rx_jumbo_max_pending = adapter->max_jumbo_rxd;
        ring->tx_max_pending = MAX_CMD_DESCRIPTORS;
 
        ring->rx_mini_max_pending = 0;
@@ -472,24 +466,17 @@ qlcnic_set_ringparam(struct net_device *dev,
                struct ethtool_ringparam *ring)
 {
        struct qlcnic_adapter *adapter = netdev_priv(dev);
-       u16 max_rcv_desc = MAX_RCV_DESCRIPTORS_10G;
-       u16 max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G;
        u16 num_rxd, num_jumbo_rxd, num_txd;
 
-
        if (ring->rx_mini_pending)
                return -EOPNOTSUPP;
 
-       if (adapter->ahw.port_type == QLCNIC_GBE) {
-               max_rcv_desc = MAX_RCV_DESCRIPTORS_1G;
-               max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G;
-       }
-
        num_rxd = qlcnic_validate_ringparam(ring->rx_pending,
-                       MIN_RCV_DESCRIPTORS, max_rcv_desc, "rx");
+                       MIN_RCV_DESCRIPTORS, adapter->max_rxd, "rx");
 
        num_jumbo_rxd = qlcnic_validate_ringparam(ring->rx_jumbo_pending,
-                       MIN_JUMBO_DESCRIPTORS, max_jumbo_desc, "rx jumbo");
+                       MIN_JUMBO_DESCRIPTORS, adapter->max_jumbo_rxd,
+                                               "rx jumbo");
 
        num_txd = qlcnic_validate_ringparam(ring->tx_pending,
                        MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx");
index f047c7c48314ccd48c76df8d68babb5d24c796d5..7a298cdf9ab398135b2f59df9c7ad642c8f8840b 100644 (file)
@@ -656,13 +656,23 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
 
        dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
                        fw_major, fw_minor, fw_build);
-
        if (adapter->ahw.port_type == QLCNIC_XGBE) {
-               adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
+               if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
+                       adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
+                       adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
+               } else {
+                       adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
+                       adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
+               }
+
                adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+               adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+
        } else if (adapter->ahw.port_type == QLCNIC_GBE) {
                adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
                adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+               adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+               adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
        }
 
        adapter->msix_supported = !!use_msi_x;
@@ -1860,6 +1870,11 @@ qlcnic_send_filter(struct qlcnic_adapter *adapter,
        hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
                if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
                            tmp_fil->vlan_id == vlan_id) {
+
+                       if (jiffies >
+                           (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
+                               qlcnic_change_filter(adapter, src_addr, vlan_id,
+                                                               tx_ring);
                        tmp_fil->ftime = jiffies;
                        return;
                }
index a478786840a65e8f35a5a825263926fd0f7078d2..22821398fc63765054eff056ab2529beedfb30fc 100644 (file)
@@ -2226,7 +2226,6 @@ int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
 int ql_core_dump(struct ql_adapter *qdev,
                struct ql_mpi_coredump *mpi_coredump);
 int ql_mb_about_fw(struct ql_adapter *qdev);
-int ql_wol(struct ql_adapter *qdev);
 int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
 int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol);
 int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config);
@@ -2243,16 +2242,13 @@ netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
 void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
 int ql_own_firmware(struct ql_adapter *qdev);
 int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
-void qlge_set_multicast_list(struct net_device *ndev);
 
-#if 1
-#define QL_ALL_DUMP
-#define QL_REG_DUMP
-#define QL_DEV_DUMP
-#define QL_CB_DUMP
+/* #define QL_ALL_DUMP */
+/* #define QL_REG_DUMP */
+/* #define QL_DEV_DUMP */
+/* #define QL_CB_DUMP */
 /* #define QL_IB_DUMP */
 /* #define QL_OB_DUMP */
-#endif
 
 #ifdef QL_REG_DUMP
 extern void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
index ba0053d8515e24096854ee7cb7a07e99da32617b..c30e0fe55a314858828c2caa4d464b0c2301fe81 100644 (file)
@@ -94,6 +94,9 @@ static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
 
 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
 
+static int ql_wol(struct ql_adapter *qdev);
+static void qlge_set_multicast_list(struct net_device *ndev);
+
 /* This hardware semaphore causes exclusive access to
  * resources shared between the NIC driver, MPI firmware,
  * FCOE firmware and the FC driver.
@@ -2382,6 +2385,20 @@ static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
 
 }
 
+static void qlge_restore_vlan(struct ql_adapter *qdev)
+{
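+       /* Re-register the vlan group with the hardware and re-add every vid
+        * configured before the adapter was reset; called from
+        * ql_adapter_up() after the chip comes back up.
+        */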
+       qlge_vlan_rx_register(qdev->ndev, qdev->vlgrp);
+
+       if (qdev->vlgrp) {
+               u16 vid;
+               for (vid = 0; vid < VLAN_N_VID; vid++) {
+                       if (!vlan_group_get_device(qdev->vlgrp, vid))
+                               continue;
+                       qlge_vlan_rx_add_vid(qdev->ndev, vid);
+               }
+       }
+}
+
 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
 {
@@ -3842,7 +3859,7 @@ static void ql_display_dev_info(struct net_device *ndev)
                   "MAC address %pM\n", ndev->dev_addr);
 }
 
-int ql_wol(struct ql_adapter *qdev)
+static int ql_wol(struct ql_adapter *qdev)
 {
        int status = 0;
        u32 wol = MB_WOL_DISABLE;
@@ -3957,6 +3974,9 @@ static int ql_adapter_up(struct ql_adapter *qdev)
        clear_bit(QL_PROMISCUOUS, &qdev->flags);
        qlge_set_multicast_list(qdev->ndev);
 
+       /* Restore vlan setting. */
+       qlge_restore_vlan(qdev);
+
        ql_enable_interrupts(qdev);
        ql_enable_all_completion_interrupts(qdev);
        netif_tx_start_all_queues(qdev->ndev);
@@ -4242,7 +4262,7 @@ static struct net_device_stats *qlge_get_stats(struct net_device
        return &ndev->stats;
 }
 
-void qlge_set_multicast_list(struct net_device *ndev)
+static void qlge_set_multicast_list(struct net_device *ndev)
 {
        struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
        struct netdev_hw_addr *ha;
index f84e8570c7cb79d4c161e90ccee1d420b55d256c..0e7c7c7ee1647006baf561ffd90308290fbd2399 100644 (file)
@@ -87,7 +87,7 @@ exit:
        return status;
 }
 
-int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
+static int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
 {
        int status;
        status = ql_write_mpi_reg(qdev, 0x00001010, 1);
@@ -681,7 +681,7 @@ int ql_mb_get_fw_state(struct ql_adapter *qdev)
 /* Send an ACK mailbox command to the firmware to
  * let it continue with the change.
  */
-int ql_mb_idc_ack(struct ql_adapter *qdev)
+static int ql_mb_idc_ack(struct ql_adapter *qdev)
 {
        struct mbox_params mbc;
        struct mbox_params *mbcp = &mbc;
@@ -744,7 +744,7 @@ int ql_mb_set_port_cfg(struct ql_adapter *qdev)
        return status;
 }
 
-int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
+static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
        u32 size)
 {
        int status = 0;
index a9ae505e1baf1e0fb35e32c940a5c63d17e8d7ff..66c2f1a01963c27f9e7b3f1e50e01c964e0429e2 100644 (file)
@@ -961,9 +961,9 @@ sb1000_open(struct net_device *dev)
        lp->rx_error_count = 0;
        lp->rx_error_dpc_count = 0;
        lp->rx_session_id[0] = 0x50;
-       lp->rx_session_id[0] = 0x48;
-       lp->rx_session_id[0] = 0x44;
-       lp->rx_session_id[0] = 0x42;
+       lp->rx_session_id[1] = 0x48;
+       lp->rx_session_id[2] = 0x44;
+       lp->rx_session_id[3] = 0x42;
        lp->rx_frame_id[0] = 0;
        lp->rx_frame_id[1] = 0;
        lp->rx_frame_id[2] = 0;
index 9265315baa0b29bdc2c7a9e20db20462dcf52ae5..3a0cc63428eec6d8222dee806b0594af9aa9f95c 100644 (file)
@@ -531,7 +531,7 @@ static int sgiseeq_open(struct net_device *dev)
 
        if (request_irq(irq, sgiseeq_interrupt, 0, sgiseeqstr, dev)) {
                printk(KERN_ERR "Seeq8003: Can't get irq %d\n", dev->irq);
-               err = -EAGAIN;
+               return -EAGAIN;
        }
 
        err = init_seeq(dev, sp, sregs);
index ac279fad9d450d28489a908a39884678676c3f73..ab9e3b785b5b1bf86b593160f97c213d191f2e0a 100644 (file)
@@ -688,18 +688,8 @@ slhc_toss(struct slcompress *comp)
        return 0;
 }
 
-
-/* VJ header compression */
-EXPORT_SYMBOL(slhc_init);
-EXPORT_SYMBOL(slhc_free);
-EXPORT_SYMBOL(slhc_remember);
-EXPORT_SYMBOL(slhc_compress);
-EXPORT_SYMBOL(slhc_uncompress);
-EXPORT_SYMBOL(slhc_toss);
-
 #else /* CONFIG_INET */
 
-
 int
 slhc_toss(struct slcompress *comp)
 {
@@ -738,6 +728,10 @@ slhc_init(int rslots, int tslots)
   printk(KERN_DEBUG "Called IP function on non IP-system: slhc_init");
   return NULL;
 }
+
+#endif /* CONFIG_INET */
+
+/* VJ header compression */
 EXPORT_SYMBOL(slhc_init);
 EXPORT_SYMBOL(slhc_free);
 EXPORT_SYMBOL(slhc_remember);
@@ -745,5 +739,4 @@ EXPORT_SYMBOL(slhc_compress);
 EXPORT_SYMBOL(slhc_uncompress);
 EXPORT_SYMBOL(slhc_toss);
 
-#endif /* CONFIG_INET */
 MODULE_LICENSE("Dual BSD/GPL");
index a8e5856ce8821b4f441173be994d90bd021b9bcb..64bfdae5956fee14160d26bca07670c5025b74eb 100644 (file)
@@ -2075,7 +2075,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
        } else {
                /* Try reading mac address from device. if EEPROM is present
                 * it will already have been set */
-               smsc911x_read_mac_address(dev);
+               smsc_get_mac(dev);
 
                if (is_valid_ether_addr(dev->dev_addr)) {
                        /* eeprom values are valid  so use them */
@@ -2176,6 +2176,7 @@ static struct platform_driver smsc911x_driver = {
 /* Entry point for loading the module */
 static int __init smsc911x_init_module(void)
 {
+       SMSC_INITIALIZE();
        return platform_driver_register(&smsc911x_driver);
 }
 
index 016360c65ce2ded5afe129f91139a9582fbef485..52f38e12a879db9d344fb80f169f05eba31fa860 100644 (file)
 #define LPA_PAUSE_ALL                  (LPA_PAUSE_CAP | \
                                         LPA_PAUSE_ASYM)
 
+/*
+ * Provide hooks to let the arch add to the initialisation procedure
+ * and to override the source of the MAC address.
+ */
+#define SMSC_INITIALIZE()              do {} while (0)
+#define smsc_get_mac(dev)              smsc911x_read_mac_address((dev))
+
+#ifdef CONFIG_SMSC911X_ARCH_HOOKS
+#include <asm/smsc911x.h>
+#endif
+
 #endif                         /* __SMSC911X_H__ */
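
[Editor's note: with the hooks above in place, a platform that selects CONFIG_SMSC911X_ARCH_HOOKS supplies <asm/smsc911x.h> to replace the defaults. A hypothetical arch header might look like the following; board_smsc911x_setup() and board_smsc911x_get_mac() are invented names, not part of any real port.]

#ifndef _ASM_SMSC911X_H
#define _ASM_SMSC911X_H

/* replace the driver's no-op defaults with board-specific hooks */
#undef SMSC_INITIALIZE
#define SMSC_INITIALIZE()       board_smsc911x_setup()

#undef smsc_get_mac
#define smsc_get_mac(dev)       board_smsc911x_get_mac((dev))

#endif /* _ASM_SMSC911X_H */
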
index 852e917778f8a51643096f648d9020c2a7963725..30ccbb6d097af220dced34f430723a3b3c5a9dca 100644
@@ -9948,16 +9948,16 @@ static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
            !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
                return -EINVAL;
 
+       device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
+
        spin_lock_bh(&tp->lock);
-       if (wol->wolopts & WAKE_MAGIC) {
+       if (device_may_wakeup(dp))
                tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
-               device_set_wakeup_enable(dp, true);
-       } else {
+       else
                tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
-               device_set_wakeup_enable(dp, false);
-       }
        spin_unlock_bh(&tp->lock);
 
+
        return 0;
 }
 
index 663b8860a5317a4f37a41813e79fda4bd8159792..793020347e543524708512d5ad0e0681ba0ad1fd 100644
@@ -1220,7 +1220,7 @@ void tms380tr_wait(unsigned long time)
                tmp = schedule_timeout_interruptible(tmp);
        } while(time_after(tmp, jiffies));
 #else
-       udelay(time);
+       mdelay(time / 1000);
 #endif
 }
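
[Editor's note: udelay() takes microseconds and its internal arithmetic overflows on many architectures once the argument reaches the low thousands, so a delay that can run to millisecond scale is safer expressed through mdelay(). Roughly, and this is a paraphrase of the <linux/delay.h> fallback rather than the verbatim kernel macro, mdelay() loops 1 ms at a time so udelay() never sees an oversized argument:]

#define mdelay(n)       ({                      \
        unsigned long __ms = (n);               \
        while (__ms--)                          \
                udelay(1000);                   \
})
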
 
index 1cc67138adbf4a248294a029295df268bfbbc710..5b83c3f35f47c3421a518aaa04fb39224dac2ed3 100644
        3XP Processor. It has been tested on x86 and sparc64.
 
        KNOWN ISSUES:
-       *) The current firmware always strips the VLAN tag off, even if
-               we tell it not to. You should filter VLANs at the switch
-               as a workaround (good practice in any event) until we can
-               get this fixed.
        *) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
                issue. Hopefully 3Com will fix it.
        *) Waiting for a command response takes 8ms due to non-preemptable
@@ -280,8 +276,6 @@ struct typhoon {
        struct pci_dev *        pdev;
        struct net_device *     dev;
        struct napi_struct      napi;
-       spinlock_t              state_lock;
-       struct vlan_group *     vlgrp;
        struct basic_ring       rxHiRing;
        struct basic_ring       rxBuffRing;
        struct rxbuff_ent       rxbuffers[RXENT_ENTRIES];
@@ -695,44 +689,6 @@ out:
        return err;
 }
 
-static void
-typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
-{
-       struct typhoon *tp = netdev_priv(dev);
-       struct cmd_desc xp_cmd;
-       int err;
-
-       spin_lock_bh(&tp->state_lock);
-       if(!tp->vlgrp != !grp) {
-               /* We've either been turned on for the first time, or we've
-                * been turned off. Update the 3XP.
-                */
-               if(grp)
-                       tp->offload |= TYPHOON_OFFLOAD_VLAN;
-               else
-                       tp->offload &= ~TYPHOON_OFFLOAD_VLAN;
-
-               /* If the interface is up, the runtime is running -- and we
-                * must be up for the vlan core to call us.
-                *
-                * Do the command outside of the spin lock, as it is slow.
-                */
-               INIT_COMMAND_WITH_RESPONSE(&xp_cmd,
-                                       TYPHOON_CMD_SET_OFFLOAD_TASKS);
-               xp_cmd.parm2 = tp->offload;
-               xp_cmd.parm3 = tp->offload;
-               spin_unlock_bh(&tp->state_lock);
-               err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
-               if(err < 0)
-                       netdev_err(tp->dev, "vlan offload error %d\n", -err);
-               spin_lock_bh(&tp->state_lock);
-       }
-
-       /* now make the change visible */
-       tp->vlgrp = grp;
-       spin_unlock_bh(&tp->state_lock);
-}
-
 static inline void
 typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
                        u32 ring_dma)
@@ -818,7 +774,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
                first_txd->processFlags |=
                    TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
                first_txd->processFlags |=
-                   cpu_to_le32(ntohs(vlan_tx_tag_get(skb)) <<
+                   cpu_to_le32(htons(vlan_tx_tag_get(skb)) <<
                                TYPHOON_TX_PF_VLAN_TAG_SHIFT);
        }
 
@@ -936,7 +892,7 @@ typhoon_set_rx_mode(struct net_device *dev)
                filter |= TYPHOON_RX_FILTER_MCAST_HASH;
        }
 
-       INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
+       INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
        xp_cmd.parm1 = filter;
        typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
 }
@@ -1198,6 +1154,20 @@ typhoon_get_rx_csum(struct net_device *dev)
        return 1;
 }
 
+static int
+typhoon_set_flags(struct net_device *dev, u32 data)
+{
+       /* There's no way to turn off the RX VLAN offloading and stripping
+        * on the current 3XP firmware -- it does not respect the offload
+        * settings -- so we only allow the user to toggle the TX processing.
+        */
+       if (!(data & ETH_FLAG_RXVLAN))
+               return -EINVAL;
+
+       return ethtool_op_set_flags(dev, data,
+                                   ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN);
+}
+
 static void
 typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
 {
@@ -1224,6 +1194,8 @@ static const struct ethtool_ops typhoon_ethtool_ops = {
        .set_sg                 = ethtool_op_set_sg,
        .set_tso                = ethtool_op_set_tso,
        .get_ringparam          = typhoon_get_ringparam,
+       .set_flags              = typhoon_set_flags,
+       .get_flags              = ethtool_op_get_flags,
 };
 
 static int
@@ -1309,9 +1281,9 @@ typhoon_init_interface(struct typhoon *tp)
 
        tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
        tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;
+       tp->offload |= TYPHOON_OFFLOAD_VLAN;
 
        spin_lock_init(&tp->command_lock);
-       spin_lock_init(&tp->state_lock);
 
        /* Force the writes to the shared memory area out before continuing. */
        wmb();
@@ -1328,7 +1300,7 @@ typhoon_init_rings(struct typhoon *tp)
        tp->rxHiRing.lastWrite = 0;
        tp->rxBuffRing.lastWrite = 0;
        tp->cmdRing.lastWrite = 0;
-       tp->cmdRing.lastWrite = 0;
+       tp->respRing.lastWrite = 0;
 
        tp->txLoRing.lastRead = 0;
        tp->txHiRing.lastRead = 0;
@@ -1762,13 +1734,10 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * read
                } else
                        skb_checksum_none_assert(new_skb);
 
-               spin_lock(&tp->state_lock);
-               if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
-                       vlan_hwaccel_receive_skb(new_skb, tp->vlgrp,
-                                                ntohl(rx->vlanTag) & 0xffff);
-               else
-                       netif_receive_skb(new_skb);
-               spin_unlock(&tp->state_lock);
+               if (rx->rxStatus & TYPHOON_RX_VLAN)
+                       __vlan_hwaccel_put_tag(new_skb,
+                                              ntohl(rx->vlanTag) & 0xffff);
+               netif_receive_skb(new_skb);
 
                received++;
                budget--;
@@ -1989,11 +1958,9 @@ typhoon_start_runtime(struct typhoon *tp)
                goto error_out;
 
        INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
-       spin_lock_bh(&tp->state_lock);
        xp_cmd.parm2 = tp->offload;
        xp_cmd.parm3 = tp->offload;
        err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
-       spin_unlock_bh(&tp->state_lock);
        if(err < 0)
                goto error_out;
 
@@ -2231,13 +2198,9 @@ typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
        if(!netif_running(dev))
                return 0;
 
-       spin_lock_bh(&tp->state_lock);
-       if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
-               spin_unlock_bh(&tp->state_lock);
-               netdev_err(dev, "cannot do WAKE_MAGIC with VLANS\n");
-               return -EBUSY;
-       }
-       spin_unlock_bh(&tp->state_lock);
+       /* TYPHOON_OFFLOAD_VLAN is always on now, so this doesn't work */
+       if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
+               netdev_warn(dev, "cannot do WAKE_MAGIC with VLAN offloading\n");
 
        netif_device_detach(dev);
 
@@ -2338,7 +2301,6 @@ static const struct net_device_ops typhoon_netdev_ops = {
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = typhoon_set_mac_address,
        .ndo_change_mtu         = eth_change_mtu,
-       .ndo_vlan_rx_register   = typhoon_vlan_rx_register,
 };
 
 static int __devinit
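
[Editor's note: the vlan_group plumbing and state_lock disappear from typhoon because the receive path now just records the tag in the skb before netif_receive_skb(). For reference, the 2.6.37-era helper it calls is tiny; this is paraphrased from <linux/if_vlan.h> of that period, and there is nothing in it to lock:]

static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb,
                                                     u16 vlan_tci)
{
        skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci;    /* tag travels in the skb */
        return skb;
}
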
index 37108fb226d38e5a08a99e3dbaf2134aff6eedd6..969c751ee404fae24e73614703fe44e6141d74f9 100644
@@ -88,9 +88,9 @@ struct UPT1_RSSConf {
 
 /* features */
 enum {
-       UPT1_F_RXCSUM           = 0x0001,   /* rx csum verification */
-       UPT1_F_RSS              = 0x0002,
-       UPT1_F_RXVLAN           = 0x0004,   /* VLAN tag stripping */
-       UPT1_F_LRO              = 0x0008,
+       UPT1_F_RXCSUM           = cpu_to_le64(0x0001),   /* rx csum verification */
+       UPT1_F_RSS              = cpu_to_le64(0x0002),
+       UPT1_F_RXVLAN           = cpu_to_le64(0x0004),   /* VLAN tag stripping */
+       UPT1_F_LRO              = cpu_to_le64(0x0008),
 };
 #endif
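
[Editor's note: pre-swapping the constants means a flag update on a __le64 device field becomes a plain bitwise op that is correct on either host endianness, which is what lets the later hunks delete the set_flag_le64()/reset_flag_le64() helpers. Before and after, taken from this same patch:]

/* before: byte-swap on every update */
set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXVLAN);

/* after: the constant is already little-endian, so a direct OR is safe */
devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
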
index ca7727b940adfd62c58bb01c32737a9c88dedcc0..4d84912c99bae05f19eec5abf04632e077c1cc4f 100644
@@ -523,9 +523,9 @@ struct Vmxnet3_RxFilterConf {
 #define VMXNET3_PM_MAX_PATTERN_SIZE   128
 #define VMXNET3_PM_MAX_MASK_SIZE      (VMXNET3_PM_MAX_PATTERN_SIZE / 8)
 
-#define VMXNET3_PM_WAKEUP_MAGIC       0x01  /* wake up on magic pkts */
-#define VMXNET3_PM_WAKEUP_FILTER      0x02  /* wake up on pkts matching
-                                            * filters */
+#define VMXNET3_PM_WAKEUP_MAGIC       cpu_to_le16(0x01)  /* wake up on magic pkts */
+#define VMXNET3_PM_WAKEUP_FILTER      cpu_to_le16(0x02)  /* wake up on pkts matching
+                                                         * filters */
 
 
 struct Vmxnet3_PM_PktFilter {
index 3f60e0e3097bdee3237ebdae7ef2d66844ce1669..e3658e10db390dd2ef74d62bd9c34004abe2d4dd 100644
@@ -1563,8 +1563,7 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
                        adapter->vlan_grp = grp;
 
                        /* update FEATURES to device */
-                       set_flag_le64(&devRead->misc.uptFeatures,
-                                     UPT1_F_RXVLAN);
+                       devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
                        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                               VMXNET3_CMD_UPDATE_FEATURE);
                        /*
@@ -1587,7 +1586,7 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
                struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
                adapter->vlan_grp = NULL;
 
-               if (le64_to_cpu(devRead->misc.uptFeatures) & UPT1_F_RXVLAN) {
+               if (devRead->misc.uptFeatures & UPT1_F_RXVLAN) {
                        int i;
 
                        for (i = 0; i < VMXNET3_VFT_SIZE; i++) {
@@ -1600,8 +1599,7 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
                                               VMXNET3_CMD_UPDATE_VLAN_FILTERS);
 
                        /* update FEATURES to device */
-                       reset_flag_le64(&devRead->misc.uptFeatures,
-                                       UPT1_F_RXVLAN);
+                       devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
                        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                               VMXNET3_CMD_UPDATE_FEATURE);
                }
@@ -1762,15 +1760,15 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
 
        /* set up feature flags */
        if (adapter->rxcsum)
-               set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXCSUM);
+               devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
 
        if (adapter->lro) {
-               set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_LRO);
+               devRead->misc.uptFeatures |= UPT1_F_LRO;
                devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
        }
        if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) &&
            adapter->vlan_grp) {
-               set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXVLAN);
+               devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
        }
 
        devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
@@ -2577,7 +2575,7 @@ vmxnet3_suspend(struct device *device)
                memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
                pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
 
-               set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER);
+               pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
                i++;
        }
 
@@ -2619,13 +2617,13 @@ vmxnet3_suspend(struct device *device)
                pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
                in_dev_put(in_dev);
 
-               set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER);
+               pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
                i++;
        }
 
 skip_arp:
        if (adapter->wol & WAKE_MAGIC)
-               set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_MAGIC);
+               pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
 
        pmConf->numFilters = i;
 
@@ -2667,7 +2665,7 @@ vmxnet3_resume(struct device *device)
        adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
        adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
                                                                  *pmConf));
-       adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le32(virt_to_phys(
+       adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
                                                                 pmConf));
 
        netif_device_attach(netdev);
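
[Editor's note: confPA is a 64-bit little-endian field, so converting with cpu_to_le32() both truncated the physical address to 32 bits and, on big-endian hosts, byte-swapped it at the wrong width. With an illustrative address above 4 GB:]

/* phys = 0x0000000123456000
 * cpu_to_le32(phys): argument narrowed to 0x23456000 first, address corrupted
 * cpu_to_le64(phys): all 64 bits kept and swapped as one quantity
 */
pmConfDesc.confPA = cpu_to_le64(virt_to_phys(pmConf));
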
index 7e4b5a89165a5804dcb68dd78d225163ea71efd7..b79070bcc92efa592e9e8c1689d9e7061b8788c0 100644
@@ -50,13 +50,11 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
                adapter->rxcsum = val;
                if (netif_running(netdev)) {
                        if (val)
-                               set_flag_le64(
-                               &adapter->shared->devRead.misc.uptFeatures,
-                               UPT1_F_RXCSUM);
+                               adapter->shared->devRead.misc.uptFeatures |=
+                               UPT1_F_RXCSUM;
                        else
-                               reset_flag_le64(
-                               &adapter->shared->devRead.misc.uptFeatures,
-                               UPT1_F_RXCSUM);
+                               adapter->shared->devRead.misc.uptFeatures &=
+                               ~UPT1_F_RXCSUM;
 
                        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                               VMXNET3_CMD_UPDATE_FEATURE);
@@ -292,10 +290,10 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data)
                /* update harware LRO capability accordingly */
                if (lro_requested)
                        adapter->shared->devRead.misc.uptFeatures |=
-                                               cpu_to_le64(UPT1_F_LRO);
+                                                       UPT1_F_LRO;
                else
                        adapter->shared->devRead.misc.uptFeatures &=
-                                               cpu_to_le64(~UPT1_F_LRO);
+                                                       ~UPT1_F_LRO;
                VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                       VMXNET3_CMD_UPDATE_FEATURE);
        }
index c88ea5cbba0d064a85563f78780016a42badeb05..8a2f4712284cfcbf0f2851547e43a2ef5f0fd595 100644
@@ -301,8 +301,8 @@ struct vmxnet3_adapter {
        struct net_device              *netdev;
        struct pci_dev                 *pdev;
 
-       u8                              *hw_addr0; /* for BAR 0 */
-       u8                              *hw_addr1; /* for BAR 1 */
+       u8                      __iomem *hw_addr0; /* for BAR 0 */
+       u8                      __iomem *hw_addr1; /* for BAR 1 */
 
        /* feature control */
        bool                            rxcsum;
@@ -353,21 +353,6 @@ struct vmxnet3_adapter {
 #define VMXNET3_MAX_ETH_HDR_SIZE    22
 #define VMXNET3_MAX_SKB_BUF_SIZE    (3*1024)
 
-static inline void set_flag_le16(__le16 *data, u16 flag)
-{
-       *data = cpu_to_le16(le16_to_cpu(*data) | flag);
-}
-
-static inline void set_flag_le64(__le64 *data, u64 flag)
-{
-       *data = cpu_to_le64(le64_to_cpu(*data) | flag);
-}
-
-static inline void reset_flag_le64(__le64 *data, u64 flag)
-{
-       *data = cpu_to_le64(le64_to_cpu(*data) & ~flag);
-}
-
 int
 vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter);
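
[Editor's note: the __iomem annotation on the BAR pointers above is for sparse (make C=1): it marks hw_addr0/hw_addr1 as living in I/O address space, where only the accessor functions are legal. A minimal illustration using a register the driver already touches:]

u8 __iomem *bar1 = adapter->hw_addr1;

u32 v = readl(bar1 + VMXNET3_REG_CMD);  /* ok: I/O accessor */
writel(v, bar1 + VMXNET3_REG_CMD);      /* ok: I/O accessor */
/* *(u32 *)(bar1 + VMXNET3_REG_CMD) would now draw a sparse
 * address-space warning, and is wrong on many platforms anyway */
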
 
index 0e6db5935609cf47efb6c1f2788e6bfeac496db8..906a3ca3676b94c3aa6d8442bd7e56b9d0fa3eca 100644
 #include "vxge-traffic.h"
 #include "vxge-config.h"
 
+static enum vxge_hw_status
+__vxge_hw_fifo_create(
+       struct __vxge_hw_vpath_handle *vpath_handle,
+       struct vxge_hw_fifo_attr *attr);
+
+static enum vxge_hw_status
+__vxge_hw_fifo_abort(
+       struct __vxge_hw_fifo *fifoh);
+
+static enum vxge_hw_status
+__vxge_hw_fifo_reset(
+       struct __vxge_hw_fifo *ringh);
+
+static enum vxge_hw_status
+__vxge_hw_fifo_delete(
+       struct __vxge_hw_vpath_handle *vpath_handle);
+
+static struct __vxge_hw_blockpool_entry *
+__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev,
+                       u32 size);
+
+static void
+__vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev,
+                       struct __vxge_hw_blockpool_entry *entry);
+
+static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
+                                       void *block_addr,
+                                       u32 length,
+                                       struct pci_dev *dma_h,
+                                       struct pci_dev *acc_handle);
+
+static enum vxge_hw_status
+__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
+                       struct __vxge_hw_blockpool  *blockpool,
+                       u32 pool_size,
+                       u32 pool_max);
+
+static void
+__vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool  *blockpool);
+
+static void *
+__vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev,
+                       u32 size,
+                       struct vxge_hw_mempool_dma *dma_object);
+
+static void
+__vxge_hw_blockpool_free(struct __vxge_hw_device *hldev,
+                       void *memblock,
+                       u32 size,
+                       struct vxge_hw_mempool_dma *dma_object);
+
+
+static struct __vxge_hw_channel*
+__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
+                       enum __vxge_hw_channel_type type, u32 length,
+                       u32 per_dtr_space, void *userdata);
+
+static void
+__vxge_hw_channel_free(
+       struct __vxge_hw_channel *channel);
+
+static enum vxge_hw_status
+__vxge_hw_channel_initialize(
+       struct __vxge_hw_channel *channel);
+
+static enum vxge_hw_status
+__vxge_hw_channel_reset(
+       struct __vxge_hw_channel *channel);
+
+static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp);
+
+static enum vxge_hw_status
+__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config);
+
+static enum vxge_hw_status
+__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config);
+
+static void
+__vxge_hw_device_id_get(struct __vxge_hw_device *hldev);
+
+static void
+__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_card_info_get(
+       u32 vp_id,
+       struct vxge_hw_vpath_reg __iomem *vpath_reg,
+       struct vxge_hw_device_hw_info *hw_info);
+
+static enum vxge_hw_status
+__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);
+
+static void
+__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);
+
+static enum vxge_hw_status
+__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev);
+
+static enum vxge_hw_status
+__vxge_hw_device_register_poll(
+       void __iomem    *reg,
+       u64 mask, u32 max_millis);
+
+static inline enum vxge_hw_status
+__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
+                         u64 mask, u32 max_millis)
+{
+       __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
+       wmb();
+
+       __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
+       wmb();
+
+       return  __vxge_hw_device_register_poll(addr, mask, max_millis);
+}
+
+static struct vxge_hw_mempool*
+__vxge_hw_mempool_create(struct __vxge_hw_device *devh, u32 memblock_size,
+                        u32 item_size, u32 private_size, u32 items_initial,
+                        u32 items_max, struct vxge_hw_mempool_cbs *mp_callback,
+                        void *userdata);
+static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
+                         struct vxge_hw_vpath_stats_hw_info *hw_stats);
+
+static enum vxge_hw_status
+vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vpath_handle);
+
+static enum vxge_hw_status
+__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);
+
+static u64
+__vxge_hw_vpath_pci_func_mode_get(u32  vp_id,
+                                 struct vxge_hw_vpath_reg __iomem *vpath_reg);
+
+static u32
+__vxge_hw_vpath_func_id_get(u32 vp_id, struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_addr_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
+                        u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN]);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);
+
+
+static enum vxge_hw_status
+__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *devh, u32 vp_id);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_fw_ver_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
+                          struct vxge_hw_device_hw_info *hw_info);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *devh, u32 vp_id);
+
+static void
+__vxge_hw_vp_terminate(struct __vxge_hw_device *devh, u32 vp_id);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
+                            u32 operation, u32 offset, u64 *stat);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
+                                 struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
+                                 struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);
+
 /*
  * __vxge_hw_channel_allocate - Allocate memory for channel
  * This function allocates required memory for the channel and various arrays
@@ -190,7 +363,7 @@ __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
  * Will poll certain register for specified amount of time.
  * Will poll until masked bit is not cleared.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
 {
        u64 val64;
@@ -221,7 +394,7 @@ __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
  * in progress
  * This routine checks the vpath reset in progress register is turned zero
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
 {
        enum vxge_hw_status status;
@@ -236,7 +409,7 @@ __vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
  * This routine sets the swapper and reads the toc pointer and returns the
  * memory mapped address of the toc
  */
-struct vxge_hw_toc_reg __iomem *
+static struct vxge_hw_toc_reg __iomem *
 __vxge_hw_device_toc_get(void __iomem *bar0)
 {
        u64 val64;
@@ -779,7 +952,7 @@ exit:
  * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port
  * Get the Statistics on aggregate port
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
                                   struct vxge_hw_xmac_aggr_stats *aggr_stats)
 {
@@ -814,7 +987,7 @@ exit:
  * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port
  * Get the Statistics on port
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
                                   struct vxge_hw_xmac_port_stats *port_stats)
 {
@@ -952,20 +1125,6 @@ u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
        return 0;
 #endif
 }
-/*
- * vxge_hw_device_debug_mask_get - Get the debug mask
- * This routine returns the current debug mask set
- */
-u32 vxge_hw_device_debug_mask_get(struct __vxge_hw_device *hldev)
-{
-#if defined(VXGE_DEBUG_TRACE_MASK) || defined(VXGE_DEBUG_ERR_MASK)
-       if (hldev == NULL)
-               return 0;
-       return hldev->debug_module_mask;
-#else
-       return 0;
-#endif
-}
 
 /*
  * vxge_hw_getpause_data -Pause frame frame generation and reception.
@@ -1090,7 +1249,7 @@ __vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
  *             first block
  * Returns the dma address of the first RxD block
  */
-u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
+static u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
 {
        struct vxge_hw_mempool_dma *dma_object;
 
@@ -1252,7 +1411,7 @@ exit:
  * This function creates Ring and initializes it.
  *
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
                      struct vxge_hw_ring_attr *attr)
 {
@@ -1363,7 +1522,7 @@ exit:
  * __vxge_hw_ring_abort - Returns the RxD
  * This function terminates the RxDs of ring
  */
-enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
+static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
 {
        void *rxdh;
        struct __vxge_hw_channel *channel;
@@ -1392,7 +1551,7 @@ enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
  * __vxge_hw_ring_reset - Resets the ring
  * This function resets the ring during vpath reset operation
  */
-enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
+static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
 {
        enum vxge_hw_status status = VXGE_HW_OK;
        struct __vxge_hw_channel *channel;
@@ -1419,7 +1578,7 @@ exit:
  * __vxge_hw_ring_delete - Removes the ring
  * This function freeup the memory pool and removes the ring
  */
-enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
+static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
 {
        struct __vxge_hw_ring *ring = vp->vpath->ringh;
 
@@ -1438,7 +1597,7 @@ enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
  * __vxge_hw_mempool_grow
  * Will resize mempool up to %num_allocate value.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
                       u32 *num_allocated)
 {
@@ -1527,7 +1686,7 @@ exit:
  * with size enough to hold %items_initial number of items. Memory is
  * DMA-able but client must map/unmap before interoperating with the device.
  */
-struct vxge_hw_mempool*
+static struct vxge_hw_mempool*
 __vxge_hw_mempool_create(
        struct __vxge_hw_device *devh,
        u32 memblock_size,
@@ -1644,7 +1803,7 @@ exit:
 /*
  * vxge_hw_mempool_destroy
  */
-void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
+static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
 {
        u32 i, j;
        struct __vxge_hw_device *devh = mempool->devh;
@@ -1700,7 +1859,7 @@ __vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
  * __vxge_hw_device_vpath_config_check - Check vpath configuration.
  * Check the vpath configuration
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
 {
        enum vxge_hw_status status;
@@ -1922,7 +2081,7 @@ vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
  * _hw_legacy_swapper_set - Set the swapper bits for the legacy secion.
  * Set the swapper bits appropriately for the lagacy section.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
 {
        u64 val64;
@@ -1977,7 +2136,7 @@ __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
  * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
  * Set the swapper bits appropriately for the vpath.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
 {
 #ifndef __BIG_ENDIAN
@@ -1996,7 +2155,7 @@ __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
  * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
  * Set the swapper bits appropriately for the vpath.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_kdfc_swapper_set(
        struct vxge_hw_legacy_reg __iomem *legacy_reg,
        struct vxge_hw_vpath_reg __iomem *vpath_reg)
@@ -2020,28 +2179,6 @@ __vxge_hw_kdfc_swapper_set(
        return VXGE_HW_OK;
 }
 
-/*
- * vxge_hw_mgmt_device_config - Retrieve device configuration.
- * Get device configuration. Permits to retrieve at run-time configuration
- * values that were used to initialize and configure the device.
- */
-enum vxge_hw_status
-vxge_hw_mgmt_device_config(struct __vxge_hw_device *hldev,
-                          struct vxge_hw_device_config *dev_config, int size)
-{
-
-       if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC))
-               return VXGE_HW_ERR_INVALID_DEVICE;
-
-       if (size != sizeof(struct vxge_hw_device_config))
-               return VXGE_HW_ERR_VERSION_CONFLICT;
-
-       memcpy(dev_config, &hldev->config,
-               sizeof(struct vxge_hw_device_config));
-
-       return VXGE_HW_OK;
-}
-
 /*
  * vxge_hw_mgmt_reg_read - Read Titan register.
  */
@@ -2438,7 +2575,7 @@ exit:
  * __vxge_hw_fifo_abort - Returns the TxD
  * This function terminates the TxDs of fifo
  */
-enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
+static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
 {
        void *txdlh;
 
@@ -2466,7 +2603,7 @@ enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
  * __vxge_hw_fifo_reset - Resets the fifo
  * This function resets the fifo during vpath reset operation
  */
-enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
+static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
 {
        enum vxge_hw_status status = VXGE_HW_OK;
 
@@ -2501,7 +2638,7 @@ enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
  *                          in pci config space.
  * Read from the vpath pci config space.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
                         u32 phy_func_0, u32 offset, u32 *val)
 {
@@ -2542,7 +2679,7 @@ exit:
  * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
  * Returns the function number of the vpath.
  */
-u32
+static u32
 __vxge_hw_vpath_func_id_get(u32 vp_id,
        struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
 {
@@ -2573,7 +2710,7 @@ __vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
  * __vxge_hw_vpath_card_info_get - Get the serial numbers,
  * part number and product description.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_vpath_card_info_get(
        u32 vp_id,
        struct vxge_hw_vpath_reg __iomem *vpath_reg,
@@ -2695,7 +2832,7 @@ __vxge_hw_vpath_card_info_get(
  * __vxge_hw_vpath_fw_ver_get - Get the fw version
  * Returns FW Version
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_vpath_fw_ver_get(
        u32 vp_id,
        struct vxge_hw_vpath_reg __iomem *vpath_reg,
@@ -2789,7 +2926,7 @@ exit:
  * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
  * Returns pci function mode
  */
-u64
+static u64
 __vxge_hw_vpath_pci_func_mode_get(
        u32  vp_id,
        struct vxge_hw_vpath_reg __iomem *vpath_reg)
@@ -2995,7 +3132,7 @@ exit:
  * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
  *               from MAC address table.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_vpath_addr_get(
        u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
        u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN])
@@ -3347,7 +3484,7 @@ __vxge_hw_vpath_mgmt_read(
  * This routine checks the vpath_rst_in_prog register to see if
  * adapter completed the reset process for the vpath
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
 {
        enum vxge_hw_status status;
@@ -3365,7 +3502,7 @@ __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
  * __vxge_hw_vpath_reset
  * This routine resets the vpath on the device
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
 {
        u64 val64;
@@ -3383,7 +3520,7 @@ __vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
  * __vxge_hw_vpath_sw_reset
  * This routine resets the vpath structures
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
 {
        enum vxge_hw_status status = VXGE_HW_OK;
@@ -3408,7 +3545,7 @@ exit:
  * This routine configures the prc registers of virtual path using the config
  * passed
  */
-void
+static void
 __vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
 {
        u64 val64;
@@ -3480,7 +3617,7 @@ __vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
  * This routine configures the kdfc registers of virtual path using the
  * config passed
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
 {
        u64 val64;
@@ -3553,7 +3690,7 @@ exit:
  * __vxge_hw_vpath_mac_configure
  * This routine configures the mac of virtual path using the config passed
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
 {
        u64 val64;
@@ -3621,7 +3758,7 @@ __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
  * This routine configures the tim registers of virtual path using the config
  * passed
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
 {
        u64 val64;
@@ -3897,7 +4034,7 @@ vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
  * This routine is the final phase of init which initializes the
  * registers of the vpath using the configuration passed.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
 {
        u64 val64;
@@ -3966,7 +4103,7 @@ exit:
  * This routine is the initial phase of init which resets the vpath and
  * initializes the software support structures.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
                        struct vxge_hw_vp_config *config)
 {
@@ -4022,7 +4159,7 @@ exit:
  * __vxge_hw_vp_terminate - Terminate Virtual Path structure
  * This routine closes all channels it opened and freeup memory
  */
-void
+static void
 __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
 {
        struct __vxge_hw_virtualpath *vpath;
@@ -4384,7 +4521,7 @@ vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
  * Enable the DMA vpath statistics. The function is to be called to re-enable
  * the adapter to update stats into the host memory
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
 {
        enum vxge_hw_status status = VXGE_HW_OK;
@@ -4409,7 +4546,7 @@ exit:
  * __vxge_hw_vpath_stats_access - Get the statistics from the given location
  *                           and offset and perform an operation
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
                             u32 operation, u32 offset, u64 *stat)
 {
@@ -4445,7 +4582,7 @@ vpath_stats_access_exit:
 /*
  * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_vpath_xmac_tx_stats_get(
        struct __vxge_hw_virtualpath *vpath,
        struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
@@ -4478,9 +4615,9 @@ exit:
 /*
  * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
-                       struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
+                                 struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
 {
        u64 *val64;
        enum vxge_hw_status status = VXGE_HW_OK;
@@ -4509,9 +4646,9 @@ exit:
 /*
  * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
  */
-enum vxge_hw_status __vxge_hw_vpath_stats_get(
-                       struct __vxge_hw_virtualpath *vpath,
-                       struct vxge_hw_vpath_stats_hw_info *hw_stats)
+static enum vxge_hw_status
+__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
+                         struct vxge_hw_vpath_stats_hw_info *hw_stats)
 {
        u64 val64;
        enum vxge_hw_status status = VXGE_HW_OK;
@@ -4643,6 +4780,32 @@ exit:
        return status;
 }
 
+
+static void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh,
+                                       unsigned long size)
+{
+       gfp_t flags;
+       void *vaddr;
+
+       if (in_interrupt())
+               flags = GFP_ATOMIC | GFP_DMA;
+       else
+               flags = GFP_KERNEL | GFP_DMA;
+
+       vaddr = kmalloc((size), flags);
+
+       vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
+}
+
+static void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
+                            struct pci_dev **p_dma_acch)
+{
+       unsigned long misaligned = *(unsigned long *)p_dma_acch;
+       u8 *tmp = (u8 *)vaddr;
+       tmp -= misaligned;
+       kfree((void *)tmp);
+}
+
 /*
  * __vxge_hw_blockpool_create - Create block pool
  */
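
[Editor's note: vxge_os_dma_malloc_async(), moved here from the header, picks its allocation flags by context: GFP_ATOMIC when running in interrupt or softirq context, where the allocator must not sleep, and GFP_KERNEL otherwise, both with GFP_DMA. Condensed from the function above:]

gfp_t flags = in_interrupt() ? GFP_ATOMIC | GFP_DMA
                             : GFP_KERNEL | GFP_DMA;
void *vaddr = kmalloc(size, flags);     /* may sleep only in the GFP_KERNEL case */
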
@@ -4845,12 +5008,11 @@ void __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
  * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
  * Adds a block to block pool
  */
-void vxge_hw_blockpool_block_add(
-                       struct __vxge_hw_device *devh,
-                       void *block_addr,
-                       u32 length,
-                       struct pci_dev *dma_h,
-                       struct pci_dev *acc_handle)
+static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
+                                       void *block_addr,
+                                       u32 length,
+                                       struct pci_dev *dma_h,
+                                       struct pci_dev *acc_handle)
 {
        struct __vxge_hw_blockpool  *blockpool;
        struct __vxge_hw_blockpool_entry  *entry = NULL;
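
[Editor's note: most of the vxge-config.c churn above is a single refactor. Helpers that were declared in vxge-config.h and visible kernel-wide become static, and the block of forward declarations added at the top of the file stands in for the removed header prototypes. The shape of the pattern, with invented names:]

/* forward declaration up top lets definitions keep any order */
static int helper(struct ctx *c);

int public_entry(struct ctx *c)
{
        return helper(c);               /* callable before its definition */
}

static int helper(struct ctx *c)        /* no longer visible outside this .c */
{
        return 0;
}
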
index 1a94343023cb20fdb7f0f3a5b4d4b83ab1b9e1b3..5c00861b6c2c08351f1f52023f4037dfaad45236 100644
@@ -183,11 +183,6 @@ struct vxge_hw_device_version {
        char    version[VXGE_HW_FW_STRLEN];
 };
 
-u64
-__vxge_hw_vpath_pci_func_mode_get(
-       u32 vp_id,
-       struct vxge_hw_vpath_reg __iomem *vpath_reg);
-
 /**
  * struct vxge_hw_fifo_config - Configuration of fifo.
  * @enable: Is this fifo to be commissioned
@@ -1426,9 +1421,6 @@ struct vxge_hw_rth_hash_types {
        u8 hash_type_ipv6ex_en;
 };
 
-u32
-vxge_hw_device_debug_mask_get(struct __vxge_hw_device *devh);
-
 void vxge_hw_device_debug_set(
        struct __vxge_hw_device *devh,
        enum vxge_debug_level level,
@@ -1440,9 +1432,6 @@ vxge_hw_device_error_level_get(struct __vxge_hw_device *devh);
 u32
 vxge_hw_device_trace_level_get(struct __vxge_hw_device *devh);
 
-u32
-vxge_hw_device_debug_mask_get(struct __vxge_hw_device *devh);
-
 /**
  * vxge_hw_ring_rxd_size_get   - Get the size of ring descriptor.
  * @buf_mode: Buffer mode (1, 3 or 5)
@@ -1817,60 +1806,10 @@ struct vxge_hw_vpath_attr {
        struct vxge_hw_fifo_attr        fifo_attr;
 };
 
-enum vxge_hw_status
-__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
-                       struct __vxge_hw_blockpool  *blockpool,
-                       u32 pool_size,
-                       u32 pool_max);
-
-void
-__vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool  *blockpool);
-
-struct __vxge_hw_blockpool_entry *
-__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev,
-                       u32 size);
-
-void
-__vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev,
-                       struct __vxge_hw_blockpool_entry *entry);
-
-void *
-__vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev,
-                       u32 size,
-                       struct vxge_hw_mempool_dma *dma_object);
-
-void
-__vxge_hw_blockpool_free(struct __vxge_hw_device *hldev,
-                       void *memblock,
-                       u32 size,
-                       struct vxge_hw_mempool_dma *dma_object);
-
-enum vxge_hw_status
-__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config);
-
-enum vxge_hw_status
-__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config);
-
-enum vxge_hw_status
-vxge_hw_mgmt_device_config(struct __vxge_hw_device *devh,
-               struct vxge_hw_device_config    *dev_config, int size);
-
 enum vxge_hw_status __devinit vxge_hw_device_hw_info_get(
        void __iomem *bar0,
        struct vxge_hw_device_hw_info *hw_info);
 
-enum vxge_hw_status
-__vxge_hw_vpath_fw_ver_get(
-       u32     vp_id,
-       struct vxge_hw_vpath_reg __iomem *vpath_reg,
-       struct vxge_hw_device_hw_info *hw_info);
-
-enum vxge_hw_status
-__vxge_hw_vpath_card_info_get(
-       u32 vp_id,
-       struct vxge_hw_vpath_reg __iomem *vpath_reg,
-       struct vxge_hw_device_hw_info *hw_info);
-
 enum vxge_hw_status __devinit vxge_hw_device_config_default_get(
        struct vxge_hw_device_config *device_config);
 
@@ -1954,38 +1893,6 @@ out:
        return vaddr;
 }
 
-extern void vxge_hw_blockpool_block_add(
-                       struct __vxge_hw_device *devh,
-                       void *block_addr,
-                       u32 length,
-                       struct pci_dev *dma_h,
-                       struct pci_dev *acc_handle);
-
-static inline void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh,
-                                       unsigned long size)
-{
-       gfp_t flags;
-       void *vaddr;
-
-       if (in_interrupt())
-               flags = GFP_ATOMIC | GFP_DMA;
-       else
-               flags = GFP_KERNEL | GFP_DMA;
-
-       vaddr = kmalloc((size), flags);
-
-       vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
-}
-
-static inline void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
-                       struct pci_dev **p_dma_acch)
-{
-       unsigned long misaligned = *(unsigned long *)p_dma_acch;
-       u8 *tmp = (u8 *)vaddr;
-       tmp -= misaligned;
-       kfree((void *)tmp);
-}
-
 /*
  * __vxge_hw_mempool_item_priv - will return pointer on per item private space
  */
@@ -2010,40 +1917,6 @@ __vxge_hw_mempool_item_priv(
                            (*memblock_item_idx) * mempool->items_priv_size;
 }
 
-enum vxge_hw_status
-__vxge_hw_mempool_grow(
-       struct vxge_hw_mempool *mempool,
-       u32 num_allocate,
-       u32 *num_allocated);
-
-struct vxge_hw_mempool*
-__vxge_hw_mempool_create(
-       struct __vxge_hw_device *devh,
-       u32 memblock_size,
-       u32 item_size,
-       u32 private_size,
-       u32 items_initial,
-       u32 items_max,
-       struct vxge_hw_mempool_cbs *mp_callback,
-       void *userdata);
-
-struct __vxge_hw_channel*
-__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
-                       enum __vxge_hw_channel_type type, u32 length,
-                       u32 per_dtr_space, void *userdata);
-
-void
-__vxge_hw_channel_free(
-       struct __vxge_hw_channel *channel);
-
-enum vxge_hw_status
-__vxge_hw_channel_initialize(
-       struct __vxge_hw_channel *channel);
-
-enum vxge_hw_status
-__vxge_hw_channel_reset(
-       struct __vxge_hw_channel *channel);
-
 /*
  * __vxge_hw_fifo_txdl_priv - Return the max fragments allocated
  * for the fifo.
@@ -2065,9 +1938,6 @@ enum vxge_hw_status vxge_hw_vpath_open(
        struct vxge_hw_vpath_attr *attr,
        struct __vxge_hw_vpath_handle **vpath_handle);
 
-enum vxge_hw_status
-__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog);
-
 enum vxge_hw_status vxge_hw_vpath_close(
        struct __vxge_hw_vpath_handle *vpath_handle);
 
@@ -2089,54 +1959,9 @@ enum vxge_hw_status vxge_hw_vpath_mtu_set(
        struct __vxge_hw_vpath_handle *vpath_handle,
        u32 new_mtu);
 
-enum vxge_hw_status vxge_hw_vpath_stats_enable(
-       struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_status
-__vxge_hw_vpath_stats_access(
-       struct __vxge_hw_virtualpath    *vpath,
-       u32                     operation,
-       u32                     offset,
-       u64                     *stat);
-
-enum vxge_hw_status
-__vxge_hw_vpath_xmac_tx_stats_get(
-       struct __vxge_hw_virtualpath    *vpath,
-       struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
-
-enum vxge_hw_status
-__vxge_hw_vpath_xmac_rx_stats_get(
-       struct __vxge_hw_virtualpath    *vpath,
-       struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);
-
-enum vxge_hw_status
-__vxge_hw_vpath_stats_get(
-       struct __vxge_hw_virtualpath *vpath,
-       struct vxge_hw_vpath_stats_hw_info *hw_stats);
-
 void
 vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp);
 
-enum vxge_hw_status
-__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config);
-
-void
-__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);
-
-enum vxge_hw_status
-__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);
-
-enum vxge_hw_status
-__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg);
-
-enum vxge_hw_status
-__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
-       struct vxge_hw_vpath_reg __iomem *vpath_reg);
-
-enum vxge_hw_status
-__vxge_hw_device_register_poll(
-       void __iomem    *reg,
-       u64 mask, u32 max_millis);
 
 #ifndef readq
 static inline u64 readq(void __iomem *addr)
@@ -2168,62 +1993,12 @@ static inline void __vxge_hw_pio_mem_write32_lower(u32 val, void __iomem *addr)
        writel(val, addr);
 }
 
-static inline enum vxge_hw_status
-__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
-                         u64 mask, u32 max_millis)
-{
-       enum vxge_hw_status status = VXGE_HW_OK;
-
-       __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
-       wmb();
-       __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
-       wmb();
-
-       status = __vxge_hw_device_register_poll(addr, mask, max_millis);
-       return status;
-}
-
-struct vxge_hw_toc_reg __iomem *
-__vxge_hw_device_toc_get(void __iomem *bar0);
-
-enum vxge_hw_status
-__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev);
-
-void
-__vxge_hw_device_id_get(struct __vxge_hw_device *hldev);
-
-void
-__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);
-
 enum vxge_hw_status
 vxge_hw_device_flick_link_led(struct __vxge_hw_device *devh, u64 on_off);
 
-enum vxge_hw_status
-__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);
-
-enum vxge_hw_status
-__vxge_hw_vpath_pci_read(
-       struct __vxge_hw_virtualpath    *vpath,
-       u32                     phy_func_0,
-       u32                     offset,
-       u32                     *val);
-
-enum vxge_hw_status
-__vxge_hw_vpath_addr_get(
-       u32 vp_id,
-       struct vxge_hw_vpath_reg __iomem *vpath_reg,
-       u8 (macaddr)[ETH_ALEN],
-       u8 (macaddr_mask)[ETH_ALEN]);
-
-u32
-__vxge_hw_vpath_func_id_get(
-       u32 vp_id, struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);
-
-enum vxge_hw_status
-__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);
-
 enum vxge_hw_status
 vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
+
 /**
  * vxge_debug
  * @level: level of debug verbosity.
index 05679e306fdd5711bc15831fda2d95c0fe54c81a..b67746eef923e2cdeb9886130796e375d3b34818 100644
@@ -1142,7 +1142,7 @@ static const struct ethtool_ops vxge_ethtool_ops = {
        .get_ethtool_stats      = vxge_get_ethtool_stats,
 };
 
-void initialize_ethtool_ops(struct net_device *ndev)
+void vxge_initialize_ethtool_ops(struct net_device *ndev)
 {
        SET_ETHTOOL_OPS(ndev, &vxge_ethtool_ops);
 }
index a69542ecb68de8592e7f9791d4c627104caba046..813829f3d0242be97ff22f97bf1724b7db3c8734 100644
@@ -82,6 +82,16 @@ module_param_array(bw_percentage, uint, NULL, 0);
 
 static struct vxge_drv_config *driver_config;
 
+static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
+                                            struct macInfo *mac);
+static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
+                                            struct macInfo *mac);
+static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac);
+static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac);
+static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath);
+static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath);
+static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
+
 static inline int is_vxge_card_up(struct vxgedev *vdev)
 {
        return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
@@ -138,7 +148,7 @@ static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
  * This function is called during interrupt context to notify link up state
  * change.
  */
-void
+static void
 vxge_callback_link_up(struct __vxge_hw_device *hldev)
 {
        struct net_device *dev = hldev->ndev;
@@ -162,7 +172,7 @@ vxge_callback_link_up(struct __vxge_hw_device *hldev)
  * This function is called during interrupt context to notify link down state
  * change.
  */
-void
+static void
 vxge_callback_link_down(struct __vxge_hw_device *hldev)
 {
        struct net_device *dev = hldev->ndev;
@@ -354,7 +364,7 @@ static inline void vxge_post(int *dtr_cnt, void **first_dtr,
  * If the interrupt is because of a received frame or if the receive ring
  * contains fresh as yet un-processed frames, this function is called.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
                 u8 t_code, void *userdata)
 {
@@ -531,7 +541,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
  * freed and frees all skbs whose data have already DMA'ed into the NICs
  * internal memory.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
                enum vxge_hw_fifo_tcode t_code, void *userdata,
                struct sk_buff ***skb_ptr, int nr_skb, int *more)
@@ -1246,7 +1256,7 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
  *
  * Enables the interrupts for the vpath
 */
-void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
+static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
 {
        struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
        int msix_id = 0;
@@ -1279,7 +1289,7 @@ void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
  *
  * Disables the interrupts for the vpath
 */
-void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
+static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
 {
        struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
        int msix_id;
@@ -1553,7 +1563,7 @@ out:
  *
  * driver may reset the chip on events of serr, eccerr, etc
  */
-int vxge_reset(struct vxgedev *vdev)
+static int vxge_reset(struct vxgedev *vdev)
 {
        return do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
 }
@@ -1724,7 +1734,7 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
        return status;
 }
 
-int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
+static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
 {
        struct vxge_mac_addrs *new_mac_entry;
        u8 *mac_address = NULL;
@@ -1757,7 +1767,8 @@ int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
 }
 
 /* Add a mac address to DA table */
-enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
+static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
+                                            struct macInfo *mac)
 {
        enum vxge_hw_status status = VXGE_HW_OK;
        struct vxge_vpath *vpath;
@@ -1782,7 +1793,7 @@ enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
        return status;
 }
 
-int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
+static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
 {
        struct list_head *entry, *next;
        u64 del_mac = 0;
@@ -1807,7 +1818,8 @@ int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
        return FALSE;
 }
 /* delete a mac address from DA table */
-enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
+static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
+                                            struct macInfo *mac)
 {
        enum vxge_hw_status status = VXGE_HW_OK;
        struct vxge_vpath *vpath;
@@ -1854,7 +1866,7 @@ static vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath,
 }
 
 /* Store all vlan ids from the list to the vid table */
-enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
+static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
 {
        enum vxge_hw_status status = VXGE_HW_OK;
        struct vxgedev *vdev = vpath->vdev;
@@ -1874,7 +1886,7 @@ enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
 }
 
 /* Store all mac addresses from the list to the DA table */
-enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
+static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
 {
        enum vxge_hw_status status = VXGE_HW_OK;
        struct macInfo mac_info;
@@ -1916,7 +1928,7 @@ enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
 }
 
 /* reset vpaths */
-enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
+static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
 {
        enum vxge_hw_status status = VXGE_HW_OK;
        struct vxge_vpath *vpath;
@@ -1948,7 +1960,7 @@ enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
 }
 
 /* close vpaths */
-void vxge_close_vpaths(struct vxgedev *vdev, int index)
+static void vxge_close_vpaths(struct vxgedev *vdev, int index)
 {
        struct vxge_vpath *vpath;
        int i;
@@ -1966,7 +1978,7 @@ void vxge_close_vpaths(struct vxgedev *vdev, int index)
 }
 
 /* open vpaths */
-int vxge_open_vpaths(struct vxgedev *vdev)
+static int vxge_open_vpaths(struct vxgedev *vdev)
 {
        struct vxge_hw_vpath_attr attr;
        enum vxge_hw_status status;
@@ -2517,7 +2529,7 @@ static void vxge_poll_vp_lockup(unsigned long data)
  * Return value: '0' on success and an appropriate (-)ve integer as
  * defined in errno.h file on failure.
  */
-int
+static int
 vxge_open(struct net_device *dev)
 {
        enum vxge_hw_status status;
@@ -2721,7 +2733,7 @@ out0:
 }
 
 /* Loop through the mac address list and delete all the entries */
-void vxge_free_mac_add_list(struct vxge_vpath *vpath)
+static void vxge_free_mac_add_list(struct vxge_vpath *vpath)
 {
 
        struct list_head *entry, *next;
@@ -2745,7 +2757,7 @@ static void vxge_napi_del_all(struct vxgedev *vdev)
        }
 }
 
-int do_vxge_close(struct net_device *dev, int do_io)
+static int do_vxge_close(struct net_device *dev, int do_io)
 {
        enum vxge_hw_status status;
        struct vxgedev *vdev;
@@ -2856,7 +2868,7 @@ int do_vxge_close(struct net_device *dev, int do_io)
  * Return value: '0' on success and an appropriate (-)ve integer as
  * defined in errno.h file on failure.
  */
-int
+static int
 vxge_close(struct net_device *dev)
 {
        do_vxge_close(dev, 1);
@@ -3113,10 +3125,10 @@ static const struct net_device_ops vxge_netdev_ops = {
 #endif
 };
 
-int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
-                                  struct vxge_config *config,
-                                  int high_dma, int no_of_vpath,
-                                  struct vxgedev **vdev_out)
+static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
+                                         struct vxge_config *config,
+                                         int high_dma, int no_of_vpath,
+                                         struct vxgedev **vdev_out)
 {
        struct net_device *ndev;
        enum vxge_hw_status status = VXGE_HW_OK;
@@ -3164,7 +3176,7 @@ int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
 
        ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;
 
-       initialize_ethtool_ops(ndev);
+       vxge_initialize_ethtool_ops(ndev);
 
        /* Allocate memory for vpath */
        vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
@@ -3249,7 +3261,7 @@ _out0:
  *
  * This function will unregister and free network device
  */
-void
+static void
 vxge_device_unregister(struct __vxge_hw_device *hldev)
 {
        struct vxgedev *vdev;
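
The vxge-main.c hunks above all make the same change: functions called only from within this file lose their external linkage. A minimal sketch of the pattern, using vxge_reset() as it appears in the diff (the matching extern prototype is dropped from the header in the next file's hunk):

        /* before: visible to the whole kernel, prototype exported via the header */
        int vxge_reset(struct vxgedev *vdev)
        {
                return do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
        }

        /* after: internal linkage; the compiler can warn if it goes unused
         * and the symbol cannot collide with anything else in the kernel */
        static int vxge_reset(struct vxgedev *vdev)
        {
                return do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
        }
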
index d4be07eaacd72d007faa4cfdeba4dc61bf645ee8..de64536cb7d0d1c32943b6d7b64762e306f8bcb8 100644 (file)
@@ -396,64 +396,7 @@ struct vxge_tx_priv {
                mod_timer(&timer, (jiffies + exp)); \
        } while (0);
 
-int __devinit vxge_device_register(struct __vxge_hw_device *devh,
-                                   struct vxge_config *config,
-                                   int high_dma, int no_of_vpath,
-                                   struct vxgedev **vdev);
-
-void vxge_device_unregister(struct __vxge_hw_device *devh);
-
-void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id);
-
-void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id);
-
-void vxge_callback_link_up(struct __vxge_hw_device *devh);
-
-void vxge_callback_link_down(struct __vxge_hw_device *devh);
-
-enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
-       struct macInfo *mac);
-
-int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac);
-
-int vxge_reset(struct vxgedev *vdev);
-
-enum vxge_hw_status
-vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
-       u8 t_code, void *userdata);
-
-enum vxge_hw_status
-vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
-       enum vxge_hw_fifo_tcode t_code, void *userdata,
-       struct sk_buff ***skb_ptr, int nr_skbs, int *more);
-
-int vxge_close(struct net_device *dev);
-
-int vxge_open(struct net_device *dev);
-
-void vxge_close_vpaths(struct vxgedev *vdev, int index);
-
-int vxge_open_vpaths(struct vxgedev *vdev);
-
-enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
-
-enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
-       struct macInfo *mac);
-
-enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
-       struct macInfo *mac);
-
-int vxge_mac_list_add(struct vxge_vpath *vpath,
-       struct macInfo *mac);
-
-void vxge_free_mac_add_list(struct vxge_vpath *vpath);
-
-enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath);
-
-enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath);
-
-int do_vxge_close(struct net_device *dev, int do_io);
-extern void initialize_ethtool_ops(struct net_device *ndev);
+extern void vxge_initialize_ethtool_ops(struct net_device *ndev);
 /**
  * #define VXGE_DEBUG_INIT: debug for initialization functions
  * #define VXGE_DEBUG_TX        : debug transmit related functions
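
With the prototypes gone, this header keeps only the one symbol that still crosses files, and that symbol gains a vxge_ prefix, since initialize_ethtool_ops is too generic a name for a global. Call sites change in step, as the earlier vxge-main.c hunk shows; a sketch (the definition presumably lives in the driver's ethtool file, an assumption here):

        /* header: the sole remaining cross-file symbol, now namespaced */
        extern void vxge_initialize_ethtool_ops(struct net_device *ndev);

        /* vxge_device_register(), as in the hunk above */
        ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;
        vxge_initialize_ethtool_ops(ndev);
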
index cedf08f99cb3dbf2a2b94e198ef168c4102dc6a5..4bdb611a6842dd6c5bd6a0f7a2a1893c254869f5 100644 (file)
 #include "vxge-config.h"
 #include "vxge-main.h"
 
+static enum vxge_hw_status
+__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev,
+                             u32 vp_id, enum vxge_hw_event type);
+static enum vxge_hw_status
+__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
+                             u32 skip_alarms);
+
 /*
  * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
  * @vp: Virtual Path handle.
@@ -513,7 +520,7 @@ exit:
  * Link up indication handler. The function is invoked by HW when
  * Titan indicates that the link is up for a programmable amount of time.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
 {
        /*
@@ -538,7 +545,7 @@ exit:
  * Link down indication handler. The function is invoked by HW when
  * Titan indicates that the link is down.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
 {
        /*
@@ -564,7 +571,7 @@ exit:
  *
  * Handle error.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_device_handle_error(
                struct __vxge_hw_device *hldev,
                u32 vp_id,
@@ -646,7 +653,7 @@ void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
  * it swaps the reserve and free arrays.
  *
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
 {
        void **tmp_arr;
@@ -692,7 +699,8 @@ _alloc_after_swap:
  * Posts a dtr to work array.
  *
  */
-void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
+static void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel,
+                                    void *dtrh)
 {
        vxge_assert(channel->work_arr[channel->post_index] == NULL);
 
@@ -1657,37 +1665,6 @@ exit:
        return status;
 }
 
-/**
- * vxge_hw_vpath_vid_get_next - Get the next vid entry for this vpath
- *               from vlan id table.
- * @vp: Vpath handle.
- * @vid: Buffer to return vlan id
- *
- * Returns the next vlan id in the list for this vpath.
- * see also: vxge_hw_vpath_vid_get
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_vid_get_next(struct __vxge_hw_vpath_handle *vp, u64 *vid)
-{
-       u64 data;
-       enum vxge_hw_status status = VXGE_HW_OK;
-
-       if (vp == NULL) {
-               status = VXGE_HW_ERR_INVALID_HANDLE;
-               goto exit;
-       }
-
-       status = __vxge_hw_vpath_rts_table_get(vp,
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
-                       0, vid, &data);
-
-       *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
-exit:
-       return status;
-}
-
 /**
  * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
  *               to vlan id table.
@@ -1898,9 +1875,9 @@ exit:
  * Process vpath alarms.
  *
  */
-enum vxge_hw_status __vxge_hw_vpath_alarm_process(
-                       struct __vxge_hw_virtualpath *vpath,
-                       u32 skip_alarms)
+static enum vxge_hw_status
+__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
+                             u32 skip_alarms)
 {
        u64 val64;
        u64 alarm_status;
@@ -2264,36 +2241,6 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
                &hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
 }
 
-/**
- * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
- * @vp: Virtual Path handle.
- * @msix_id:  MSI ID
- *
- * The function clears the msix interrupt for the given msix_id
- *
- * Returns: 0,
- * Otherwise, VXGE_HW_ERR_WRONG_IRQ if the msix index is out of range
- * status.
- * See also:
- */
-void
-vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
-{
-       struct __vxge_hw_device *hldev = vp->vpath->hldev;
-       if (hldev->config.intr_mode ==
-                       VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
-               __vxge_hw_pio_mem_write32_upper(
-                       (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
-                               &hldev->common_reg->
-                                       clr_msix_one_shot_vec[msix_id%4]);
-       } else {
-               __vxge_hw_pio_mem_write32_upper(
-                       (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
-                               &hldev->common_reg->
-                                       clear_msix_mask_vect[msix_id%4]);
-       }
-}
-
 /**
  * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
  * @vp: Virtual Path handle.
@@ -2315,22 +2262,6 @@ vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
                        &hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
 }
 
-/**
- * vxge_hw_vpath_msix_mask_all - Mask all MSIX vectors for the vpath.
- * @vp: Virtual Path handle.
- *
- * The function masks all msix interrupt for the given vpath
- *
- */
-void
-vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vp)
-{
-
-       __vxge_hw_pio_mem_write32_upper(
-               (u32)vxge_bVALn(vxge_mBIT(vp->vpath->vp_id), 0, 32),
-               &vp->vpath->hldev->common_reg->set_msix_mask_all_vect);
-}
-
 /**
  * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
  * @vp: Virtual Path handle.
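
In vxge-traffic.c the same static conversion needs one extra step: __vxge_hw_device_handle_error() and __vxge_hw_vpath_alarm_process() are referenced before they are defined, so the hunk at the top of the file adds file-local forward declarations instead of reordering hundreds of lines. The idiom in miniature (names here are illustrative only):

        /* forward declaration: static, so it stays file-local */
        static int helper(int x);

        int caller(void)
        {
                return helper(42);      /* usable before the definition */
        }

        static int helper(int x)
        {
                return x * 2;
        }
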
index 6fa07d13798e7aef3efec0ace046cd794c169a65..9890d4d596d0d29a5b8083656e416b1c30ca2b74 100644 (file)
@@ -1748,14 +1748,6 @@ vxge_hw_mrpcim_stats_access(
        u32 offset,
        u64 *stat);
 
-enum vxge_hw_status
-vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *devh, u32 port,
-                                  struct vxge_hw_xmac_aggr_stats *aggr_stats);
-
-enum vxge_hw_status
-vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *devh, u32 port,
-                                  struct vxge_hw_xmac_port_stats *port_stats);
-
 enum vxge_hw_status
 vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *devh,
                              struct vxge_hw_xmac_stats *xmac_stats);
@@ -2117,49 +2109,10 @@ struct __vxge_hw_ring_rxd_priv {
 #endif
 };
 
-/* ========================= RING PRIVATE API ============================= */
-u64
-__vxge_hw_ring_first_block_address_get(
-       struct __vxge_hw_ring *ringh);
-
-enum vxge_hw_status
-__vxge_hw_ring_create(
-       struct __vxge_hw_vpath_handle *vpath_handle,
-       struct vxge_hw_ring_attr *attr);
-
-enum vxge_hw_status
-__vxge_hw_ring_abort(
-       struct __vxge_hw_ring *ringh);
-
-enum vxge_hw_status
-__vxge_hw_ring_reset(
-       struct __vxge_hw_ring *ringh);
-
-enum vxge_hw_status
-__vxge_hw_ring_delete(
-       struct __vxge_hw_vpath_handle *vpath_handle);
-
 /* ========================= FIFO PRIVATE API ============================= */
 
 struct vxge_hw_fifo_attr;
 
-enum vxge_hw_status
-__vxge_hw_fifo_create(
-       struct __vxge_hw_vpath_handle *vpath_handle,
-       struct vxge_hw_fifo_attr *attr);
-
-enum vxge_hw_status
-__vxge_hw_fifo_abort(
-       struct __vxge_hw_fifo *fifoh);
-
-enum vxge_hw_status
-__vxge_hw_fifo_reset(
-       struct __vxge_hw_fifo *ringh);
-
-enum vxge_hw_status
-__vxge_hw_fifo_delete(
-       struct __vxge_hw_vpath_handle *vpath_handle);
-
 struct vxge_hw_mempool_cbs {
        void (*item_func_alloc)(
                        struct vxge_hw_mempool *mempoolh,
@@ -2169,10 +2122,6 @@ struct vxge_hw_mempool_cbs {
                        u32                     is_last);
 };
 
-void
-__vxge_hw_mempool_destroy(
-       struct vxge_hw_mempool *mempool);
-
 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath)                             \
                ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
 
@@ -2194,62 +2143,11 @@ __vxge_hw_vpath_rts_table_set(
        u64                     data1,
        u64                     data2);
 
-enum vxge_hw_status
-__vxge_hw_vpath_reset(
-       struct __vxge_hw_device *devh,
-       u32                     vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_sw_reset(
-       struct __vxge_hw_device *devh,
-       u32                     vp_id);
-
 enum vxge_hw_status
 __vxge_hw_vpath_enable(
        struct __vxge_hw_device *devh,
        u32                     vp_id);
 
-void
-__vxge_hw_vpath_prc_configure(
-       struct __vxge_hw_device *devh,
-       u32                     vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_kdfc_configure(
-       struct __vxge_hw_device *devh,
-       u32                     vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_mac_configure(
-       struct __vxge_hw_device *devh,
-       u32                     vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_tim_configure(
-       struct __vxge_hw_device *devh,
-       u32                     vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_initialize(
-       struct __vxge_hw_device *devh,
-       u32                     vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vp_initialize(
-       struct __vxge_hw_device *devh,
-       u32                     vp_id,
-       struct vxge_hw_vp_config        *config);
-
-void
-__vxge_hw_vp_terminate(
-       struct __vxge_hw_device *devh,
-       u32                     vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_alarm_process(
-       struct __vxge_hw_virtualpath    *vpath,
-       u32                     skip_alarms);
-
 void vxge_hw_device_intr_enable(
        struct __vxge_hw_device *devh);
 
@@ -2320,11 +2218,6 @@ vxge_hw_vpath_vid_get(
        struct __vxge_hw_vpath_handle *vpath_handle,
        u64                     *vid);
 
-enum vxge_hw_status
-vxge_hw_vpath_vid_get_next(
-       struct __vxge_hw_vpath_handle *vpath_handle,
-       u64                     *vid);
-
 enum vxge_hw_status
 vxge_hw_vpath_vid_delete(
        struct __vxge_hw_vpath_handle *vpath_handle,
@@ -2386,17 +2279,10 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle,
 
 void vxge_hw_device_flush_io(struct __vxge_hw_device *devh);
 
-void
-vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vpath_handle,
-                        int msix_id);
-
 void
 vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vpath_handle,
                          int msix_id);
 
-void
-vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vpath_handle);
-
 enum vxge_hw_status vxge_hw_vpath_intr_enable(
                                struct __vxge_hw_vpath_handle *vpath_handle);
 
@@ -2415,12 +2301,6 @@ vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channelh, int msix_id);
 void
 vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id);
 
-enum vxge_hw_status
-vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh);
-
-void
-vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh);
-
 void
 vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel,
                                 void **dtrh);
@@ -2436,18 +2316,4 @@ vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
 void
 vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id);
 
-/* ========================== PRIVATE API ================================= */
-
-enum vxge_hw_status
-__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev);
-
-enum vxge_hw_status
-__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev);
-
-enum vxge_hw_status
-__vxge_hw_device_handle_error(
-               struct __vxge_hw_device *hldev,
-               u32 vp_id,
-               enum vxge_hw_event type);
-
 #endif
index f1ae75d35d5d4680085c2a9d8148fc2a04233a6b..8251946842e6186bddcd1cac303dbaea74664410 100644 (file)
@@ -3580,6 +3580,7 @@ ath5k_pci_probe(struct pci_dev *pdev,
        common->ah = sc->ah;
        common->hw = hw;
        common->cachelsz = csz << 2; /* convert to bytes */
+       spin_lock_init(&common->cc_lock);
 
        /* Initialize device */
        ret = ath5k_hw_attach(sc);
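
Both ath5k (here) and ath9k (in a later hunk) gain a spin_lock_init(&common->cc_lock) during probe. A spinlock must be initialized before its first acquisition, and since the shared cycle-counter code takes cc_lock from interrupt context, probe is the only safe place. A hedged sketch of the intended pairing (the exact lock flavour at the call sites is an assumption):

        /* probe path: initialize once, before interrupts are enabled */
        spin_lock_init(&common->cc_lock);

        /* later, e.g. from the irq handler or survey update (assumed usage) */
        unsigned long flags;

        spin_lock_irqsave(&common->cc_lock, flags);
        ath_hw_cycle_counters_update(common);
        spin_unlock_irqrestore(&common->cc_lock, flags);
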
index ec98ab50748a7e0d31189a9d414ab28a35b6fda5..a14a5e43cf56d2e5e61d43789b2d348c04a55f0e 100644 (file)
@@ -34,6 +34,10 @@ static const u32 ar9300_2p2_radio_postamble[][5] = {
 
 static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p2[][5] = {
        /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+       {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+       {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+       {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+       {0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
        {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
@@ -99,6 +103,30 @@ static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p2[][5] = {
        {0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
        {0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
        {0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+       {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+       {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+       {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
+       {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
+       {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+       {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
+       {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
+       {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+       {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+       {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+       {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+       {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+       {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+       {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+       {0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+       {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+       {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+       {0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
        {0x00016048, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
        {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
@@ -118,7 +146,7 @@ static const u32 ar9300Modes_fast_clock_2p2[][3] = {
        {0x00008014, 0x044c044c, 0x08980898},
        {0x0000801c, 0x148ec02b, 0x148ec057},
        {0x00008318, 0x000044c0, 0x00008980},
-       {0x00009e00, 0x03721821, 0x03721821},
+       {0x00009e00, 0x0372131c, 0x0372131c},
        {0x0000a230, 0x0000000b, 0x00000016},
        {0x0000a254, 0x00000898, 0x00001130},
 };
@@ -595,15 +623,16 @@ static const u32 ar9300_2p2_baseband_postamble[][5] = {
        {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
        {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
        {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
-       {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
-       {0x00009e04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
+       {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
+       {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
        {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
        {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
-       {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+       {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
        {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
        {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
        {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+       {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
        {0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
        {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
        {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -624,16 +653,16 @@ static const u32 ar9300_2p2_baseband_postamble[][5] = {
        {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
        {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
        {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071982},
-       {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
+       {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
        {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
-       {0x0000ae04, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+       {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
        {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
        {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
        {0x0000b284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
        {0x0000b830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
-       {0x0000be04, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+       {0x0000be04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
        {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x0000be1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
        {0x0000be20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
@@ -649,13 +678,13 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
        {0x00009814, 0x9280c00a},
        {0x00009818, 0x00000000},
        {0x0000981c, 0x00020028},
-       {0x00009834, 0x5f3ca3de},
+       {0x00009834, 0x6400a290},
        {0x00009838, 0x0108ecff},
        {0x0000983c, 0x14750600},
        {0x00009880, 0x201fff00},
        {0x00009884, 0x00001042},
        {0x000098a4, 0x00200400},
-       {0x000098b0, 0x52440bbe},
+       {0x000098b0, 0x32840bbe},
        {0x000098d0, 0x004b6a8e},
        {0x000098d4, 0x00000820},
        {0x000098dc, 0x00000000},
@@ -681,7 +710,6 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
        {0x00009e30, 0x06336f77},
        {0x00009e34, 0x6af6532f},
        {0x00009e38, 0x0cc80c00},
-       {0x00009e3c, 0xcf946222},
        {0x00009e40, 0x0d261820},
        {0x00009e4c, 0x00001004},
        {0x00009e50, 0x00ff03f1},
@@ -694,7 +722,7 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
        {0x0000a220, 0x00000000},
        {0x0000a224, 0x00000000},
        {0x0000a228, 0x10002310},
-       {0x0000a22c, 0x01036a1e},
+       {0x0000a22c, 0x01036a27},
        {0x0000a23c, 0x00000000},
        {0x0000a244, 0x0c000000},
        {0x0000a2a0, 0x00000001},
@@ -702,10 +730,6 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
        {0x0000a2c8, 0x00000000},
        {0x0000a2cc, 0x18c43433},
        {0x0000a2d4, 0x00000000},
-       {0x0000a2dc, 0x00000000},
-       {0x0000a2e0, 0x00000000},
-       {0x0000a2e4, 0x00000000},
-       {0x0000a2e8, 0x00000000},
        {0x0000a2ec, 0x00000000},
        {0x0000a2f0, 0x00000000},
        {0x0000a2f4, 0x00000000},
@@ -753,33 +777,17 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
        {0x0000a430, 0x1ce739ce},
        {0x0000a434, 0x00000000},
        {0x0000a438, 0x00001801},
-       {0x0000a43c, 0x00000000},
+       {0x0000a43c, 0x00100000},
        {0x0000a440, 0x00000000},
        {0x0000a444, 0x00000000},
        {0x0000a448, 0x06000080},
        {0x0000a44c, 0x00000001},
        {0x0000a450, 0x00010000},
        {0x0000a458, 0x00000000},
-       {0x0000a600, 0x00000000},
-       {0x0000a604, 0x00000000},
-       {0x0000a608, 0x00000000},
-       {0x0000a60c, 0x00000000},
-       {0x0000a610, 0x00000000},
-       {0x0000a614, 0x00000000},
-       {0x0000a618, 0x00000000},
-       {0x0000a61c, 0x00000000},
-       {0x0000a620, 0x00000000},
-       {0x0000a624, 0x00000000},
-       {0x0000a628, 0x00000000},
-       {0x0000a62c, 0x00000000},
-       {0x0000a630, 0x00000000},
-       {0x0000a634, 0x00000000},
-       {0x0000a638, 0x00000000},
-       {0x0000a63c, 0x00000000},
        {0x0000a640, 0x00000000},
        {0x0000a644, 0x3fad9d74},
        {0x0000a648, 0x0048060a},
-       {0x0000a64c, 0x00000637},
+       {0x0000a64c, 0x00003c37},
        {0x0000a670, 0x03020100},
        {0x0000a674, 0x09080504},
        {0x0000a678, 0x0d0c0b0a},
@@ -802,10 +810,6 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
        {0x0000a8f4, 0x00000000},
        {0x0000b2d0, 0x00000080},
        {0x0000b2d4, 0x00000000},
-       {0x0000b2dc, 0x00000000},
-       {0x0000b2e0, 0x00000000},
-       {0x0000b2e4, 0x00000000},
-       {0x0000b2e8, 0x00000000},
        {0x0000b2ec, 0x00000000},
        {0x0000b2f0, 0x00000000},
        {0x0000b2f4, 0x00000000},
@@ -820,10 +824,6 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
        {0x0000b8f4, 0x00000000},
        {0x0000c2d0, 0x00000080},
        {0x0000c2d4, 0x00000000},
-       {0x0000c2dc, 0x00000000},
-       {0x0000c2e0, 0x00000000},
-       {0x0000c2e4, 0x00000000},
-       {0x0000c2e8, 0x00000000},
        {0x0000c2ec, 0x00000000},
        {0x0000c2f0, 0x00000000},
        {0x0000c2f4, 0x00000000},
@@ -835,6 +835,10 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
 
 static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
        /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+       {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+       {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+       {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+       {0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
        {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
        {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
@@ -855,7 +859,7 @@ static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
        {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
        {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
        {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
-       {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
+       {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
        {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
        {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
        {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
@@ -900,6 +904,30 @@ static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
        {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
        {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
        {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+       {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
+       {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
+       {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
+       {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
+       {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
+       {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
+       {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
+       {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+       {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+       {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+       {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+       {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+       {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+       {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+       {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+       {0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+       {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+       {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+       {0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x00016044, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
        {0x00016048, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
        {0x00016068, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
@@ -913,6 +941,10 @@ static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
 
 static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p2[][5] = {
        /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+       {0x0000a2dc, 0x01feee00, 0x01feee00, 0x00637800, 0x00637800},
+       {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03838000, 0x03838000},
+       {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03fc0000, 0x03fc0000},
+       {0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
        {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
        {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
@@ -933,7 +965,7 @@ static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p2[][5] = {
        {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
        {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
        {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
-       {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
+       {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
        {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
        {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
        {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
@@ -978,6 +1010,30 @@ static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p2[][5] = {
        {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
        {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
        {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+       {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
+       {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
+       {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
+       {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
+       {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
+       {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
+       {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
+       {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+       {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+       {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+       {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+       {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+       {0x0000b2dc, 0x01feee00, 0x01feee00, 0x00637800, 0x00637800},
+       {0x0000b2e0, 0x0000f000, 0x0000f000, 0x03838000, 0x03838000},
+       {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03fc0000, 0x03fc0000},
+       {0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000c2dc, 0x01feee00, 0x01feee00, 0x00637800, 0x00637800},
+       {0x0000c2e0, 0x0000f000, 0x0000f000, 0x03838000, 0x03838000},
+       {0x0000c2e4, 0x01ff0000, 0x01ff0000, 0x03fc0000, 0x03fc0000},
+       {0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x00016044, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
        {0x00016048, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
        {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
@@ -1151,14 +1207,14 @@ static const u32 ar9300Common_rx_gain_table_2p2[][2] = {
        {0x0000b074, 0x00000000},
        {0x0000b078, 0x00000000},
        {0x0000b07c, 0x00000000},
-       {0x0000b080, 0x32323232},
-       {0x0000b084, 0x2f2f3232},
-       {0x0000b088, 0x23282a2d},
-       {0x0000b08c, 0x1c1e2123},
-       {0x0000b090, 0x14171919},
-       {0x0000b094, 0x0e0e1214},
-       {0x0000b098, 0x03050707},
-       {0x0000b09c, 0x00030303},
+       {0x0000b080, 0x2a2d2f32},
+       {0x0000b084, 0x21232328},
+       {0x0000b088, 0x19191c1e},
+       {0x0000b08c, 0x12141417},
+       {0x0000b090, 0x07070e0e},
+       {0x0000b094, 0x03030305},
+       {0x0000b098, 0x00000003},
+       {0x0000b09c, 0x00000000},
        {0x0000b0a0, 0x00000000},
        {0x0000b0a4, 0x00000000},
        {0x0000b0a8, 0x00000000},
@@ -1251,6 +1307,10 @@ static const u32 ar9300Common_rx_gain_table_2p2[][2] = {
 
 static const u32 ar9300Modes_low_ob_db_tx_gain_table_2p2[][5] = {
        /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+       {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+       {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+       {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+       {0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
        {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
@@ -1316,6 +1376,30 @@ static const u32 ar9300Modes_low_ob_db_tx_gain_table_2p2[][5] = {
        {0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
        {0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
        {0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+       {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+       {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+       {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
+       {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
+       {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+       {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
+       {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
+       {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+       {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+       {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+       {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+       {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+       {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+       {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+       {0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+       {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+       {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+       {0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
        {0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
        {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
@@ -1414,15 +1498,10 @@ static const u32 ar9300_2p2_mac_core[][2] = {
        {0x00008144, 0xffffffff},
        {0x00008168, 0x00000000},
        {0x0000816c, 0x00000000},
-       {0x00008170, 0x18486200},
-       {0x00008174, 0x33332210},
-       {0x00008178, 0x00000000},
-       {0x0000817c, 0x00020000},
        {0x000081c0, 0x00000000},
        {0x000081c4, 0x33332210},
        {0x000081c8, 0x00000000},
        {0x000081cc, 0x00000000},
-       {0x000081d4, 0x00000000},
        {0x000081ec, 0x00000000},
        {0x000081f0, 0x00000000},
        {0x000081f4, 0x00000000},
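
The ar9003 initvals hunks move the 0x0000a2dc-0x0000a2e8 and 0x0000a600-0x0000a63c register blocks out of the common baseband core table and into each tx-gain table, so their values can differ per gain profile; the fast-clock, postamble and rx-gain rows get retuned values. Each row is {addr, 5G_HT20, 5G_HT40, 2G_HT40, 2G_HT20}, and the driver writes one column per channel mode. A simplified sketch of how such a table is consumed (the real driver goes through its ini-array/REG_WRITE_ARRAY machinery; this helper is illustrative only):

        /* illustrative only: write the column matching the current mode */
        static void write_initvals(struct ath_hw *ah,
                                   const u32 tbl[][5], int rows, int column)
        {
                int i;

                for (i = 0; i < rows; i++)
                        REG_WRITE(ah, tbl[i][0], tbl[i][column]);
        }
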
index 7c38229ba670a28a624ab521a02820eae581c10a..716db414c258f254263a47238f45aa9fd6c2d813 100644 (file)
@@ -347,6 +347,10 @@ static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain)
            (((Y[6] - Y[3]) * 1 << scale_factor) +
             (x_est[6] - x_est[3])) / (x_est[6] - x_est[3]);
 
+       /* prevent division by zero */
+       if (G_fxp == 0)
+               return false;
+
        Y_intercept =
            (G_fxp * (x_est[0] - x_est[3]) +
             (1 << scale_factor)) / (1 << scale_factor) + Y[3];
@@ -356,14 +360,12 @@ static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain)
 
        for (i = 0; i <= 3; i++) {
                y_est[i] = i * 32;
-
-               /* prevent division by zero */
-               if (G_fxp == 0)
-                       return false;
-
                x_est[i] = ((y_est[i] * 1 << scale_factor) + G_fxp) / G_fxp;
        }
 
+       if (y_est[max_index] == 0)
+               return false;
+
        x_est_fxp1_nonlin =
            x_est[max_index] - ((1 << scale_factor) * y_est[max_index] +
                                G_fxp) / G_fxp;
@@ -457,6 +459,8 @@ static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain)
 
        Q_scale_B = find_proper_scale(find_expn(abs(scale_B)), 10);
        scale_B = scale_B / (1 << Q_scale_B);
+       if (scale_B == 0)
+               return false;
        Q_beta = find_proper_scale(find_expn(abs(beta_raw)), 10);
        Q_alpha = find_proper_scale(find_expn(abs(alpha_raw)), 10);
        beta_raw = beta_raw / (1 << Q_beta);
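
create_pa_curve() divides by G_fxp, y_est[max_index] and scale_B, all of which are derived from measured samples and can legitimately come out zero. The fix hoists the existing G_fxp check above its first use and adds the two missing guards, bailing out of the calibration round instead of faulting:

        if (G_fxp == 0)
                return false;   /* degenerate gain estimate: skip this run */

        if (y_est[max_index] == 0)
                return false;

        if (scale_B == 0)
                return false;
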
index 4ed010d4ef96d9f3288cc9d1c81dbcd8d24e50fa..19891e7d49aea7e371cfaf43d0228013f03e313e 100644 (file)
@@ -370,7 +370,7 @@ void ath_beacon_tasklet(unsigned long data)
                        ath_print(common, ATH_DBG_BSTUCK,
                                  "beacon is officially stuck\n");
                        sc->sc_flags |= SC_OP_TSF_RESET;
-                       ath_reset(sc, false);
+                       ath_reset(sc, true);
                }
 
                return;
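
Several ath9k call sites flip the second argument of ath_reset() from false to true. In this driver that flag is retry_tx (an assumption from the function's signature in this era), so frames pending in the hardware queues are re-queued after the chip reset instead of being dropped; the stuck-beacon, hw-check, fatal-interrupt and tx-hang recovery paths all want that behaviour. The common shape around a recovery reset:

        ath9k_ps_wakeup(sc);
        ath_reset(sc, true);    /* true: re-queue pending tx after the reset */
        ath9k_ps_restore(sc);
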
index bc6c4df9712c5b921041fdc7d1f49a1214a5a091..95b41db0d86b5b945189ba2c6fd1034043c26811 100644 (file)
@@ -577,6 +577,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
        common->hw = sc->hw;
        common->priv = sc;
        common->debug_mask = ath9k_debug;
+       spin_lock_init(&common->cc_lock);
 
        spin_lock_init(&sc->wiphy_lock);
        spin_lock_init(&sc->sc_resetlock);
index 3ff0e476c2b3d7ad4cc209360042c6c5097d87a8..c6ec800d7a6b6ef4fe91cc00ccffdb543d3504d0 100644 (file)
@@ -182,6 +182,9 @@ static void ath_update_survey_stats(struct ath_softc *sc)
        struct ath_cycle_counters *cc = &common->cc_survey;
        unsigned int div = common->clockrate * 1000;
 
+       if (!ah->curchan)
+               return;
+
        if (ah->power_mode == ATH9K_PM_AWAKE)
                ath_hw_cycle_counters_update(common);
 
@@ -577,7 +580,7 @@ void ath_hw_check(struct work_struct *work)
 
                msleep(1);
        }
-       ath_reset(sc, false);
+       ath_reset(sc, true);
 
 out:
        ath9k_ps_restore(sc);
@@ -595,7 +598,7 @@ void ath9k_tasklet(unsigned long data)
        ath9k_ps_wakeup(sc);
 
        if (status & ATH9K_INT_FATAL) {
-               ath_reset(sc, false);
+               ath_reset(sc, true);
                ath9k_ps_restore(sc);
                return;
        }
index d077186da870ea2ccc507b168fba81d21ffcac8d..30ef2dfc1ed2037ee63c54ec0a16223732194ac4 100644 (file)
@@ -673,6 +673,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
        u16 aggr_limit = 0, al = 0, bpad = 0,
                al_delta, h_baw = tid->baw_size / 2;
        enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
+       struct ieee80211_tx_info *tx_info;
 
        bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);
 
@@ -699,6 +700,11 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
                        break;
                }
 
+               tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
+               if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
+                       !(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
+                       break;
+
                /* do not exceed subframe limit */
                if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
                        status = ATH_AGGR_LIMITED;
@@ -2157,7 +2163,7 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
                ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
                          "tx hung, resetting the chip\n");
                ath9k_ps_wakeup(sc);
-               ath_reset(sc, false);
+               ath_reset(sc, true);
                ath9k_ps_restore(sc);
        }
 
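The xmit.c hunk also stops ath_tx_form_aggr() from merging unsuitable frames into an A-MPDU: once at least one frame has been aggregated (nframes non-zero), a rate-control probe frame or a frame whose first rate is not an MCS rate terminates the aggregate, presumably because a probe needs unambiguous per-frame feedback and legacy rates cannot carry an A-MPDU. The added condition, as in the hunk:

        tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
        if (nframes &&
            ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
             !(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
                break;  /* close the aggregate before this frame */
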
index f78728c38294e07c6098853e8fb902f3c24d9bb7..568174c71b94af0b830566b6d4bacb33379be1df 100644 (file)
@@ -116,8 +116,9 @@ __regwrite_out :                                                    \
 } while (0);
 
 
-#define carl9170_async_get_buf()                                       \
+#define carl9170_async_regwrite_get_buf()                              \
 do {                                                                   \
+       __nreg = 0;                                                     \
        __cmd = carl9170_cmd_buf(__carl, CARL9170_CMD_WREG_ASYNC,       \
                                 CARL9170_MAX_CMD_PAYLOAD_LEN);         \
        if (__cmd == NULL) {                                            \
@@ -128,38 +129,42 @@ do {                                                                      \
 
 #define carl9170_async_regwrite_begin(carl)                            \
 do {                                                                   \
-       int __nreg = 0, __err = 0;                                      \
        struct ar9170 *__carl = carl;                                   \
        struct carl9170_cmd *__cmd;                                     \
-       carl9170_async_get_buf();                                       \
+       unsigned int __nreg;                                            \
+       int  __err = 0;                                                 \
+       carl9170_async_regwrite_get_buf();                              \
+
+#define carl9170_async_regwrite_flush()                                        \
+do {                                                                   \
+       if (__cmd == NULL || __nreg == 0)                               \
+               break;                                                  \
+                                                                       \
+       if (IS_ACCEPTING_CMD(__carl) && __nreg) {                       \
+               __cmd->hdr.len = 8 * __nreg;                            \
+               __err = __carl9170_exec_cmd(__carl, __cmd, true);       \
+               __cmd = NULL;                                           \
+               break;                                                  \
+       }                                                               \
+       goto __async_regwrite_out;                                      \
+} while (0)
 
 #define carl9170_async_regwrite(r, v) do {                             \
+       if (__cmd == NULL)                                              \
+               carl9170_async_regwrite_get_buf();                      \
        __cmd->wreg.regs[__nreg].addr = cpu_to_le32(r);                 \
        __cmd->wreg.regs[__nreg].val = cpu_to_le32(v);                  \
        __nreg++;                                                       \
-       if ((__nreg >= PAYLOAD_MAX/2)) {                                \
-               if (IS_ACCEPTING_CMD(__carl)) {                         \
-                       __cmd->hdr.len = 8 * __nreg;                    \
-                       __err = __carl9170_exec_cmd(__carl, __cmd, true);\
-                       __cmd = NULL;                                   \
-                       carl9170_async_get_buf();                       \
-               } else {                                                \
-                       goto __async_regwrite_out;                      \
-               }                                                       \
-               __nreg = 0;                                             \
-               if (__err)                                              \
-                       goto __async_regwrite_out;                      \
-       }                                                               \
+       if ((__nreg >= PAYLOAD_MAX / 2))                                \
+               carl9170_async_regwrite_flush();                        \
 } while (0)
 
-#define carl9170_async_regwrite_finish()                               \
+#define carl9170_async_regwrite_finish() do {                          \
 __async_regwrite_out :                                                 \
-       if (__err == 0 && __nreg) {                                     \
-               __cmd->hdr.len = 8 * __nreg;                            \
-               if (IS_ACCEPTING_CMD(__carl))                           \
-                       __err = __carl9170_exec_cmd(__carl, __cmd, true);\
-               __nreg = 0;                                             \
-       }
+       if (__cmd != NULL && __err == 0)                                \
+               carl9170_async_regwrite_flush();                        \
+       kfree(__cmd);                                                   \
+} while (0)                                                            \
 
 #define carl9170_async_regwrite_result()                               \
        __err;                                                          \
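
The carl9170 rework makes the async-regwrite macros allocate their command buffer lazily, adds an explicit carl9170_async_regwrite_flush(), and frees the buffer in _finish() so error paths no longer leak it. A hedged usage sketch (the register addresses and values here are made up):

        int err;

        carl9170_async_regwrite_begin(ar);
        carl9170_async_regwrite(0x1c3638, 0x12345678);  /* hypothetical reg/val */
        carl9170_async_regwrite(0x1c363c, 0x0);
        carl9170_async_regwrite_finish();
        err = carl9170_async_regwrite_result();
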
index 3cc99f3f7ab5728af8eeb40c759f954a319671b7..980ae70ea424f6dc50a93734446536588bea9734 100644 (file)
@@ -639,8 +639,8 @@ init:
                if (err)
                        goto unlock;
        } else {
-               err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr);
                rcu_read_unlock();
+               err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr);
 
                if (err)
                        goto unlock;
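
main.c reorders rcu_read_unlock() ahead of carl9170_mod_virtual_mac(); the command path can block while waiting on the firmware response, which must not happen inside an RCU read-side critical section (the likely motivation, stated here as an assumption). The general shape of the fix:

        rcu_read_lock();
        /* ... dereference RCU-protected state, copy out what is needed ... */
        rcu_read_unlock();

        /* only now issue the (potentially blocking) firmware command */
        err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr);
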
index c7f6193934eaabe82cbe8cb5acdbd23afda48662..d8607f4c144d80741f26eb133323aff5cebe936c 100644 (file)
@@ -591,16 +591,23 @@ int __carl9170_exec_cmd(struct ar9170 *ar, struct carl9170_cmd *cmd,
                        const bool free_buf)
 {
        struct urb *urb;
+       int err = 0;
 
-       if (!IS_INITIALIZED(ar))
-               return -EPERM;
+       if (!IS_INITIALIZED(ar)) {
+               err = -EPERM;
+               goto err_free;
+       }
 
-       if (WARN_ON(cmd->hdr.len > CARL9170_MAX_CMD_LEN - 4))
-               return -EINVAL;
+       if (WARN_ON(cmd->hdr.len > CARL9170_MAX_CMD_LEN - 4)) {
+               err = -EINVAL;
+               goto err_free;
+       }
 
        urb = usb_alloc_urb(0, GFP_ATOMIC);
-       if (!urb)
-               return -ENOMEM;
+       if (!urb) {
+               err = -ENOMEM;
+               goto err_free;
+       }
 
        usb_fill_int_urb(urb, ar->udev, usb_sndintpipe(ar->udev,
                AR9170_USB_EP_CMD), cmd, cmd->hdr.len + 4,
@@ -613,6 +620,12 @@ int __carl9170_exec_cmd(struct ar9170 *ar, struct carl9170_cmd *cmd,
        usb_free_urb(urb);
 
        return carl9170_usb_submit_cmd_urb(ar);
+
+err_free:
+       if (free_buf)
+               kfree(cmd);
+
+       return err;
 }
 
 int carl9170_exec_cmd(struct ar9170 *ar, const enum carl9170_cmd_oids cmd,
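
__carl9170_exec_cmd() takes ownership of cmd when free_buf is true, but the early returns skipped the kfree(), leaking the buffer on -EPERM, -EINVAL and -ENOMEM. Converting them to a single unwind label keeps the ownership contract in one place:

        err_free:
                if (free_buf)
                        kfree(cmd);     /* honour ownership on every early exit */
                return err;
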
index dfec5496055e410b7beffdb02e8d2cac556e016f..e0f2d122e124486b7e1405eabd48ce80cdde54bb 100644 (file)
@@ -2964,7 +2964,7 @@ static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev,
                                        (2 - i));
                }
 
-               for (j = 0; i < 4; j++) {
+               for (j = 0; j < 4; j++) {
                        if (j < 3) {
                                cur_lna = lna[j];
                                cur_hpf1 = hpf1[j];
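
The b43 fix is a one-character loop-variable typo: the loop counted with j but tested i, so depending on what i held from the enclosing loop the body either never ran or overran its 4-element tables:

        for (j = 0; i < 4; j++)         /* before: tests the wrong variable */
                ...
        for (j = 0; j < 4; j++)         /* after */
                ...
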
index db57aea629d93857e6a3b068ce74059b87c0d771..2b078a995729a0bbaa2ccc311fea83fa74bb7e19 100644 (file)
@@ -1227,7 +1227,8 @@ static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv,
        struct ieee80211_tx_info *info;
 
        if (unlikely(!agg->wait_for_ba))  {
-               IWL_ERR(priv, "Received BA when not expected\n");
+               if (unlikely(ba_resp->bitmap))
+                       IWL_ERR(priv, "Received BA when not expected\n");
                return -EINVAL;
        }
 
index 4fe246824db33c5ea47813d049699136958a89e9..58b4f935a3f634f89157e32389e90cbf29898310 100644 (file)
@@ -1,6 +1,8 @@
 wl1251-objs            = main.o event.o tx.o rx.o ps.o cmd.o \
                          acx.o boot.o init.o debugfs.o io.o
+wl1251_spi-objs                += spi.o
+wl1251_sdio-objs       += sdio.o
 
-obj-$(CONFIG_WL1251)   += wl1251.o
-obj-$(CONFIG_WL1251_SPI)       += spi.o
-obj-$(CONFIG_WL1251_SDIO)      += sdio.o
+obj-$(CONFIG_WL1251)           += wl1251.o
+obj-$(CONFIG_WL1251_SPI)       += wl1251_spi.o
+obj-$(CONFIG_WL1251_SDIO)      += wl1251_sdio.o
index 01f0306525a504a7ed2a9f954f7376e06170c57e..895136f13edcf9bd4f0402d9eeacf62d229dd722 100644 (file)
@@ -212,8 +212,6 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
 #endif /* HAVE_PCI_MMAP */
        int ret = 0;
 
-       lock_kernel();
-
        switch (cmd) {
        case PCIIOC_CONTROLLER:
                ret = pci_domain_nr(dev->bus);
@@ -242,7 +240,6 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
                break;
        };
 
-       unlock_kernel();
        return ret;
 }
 
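proc_bus_pci_ioctl() drops its lock_kernel()/unlock_kernel() pair as part of the tree-wide big-kernel-lock removal; everything the handler touches is either immutable after probe or covered by its own locking, so the BKL added nothing (reasoning inferred, not stated in the diff). Where a handler genuinely needs serialization after such a removal, the customary replacement is a private mutex, roughly (hypothetical, not part of this patch):

        static DEFINE_MUTEX(pci_proc_ioctl_mutex);      /* hypothetical */

        mutex_lock(&pci_proc_ioctl_mutex);
        /* ... handler body needing serialization ... */
        mutex_unlock(&pci_proc_ioctl_mutex);
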
index f540ff96c53f8b732dcd4667a9afeccb64fc13dd..e61db9dfebefd2382c65cbbecf8bdb57d3e63f28 100644 (file)
@@ -29,7 +29,6 @@
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/gpio.h>
-#include <linux/interrupt.h>
 #include <asm/intel_scu_ipc.h>
 #include <linux/device.h>
 #include <linux/intel_pmic_gpio.h>
index 3222fa3c808ca4c96eeb3fc436122f041a84eebc..0f4a53bdaa3cf9b8e60c4c8dfbf5d6f80b3cf882 100644 (file)
@@ -192,7 +192,7 @@ static int rio_match_bus(struct device *dev, struct device_driver *drv)
       out:return 0;
 }
 
-static struct device rio_bus = {
+struct device rio_bus = {
        .init_name = "rapidio",
 };
 
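rio_bus loses its static so that the enumeration code (see the later rio-scan.c hunk) can parent every RapidIO device under one root device in sysfs instead of leaving them floating at the top of /sys/devices. The matching declaration presumably moves to a shared header (an assumption):

        /* shared header (assumed) */
        extern struct device rio_bus;

        /* rio-scan.c, when a device is set up, as in the hunk below */
        rdev->dev.bus = &rio_bus_type;
        rdev->dev.parent = &rio_bus;
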
index 8070e074c739f89b0902cfcc1ed9c20e5374c76c..1eb82c4c712e6c68292e45c2617f3f7af0c7877a 100644 (file)
@@ -48,7 +48,7 @@ DEFINE_SPINLOCK(rio_global_list_lock);
 static int next_destid = 0;
 static int next_switchid = 0;
 static int next_net = 0;
-static int next_comptag;
+static int next_comptag = 1;
 
 static struct timer_list rio_enum_timer =
 TIMER_INITIALIZER(rio_enum_timeout, 0, 0);
@@ -121,27 +121,6 @@ static int rio_clear_locks(struct rio_mport *port)
        u32 result;
        int ret = 0;
 
-       /* Assign component tag to all devices */
-       next_comptag = 1;
-       rio_local_write_config_32(port, RIO_COMPONENT_TAG_CSR, next_comptag++);
-
-       list_for_each_entry(rdev, &rio_devices, global_list) {
-               /* Mark device as discovered */
-               rio_read_config_32(rdev,
-                                  rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR,
-                                  &result);
-               rio_write_config_32(rdev,
-                                   rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR,
-                                   result | RIO_PORT_GEN_DISCOVERED);
-
-               rio_write_config_32(rdev, RIO_COMPONENT_TAG_CSR, next_comptag);
-               rdev->comp_tag = next_comptag++;
-               if (next_comptag >= 0x10000) {
-                       pr_err("RIO: Component Tag Counter Overflow\n");
-                       break;
-               }
-       }
-
        /* Release host device id locks */
        rio_local_write_config_32(port, RIO_HOST_DID_LOCK_CSR,
                                  port->host_deviceid);
@@ -162,6 +141,15 @@ static int rio_clear_locks(struct rio_mport *port)
                               rdev->vid, rdev->did);
                        ret = -EINVAL;
                }
+
+               /* Mark device as discovered and enable master */
+               rio_read_config_32(rdev,
+                                  rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR,
+                                  &result);
+               result |= RIO_PORT_GEN_DISCOVERED | RIO_PORT_GEN_MASTER;
+               rio_write_config_32(rdev,
+                                   rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR,
+                                   result);
        }
 
        return ret;
@@ -420,11 +408,27 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
                                                hopcount, RIO_EFB_ERR_MGMNT);
        }
 
+       if (rdev->pef & (RIO_PEF_SWITCH | RIO_PEF_MULTIPORT)) {
+               rio_mport_read_config_32(port, destid, hopcount,
+                                        RIO_SWP_INFO_CAR, &rdev->swpinfo);
+       }
+
        rio_mport_read_config_32(port, destid, hopcount, RIO_SRC_OPS_CAR,
                                 &rdev->src_ops);
        rio_mport_read_config_32(port, destid, hopcount, RIO_DST_OPS_CAR,
                                 &rdev->dst_ops);
 
+       if (do_enum) {
+               /* Assign component tag to device */
+               if (next_comptag >= 0x10000) {
+                       pr_err("RIO: Component Tag Counter Overflow\n");
+                       goto cleanup;
+               }
+               rio_mport_write_config_32(port, destid, hopcount,
+                                         RIO_COMPONENT_TAG_CSR, next_comptag);
+               rdev->comp_tag = next_comptag++;
+       }
+
        if (rio_device_has_destid(port, rdev->src_ops, rdev->dst_ops)) {
                if (do_enum) {
                        rio_set_device_id(port, destid, hopcount, next_destid);
@@ -439,9 +443,10 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
 
        /* If a PE has both switch and other functions, show it as a switch */
        if (rio_is_switch(rdev)) {
-               rio_mport_read_config_32(port, destid, hopcount,
-                                        RIO_SWP_INFO_CAR, &rdev->swpinfo);
-               rswitch = kzalloc(sizeof(struct rio_switch), GFP_KERNEL);
+               rswitch = kzalloc(sizeof(*rswitch) +
+                                 RIO_GET_TOTAL_PORTS(rdev->swpinfo) *
+                                 sizeof(rswitch->nextdev[0]),
+                                 GFP_KERNEL);
                if (!rswitch)
                        goto cleanup;
                rswitch->switchid = next_switchid;
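
The switch descriptor is now allocated with a trailing nextdev[] array sized by the port count from swpinfo, so the per-port link pointers live in the same allocation as the descriptor. A compact model of the pattern (illustrative types; C99 spells the kernel's zero-length array as a flexible array member):

#include <stdlib.h>

struct dev;                             /* opaque link partner */

struct sw {
        unsigned int switchid;
        struct dev *nextdev[];          /* one slot per switch port */
};

/* Mirrors kzalloc(sizeof(*rswitch) + nports * sizeof(nextdev[0])). */
static struct sw *alloc_sw(unsigned int nports)
{
        return calloc(1, sizeof(struct sw) + nports * sizeof(struct dev *));
}

int main(void)
{
        struct sw *s = alloc_sw(18);    /* e.g. an 18-port switch */

        free(s);
        return 0;
}
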
@@ -458,6 +463,7 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
                                rdid++)
                        rswitch->route_table[rdid] = RIO_INVALID_ROUTE;
                rdev->rswitch = rswitch;
+               rswitch->rdev = rdev;
                dev_set_name(&rdev->dev, "%02x:s:%04x", rdev->net->id,
                             rdev->rswitch->switchid);
                rio_switch_init(rdev, do_enum);
@@ -478,6 +484,7 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
        }
 
        rdev->dev.bus = &rio_bus_type;
+       rdev->dev.parent = &rio_bus;
 
        device_initialize(&rdev->dev);
        rdev->dev.release = rio_release_dev;
@@ -717,87 +724,54 @@ static u16 rio_get_host_deviceid_lock(struct rio_mport *port, u8 hopcount)
        return (u16) (result & 0xffff);
 }
 
-/**
- * rio_get_swpinfo_inport- Gets the ingress port number
- * @mport: Master port to send transaction
- * @destid: Destination ID associated with the switch
- * @hopcount: Number of hops to the device
- *
- * Returns port number being used to access the switch device.
- */
-static u8
-rio_get_swpinfo_inport(struct rio_mport *mport, u16 destid, u8 hopcount)
-{
-       u32 result;
-
-       rio_mport_read_config_32(mport, destid, hopcount, RIO_SWP_INFO_CAR,
-                                &result);
-
-       return (u8) (result & 0xff);
-}
-
-/**
- * rio_get_swpinfo_tports- Gets total number of ports on the switch
- * @mport: Master port to send transaction
- * @destid: Destination ID associated with the switch
- * @hopcount: Number of hops to the device
- *
- * Returns total numbers of ports implemented by the switch device.
- */
-static u8 rio_get_swpinfo_tports(struct rio_mport *mport, u16 destid,
-                                u8 hopcount)
-{
-       u32 result;
-
-       rio_mport_read_config_32(mport, destid, hopcount, RIO_SWP_INFO_CAR,
-                                &result);
-
-       return RIO_GET_TOTAL_PORTS(result);
-}
-
-/**
- * rio_net_add_mport- Add a master port to a RIO network
- * @net: RIO network
- * @port: Master port to add
- *
- * Adds a master port to the network list of associated master
- * ports..
- */
-static void rio_net_add_mport(struct rio_net *net, struct rio_mport *port)
-{
-       spin_lock(&rio_global_list_lock);
-       list_add_tail(&port->nnode, &net->mports);
-       spin_unlock(&rio_global_list_lock);
-}
-
 /**
  * rio_enum_peer- Recursively enumerate a RIO network through a master port
  * @net: RIO network being enumerated
  * @port: Master port to send transactions
  * @hopcount: Number of hops into the network
+ * @prev: Previous RIO device connected to the enumerated one
+ * @prev_port: Port on previous RIO device
  *
  * Recursively enumerates a RIO network.  Transactions are sent via the
  * master port passed in @port.
  */
 static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
-                        u8 hopcount)
+                        u8 hopcount, struct rio_dev *prev, int prev_port)
 {
        int port_num;
-       int num_ports;
        int cur_destid;
        int sw_destid;
        int sw_inport;
        struct rio_dev *rdev;
        u16 destid;
+       u32 regval;
        int tmp;
 
+       if (rio_mport_chk_dev_access(port,
+                       RIO_ANY_DESTID(port->sys_size), hopcount)) {
+               pr_debug("RIO: device access check failed\n");
+               return -1;
+       }
+
        if (rio_get_host_deviceid_lock(port, hopcount) == port->host_deviceid) {
                pr_debug("RIO: PE already discovered by this host\n");
                /*
                 * Already discovered by this host. Add it as another
-                * master port for the current network.
+                * link to the existing device.
                 */
-               rio_net_add_mport(net, port);
+               rio_mport_read_config_32(port, RIO_ANY_DESTID(port->sys_size),
+                               hopcount, RIO_COMPONENT_TAG_CSR, &regval);
+
+               if (regval) {
+                       rdev = rio_get_comptag((regval & 0xffff), NULL);
+
+                       if (rdev && prev && rio_is_switch(prev)) {
+                               pr_debug("RIO: redundant path to %s\n",
+                                        rio_name(rdev));
+                               prev->rswitch->nextdev[prev_port] = rdev;
+                       }
+               }
+
                return 0;
        }
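
When the walk reaches a PE already owned by this host, the code above recovers the existing rio_dev by its component tag and records the extra link on the parent switch port instead of duplicating the device. A standalone model of that lookup (list types are illustrative; rio_get_comptag() performs the same linear scan over the global device list):

#include <stddef.h>
#include <stdint.h>

struct dev {
        uint32_t comptag;
        struct dev *next;               /* global device list linkage */
};

/* Linear search by component tag, as rio_get_comptag() does. */
static struct dev *get_comptag(struct dev *head, uint32_t tag)
{
        struct dev *d;

        for (d = head; d != NULL; d = d->next)
                if (d->comptag == tag)
                        return d;
        return NULL;
}

int main(void)
{
        struct dev a = { 0x0001, NULL };
        struct dev b = { 0x0002, &a };

        return get_comptag(&b, 0x0001) == &a ? 0 : 1;
}
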
 
@@ -828,13 +802,15 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
        if (rdev) {
                /* Add device to the global and bus/net specific list. */
                list_add_tail(&rdev->net_list, &net->devices);
+               rdev->prev = prev;
+               if (prev && rio_is_switch(prev))
+                       prev->rswitch->nextdev[prev_port] = rdev;
        } else
                return -1;
 
        if (rio_is_switch(rdev)) {
                next_switchid++;
-               sw_inport = rio_get_swpinfo_inport(port,
-                               RIO_ANY_DESTID(port->sys_size), hopcount);
+               sw_inport = RIO_GET_PORT_NUM(rdev->swpinfo);
                rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE,
                                    port->host_deviceid, sw_inport, 0);
                rdev->rswitch->route_table[port->host_deviceid] = sw_inport;
@@ -847,14 +823,14 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
                        rdev->rswitch->route_table[destid] = sw_inport;
                }
 
-               num_ports =
-                   rio_get_swpinfo_tports(port, RIO_ANY_DESTID(port->sys_size),
-                                               hopcount);
                pr_debug(
                    "RIO: found %s (vid %4.4x did %4.4x) with %d ports\n",
-                   rio_name(rdev), rdev->vid, rdev->did, num_ports);
+                   rio_name(rdev), rdev->vid, rdev->did,
+                   RIO_GET_TOTAL_PORTS(rdev->swpinfo));
                sw_destid = next_destid;
-               for (port_num = 0; port_num < num_ports; port_num++) {
+               for (port_num = 0;
+                    port_num < RIO_GET_TOTAL_PORTS(rdev->swpinfo);
+                    port_num++) {
                        /* Enable Input Output Port (transmitter receiver) */
                        rio_enable_rx_tx_port(port, 0,
                                              RIO_ANY_DESTID(port->sys_size),
@@ -879,7 +855,8 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
                                                RIO_ANY_DESTID(port->sys_size),
                                                port_num, 0);
 
-                               if (rio_enum_peer(net, port, hopcount + 1) < 0)
+                               if (rio_enum_peer(net, port, hopcount + 1,
+                                                 rdev, port_num) < 0)
                                        return -1;
 
                                /* Update routing tables */
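
rio_enum_peer() now carries the (parent device, parent port) pair through the recursion so that links back to already-known devices can be recorded. A minimal model of the depth-first walk itself (all types hypothetical; the real walk relies on host device ID locks, not a visited flag, to avoid revisiting nodes):

#include <stdio.h>

#define MAX_PORTS 16

struct node {
        int nports;
        struct node *next[MAX_PORTS];   /* link partner per port, or NULL */
};

/* Visit a node, then recurse through each connected port with an
 * incremented hopcount, remembering where we came from. */
static int enum_peer(struct node *n, int hopcount,
                     struct node *prev, int prev_port)
{
        int p;

        (void)prev;     /* the driver uses this to fill rswitch->nextdev[] */
        printf("visit at hop %d via parent port %d\n", hopcount, prev_port);
        for (p = 0; p < n->nports; p++)
                if (n->next[p] &&
                    enum_peer(n->next[p], hopcount + 1, n, p) < 0)
                        return -1;
        return 0;
}

int main(void)
{
        struct node leaf = { 0, { NULL } };
        struct node sw = { 2, { &leaf, NULL } };

        return enum_peer(&sw, 0, NULL, 0);
}
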
@@ -945,10 +922,11 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
  */
 static int rio_enum_complete(struct rio_mport *port)
 {
-       u32 tag_csr;
+       u32 regval;
 
-       rio_local_read_config_32(port, RIO_COMPONENT_TAG_CSR, &tag_csr);
-       return (tag_csr & 0xffff) ? 1 : 0;
+       rio_local_read_config_32(port, port->phys_efptr + RIO_PORT_GEN_CTL_CSR,
+                                &regval);
+       return (regval & RIO_PORT_GEN_MASTER) ? 1 : 0;
 }
 
 /**
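
rio_enum_complete() now tests the MASTER bit in the Port General Control CSR instead of a nonzero component tag, since every device receives a tag during enumeration regardless of which host wins. A compact model of the new test (the bit value is RIO_PORT_GEN_MASTER's standard position, assumed here for illustration):

#include <stdint.h>

#define PORT_GEN_MASTER 0x40000000u     /* assumed RIO_PORT_GEN_MASTER */

static int enum_complete(uint32_t gen_ctl)
{
        return (gen_ctl & PORT_GEN_MASTER) ? 1 : 0;
}

int main(void)
{
        return enum_complete(0x60000000u) ? 0 : 1;
}
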
@@ -966,7 +944,6 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
              u8 hopcount)
 {
        u8 port_num, route_port;
-       int num_ports;
        struct rio_dev *rdev;
        u16 ndestid;
 
@@ -983,13 +960,14 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
                /* Associated destid is how we accessed this switch */
                rdev->rswitch->destid = destid;
 
-               num_ports = rio_get_swpinfo_tports(port, destid, hopcount);
                pr_debug(
                    "RIO: found %s (vid %4.4x did %4.4x) with %d ports\n",
-                   rio_name(rdev), rdev->vid, rdev->did, num_ports);
-               for (port_num = 0; port_num < num_ports; port_num++) {
-                       if (rio_get_swpinfo_inport(port, destid, hopcount) ==
-                           port_num)
+                   rio_name(rdev), rdev->vid, rdev->did,
+                   RIO_GET_TOTAL_PORTS(rdev->swpinfo));
+               for (port_num = 0;
+                    port_num < RIO_GET_TOTAL_PORTS(rdev->swpinfo);
+                    port_num++) {
+                       if (RIO_GET_PORT_NUM(rdev->swpinfo) == port_num)
                                continue;
 
                        if (rio_sport_is_active
@@ -1011,6 +989,8 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
                                                break;
                                }
 
+                               if (ndestid == RIO_ANY_DESTID(port->sys_size))
+                                       continue;
                                rio_unlock_device(port, destid, hopcount);
                                if (rio_disc_peer
                                    (net, port, ndestid, hopcount + 1) < 0)
@@ -1108,8 +1088,7 @@ static void rio_update_route_tables(struct rio_mport *port)
                                if (rswitch->destid == destid)
                                        continue;
 
-                               sport = rio_get_swpinfo_inport(port,
-                                               rswitch->destid, rswitch->hopcount);
+                               sport = RIO_GET_PORT_NUM(rswitch->rdev->swpinfo);
 
                                if (rswitch->add_entry) {
                                        rio_route_add_entry(port, rswitch,
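
The removed rio_get_swpinfo_inport()/rio_get_swpinfo_tports() helpers re-read the Switch Port Information CAR over the wire on every call; the register is now read once in rio_setup_device() and cached in rdev->swpinfo. The layout the RIO_GET_* macros decode (total ports in bits 15:8, ingress port in bits 7:0) can be modeled standalone:

#include <stdint.h>
#include <stdio.h>

/* Shapes mirror RIO_GET_TOTAL_PORTS() and RIO_GET_PORT_NUM(). */
#define GET_TOTAL_PORTS(swpinfo)        (((swpinfo) >> 8) & 0xffu)
#define GET_PORT_NUM(swpinfo)           ((swpinfo) & 0xffu)

int main(void)
{
        uint32_t swpinfo = 0x00001204;  /* 18 ports, entered on port 4 */

        printf("ports=%u inport=%u\n",
               (unsigned)GET_TOTAL_PORTS(swpinfo),
               (unsigned)GET_PORT_NUM(swpinfo));
        return 0;
}
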
@@ -1184,7 +1163,11 @@ int __devinit rio_enum_mport(struct rio_mport *mport)
                /* Enable Input Output Port (transmitter receiver) */
                rio_enable_rx_tx_port(mport, 1, 0, 0, 0);
 
-               if (rio_enum_peer(net, mport, 0) < 0) {
+               /* Set component tag for host */
+               rio_local_write_config_32(mport, RIO_COMPONENT_TAG_CSR,
+                                         next_comptag++);
+
+               if (rio_enum_peer(net, mport, 0, NULL, 0) < 0) {
                        /* A higher priority host won enumeration, bail. */
                        printk(KERN_INFO
                               "RIO: master port %d device has lost enumeration to a remote host\n",
index 00b475658356f7b66bebeba2d735bc6de848de09..137ed93ee33fefc50b1063036dfc8e9e5e2e9b12 100644 (file)
@@ -40,9 +40,6 @@ static ssize_t routes_show(struct device *dev, struct device_attribute *attr, ch
        char *str = buf;
        int i;
 
-       if (!rdev->rswitch)
-               goto out;
-
        for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size);
                        i++) {
                if (rdev->rswitch->route_table[i] == RIO_INVALID_ROUTE)
@@ -52,7 +49,6 @@ static ssize_t routes_show(struct device *dev, struct device_attribute *attr, ch
                            rdev->rswitch->route_table[i]);
        }
 
-      out:
        return (str - buf);
 }
 
@@ -63,10 +59,11 @@ struct device_attribute rio_dev_attrs[] = {
        __ATTR_RO(asm_did),
        __ATTR_RO(asm_vid),
        __ATTR_RO(asm_rev),
-       __ATTR_RO(routes),
        __ATTR_NULL,
 };
 
+static DEVICE_ATTR(routes, S_IRUGO, routes_show, NULL);
+
 static ssize_t
 rio_read_config(struct file *filp, struct kobject *kobj,
                struct bin_attribute *bin_attr,
@@ -218,7 +215,17 @@ int rio_create_sysfs_dev_files(struct rio_dev *rdev)
 {
        int err = 0;
 
-       err = sysfs_create_bin_file(&rdev->dev.kobj, &rio_config_attr);
+       err = device_create_bin_file(&rdev->dev, &rio_config_attr);
+
+       if (!err && rdev->rswitch) {
+               err = device_create_file(&rdev->dev, &dev_attr_routes);
+               if (!err && rdev->rswitch->sw_sysfs)
+                       err = rdev->rswitch->sw_sysfs(rdev, RIO_SW_SYSFS_CREATE);
+       }
+
+       if (err)
+               pr_warning("RIO: Failed to create attribute file(s) for %s\n",
+                          rio_name(rdev));
 
        return err;
 }
@@ -231,5 +238,10 @@ int rio_create_sysfs_dev_files(struct rio_dev *rdev)
  */
 void rio_remove_sysfs_dev_files(struct rio_dev *rdev)
 {
-       sysfs_remove_bin_file(&rdev->dev.kobj, &rio_config_attr);
+       device_remove_bin_file(&rdev->dev, &rio_config_attr);
+       if (rdev->rswitch) {
+               device_remove_file(&rdev->dev, &dev_attr_routes);
+               if (rdev->rswitch->sw_sysfs)
+                       rdev->rswitch->sw_sysfs(rdev, RIO_SW_SYSFS_REMOVE);
+       }
 }
index 74e9d22d95fb73410f94be6853bdafc81ee2ab1f..68cf0c99138a94517f3f2e9dae03de4149049092 100644 (file)
@@ -443,7 +443,7 @@ rio_mport_get_physefb(struct rio_mport *port, int local,
  * @from is not %NULL, searches continue from next device on the global
  * list.
  */
-static struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from)
+struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from)
 {
        struct list_head *n;
        struct rio_dev *rdev;
@@ -494,6 +494,232 @@ int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock)
        return 0;
 }
 
+/**
+ * rio_chk_dev_route - Validate route to the specified device.
+ * @rdev:  RIO device that failed to respond
+ * @nrdev: Last active device on the route to rdev
+ * @npnum: nrdev's port number on the route to rdev
+ *
+ * Follows a route to the specified RIO device to determine the last available
+ * device (and corresponding RIO port) on the route.
+ */
+static int
+rio_chk_dev_route(struct rio_dev *rdev, struct rio_dev **nrdev, int *npnum)
+{
+       u32 result;
+       int p_port, dstid, rc = -EIO;
+       struct rio_dev *prev = NULL;
+
+       /* Find switch with failed RIO link */
+       while (rdev->prev && (rdev->prev->pef & RIO_PEF_SWITCH)) {
+               if (!rio_read_config_32(rdev->prev, RIO_DEV_ID_CAR, &result)) {
+                       prev = rdev->prev;
+                       break;
+               }
+               rdev = rdev->prev;
+       }
+
+       if (prev == NULL)
+               goto err_out;
+
+       dstid = (rdev->pef & RIO_PEF_SWITCH) ?
+                       rdev->rswitch->destid : rdev->destid;
+       p_port = prev->rswitch->route_table[dstid];
+
+       if (p_port != RIO_INVALID_ROUTE) {
+               pr_debug("RIO: link failed on [%s]-P%d\n",
+                        rio_name(prev), p_port);
+               *nrdev = prev;
+               *npnum = p_port;
+               rc = 0;
+       } else
+               pr_debug("RIO: failed to trace route to %s\n", rio_name(rdev));
+err_out:
+       return rc;
+}
+
+/**
+ * rio_mport_chk_dev_access - Validate access to the specified device.
+ * @mport: Master port to send transactions
+ * @destid: Device destination ID in network
+ * @hopcount: Number of hops into the network
+ */
+int
+rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid, u8 hopcount)
+{
+       int i = 0;
+       u32 tmp;
+
+       while (rio_mport_read_config_32(mport, destid, hopcount,
+                                       RIO_DEV_ID_CAR, &tmp)) {
+               i++;
+               if (i == RIO_MAX_CHK_RETRY)
+                       return -EIO;
+               mdelay(1);
+       }
+
+       return 0;
+}
+
+/**
+ * rio_chk_dev_access - Validate access to the specified device.
+ * @rdev: Pointer to RIO device control structure
+ */
+static int rio_chk_dev_access(struct rio_dev *rdev)
+{
+       u8 hopcount = 0xff;
+       u16 destid = rdev->destid;
+
+       if (rdev->rswitch) {
+               destid = rdev->rswitch->destid;
+               hopcount = rdev->rswitch->hopcount;
+       }
+
+       return rio_mport_chk_dev_access(rdev->net->hport, destid, hopcount);
+}
+
+/**
+ * rio_get_input_status - Sends a Link-Request/Input-Status control symbol and
+ *                        returns link-response (if requested).
+ * @rdev: RIO devive to issue Input-status command
+ * @pnum: Device port number to issue the command
+ * @lnkresp: Response from a link partner
+ */
+static int
+rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp)
+{
+       struct rio_mport *mport = rdev->net->hport;
+       u16 destid = rdev->rswitch->destid;
+       u8 hopcount = rdev->rswitch->hopcount;
+       u32 regval;
+       int checkcount;
+
+       if (lnkresp) {
+               /* Read from link maintenance response register
+                * to clear valid bit */
+               rio_mport_read_config_32(mport, destid, hopcount,
+                       rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum),
+                       &regval);
+               udelay(50);
+       }
+
+       /* Issue Input-status command */
+       rio_mport_write_config_32(mport, destid, hopcount,
+               rdev->phys_efptr + RIO_PORT_N_MNT_REQ_CSR(pnum),
+               RIO_MNT_REQ_CMD_IS);
+
+       /* Exit if the response is not expected */
+       if (lnkresp == NULL)
+               return 0;
+
+       checkcount = 3;
+       while (checkcount--) {
+               udelay(50);
+               rio_mport_read_config_32(mport, destid, hopcount,
+                       rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum),
+                       &regval);
+               if (regval & RIO_PORT_N_MNT_RSP_RVAL) {
+                       *lnkresp = regval;
+                       return 0;
+               }
+       }
+
+       return -EIO;
+}
+
+/**
+ * rio_clr_err_stopped - Clears port Error-stopped states.
+ * @rdev: Pointer to RIO device control structure
+ * @pnum: Switch port number to clear errors
+ * @err_status: port error status (if 0 reads register from device)
+ */
+static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status)
+{
+       struct rio_mport *mport = rdev->net->hport;
+       u16 destid = rdev->rswitch->destid;
+       u8 hopcount = rdev->rswitch->hopcount;
+       struct rio_dev *nextdev = rdev->rswitch->nextdev[pnum];
+       u32 regval;
+       u32 far_ackid, far_linkstat, near_ackid;
+
+       if (err_status == 0)
+               rio_mport_read_config_32(mport, destid, hopcount,
+                       rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
+                       &err_status);
+
+       if (err_status & RIO_PORT_N_ERR_STS_PW_OUT_ES) {
+               pr_debug("RIO_EM: servicing Output Error-Stopped state\n");
+               /*
+                * Send a Link-Request/Input-Status control symbol
+                */
+               if (rio_get_input_status(rdev, pnum, &regval)) {
+                       pr_debug("RIO_EM: Input-status response timeout\n");
+                       goto rd_err;
+               }
+
+               pr_debug("RIO_EM: SP%d Input-status response=0x%08x\n",
+                        pnum, regval);
+               far_ackid = (regval & RIO_PORT_N_MNT_RSP_ASTAT) >> 5;
+               far_linkstat = regval & RIO_PORT_N_MNT_RSP_LSTAT;
+               rio_mport_read_config_32(mport, destid, hopcount,
+                       rdev->phys_efptr + RIO_PORT_N_ACK_STS_CSR(pnum),
+                       &regval);
+               pr_debug("RIO_EM: SP%d_ACK_STS_CSR=0x%08x\n", pnum, regval);
+               near_ackid = (regval & RIO_PORT_N_ACK_INBOUND) >> 24;
+               pr_debug("RIO_EM: SP%d far_ackID=0x%02x far_linkstat=0x%02x" \
+                        " near_ackID=0x%02x\n",
+                       pnum, far_ackid, far_linkstat, near_ackid);
+
+               /*
+                * If required, synchronize ackIDs of near and
+                * far sides.
+                */
+               if ((far_ackid != ((regval & RIO_PORT_N_ACK_OUTSTAND) >> 8)) ||
+                   (far_ackid != (regval & RIO_PORT_N_ACK_OUTBOUND))) {
+                       /* Align near outstanding/outbound ackIDs with
+                        * far inbound.
+                        */
+                       rio_mport_write_config_32(mport, destid,
+                               hopcount, rdev->phys_efptr +
+                                       RIO_PORT_N_ACK_STS_CSR(pnum),
+                               (near_ackid << 24) |
+                                       (far_ackid << 8) | far_ackid);
+                       /* Align far outstanding/outbound ackIDs with
+                        * near inbound.
+                        */
+                       far_ackid++;
+                       if (nextdev)
+                               rio_write_config_32(nextdev,
+                                       nextdev->phys_efptr +
+                                       RIO_PORT_N_ACK_STS_CSR(RIO_GET_PORT_NUM(nextdev->swpinfo)),
+                                       (far_ackid << 24) |
+                                       (near_ackid << 8) | near_ackid);
+                       else
+                               pr_debug("RIO_EM: Invalid nextdev pointer (NULL)\n");
+               }
+rd_err:
+               rio_mport_read_config_32(mport, destid, hopcount,
+                       rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
+                       &err_status);
+               pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status);
+       }
+
+       if ((err_status & RIO_PORT_N_ERR_STS_PW_INP_ES) && nextdev) {
+               pr_debug("RIO_EM: servicing Input Error-Stopped state\n");
+               rio_get_input_status(nextdev,
+                                    RIO_GET_PORT_NUM(nextdev->swpinfo), NULL);
+               udelay(50);
+
+               rio_mport_read_config_32(mport, destid, hopcount,
+                       rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
+                       &err_status);
+               pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status);
+       }
+
+       return (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES |
+                             RIO_PORT_N_ERR_STS_PW_INP_ES)) ? 1 : 0;
+}
+
 /**
  * rio_inb_pwrite_handler - process inbound port-write message
  * @pw_msg: pointer to inbound port-write message
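
The heart of the recovery code added above is rio_clr_err_stopped(): after probing the link with an Input-Status request, it realigns the near side's outstanding/outbound ackIDs to the far side's inbound ackID, then advances the far side past the dropped packet. A standalone sketch of the CSR value composition, following the (inbound << 24) | (outstanding << 8) | outbound layout used by the code:

#include <stdint.h>
#include <stdio.h>

/* Compose a Port n Local ackID Status CSR write that keeps this side's
 * inbound ackID and aligns outstanding/outbound with the partner's
 * inbound one, as rio_clr_err_stopped() does. */
static uint32_t align_ackids(uint32_t my_inbound, uint32_t partner_inbound)
{
        return ((my_inbound & 0x3fu) << 24) |
               ((partner_inbound & 0x3fu) << 8) |
                (partner_inbound & 0x3fu);
}

int main(void)
{
        /* Near side expects 0x0a inbound; far side expects 0x05. */
        printf("near CSR = 0x%08x\n", (unsigned)align_ackids(0x0a, 0x05));
        return 0;
}
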
@@ -507,13 +733,13 @@ int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
        struct rio_mport *mport;
        u8 hopcount;
        u16 destid;
-       u32 err_status;
+       u32 err_status, em_perrdet, em_ltlerrdet;
        int rc, portnum;
 
        rdev = rio_get_comptag(pw_msg->em.comptag, NULL);
        if (rdev == NULL) {
-               /* Someting bad here (probably enumeration error) */
-               pr_err("RIO: %s No matching device for CTag 0x%08x\n",
+               /* Device removed or enumeration error */
+               pr_debug("RIO: %s No matching device for CTag 0x%08x\n",
                        __func__, pw_msg->em.comptag);
                return -EIO;
        }
@@ -524,12 +750,11 @@ int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
        {
        u32 i;
        for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32);) {
-                       pr_debug("0x%02x: %08x %08x %08x %08x",
+                       pr_debug("0x%02x: %08x %08x %08x %08x\n",
                                 i*4, pw_msg->raw[i], pw_msg->raw[i + 1],
                                 pw_msg->raw[i + 2], pw_msg->raw[i + 3]);
                        i += 4;
        }
-       pr_debug("\n");
        }
 #endif
 
@@ -545,6 +770,26 @@ int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
                        return 0;
        }
 
+       portnum = pw_msg->em.is_port & 0xFF;
+
+       /* Check if device and route to it are functional:
+        * Sometimes devices may send PW message(s) just before being
+        * powered down (or before the link is lost).
+        */
+       if (rio_chk_dev_access(rdev)) {
+               pr_debug("RIO: device access failed - get link partner\n");
+               /* Scan route to the device and identify failed link.
+                * This will replace device and port reported in PW message.
+                * PW message should not be used after this point.
+                */
+               if (rio_chk_dev_route(rdev, &rdev, &portnum)) {
+                       pr_err("RIO: Route trace for %s failed\n",
+                               rio_name(rdev));
+                       return -EIO;
+               }
+               pw_msg = NULL;
+       }
+
        /* For End-point devices processing stops here */
        if (!(rdev->pef & RIO_PEF_SWITCH))
                return 0;
@@ -562,9 +807,6 @@ int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
        /*
         * Process the port-write notification from switch
         */
-
-       portnum = pw_msg->em.is_port & 0xFF;
-
        if (rdev->rswitch->em_handle)
                rdev->rswitch->em_handle(rdev, portnum);
 
@@ -573,29 +815,28 @@ int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
                        &err_status);
        pr_debug("RIO_PW: SP%d_ERR_STS_CSR=0x%08x\n", portnum, err_status);
 
-       if (pw_msg->em.errdetect) {
-               pr_debug("RIO_PW: RIO_EM_P%d_ERR_DETECT=0x%08x\n",
-                        portnum, pw_msg->em.errdetect);
-               /* Clear EM Port N Error Detect CSR */
-               rio_mport_write_config_32(mport, destid, hopcount,
-                       rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), 0);
-       }
+       if (err_status & RIO_PORT_N_ERR_STS_PORT_OK) {
 
-       if (pw_msg->em.ltlerrdet) {
-               pr_debug("RIO_PW: RIO_EM_LTL_ERR_DETECT=0x%08x\n",
-                        pw_msg->em.ltlerrdet);
-               /* Clear EM L/T Layer Error Detect CSR */
-               rio_mport_write_config_32(mport, destid, hopcount,
-                       rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, 0);
-       }
+               if (!(rdev->rswitch->port_ok & (1 << portnum))) {
+                       rdev->rswitch->port_ok |= (1 << portnum);
+                       rio_set_port_lockout(rdev, portnum, 0);
+                       /* Schedule Insertion Service */
+                       pr_debug("RIO_PW: Device Insertion on [%s]-P%d\n",
+                              rio_name(rdev), portnum);
+               }
 
-       /* Clear Port Errors */
-       rio_mport_write_config_32(mport, destid, hopcount,
-                       rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
-                       err_status & RIO_PORT_N_ERR_STS_CLR_MASK);
+               /* Clear error-stopped states (if reported).
+                * Depending on the link partner state, two attempts
+                * may be needed for successful recovery.
+                */
+               if (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES |
+                                 RIO_PORT_N_ERR_STS_PW_INP_ES)) {
+                       if (rio_clr_err_stopped(rdev, portnum, err_status))
+                               rio_clr_err_stopped(rdev, portnum, 0);
+               }
+       } else { /* if (err_status & RIO_PORT_N_ERR_STS_PORT_UNINIT) */
 
-       if (rdev->rswitch->port_ok & (1 << portnum)) {
-               if (err_status & RIO_PORT_N_ERR_STS_PORT_UNINIT) {
+               if (rdev->rswitch->port_ok & (1 << portnum)) {
                        rdev->rswitch->port_ok &= ~(1 << portnum);
                        rio_set_port_lockout(rdev, portnum, 1);
 
@@ -608,21 +849,32 @@ int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
                        pr_debug("RIO_PW: Device Extraction on [%s]-P%d\n",
                               rio_name(rdev), portnum);
                }
-       } else {
-               if (err_status & RIO_PORT_N_ERR_STS_PORT_OK) {
-                       rdev->rswitch->port_ok |= (1 << portnum);
-                       rio_set_port_lockout(rdev, portnum, 0);
+       }
 
-                       /* Schedule Insertion Service */
-                       pr_debug("RIO_PW: Device Insertion on [%s]-P%d\n",
-                              rio_name(rdev), portnum);
-               }
+       rio_mport_read_config_32(mport, destid, hopcount,
+               rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), &em_perrdet);
+       if (em_perrdet) {
+               pr_debug("RIO_PW: RIO_EM_P%d_ERR_DETECT=0x%08x\n",
+                        portnum, em_perrdet);
+               /* Clear EM Port N Error Detect CSR */
+               rio_mport_write_config_32(mport, destid, hopcount,
+                       rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), 0);
+       }
+
+       rio_mport_read_config_32(mport, destid, hopcount,
+               rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, &em_ltlerrdet);
+       if (em_ltlerrdet) {
+               pr_debug("RIO_PW: RIO_EM_LTL_ERR_DETECT=0x%08x\n",
+                        em_ltlerrdet);
+               /* Clear EM L/T Layer Error Detect CSR */
+               rio_mport_write_config_32(mport, destid, hopcount,
+                       rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, 0);
        }
 
-       /* Clear Port-Write Pending bit */
+       /* Clear remaining error bits and Port-Write Pending bit */
        rio_mport_write_config_32(mport, destid, hopcount,
                        rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
-                       RIO_PORT_N_ERR_STS_PW_PEND);
+                       err_status);
 
        return 0;
 }
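
The handler now treats PORT_OK as the primary signal and keys hot insertion/extraction service off state transitions: rdev->rswitch->port_ok caches the last known link state per port, and only an edge triggers lockout changes and service scheduling. A minimal model of that edge detector (hypothetical names):

#include <stdint.h>
#include <stdio.h>

static uint32_t port_ok;                /* cached per-port link state */

/* Returns +1 on an insertion edge, -1 on extraction, 0 if unchanged. */
static int port_edge(int portnum, int link_ok)
{
        uint32_t bit = 1u << portnum;

        if (link_ok && !(port_ok & bit)) {
                port_ok |= bit;
                return 1;
        }
        if (!link_ok && (port_ok & bit)) {
                port_ok &= ~bit;
                return -1;
        }
        return 0;
}

int main(void)
{
        int ins = port_edge(2, 1);      /* insertion edge: +1 */
        int same = port_edge(2, 1);     /* steady state: 0 */
        int ext = port_edge(2, 0);      /* extraction edge: -1 */

        printf("%d %d %d\n", ins, same, ext);
        return 0;
}
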
index f27b7a9c47d2c685269f847141e2fc64c3782e64..b1af414f15e60f8e0cee81270321fa2d6ddf5c84 100644 (file)
@@ -14,6 +14,8 @@
 #include <linux/list.h>
 #include <linux/rio.h>
 
+#define RIO_MAX_CHK_RETRY      3
+
 /* Functions internal to the RIO core code */
 
 extern u32 rio_mport_get_feature(struct rio_mport *mport, int local, u16 destid,
@@ -22,6 +24,8 @@ extern u32 rio_mport_get_physefb(struct rio_mport *port, int local,
                                 u16 destid, u8 hopcount);
 extern u32 rio_mport_get_efb(struct rio_mport *port, int local, u16 destid,
                             u8 hopcount, u32 from);
+extern int rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid,
+                                   u8 hopcount);
 extern int rio_create_sysfs_dev_files(struct rio_dev *rdev);
 extern int rio_enum_mport(struct rio_mport *mport);
 extern int rio_disc_mport(struct rio_mport *mport);
@@ -34,6 +38,7 @@ extern int rio_std_route_get_entry(struct rio_mport *mport, u16 destid,
 extern int rio_std_route_clr_table(struct rio_mport *mport, u16 destid,
                                   u8 hopcount, u16 table);
 extern int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock);
+extern struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from);
 
 /* Structures internal to the RIO core code */
 extern struct device_attribute rio_dev_attrs[];
index 2b4e9b2b66319672c98a3433ba8b092fa0f057c6..f47fee5d4563a44d8ae6e918b8422e111cb5823c 100644 (file)
@@ -20,6 +20,13 @@ config RAPIDIO_TSI568
        ---help---
          Includes support for IDT Tsi568 serial RapidIO switch.
 
+config RAPIDIO_CPS_GEN2
+       bool "IDT CPS Gen.2 SRIO switch support"
+       depends on RAPIDIO
+       default n
+       ---help---
+         Includes support for IDT CPS Gen.2 serial RapidIO switches.
+
 config RAPIDIO_TSI500
        bool "Tsi500 Parallel RapidIO switch support"
        depends on RAPIDIO
index fe4adc3e8d5f317ccfb08020c266138fc50d699f..48d67a6b98c89febb63780c598ad394f2fb15bc8 100644 (file)
@@ -6,6 +6,7 @@ obj-$(CONFIG_RAPIDIO_TSI57X)    += tsi57x.o
 obj-$(CONFIG_RAPIDIO_CPS_XX)   += idtcps.o
 obj-$(CONFIG_RAPIDIO_TSI568)   += tsi568.o
 obj-$(CONFIG_RAPIDIO_TSI500)   += tsi500.o
+obj-$(CONFIG_RAPIDIO_CPS_GEN2) += idt_gen2.o
 
 ifeq ($(CONFIG_RAPIDIO_DEBUG),y)
 EXTRA_CFLAGS += -DDEBUG
diff --git a/drivers/rapidio/switches/idt_gen2.c b/drivers/rapidio/switches/idt_gen2.c
new file mode 100644 (file)
index 0000000..0bb871c
--- /dev/null
@@ -0,0 +1,447 @@
+/*
+ * IDT CPS Gen.2 Serial RapidIO switch family support
+ *
+ * Copyright 2010 Integrated Device Technology, Inc.
+ * Alexandre Bounine <alexandre.bounine@idt.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/rio_ids.h>
+#include <linux/delay.h>
+#include "../rio.h"
+
+#define LOCAL_RTE_CONF_DESTID_SEL      0x010070
+#define LOCAL_RTE_CONF_DESTID_SEL_PSEL 0x0000001f
+
+#define IDT_LT_ERR_REPORT_EN   0x03100c
+
+#define IDT_PORT_ERR_REPORT_EN(n)      (0x031044 + (n)*0x40)
+#define IDT_PORT_ERR_REPORT_EN_BC      0x03ff04
+
+#define IDT_PORT_ISERR_REPORT_EN(n)    (0x03104C + (n)*0x40)
+#define IDT_PORT_ISERR_REPORT_EN_BC    0x03ff0c
+#define IDT_PORT_INIT_TX_ACQUIRED      0x00000020
+
+#define IDT_LANE_ERR_REPORT_EN(n)      (0x038010 + (n)*0x100)
+#define IDT_LANE_ERR_REPORT_EN_BC      0x03ff10
+
+#define IDT_DEV_CTRL_1         0xf2000c
+#define IDT_DEV_CTRL_1_GENPW           0x02000000
+#define IDT_DEV_CTRL_1_PRSTBEH         0x00000001
+
+#define IDT_CFGBLK_ERR_CAPTURE_EN      0x020008
+#define IDT_CFGBLK_ERR_REPORT          0xf20014
+#define IDT_CFGBLK_ERR_REPORT_GENPW            0x00000002
+
+#define IDT_AUX_PORT_ERR_CAP_EN        0x020000
+#define IDT_AUX_ERR_REPORT_EN  0xf20018
+#define IDT_AUX_PORT_ERR_LOG_I2C       0x00000002
+#define IDT_AUX_PORT_ERR_LOG_JTAG      0x00000001
+
+#define        IDT_ISLTL_ADDRESS_CAP   0x021014
+
+#define IDT_RIO_DOMAIN         0xf20020
+#define IDT_RIO_DOMAIN_MASK            0x000000ff
+
+#define IDT_PW_INFO_CSR                0xf20024
+
+#define IDT_SOFT_RESET         0xf20040
+#define IDT_SOFT_RESET_REQ             0x00030097
+
+#define IDT_I2C_MCTRL          0xf20050
+#define IDT_I2C_MCTRL_GENPW            0x04000000
+
+#define IDT_JTAG_CTRL          0xf2005c
+#define IDT_JTAG_CTRL_GENPW            0x00000002
+
+#define IDT_LANE_CTRL(n)       (0xff8000 + (n)*0x100)
+#define IDT_LANE_CTRL_BC       0xffff00
+#define IDT_LANE_CTRL_GENPW            0x00200000
+#define IDT_LANE_DFE_1_BC      0xffff18
+#define IDT_LANE_DFE_2_BC      0xffff1c
+
+#define IDT_PORT_OPS(n)                (0xf40004 + (n)*0x100)
+#define IDT_PORT_OPS_GENPW             0x08000000
+#define IDT_PORT_OPS_PL_ELOG           0x00000040
+#define IDT_PORT_OPS_LL_ELOG           0x00000020
+#define IDT_PORT_OPS_LT_ELOG           0x00000010
+#define IDT_PORT_OPS_BC                0xf4ff04
+
+#define IDT_PORT_ISERR_DET(n)  (0xf40008 + (n)*0x100)
+
+#define IDT_ERR_CAP            0xfd0000
+#define IDT_ERR_CAP_LOG_OVERWR         0x00000004
+
+#define IDT_ERR_RD             0xfd0004
+
+#define IDT_DEFAULT_ROUTE      0xde
+#define IDT_NO_ROUTE           0xdf
+
+static int
+idtg2_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+                      u16 table, u16 route_destid, u8 route_port)
+{
+       /*
+        * Select routing table to update
+        */
+       if (table == RIO_GLOBAL_TABLE)
+               table = 0;
+       else
+               table++;
+
+       rio_mport_write_config_32(mport, destid, hopcount,
+                                 LOCAL_RTE_CONF_DESTID_SEL, table);
+
+       /*
+        * Program destination port for the specified destID
+        */
+       rio_mport_write_config_32(mport, destid, hopcount,
+                                 RIO_STD_RTE_CONF_DESTID_SEL_CSR,
+                                 (u32)route_destid);
+
+       rio_mport_write_config_32(mport, destid, hopcount,
+                                 RIO_STD_RTE_CONF_PORT_SEL_CSR,
+                                 (u32)route_port);
+       udelay(10);
+
+       return 0;
+}
+
+static int
+idtg2_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+                      u16 table, u16 route_destid, u8 *route_port)
+{
+       u32 result;
+
+       /*
+        * Select routing table to read
+        */
+       if (table == RIO_GLOBAL_TABLE)
+               table = 0;
+       else
+               table++;
+
+       rio_mport_write_config_32(mport, destid, hopcount,
+                                 LOCAL_RTE_CONF_DESTID_SEL, table);
+
+       rio_mport_write_config_32(mport, destid, hopcount,
+                                 RIO_STD_RTE_CONF_DESTID_SEL_CSR,
+                                 route_destid);
+
+       rio_mport_read_config_32(mport, destid, hopcount,
+                                RIO_STD_RTE_CONF_PORT_SEL_CSR, &result);
+
+       if (IDT_DEFAULT_ROUTE == (u8)result || IDT_NO_ROUTE == (u8)result)
+               *route_port = RIO_INVALID_ROUTE;
+       else
+               *route_port = (u8)result;
+
+       return 0;
+}
+
+static int
+idtg2_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
+                      u16 table)
+{
+       u32 i;
+
+       /*
+        * Select routing table to read
+        */
+       if (table == RIO_GLOBAL_TABLE)
+               table = 0;
+       else
+               table++;
+
+       rio_mport_write_config_32(mport, destid, hopcount,
+                                 LOCAL_RTE_CONF_DESTID_SEL, table);
+
+       for (i = RIO_STD_RTE_CONF_EXTCFGEN;
+            i <= (RIO_STD_RTE_CONF_EXTCFGEN | 0xff);) {
+               rio_mport_write_config_32(mport, destid, hopcount,
+                       RIO_STD_RTE_CONF_DESTID_SEL_CSR, i);
+               rio_mport_write_config_32(mport, destid, hopcount,
+                       RIO_STD_RTE_CONF_PORT_SEL_CSR,
+                       (IDT_DEFAULT_ROUTE << 24) | (IDT_DEFAULT_ROUTE << 16) |
+                       (IDT_DEFAULT_ROUTE << 8) | IDT_DEFAULT_ROUTE);
+               i += 4;
+       }
+
+       return 0;
+}
+
+
+static int
+idtg2_set_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
+                      u8 sw_domain)
+{
+       /*
+        * Switch domain configuration operates only at global level
+        */
+       rio_mport_write_config_32(mport, destid, hopcount,
+                                 IDT_RIO_DOMAIN, (u32)sw_domain);
+       return 0;
+}
+
+static int
+idtg2_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
+                      u8 *sw_domain)
+{
+       u32 regval;
+
+       /*
+        * Switch domain configuration operates only at global level
+        */
+       rio_mport_read_config_32(mport, destid, hopcount,
+                               IDT_RIO_DOMAIN, &regval);
+
+       *sw_domain = (u8)(regval & 0xff);
+
+       return 0;
+}
+
+static int
+idtg2_em_init(struct rio_dev *rdev)
+{
+       struct rio_mport *mport = rdev->net->hport;
+       u16 destid = rdev->rswitch->destid;
+       u8 hopcount = rdev->rswitch->hopcount;
+       u32 regval;
+       int i, tmp;
+
+       /*
+        * This routine performs device-specific initialization only.
+        * All standard EM configuration should be performed at upper level.
+        */
+
+       pr_debug("RIO: %s [%d:%d]\n", __func__, destid, hopcount);
+
+       /* Set Port-Write info CSR: PRIO=3 and CRF=1 */
+       rio_mport_write_config_32(mport, destid, hopcount,
+                       IDT_PW_INFO_CSR, 0x0000e000);
+
+       /*
+        * Configure LT LAYER error reporting.
+        */
+
+       /* Enable standard (RIO.p8) error reporting */
+       rio_mport_write_config_32(mport, destid, hopcount,
+                       IDT_LT_ERR_REPORT_EN,
+                       REM_LTL_ERR_ILLTRAN | REM_LTL_ERR_UNSOLR |
+                       REM_LTL_ERR_UNSUPTR);
+
+       /* Use Port-Writes for LT layer error reporting.
+        * Enable per-port reset
+        */
+       rio_mport_read_config_32(mport, destid, hopcount,
+                       IDT_DEV_CTRL_1, &regval);
+       rio_mport_write_config_32(mport, destid, hopcount,
+                       IDT_DEV_CTRL_1,
+                       regval | IDT_DEV_CTRL_1_GENPW | IDT_DEV_CTRL_1_PRSTBEH);
+
+       /*
+        * Configure PORT error reporting.
+        */
+
+       /* Report all RIO.p8 errors supported by device */
+       rio_mport_write_config_32(mport, destid, hopcount,
+                       IDT_PORT_ERR_REPORT_EN_BC, 0x807e8037);
+
+       /* Configure reporting of implementation specific errors/events */
+       rio_mport_write_config_32(mport, destid, hopcount,
+                       IDT_PORT_ISERR_REPORT_EN_BC, IDT_PORT_INIT_TX_ACQUIRED);
+
+       /* Use Port-Writes for port error reporting and enable error logging */
+       tmp = RIO_GET_TOTAL_PORTS(rdev->swpinfo);
+       for (i = 0; i < tmp; i++) {
+               rio_mport_read_config_32(mport, destid, hopcount,
+                               IDT_PORT_OPS(i), &regval);
+               rio_mport_write_config_32(mport, destid, hopcount,
+                               IDT_PORT_OPS(i), regval | IDT_PORT_OPS_GENPW |
+                               IDT_PORT_OPS_PL_ELOG |
+                               IDT_PORT_OPS_LL_ELOG |
+                               IDT_PORT_OPS_LT_ELOG);
+       }
+       /* Overwrite error log if full */
+       rio_mport_write_config_32(mport, destid, hopcount,
+                       IDT_ERR_CAP, IDT_ERR_CAP_LOG_OVERWR);
+
+       /*
+        * Configure LANE error reporting.
+        */
+
+       /* Disable line error reporting */
+       rio_mport_write_config_32(mport, destid, hopcount,
+                       IDT_LANE_ERR_REPORT_EN_BC, 0);
+
+       /* Use Port-Writes for lane error reporting (when enabled)
+        * (do per-lane update because lanes may have different configuration)
+        */
+       tmp = (rdev->did == RIO_DID_IDTCPS1848) ? 48 : 16;
+       for (i = 0; i < tmp; i++) {
+               rio_mport_read_config_32(mport, destid, hopcount,
+                               IDT_LANE_CTRL(i), &regval);
+               rio_mport_write_config_32(mport, destid, hopcount,
+                               IDT_LANE_CTRL(i), regval | IDT_LANE_CTRL_GENPW);
+       }
+
+       /*
+        * Configure AUX error reporting.
+        */
+
+       /* Disable JTAG and I2C Error capture */
+       rio_mport_write_config_32(mport, destid, hopcount,
+                       IDT_AUX_PORT_ERR_CAP_EN, 0);
+
+       /* Disable JTAG and I2C Error reporting/logging */
+       rio_mport_write_config_32(mport, destid, hopcount,
+                       IDT_AUX_ERR_REPORT_EN, 0);
+
+       /* Disable Port-Write notification from JTAG */
+       rio_mport_write_config_32(mport, destid, hopcount,
+                       IDT_JTAG_CTRL, 0);
+
+       /* Disable Port-Write notification from I2C */
+       rio_mport_read_config_32(mport, destid, hopcount,
+                       IDT_I2C_MCTRL, &regval);
+       rio_mport_write_config_32(mport, destid, hopcount,
+                       IDT_I2C_MCTRL,
+                       regval & ~IDT_I2C_MCTRL_GENPW);
+
+       /*
+        * Configure CFG_BLK error reporting.
+        */
+
+       /* Disable Configuration Block error capture */
+       rio_mport_write_config_32(mport, destid, hopcount,
+                       IDT_CFGBLK_ERR_CAPTURE_EN, 0);
+
+       /* Disable Port-Writes for Configuration Block error reporting */
+       rio_mport_read_config_32(mport, destid, hopcount,
+                       IDT_CFGBLK_ERR_REPORT, &regval);
+       rio_mport_write_config_32(mport, destid, hopcount,
+                       IDT_CFGBLK_ERR_REPORT,
+                       regval & ~IDT_CFGBLK_ERR_REPORT_GENPW);
+
+       /* set TVAL = ~50us */
+       rio_mport_write_config_32(mport, destid, hopcount,
+               rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8);
+
+       return 0;
+}
+
+static int
+idtg2_em_handler(struct rio_dev *rdev, u8 portnum)
+{
+       struct rio_mport *mport = rdev->net->hport;
+       u16 destid = rdev->rswitch->destid;
+       u8 hopcount = rdev->rswitch->hopcount;
+       u32 regval, em_perrdet, em_ltlerrdet;
+
+       rio_mport_read_config_32(mport, destid, hopcount,
+               rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, &em_ltlerrdet);
+       if (em_ltlerrdet) {
+               /* Service Logical/Transport Layer Error(s) */
+               if (em_ltlerrdet & REM_LTL_ERR_IMPSPEC) {
+                       /* Implementation specific error reported */
+                       rio_mport_read_config_32(mport, destid, hopcount,
+                                       IDT_ISLTL_ADDRESS_CAP, &regval);
+
+                       pr_debug("RIO: %s Implementation Specific LTL errors" \
+                                " 0x%x @(0x%x)\n",
+                                rio_name(rdev), em_ltlerrdet, regval);
+
+                       /* Clear implementation specific address capture CSR */
+                       rio_mport_write_config_32(mport, destid, hopcount,
+                                       IDT_ISLTL_ADDRESS_CAP, 0);
+
+               }
+       }
+
+       rio_mport_read_config_32(mport, destid, hopcount,
+               rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), &em_perrdet);
+       if (em_perrdet) {
+               /* Service Port-Level Error(s) */
+               if (em_perrdet & REM_PED_IMPL_SPEC) {
+                       /* Implementation Specific port error reported */
+
+                       /* Get IS errors reported */
+                       rio_mport_read_config_32(mport, destid, hopcount,
+                                       IDT_PORT_ISERR_DET(portnum), &regval);
+
+                       pr_debug("RIO: %s Implementation Specific Port" \
+                                " errors 0x%x\n", rio_name(rdev), regval);
+
+                       /* Clear all implementation specific events */
+                       rio_mport_write_config_32(mport, destid, hopcount,
+                                       IDT_PORT_ISERR_DET(portnum), 0);
+               }
+       }
+
+       return 0;
+}
+
+static ssize_t
+idtg2_show_errlog(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct rio_dev *rdev = to_rio_dev(dev);
+       struct rio_mport *mport = rdev->net->hport;
+       u16 destid = rdev->rswitch->destid;
+       u8 hopcount = rdev->rswitch->hopcount;
+       ssize_t len = 0;
+       u32 regval;
+
+       while (!rio_mport_read_config_32(mport, destid, hopcount,
+                                        IDT_ERR_RD, &regval)) {
+               if (!regval)    /* 0 = end of log */
+                       break;
+               len += snprintf(buf + len, PAGE_SIZE - len,
+                                       "%08x\n", regval);
+               if (len >= (PAGE_SIZE - 10))
+                       break;
+       }
+
+       return len;
+}
+
+static DEVICE_ATTR(errlog, S_IRUGO, idtg2_show_errlog, NULL);
+
+static int idtg2_sysfs(struct rio_dev *rdev, int create)
+{
+       struct device *dev = &rdev->dev;
+       int err = 0;
+
+       if (create == RIO_SW_SYSFS_CREATE) {
+               /* Initialize sysfs entries */
+               err = device_create_file(dev, &dev_attr_errlog);
+               if (err)
+                       dev_err(dev, "Unable to create sysfs errlog file\n");
+       } else
+               device_remove_file(dev, &dev_attr_errlog);
+
+       return err;
+}
+
+static int idtg2_switch_init(struct rio_dev *rdev, int do_enum)
+{
+       pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
+       rdev->rswitch->add_entry = idtg2_route_add_entry;
+       rdev->rswitch->get_entry = idtg2_route_get_entry;
+       rdev->rswitch->clr_table = idtg2_route_clr_table;
+       rdev->rswitch->set_domain = idtg2_set_domain;
+       rdev->rswitch->get_domain = idtg2_get_domain;
+       rdev->rswitch->em_init = idtg2_em_init;
+       rdev->rswitch->em_handle = idtg2_em_handler;
+       rdev->rswitch->sw_sysfs = idtg2_sysfs;
+
+       return 0;
+}
+
+DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1848, idtg2_switch_init);
+DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1616, idtg2_switch_init);
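
A note on idtg2_route_clr_table() above: with RIO_STD_RTE_CONF_EXTCFGEN set in the destID select CSR, each 32-bit write to the port select CSR programs four consecutive route entries, so a 256-entry table is reset in 64 writes. The packed word it writes, sketched standalone:

#include <stdint.h>
#include <stdio.h>

#define DEFAULT_ROUTE 0xdeu             /* IDT_DEFAULT_ROUTE */

int main(void)
{
        /* Four "default route" entries packed into one config write. */
        uint32_t packed = (DEFAULT_ROUTE << 24) | (DEFAULT_ROUTE << 16) |
                          (DEFAULT_ROUTE << 8) | DEFAULT_ROUTE;

        printf("packed = 0x%08x\n", (unsigned)packed);
        return 0;
}
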
index 2c790c144f8928868e903520c3bf97dabc060c28..fc9f6374f75955a32801f056ad2a936353d5f87f 100644 (file)
@@ -117,6 +117,10 @@ idtcps_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
 
 static int idtcps_switch_init(struct rio_dev *rdev, int do_enum)
 {
+       struct rio_mport *mport = rdev->net->hport;
+       u16 destid = rdev->rswitch->destid;
+       u8 hopcount = rdev->rswitch->hopcount;
+
        pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
        rdev->rswitch->add_entry = idtcps_route_add_entry;
        rdev->rswitch->get_entry = idtcps_route_get_entry;
@@ -126,6 +130,12 @@ static int idtcps_switch_init(struct rio_dev *rdev, int do_enum)
        rdev->rswitch->em_init = NULL;
        rdev->rswitch->em_handle = NULL;
 
+       if (do_enum) {
+               /* set TVAL = ~50us */
+               rio_mport_write_config_32(mport, destid, hopcount,
+                       rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8);
+       }
+
        return 0;
 }
 
index f7fd7898606e050630f511d12cb718cde09ef2a0..b9a389b9f812f823bb752492b1de9fa63aa5d468 100644 (file)
@@ -29,7 +29,7 @@
 #define SPP_ROUTE_CFG_DESTID(n)        (0x11070 + 0x100*n)
 #define SPP_ROUTE_CFG_PORT(n)  (0x11074 + 0x100*n)
 
-#define TSI568_SP_MODE_BC      0x10004
+#define TSI568_SP_MODE(n)      (0x11004 + 0x100*n)
 #define  TSI568_SP_MODE_PW_DIS 0x08000000
 
 static int
@@ -117,14 +117,19 @@ tsi568_em_init(struct rio_dev *rdev)
        u16 destid = rdev->rswitch->destid;
        u8 hopcount = rdev->rswitch->hopcount;
        u32 regval;
+       int portnum;
 
        pr_debug("TSI568 %s [%d:%d]\n", __func__, destid, hopcount);
 
        /* Make sure that Port-Writes are disabled (for all ports) */
-       rio_mport_read_config_32(mport, destid, hopcount,
-                       TSI568_SP_MODE_BC, &regval);
-       rio_mport_write_config_32(mport, destid, hopcount,
-                       TSI568_SP_MODE_BC, regval | TSI568_SP_MODE_PW_DIS);
+       for (portnum = 0;
+            portnum < RIO_GET_TOTAL_PORTS(rdev->swpinfo); portnum++) {
+               rio_mport_read_config_32(mport, destid, hopcount,
+                               TSI568_SP_MODE(portnum), &regval);
+               rio_mport_write_config_32(mport, destid, hopcount,
+                               TSI568_SP_MODE(portnum),
+                               regval | TSI568_SP_MODE_PW_DIS);
+       }
 
        return 0;
 }
index d34df722d95fb9731b21c7ce5a17bda8120e9344..2003fb63c404ed9839da7e4167bfb66a7153ca3f 100644 (file)
@@ -166,7 +166,8 @@ tsi57x_em_init(struct rio_dev *rdev)
 
        pr_debug("TSI578 %s [%d:%d]\n", __func__, destid, hopcount);
 
-       for (portnum = 0; portnum < 16; portnum++) {
+       for (portnum = 0;
+            portnum < RIO_GET_TOTAL_PORTS(rdev->swpinfo); portnum++) {
                /* Make sure that Port-Writes are enabled (for all ports) */
                rio_mport_read_config_32(mport, destid, hopcount,
                                TSI578_SP_MODE(portnum), &regval);
@@ -205,6 +206,10 @@ tsi57x_em_init(struct rio_dev *rdev)
                        portnum++;
        }
 
+       /* set TVAL = ~50us */
+       rio_mport_write_config_32(mport, destid, hopcount,
+               rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x9a << 8);
+
        return 0;
 }
 
index 2785a0f16c9f656bd910e09ba0a37ed223c7078c..6a77437d4f5a51548daf24fb9f134e810b9b7f99 100644 (file)
@@ -171,7 +171,8 @@ config RTC_DRV_DS3232
        depends on RTC_CLASS && I2C
        help
          If you say yes here you get support for Dallas Semiconductor
-         DS3232 real-time clock chips.
+         DS3232 real-time clock chips. If an interrupt is associated
+         with the device, the alarm functionality is supported.
 
          This driver can also be built as a module.  If so, the module
          will be called rtc-ds3232.
@@ -952,4 +953,13 @@ config RTC_DRV_JZ4740
          This driver can also be built as a module. If so, the module
          will be called rtc-jz4740.
 
+config RTC_DRV_LPC32XX
+       depends on ARCH_LPC32XX
+       tristate "NXP LPC32XX RTC"
+       help
+         This enables support for the NXP RTC in the LPC32XX.
+
+         This driver can also be built as a module. If so, the module
+         will be called rtc-lpc32xx.
+
 endif # RTC_CLASS
index 0f207b3b58339cbd4232db464de93443f2c5ba9e..7a7cb3228a1d75a7bedfc0e39ec478a529371249 100644 (file)
@@ -51,6 +51,7 @@ obj-$(CONFIG_RTC_DRV_IMXDI)   += rtc-imxdi.o
 obj-$(CONFIG_RTC_DRV_ISL1208)  += rtc-isl1208.o
 obj-$(CONFIG_RTC_DRV_ISL12022) += rtc-isl12022.o
 obj-$(CONFIG_RTC_DRV_JZ4740)   += rtc-jz4740.o
+obj-$(CONFIG_RTC_DRV_LPC32XX)  += rtc-lpc32xx.o
 obj-$(CONFIG_RTC_DRV_M41T80)   += rtc-m41t80.o
 obj-$(CONFIG_RTC_DRV_M41T94)   += rtc-m41t94.o
 obj-$(CONFIG_RTC_DRV_M48T35)   += rtc-m48t35.o
index 565562ba6ac9dacc7d8aa4842c7ddedb29de074d..e6539cbabb35fb8f143f1710a004744fa1abe745 100644 (file)
@@ -158,8 +158,10 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev,
        rtc_dev_prepare(rtc);
 
        err = device_register(&rtc->dev);
-       if (err)
+       if (err) {
+               put_device(&rtc->dev);
                goto exit_kfree;
+       }
 
        rtc_dev_add_device(rtc);
        rtc_sysfs_add_device(rtc);
index d4fb82d85e9b36ab61e1626236cb98bf76364ea2..b4b6087f22343ac66727090356d9abedabd91d42 100644 (file)
@@ -2,7 +2,7 @@
  * Blackfin On-Chip Real Time Clock Driver
  *  Supports BF51x/BF52x/BF53[123]/BF53[467]/BF54x
  *
- * Copyright 2004-2009 Analog Devices Inc.
+ * Copyright 2004-2010 Analog Devices Inc.
  *
  * Enter bugs at http://blackfin.uclinux.org/
  *
@@ -183,29 +183,33 @@ static irqreturn_t bfin_rtc_interrupt(int irq, void *dev_id)
        struct bfin_rtc *rtc = dev_get_drvdata(dev);
        unsigned long events = 0;
        bool write_complete = false;
-       u16 rtc_istat, rtc_ictl;
+       u16 rtc_istat, rtc_istat_clear, rtc_ictl, bits;
 
        dev_dbg_stamp(dev);
 
        rtc_istat = bfin_read_RTC_ISTAT();
        rtc_ictl = bfin_read_RTC_ICTL();
+       rtc_istat_clear = 0;
 
-       if (rtc_istat & RTC_ISTAT_WRITE_COMPLETE) {
-               bfin_write_RTC_ISTAT(RTC_ISTAT_WRITE_COMPLETE);
+       bits = RTC_ISTAT_WRITE_COMPLETE;
+       if (rtc_istat & bits) {
+               rtc_istat_clear |= bits;
                write_complete = true;
                complete(&bfin_write_complete);
        }
 
-       if (rtc_ictl & (RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY)) {
-               if (rtc_istat & (RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY)) {
-                       bfin_write_RTC_ISTAT(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY);
+       bits = (RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY);
+       if (rtc_ictl & bits) {
+               if (rtc_istat & bits) {
+                       rtc_istat_clear |= bits;
                        events |= RTC_AF | RTC_IRQF;
                }
        }
 
-       if (rtc_ictl & RTC_ISTAT_SEC) {
-               if (rtc_istat & RTC_ISTAT_SEC) {
-                       bfin_write_RTC_ISTAT(RTC_ISTAT_SEC);
+       bits = RTC_ISTAT_SEC;
+       if (rtc_ictl & bits) {
+               if (rtc_istat & bits) {
+                       rtc_istat_clear |= bits;
                        events |= RTC_UF | RTC_IRQF;
                }
        }
@@ -213,9 +217,10 @@ static irqreturn_t bfin_rtc_interrupt(int irq, void *dev_id)
        if (events)
                rtc_update_irq(rtc->rtc_dev, 1, events);
 
-       if (write_complete || events)
+       if (write_complete || events) {
+               bfin_write_RTC_ISTAT(rtc_istat_clear);
                return IRQ_HANDLED;
-       else
+       } else
                return IRQ_NONE;
 }
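
The reworked Blackfin ISR collects every status bit it intends to acknowledge into rtc_istat_clear and issues a single write-to-clear on exit, rather than three separate RTC_ISTAT writes mid-handler. The pattern in isolation (register and bit values are hypothetical stand-ins, not the Blackfin's):

#include <stdint.h>

static uint16_t istat_reg;              /* hypothetical status register */
#define WRITE_COMPLETE  0x8000u
#define ALARM_BITS      0x0060u
#define SEC_BIT         0x0001u

static void rtc_isr(void)
{
        uint16_t istat = istat_reg;
        uint16_t clear = 0;

        if (istat & WRITE_COMPLETE)
                clear |= WRITE_COMPLETE;
        if (istat & ALARM_BITS)
                clear |= ALARM_BITS;
        if (istat & SEC_BIT)
                clear |= SEC_BIT;

        if (clear)
                istat_reg &= ~clear;    /* single write-to-clear on exit */
}

int main(void)
{
        istat_reg = WRITE_COMPLETE | SEC_BIT;
        rtc_isr();
        return istat_reg;               /* 0 once both bits are acked */
}
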
 
@@ -422,9 +427,13 @@ static int __devexit bfin_rtc_remove(struct platform_device *pdev)
 #ifdef CONFIG_PM
 static int bfin_rtc_suspend(struct platform_device *pdev, pm_message_t state)
 {
-       if (device_may_wakeup(&pdev->dev)) {
+       struct device *dev = &pdev->dev;
+
+       dev_dbg_stamp(dev);
+
+       if (device_may_wakeup(dev)) {
                enable_irq_wake(IRQ_RTC);
-               bfin_rtc_sync_pending(&pdev->dev);
+               bfin_rtc_sync_pending(dev);
        } else
                bfin_rtc_int_clear(0);
 
@@ -433,7 +442,11 @@ static int bfin_rtc_suspend(struct platform_device *pdev, pm_message_t state)
 
 static int bfin_rtc_resume(struct platform_device *pdev)
 {
-       if (device_may_wakeup(&pdev->dev))
+       struct device *dev = &pdev->dev;
+
+       dev_dbg_stamp(dev);
+
+       if (device_may_wakeup(dev))
                disable_irq_wake(IRQ_RTC);
 
        /*
index 9de8516e3531e70bad818747f41de4b8052486bd..57063552d3b76c538a64f692631a284f0d16a8dc 100644 (file)
@@ -2,6 +2,7 @@
  * RTC client/driver for the Maxim/Dallas DS3232 Real-Time Clock over I2C
  *
  * Copyright (C) 2009-2010 Freescale Semiconductor.
+ * Author: Jack Lan <jack.lan@freescale.com>
  *
  * This program is free software; you can redistribute  it and/or modify it
  * under  the terms of  the GNU General  Public License as published by the
@@ -175,6 +176,182 @@ static int ds3232_set_time(struct device *dev, struct rtc_time *time)
                                              DS3232_REG_SECONDS, 7, buf);
 }
 
+/*
+ * The DS3232 has two alarms; only alarm 1 is used here.
+ * Per the Linux RTC framework, only one-shot alarms are supported;
+ * there is no periodic alarm mode.
+ */
+static int ds3232_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct ds3232 *ds3232 = i2c_get_clientdata(client);
+       int control, stat;
+       int ret;
+       u8 buf[4];
+
+       mutex_lock(&ds3232->mutex);
+
+       ret = i2c_smbus_read_byte_data(client, DS3232_REG_SR);
+       if (ret < 0)
+               goto out;
+       stat = ret;
+       ret = i2c_smbus_read_byte_data(client, DS3232_REG_CR);
+       if (ret < 0)
+               goto out;
+       control = ret;
+       ret = i2c_smbus_read_i2c_block_data(client, DS3232_REG_ALARM1, 4, buf);
+       if (ret < 0)
+               goto out;
+
+       alarm->time.tm_sec = bcd2bin(buf[0] & 0x7F);
+       alarm->time.tm_min = bcd2bin(buf[1] & 0x7F);
+       alarm->time.tm_hour = bcd2bin(buf[2] & 0x7F);
+       alarm->time.tm_mday = bcd2bin(buf[3] & 0x7F);
+
+       alarm->time.tm_mon = -1;
+       alarm->time.tm_year = -1;
+       alarm->time.tm_wday = -1;
+       alarm->time.tm_yday = -1;
+       alarm->time.tm_isdst = -1;
+
+       alarm->enabled = !!(control & DS3232_REG_CR_A1IE);
+       alarm->pending = !!(stat & DS3232_REG_SR_A1F);
+
+       ret = 0;
+out:
+       mutex_unlock(&ds3232->mutex);
+       return ret;
+}
+
+/*
+ * The Linux RTC core does not support weekday alarms,
+ * and only 24-hour time mode is supported here.
+ */
+static int ds3232_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct ds3232 *ds3232 = i2c_get_clientdata(client);
+       int control, stat;
+       int ret;
+       u8 buf[4];
+
+       if (client->irq <= 0)
+               return -EINVAL;
+
+       mutex_lock(&ds3232->mutex);
+
+       buf[0] = bin2bcd(alarm->time.tm_sec);
+       buf[1] = bin2bcd(alarm->time.tm_min);
+       buf[2] = bin2bcd(alarm->time.tm_hour);
+       buf[3] = bin2bcd(alarm->time.tm_mday);
+
+       /* clear alarm interrupt enable bit */
+       ret = i2c_smbus_read_byte_data(client, DS3232_REG_CR);
+       if (ret < 0)
+               goto out;
+       control = ret;
+       control &= ~(DS3232_REG_CR_A1IE | DS3232_REG_CR_A2IE);
+       ret = i2c_smbus_write_byte_data(client, DS3232_REG_CR, control);
+       if (ret < 0)
+               goto out;
+
+       /* clear any pending alarm flag */
+       ret = i2c_smbus_read_byte_data(client, DS3232_REG_SR);
+       if (ret < 0)
+               goto out;
+       stat = ret;
+       stat &= ~(DS3232_REG_SR_A1F | DS3232_REG_SR_A2F);
+       ret = i2c_smbus_write_byte_data(client, DS3232_REG_SR, stat);
+       if (ret < 0)
+               goto out;
+
+       ret = i2c_smbus_write_i2c_block_data(client, DS3232_REG_ALARM1, 4, buf);
+
+       if (alarm->enabled) {
+               control |= DS3232_REG_CR_A1IE;
+               ret = i2c_smbus_write_byte_data(client, DS3232_REG_CR, control);
+       }
+out:
+       mutex_unlock(&ds3232->mutex);
+       return ret;
+}
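
Together with the ops hookup further down, these callbacks expose the alarm through the standard RTC character device. A minimal userspace sketch, assuming the chip is bound as /dev/rtc0 and ignoring error handling and minute rollover:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	struct rtc_time now;
	struct rtc_wkalrm alm;
	int fd = open("/dev/rtc0", O_RDONLY);

	if (fd < 0)
		return 1;
	ioctl(fd, RTC_RD_TIME, &now);		/* read current time */
	alm.time = now;
	alm.time.tm_min += 1;			/* fire in one minute (no rollover handling) */
	alm.enabled = 1;
	alm.pending = 0;
	ioctl(fd, RTC_WKALM_SET, &alm);		/* one-shot alarm, per the comment above */
	close(fd);
	return 0;
}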
+
+static void ds3232_update_alarm(struct i2c_client *client)
+{
+       struct ds3232 *ds3232 = i2c_get_clientdata(client);
+       int control;
+       int ret;
+       u8 buf[4];
+
+       mutex_lock(&ds3232->mutex);
+
+       ret = i2c_smbus_read_i2c_block_data(client, DS3232_REG_ALARM1, 4, buf);
+       if (ret < 0)
+               goto unlock;
+
+       buf[0] = bcd2bin(buf[0]) < 0 || (ds3232->rtc->irq_data & RTC_UF) ?
+                                                               0x80 : buf[0];
+       buf[1] = bcd2bin(buf[1]) < 0 || (ds3232->rtc->irq_data & RTC_UF) ?
+                                                               0x80 : buf[1];
+       buf[2] = bcd2bin(buf[2]) < 0 || (ds3232->rtc->irq_data & RTC_UF) ?
+                                                               0x80 : buf[2];
+       buf[3] = bcd2bin(buf[3]) < 0 || (ds3232->rtc->irq_data & RTC_UF) ?
+                                                               0x80 : buf[3];
+
+       ret = i2c_smbus_write_i2c_block_data(client, DS3232_REG_ALARM1, 4, buf);
+       if (ret < 0)
+               goto unlock;
+
+       control = i2c_smbus_read_byte_data(client, DS3232_REG_CR);
+       if (control < 0)
+               goto unlock;
+
+       if (ds3232->rtc->irq_data & (RTC_AF | RTC_UF))
+               /* enable alarm1 interrupt */
+               control |= DS3232_REG_CR_A1IE;
+       else
+               /* disable alarm1 interrupt */
+               control &= ~(DS3232_REG_CR_A1IE);
+       i2c_smbus_write_byte_data(client, DS3232_REG_CR, control);
+
+unlock:
+       mutex_unlock(&ds3232->mutex);
+}
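
The 0x80 writes above set bit 7 of each alarm register, which per the DS3232 datasheet is the per-byte alarm-1 mask (A1M1..A1M4); with all four masks set, alarm 1 matches every second, which is how the update (RTC_UF) interrupt is emulated. A standalone sketch of that masking:

#include <stdint.h>
#include <stdio.h>

#define A1M_MASK 0x80	/* bit 7 of each DS3232 alarm register */

int main(void)
{
	uint8_t alarm_regs[4] = { 0x00, 0x30, 0x12, 0x01 };	/* sec/min/hour/date in BCD */
	int i;

	/* Setting every A1Mx mask bit tells the chip to ignore all four
	 * fields, so alarm 1 fires once per second -- the trick used
	 * above to emulate the update interrupt. */
	for (i = 0; i < 4; i++)
		alarm_regs[i] |= A1M_MASK;

	for (i = 0; i < 4; i++)
		printf("reg[%d] = %#04x\n", i, alarm_regs[i]);
	return 0;
}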
+
+static int ds3232_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct ds3232 *ds3232 = i2c_get_clientdata(client);
+
+       if (client->irq <= 0)
+               return -EINVAL;
+
+       if (enabled)
+               ds3232->rtc->irq_data |= RTC_AF;
+       else
+               ds3232->rtc->irq_data &= ~RTC_AF;
+
+       ds3232_update_alarm(client);
+       return 0;
+}
+
+static int ds3232_update_irq_enable(struct device *dev, unsigned int enabled)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct ds3232 *ds3232 = i2c_get_clientdata(client);
+
+       if (client->irq <= 0)
+               return -EINVAL;
+
+       if (enabled)
+               ds3232->rtc->irq_data |= RTC_UF;
+       else
+               ds3232->rtc->irq_data &= ~RTC_UF;
+
+       ds3232_update_alarm(client);
+       return 0;
+}
+
 static irqreturn_t ds3232_irq(int irq, void *dev_id)
 {
        struct i2c_client *client = dev_id;
@@ -222,6 +399,10 @@ unlock:
 static const struct rtc_class_ops ds3232_rtc_ops = {
        .read_time = ds3232_read_time,
        .set_time = ds3232_set_time,
+       .read_alarm = ds3232_read_alarm,
+       .set_alarm = ds3232_set_alarm,
+       .alarm_irq_enable = ds3232_alarm_irq_enable,
+       .update_irq_enable = ds3232_update_irq_enable,
 };
 
 static int __devinit ds3232_probe(struct i2c_client *client,
index 2619d57b91d76ee83236bd47a967d093984712a1..2e16f72c90569fdb0fe6e26b16f6257e1604de2b 100644 (file)
@@ -1,5 +1,6 @@
 /*
  *  Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
+ *  Copyright (C) 2010, Paul Cercueil <paul@crapouillou.net>
  *      JZ4740 SoC RTC driver
  *
  *  This program is free software; you can redistribute it and/or modify it
@@ -161,7 +162,8 @@ static int jz4740_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
 
        ret = jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SEC_ALARM, secs);
        if (!ret)
-               ret = jz4740_rtc_ctrl_set_bits(rtc, JZ_RTC_CTRL_AE, alrm->enabled);
+               ret = jz4740_rtc_ctrl_set_bits(rtc,
+                       JZ_RTC_CTRL_AE | JZ_RTC_CTRL_AF_IRQ, alrm->enabled);
 
        return ret;
 }
@@ -258,6 +260,8 @@ static int __devinit jz4740_rtc_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, rtc);
 
+       device_init_wakeup(&pdev->dev, 1);
+
        rtc->rtc = rtc_device_register(pdev->name, &pdev->dev, &jz4740_rtc_ops,
                                        THIS_MODULE);
        if (IS_ERR(rtc->rtc)) {
@@ -318,12 +322,43 @@ static int __devexit jz4740_rtc_remove(struct platform_device *pdev)
        return 0;
 }
 
+
+#ifdef CONFIG_PM
+static int jz4740_rtc_suspend(struct device *dev)
+{
+       struct jz4740_rtc *rtc = dev_get_drvdata(dev);
+
+       if (device_may_wakeup(dev))
+               enable_irq_wake(rtc->irq);
+       return 0;
+}
+
+static int jz4740_rtc_resume(struct device *dev)
+{
+       struct jz4740_rtc *rtc = dev_get_drvdata(dev);
+
+       if (device_may_wakeup(dev))
+               disable_irq_wake(rtc->irq);
+       return 0;
+}
+
+static const struct dev_pm_ops jz4740_pm_ops = {
+       .suspend = jz4740_rtc_suspend,
+       .resume  = jz4740_rtc_resume,
+};
+#define JZ4740_RTC_PM_OPS (&jz4740_pm_ops)
+
+#else
+#define JZ4740_RTC_PM_OPS NULL
+#endif  /* CONFIG_PM */
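
The suspend/resume pair above only toggles the IRQ wake source, a common shape for dev_pm_ops. A sketch of the same table declared with the SIMPLE_DEV_PM_OPS helper from <linux/pm.h>, assuming that macro is available in this tree; it reuses the same two callbacks for the hibernation transitions (freeze/thaw/poweroff/restore) as well:

/* Sketch only: equivalent declaration via the pm.h convenience macro. */
static SIMPLE_DEV_PM_OPS(jz4740_pm_ops, jz4740_rtc_suspend,
			 jz4740_rtc_resume);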
+
 struct platform_driver jz4740_rtc_driver = {
-       .probe = jz4740_rtc_probe,
-       .remove = __devexit_p(jz4740_rtc_remove),
-       .driver = {
-               .name = "jz4740-rtc",
+       .probe   = jz4740_rtc_probe,
+       .remove  = __devexit_p(jz4740_rtc_remove),
+       .driver  = {
+               .name  = "jz4740-rtc",
                .owner = THIS_MODULE,
+               .pm    = JZ4740_RTC_PM_OPS,
        },
 };
 
diff --git a/drivers/rtc/rtc-lpc32xx.c b/drivers/rtc/rtc-lpc32xx.c
new file mode 100644 (file)
index 0000000..ec8701c
--- /dev/null
@@ -0,0 +1,414 @@
+/*
+ * Copyright (C) 2010 NXP Semiconductors
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/rtc.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+/*
+ * Clock and Power control register offsets
+ */
+#define LPC32XX_RTC_UCOUNT             0x00
+#define LPC32XX_RTC_DCOUNT             0x04
+#define LPC32XX_RTC_MATCH0             0x08
+#define LPC32XX_RTC_MATCH1             0x0C
+#define LPC32XX_RTC_CTRL               0x10
+#define LPC32XX_RTC_INTSTAT            0x14
+#define LPC32XX_RTC_KEY                        0x18
+#define LPC32XX_RTC_SRAM               0x80
+
+#define LPC32XX_RTC_CTRL_MATCH0                (1 << 0)
+#define LPC32XX_RTC_CTRL_MATCH1                (1 << 1)
+#define LPC32XX_RTC_CTRL_ONSW_MATCH0   (1 << 2)
+#define LPC32XX_RTC_CTRL_ONSW_MATCH1   (1 << 3)
+#define LPC32XX_RTC_CTRL_SW_RESET      (1 << 4)
+#define LPC32XX_RTC_CTRL_CNTR_DIS      (1 << 6)
+#define LPC32XX_RTC_CTRL_ONSW_FORCE_HI (1 << 7)
+
+#define LPC32XX_RTC_INTSTAT_MATCH0     (1 << 0)
+#define LPC32XX_RTC_INTSTAT_MATCH1     (1 << 1)
+#define LPC32XX_RTC_INTSTAT_ONSW       (1 << 2)
+
+#define LPC32XX_RTC_KEY_ONSW_LOADVAL   0xB5C13F27
+
+#define RTC_NAME "rtc-lpc32xx"
+
+#define rtc_readl(dev, reg) \
+       __raw_readl((dev)->rtc_base + (reg))
+#define rtc_writel(dev, reg, val) \
+       __raw_writel((val), (dev)->rtc_base + (reg))
+
+struct lpc32xx_rtc {
+       void __iomem *rtc_base;
+       int irq;
+       unsigned char alarm_enabled;
+       struct rtc_device *rtc;
+       spinlock_t lock;
+};
+
+static int lpc32xx_rtc_read_time(struct device *dev, struct rtc_time *time)
+{
+       unsigned long elapsed_sec;
+       struct lpc32xx_rtc *rtc = dev_get_drvdata(dev);
+
+       elapsed_sec = rtc_readl(rtc, LPC32XX_RTC_UCOUNT);
+       rtc_time_to_tm(elapsed_sec, time);
+
+       return rtc_valid_tm(time);
+}
+
+static int lpc32xx_rtc_set_mmss(struct device *dev, unsigned long secs)
+{
+       struct lpc32xx_rtc *rtc = dev_get_drvdata(dev);
+       u32 tmp;
+
+       spin_lock_irq(&rtc->lock);
+
+       /* RTC must be disabled during count update */
+       tmp = rtc_readl(rtc, LPC32XX_RTC_CTRL);
+       rtc_writel(rtc, LPC32XX_RTC_CTRL, tmp | LPC32XX_RTC_CTRL_CNTR_DIS);
+       rtc_writel(rtc, LPC32XX_RTC_UCOUNT, secs);
+       rtc_writel(rtc, LPC32XX_RTC_DCOUNT, 0xFFFFFFFF - secs);
+       rtc_writel(rtc, LPC32XX_RTC_CTRL, tmp &= ~LPC32XX_RTC_CTRL_CNTR_DIS);
+
+       spin_unlock_irq(&rtc->lock);
+
+       return 0;
+}
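
set_mmss loads the up-counter with the epoch seconds and the down-counter with its complement, so UCOUNT + DCOUNT == 0xFFFFFFFF holds after every update. A standalone check of that invariant:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch of the counter pairing used above: the down-counter is kept
 * as the complement of the up-counter. */
int main(void)
{
	uint32_t secs = 1288224784;		/* any epoch value */
	uint32_t ucount = secs;
	uint32_t dcount = 0xFFFFFFFFu - secs;

	assert(ucount + dcount == 0xFFFFFFFFu);
	printf("ucount=%u dcount=%u\n", ucount, dcount);
	return 0;
}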
+
+static int lpc32xx_rtc_read_alarm(struct device *dev,
+       struct rtc_wkalrm *wkalrm)
+{
+       struct lpc32xx_rtc *rtc = dev_get_drvdata(dev);
+
+       rtc_time_to_tm(rtc_readl(rtc, LPC32XX_RTC_MATCH0), &wkalrm->time);
+       wkalrm->enabled = rtc->alarm_enabled;
+       wkalrm->pending = !!(rtc_readl(rtc, LPC32XX_RTC_INTSTAT) &
+               LPC32XX_RTC_INTSTAT_MATCH0);
+
+       return rtc_valid_tm(&wkalrm->time);
+}
+
+static int lpc32xx_rtc_set_alarm(struct device *dev,
+       struct rtc_wkalrm *wkalrm)
+{
+       struct lpc32xx_rtc *rtc = dev_get_drvdata(dev);
+       unsigned long alarmsecs;
+       u32 tmp;
+       int ret;
+
+       ret = rtc_tm_to_time(&wkalrm->time, &alarmsecs);
+       if (ret < 0) {
+               dev_warn(dev, "Failed to convert time: %d\n", ret);
+               return ret;
+       }
+
+       spin_lock_irq(&rtc->lock);
+
+       /* Disable alarm during update */
+       tmp = rtc_readl(rtc, LPC32XX_RTC_CTRL);
+       rtc_writel(rtc, LPC32XX_RTC_CTRL, tmp & ~LPC32XX_RTC_CTRL_MATCH0);
+
+       rtc_writel(rtc, LPC32XX_RTC_MATCH0, alarmsecs);
+
+       rtc->alarm_enabled = wkalrm->enabled;
+       if (wkalrm->enabled) {
+               rtc_writel(rtc, LPC32XX_RTC_INTSTAT,
+                          LPC32XX_RTC_INTSTAT_MATCH0);
+               rtc_writel(rtc, LPC32XX_RTC_CTRL, tmp |
+                          LPC32XX_RTC_CTRL_MATCH0);
+       }
+
+       spin_unlock_irq(&rtc->lock);
+
+       return 0;
+}
+
+static int lpc32xx_rtc_alarm_irq_enable(struct device *dev,
+       unsigned int enabled)
+{
+       struct lpc32xx_rtc *rtc = dev_get_drvdata(dev);
+       u32 tmp;
+
+       spin_lock_irq(&rtc->lock);
+       tmp = rtc_readl(rtc, LPC32XX_RTC_CTRL);
+
+       if (enabled) {
+               rtc->alarm_enabled = 1;
+               tmp |= LPC32XX_RTC_CTRL_MATCH0;
+       } else {
+               rtc->alarm_enabled = 0;
+               tmp &= ~LPC32XX_RTC_CTRL_MATCH0;
+       }
+
+       rtc_writel(rtc, LPC32XX_RTC_CTRL, tmp);
+       spin_unlock_irq(&rtc->lock);
+
+       return 0;
+}
+
+static irqreturn_t lpc32xx_rtc_alarm_interrupt(int irq, void *dev)
+{
+       struct lpc32xx_rtc *rtc = dev;
+
+       spin_lock(&rtc->lock);
+
+       /* Disable alarm interrupt */
+       rtc_writel(rtc, LPC32XX_RTC_CTRL,
+               rtc_readl(rtc, LPC32XX_RTC_CTRL) &
+                         ~LPC32XX_RTC_CTRL_MATCH0);
+       rtc->alarm_enabled = 0;
+
+       /*
+        * Write a large value to the match value so the RTC won't
+        * keep firing the match status
+        */
+       rtc_writel(rtc, LPC32XX_RTC_MATCH0, 0xFFFFFFFF);
+       rtc_writel(rtc, LPC32XX_RTC_INTSTAT, LPC32XX_RTC_INTSTAT_MATCH0);
+
+       spin_unlock(&rtc->lock);
+
+       rtc_update_irq(rtc->rtc, 1, RTC_IRQF | RTC_AF);
+
+       return IRQ_HANDLED;
+}
+
+static const struct rtc_class_ops lpc32xx_rtc_ops = {
+       .read_time              = lpc32xx_rtc_read_time,
+       .set_mmss               = lpc32xx_rtc_set_mmss,
+       .read_alarm             = lpc32xx_rtc_read_alarm,
+       .set_alarm              = lpc32xx_rtc_set_alarm,
+       .alarm_irq_enable       = lpc32xx_rtc_alarm_irq_enable,
+};
+
+static int __devinit lpc32xx_rtc_probe(struct platform_device *pdev)
+{
+       struct resource *res;
+       struct lpc32xx_rtc *rtc;
+       resource_size_t size;
+       int rtcirq;
+       u32 tmp;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               dev_err(&pdev->dev, "Can't get memory resource\n");
+               return -ENOENT;
+       }
+
+       rtcirq = platform_get_irq(pdev, 0);
+       if (rtcirq < 0 || rtcirq >= NR_IRQS) {
+               dev_warn(&pdev->dev, "Can't get interrupt resource\n");
+               rtcirq = -1;
+       }
+
+       rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
+       if (unlikely(!rtc)) {
+               dev_err(&pdev->dev, "Can't allocate memory\n");
+               return -ENOMEM;
+       }
+       rtc->irq = rtcirq;
+
+       size = resource_size(res);
+
+       if (!devm_request_mem_region(&pdev->dev, res->start, size,
+                                    pdev->name)) {
+               dev_err(&pdev->dev, "RTC registers are not free\n");
+               return -EBUSY;
+       }
+
+       rtc->rtc_base = devm_ioremap(&pdev->dev, res->start, size);
+       if (!rtc->rtc_base) {
+               dev_err(&pdev->dev, "Can't map memory\n");
+               return -ENOMEM;
+       }
+
+       spin_lock_init(&rtc->lock);
+
+       /*
+        * The RTC is on a separate power domain and can keep its state
+        * across a chip power cycle. If the RTC has never been set up
+        * before, set it up now for the first time.
+        */
+       tmp = rtc_readl(rtc, LPC32XX_RTC_CTRL);
+       if (rtc_readl(rtc, LPC32XX_RTC_KEY) != LPC32XX_RTC_KEY_ONSW_LOADVAL) {
+               tmp &= ~(LPC32XX_RTC_CTRL_SW_RESET |
+                       LPC32XX_RTC_CTRL_CNTR_DIS |
+                       LPC32XX_RTC_CTRL_MATCH0 |
+                       LPC32XX_RTC_CTRL_MATCH1 |
+                       LPC32XX_RTC_CTRL_ONSW_MATCH0 |
+                       LPC32XX_RTC_CTRL_ONSW_MATCH1 |
+                       LPC32XX_RTC_CTRL_ONSW_FORCE_HI);
+               rtc_writel(rtc, LPC32XX_RTC_CTRL, tmp);
+
+               /* Clear latched interrupt states */
+               rtc_writel(rtc, LPC32XX_RTC_MATCH0, 0xFFFFFFFF);
+               rtc_writel(rtc, LPC32XX_RTC_INTSTAT,
+                          LPC32XX_RTC_INTSTAT_MATCH0 |
+                          LPC32XX_RTC_INTSTAT_MATCH1 |
+                          LPC32XX_RTC_INTSTAT_ONSW);
+
+               /* Write key value to RTC so it won't reload on reset */
+               rtc_writel(rtc, LPC32XX_RTC_KEY,
+                          LPC32XX_RTC_KEY_ONSW_LOADVAL);
+       } else {
+               rtc_writel(rtc, LPC32XX_RTC_CTRL,
+                          tmp & ~LPC32XX_RTC_CTRL_MATCH0);
+       }
+
+       platform_set_drvdata(pdev, rtc);
+
+       rtc->rtc = rtc_device_register(RTC_NAME, &pdev->dev, &lpc32xx_rtc_ops,
+               THIS_MODULE);
+       if (IS_ERR(rtc->rtc)) {
+               dev_err(&pdev->dev, "Can't get RTC\n");
+               platform_set_drvdata(pdev, NULL);
+               return PTR_ERR(rtc->rtc);
+       }
+
+       /*
+        * IRQ is enabled after device registration in case alarm IRQ
+        * is pending upon suspend exit.
+        */
+       if (rtc->irq >= 0) {
+               if (devm_request_irq(&pdev->dev, rtc->irq,
+                                    lpc32xx_rtc_alarm_interrupt,
+                                    IRQF_DISABLED, pdev->name, rtc) < 0) {
+                       dev_warn(&pdev->dev, "Can't request interrupt.\n");
+                       rtc->irq = -1;
+               } else {
+                       device_init_wakeup(&pdev->dev, 1);
+               }
+       }
+
+       return 0;
+}
+
+static int __devexit lpc32xx_rtc_remove(struct platform_device *pdev)
+{
+       struct lpc32xx_rtc *rtc = platform_get_drvdata(pdev);
+
+       if (rtc->irq >= 0)
+               device_init_wakeup(&pdev->dev, 0);
+
+       platform_set_drvdata(pdev, NULL);
+       rtc_device_unregister(rtc->rtc);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM
+static int lpc32xx_rtc_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct lpc32xx_rtc *rtc = platform_get_drvdata(pdev);
+
+       if (rtc->irq >= 0) {
+               if (device_may_wakeup(&pdev->dev))
+                       enable_irq_wake(rtc->irq);
+               else
+                       disable_irq_wake(rtc->irq);
+       }
+
+       return 0;
+}
+
+static int lpc32xx_rtc_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct lpc32xx_rtc *rtc = platform_get_drvdata(pdev);
+
+       if (rtc->irq >= 0 && device_may_wakeup(&pdev->dev))
+               disable_irq_wake(rtc->irq);
+
+       return 0;
+}
+
+/* Unconditionally disable the alarm */
+static int lpc32xx_rtc_freeze(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct lpc32xx_rtc *rtc = platform_get_drvdata(pdev);
+
+       spin_lock_irq(&rtc->lock);
+
+       rtc_writel(rtc, LPC32XX_RTC_CTRL,
+               rtc_readl(rtc, LPC32XX_RTC_CTRL) &
+                         ~LPC32XX_RTC_CTRL_MATCH0);
+
+       spin_unlock_irq(&rtc->lock);
+
+       return 0;
+}
+
+static int lpc32xx_rtc_thaw(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct lpc32xx_rtc *rtc = platform_get_drvdata(pdev);
+
+       if (rtc->alarm_enabled) {
+               spin_lock_irq(&rtc->lock);
+
+               rtc_writel(rtc, LPC32XX_RTC_CTRL,
+                          rtc_readl(rtc, LPC32XX_RTC_CTRL) |
+                          LPC32XX_RTC_CTRL_MATCH0);
+
+               spin_unlock_irq(&rtc->lock);
+       }
+
+       return 0;
+}
+
+static const struct dev_pm_ops lpc32xx_rtc_pm_ops = {
+       .suspend = lpc32xx_rtc_suspend,
+       .resume = lpc32xx_rtc_resume,
+       .freeze = lpc32xx_rtc_freeze,
+       .thaw = lpc32xx_rtc_thaw,
+       .restore = lpc32xx_rtc_resume
+};
+
+#define LPC32XX_RTC_PM_OPS (&lpc32xx_rtc_pm_ops)
+#else
+#define LPC32XX_RTC_PM_OPS NULL
+#endif
+
+static struct platform_driver lpc32xx_rtc_driver = {
+       .probe          = lpc32xx_rtc_probe,
+       .remove         = __devexit_p(lpc32xx_rtc_remove),
+       .driver = {
+               .name   = RTC_NAME,
+               .owner  = THIS_MODULE,
+               .pm     = LPC32XX_RTC_PM_OPS
+       },
+};
+
+static int __init lpc32xx_rtc_init(void)
+{
+       return platform_driver_register(&lpc32xx_rtc_driver);
+}
+module_init(lpc32xx_rtc_init);
+
+static void __exit lpc32xx_rtc_exit(void)
+{
+       platform_driver_unregister(&lpc32xx_rtc_driver);
+}
+module_exit(lpc32xx_rtc_exit);
+
+MODULE_AUTHOR("Kevin Wells <wellsk40@gmail.com>");
+MODULE_DESCRIPTION("RTC driver for the LPC32xx SoC");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:rtc-lpc32xx");
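
The init/exit pair above is the standard platform-driver registration boilerplate of this era; kernels from v3.2 onward fold it into module_platform_driver(). For reference (not applicable to this tree), a sketch of the later idiom:

/* module_platform_driver() expands to exactly the register/unregister
 * pair above, so the whole init/exit boilerplate collapses to: */
module_platform_driver(lpc32xx_rtc_driver);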
index 64d9727b722970a080155e0527676e9f0a1defcf..73377b0d65dabe5f37d241e748c99ed4507ee45e 100644 (file)
@@ -34,7 +34,8 @@
  * Board-specific wiring options include using split power mode with
  * RTC_OFF_NOFF used as the reset signal (so the RTC won't be reset),
  * and wiring RTC_WAKE_INT (so the RTC alarm can wake the system from
- * low power modes).  See the BOARD-SPECIFIC CUSTOMIZATION comment.
+ * low power modes) for OMAP1 boards (OMAP-L138 has this built into
+ * the SoC). See the BOARD-SPECIFIC CUSTOMIZATION comment.
  */
 
 #define OMAP_RTC_BASE                  0xfffb4800
@@ -401,16 +402,17 @@ static int __init omap_rtc_probe(struct platform_device *pdev)
 
        /* BOARD-SPECIFIC CUSTOMIZATION CAN GO HERE:
         *
-        *  - Boards wired so that RTC_WAKE_INT does something, and muxed
-        *    right (W13_1610_RTC_WAKE_INT is the default after chip reset),
-        *    should initialize the device wakeup flag appropriately.
+        *  - Device wake-up capability should be set up by chip init
+        *    logic. OMAP1 boards should initialize the "wakeup capable"
+        *    flag in the platform device if the board is wired for
+        *    wake-up by the RTC alarm. On OMAP-L138, this capability
+        *    is built into the SoC as part of its "Deep Sleep" support.
         *
         *  - Boards wired so RTC_ON_nOFF is used as the reset signal,
         *    rather than nPWRON_RESET, should forcibly enable split
         *    power mode.  (Some chip errata report that RTC_CTRL_SPLIT
         *    is write-only, and always reads as zero...)
         */
-       device_init_wakeup(&pdev->dev, 0);
 
        if (new_ctrl & (u8) OMAP_RTC_CTRL_SPLIT)
                pr_info("%s: split power mode\n", pdev->name);
index f57a87f4ae96abb367a2e08d353378b31f2a19fa..cf953ecbfca934e8b9c7058f976610ff092016ae 100644 (file)
@@ -100,7 +100,7 @@ static int s3c_rtc_setpie(struct device *dev, int enabled)
        spin_lock_irq(&s3c_rtc_pie_lock);
 
        if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
-               tmp = readb(s3c_rtc_base + S3C2410_RTCCON);
+               tmp = readw(s3c_rtc_base + S3C2410_RTCCON);
                tmp &= ~S3C64XX_RTCCON_TICEN;
 
                if (enabled)
@@ -171,8 +171,8 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
                goto retry_get_time;
        }
 
-       pr_debug("read time %02x.%02x.%02x %02x/%02x/%02x\n",
-                rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday,
+       pr_debug("read time %04d.%02d.%02d %02d:%02d:%02d\n",
+                1900 + rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday,
                 rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec);
 
        rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec);
@@ -185,7 +185,7 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
        rtc_tm->tm_year += 100;
        rtc_tm->tm_mon -= 1;
 
-       return 0;
+       return rtc_valid_tm(rtc_tm);
 }
 
 static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
@@ -193,8 +193,8 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
        void __iomem *base = s3c_rtc_base;
        int year = tm->tm_year - 100;
 
-       pr_debug("set time %02d.%02d.%02d %02d/%02d/%02d\n",
-                tm->tm_year, tm->tm_mon, tm->tm_mday,
+       pr_debug("set time %04d.%02d.%02d %02d:%02d:%02d\n",
+                1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
                 tm->tm_hour, tm->tm_min, tm->tm_sec);
 
        /* we get around y2k by simply not supporting it */
@@ -231,9 +231,9 @@ static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
 
        alrm->enabled = (alm_en & S3C2410_RTCALM_ALMEN) ? 1 : 0;
 
-       pr_debug("read alarm %02x %02x.%02x.%02x %02x/%02x/%02x\n",
+       pr_debug("read alarm %d, %04d.%02d.%02d %02d:%02d:%02d\n",
                 alm_en,
-                alm_tm->tm_year, alm_tm->tm_mon, alm_tm->tm_mday,
+                1900 + alm_tm->tm_year, alm_tm->tm_mon, alm_tm->tm_mday,
                 alm_tm->tm_hour, alm_tm->tm_min, alm_tm->tm_sec);
 
 
@@ -242,34 +242,34 @@ static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
        if (alm_en & S3C2410_RTCALM_SECEN)
                alm_tm->tm_sec = bcd2bin(alm_tm->tm_sec);
        else
-               alm_tm->tm_sec = 0xff;
+               alm_tm->tm_sec = -1;
 
        if (alm_en & S3C2410_RTCALM_MINEN)
                alm_tm->tm_min = bcd2bin(alm_tm->tm_min);
        else
-               alm_tm->tm_min = 0xff;
+               alm_tm->tm_min = -1;
 
        if (alm_en & S3C2410_RTCALM_HOUREN)
                alm_tm->tm_hour = bcd2bin(alm_tm->tm_hour);
        else
-               alm_tm->tm_hour = 0xff;
+               alm_tm->tm_hour = -1;
 
        if (alm_en & S3C2410_RTCALM_DAYEN)
                alm_tm->tm_mday = bcd2bin(alm_tm->tm_mday);
        else
-               alm_tm->tm_mday = 0xff;
+               alm_tm->tm_mday = -1;
 
        if (alm_en & S3C2410_RTCALM_MONEN) {
                alm_tm->tm_mon = bcd2bin(alm_tm->tm_mon);
                alm_tm->tm_mon -= 1;
        } else {
-               alm_tm->tm_mon = 0xff;
+               alm_tm->tm_mon = -1;
        }
 
        if (alm_en & S3C2410_RTCALM_YEAREN)
                alm_tm->tm_year = bcd2bin(alm_tm->tm_year);
        else
-               alm_tm->tm_year = 0xffff;
+               alm_tm->tm_year = -1;
 
        return 0;
 }
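
Replacing the 0xff/0xffff sentinels with -1 follows the RTC core convention that -1 marks an alarm field the hardware does not match on. A minimal userspace sketch that honors the convention:

#include <stdio.h>
#include <linux/rtc.h>

/* Sketch: print only the alarm fields the driver actually matches on;
 * -1 marks a field the hardware ignores. */
static void print_alarm(const struct rtc_wkalrm *alm)
{
	if (alm->time.tm_hour != -1)
		printf("hour: %d\n", alm->time.tm_hour);
	if (alm->time.tm_min != -1)
		printf("min:  %d\n", alm->time.tm_min);
	if (alm->time.tm_sec != -1)
		printf("sec:  %d\n", alm->time.tm_sec);
}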
@@ -280,10 +280,10 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
        void __iomem *base = s3c_rtc_base;
        unsigned int alrm_en;
 
-       pr_debug("s3c_rtc_setalarm: %d, %02x/%02x/%02x %02x.%02x.%02x\n",
+       pr_debug("s3c_rtc_setalarm: %d, %04d.%02d.%02d %02d:%02d:%02d\n",
                 alrm->enabled,
-                tm->tm_mday & 0xff, tm->tm_mon & 0xff, tm->tm_year & 0xff,
-                tm->tm_hour & 0xff, tm->tm_min & 0xff, tm->tm_sec);
+                1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
+                tm->tm_hour, tm->tm_min, tm->tm_sec);
 
 
        alrm_en = readb(base + S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN;
@@ -318,7 +318,7 @@ static int s3c_rtc_proc(struct device *dev, struct seq_file *seq)
        unsigned int ticnt;
 
        if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
-               ticnt = readb(s3c_rtc_base + S3C2410_RTCCON);
+               ticnt = readw(s3c_rtc_base + S3C2410_RTCCON);
                ticnt &= S3C64XX_RTCCON_TICEN;
        } else {
                ticnt = readb(s3c_rtc_base + S3C2410_TICNT);
@@ -379,7 +379,8 @@ static const struct rtc_class_ops s3c_rtcops = {
        .set_alarm      = s3c_rtc_setalarm,
        .irq_set_freq   = s3c_rtc_setfreq,
        .irq_set_state  = s3c_rtc_setpie,
-       .proc           = s3c_rtc_proc,
+       .proc           = s3c_rtc_proc,
+       .alarm_irq_enable = s3c_rtc_setaie,
 };
 
 static void s3c_rtc_enable(struct platform_device *pdev, int en)
@@ -391,11 +392,11 @@ static void s3c_rtc_enable(struct platform_device *pdev, int en)
                return;
 
        if (!en) {
-               tmp = readb(base + S3C2410_RTCCON);
+               tmp = readw(base + S3C2410_RTCCON);
                if (s3c_rtc_cpu_type == TYPE_S3C64XX)
                        tmp &= ~S3C64XX_RTCCON_TICEN;
                tmp &= ~S3C2410_RTCCON_RTCEN;
-               writeb(tmp, base + S3C2410_RTCCON);
+               writew(tmp, base + S3C2410_RTCCON);
 
                if (s3c_rtc_cpu_type == TYPE_S3C2410) {
                        tmp = readb(base + S3C2410_TICNT);
@@ -405,25 +406,28 @@ static void s3c_rtc_enable(struct platform_device *pdev, int en)
        } else {
                /* re-enable the device, and check it is ok */
 
-               if ((readb(base+S3C2410_RTCCON) & S3C2410_RTCCON_RTCEN) == 0){
+               if ((readw(base+S3C2410_RTCCON) & S3C2410_RTCCON_RTCEN) == 0) {
                        dev_info(&pdev->dev, "rtc disabled, re-enabling\n");
 
-                       tmp = readb(base + S3C2410_RTCCON);
-                       writeb(tmp|S3C2410_RTCCON_RTCEN, base+S3C2410_RTCCON);
+                       tmp = readw(base + S3C2410_RTCCON);
+                       writew(tmp | S3C2410_RTCCON_RTCEN,
+                               base + S3C2410_RTCCON);
                }
 
-               if ((readb(base + S3C2410_RTCCON) & S3C2410_RTCCON_CNTSEL)){
+               if ((readw(base + S3C2410_RTCCON) & S3C2410_RTCCON_CNTSEL)) {
                        dev_info(&pdev->dev, "removing RTCCON_CNTSEL\n");
 
-                       tmp = readb(base + S3C2410_RTCCON);
-                       writeb(tmp& ~S3C2410_RTCCON_CNTSEL, base+S3C2410_RTCCON);
+                       tmp = readw(base + S3C2410_RTCCON);
+                       writew(tmp & ~S3C2410_RTCCON_CNTSEL,
+                               base + S3C2410_RTCCON);
                }
 
-               if ((readb(base + S3C2410_RTCCON) & S3C2410_RTCCON_CLKRST)){
+               if ((readw(base + S3C2410_RTCCON) & S3C2410_RTCCON_CLKRST)) {
                        dev_info(&pdev->dev, "removing RTCCON_CLKRST\n");
 
-                       tmp = readb(base + S3C2410_RTCCON);
-                       writeb(tmp & ~S3C2410_RTCCON_CLKRST, base+S3C2410_RTCCON);
+                       tmp = readw(base + S3C2410_RTCCON);
+                       writew(tmp & ~S3C2410_RTCCON_CLKRST,
+                               base + S3C2410_RTCCON);
                }
        }
 }
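
The readb/writeb to readw/writew conversions in this file matter because S3C64XX_RTCCON_TICEN is defined as bit 8 in the platform headers, above what a byte access can see. A standalone sketch of the failure mode the wider accesses avoid:

#include <stdint.h>
#include <stdio.h>

#define S3C64XX_RTCCON_TICEN (1 << 8)	/* bit 8: above the low byte */

int main(void)
{
	uint16_t rtccon = S3C64XX_RTCCON_TICEN;	/* tick enabled in hardware */
	uint8_t  as_byte = (uint8_t)rtccon;	/* what a readb() would return */

	printf("readw sees TICEN: %d\n", !!(rtccon & S3C64XX_RTCCON_TICEN));
	printf("readb sees TICEN: %d\n", !!(as_byte & S3C64XX_RTCCON_TICEN));
	return 0;
}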
@@ -452,8 +456,8 @@ static int __devexit s3c_rtc_remove(struct platform_device *dev)
 static int __devinit s3c_rtc_probe(struct platform_device *pdev)
 {
        struct rtc_device *rtc;
+       struct rtc_time rtc_tm;
        struct resource *res;
-       unsigned int tmp, i;
        int ret;
 
        pr_debug("%s: probe=%p\n", __func__, pdev);
@@ -514,8 +518,8 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
 
        s3c_rtc_enable(pdev, 1);
 
-       pr_debug("s3c2410_rtc: RTCCON=%02x\n",
-                readb(s3c_rtc_base + S3C2410_RTCCON));
+       pr_debug("s3c2410_rtc: RTCCON=%02x\n",
+                readw(s3c_rtc_base + S3C2410_RTCCON));
 
        device_init_wakeup(&pdev->dev, 1);
 
@@ -534,11 +538,19 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
 
        /* Check RTC Time */
 
-       for (i = S3C2410_RTCSEC; i <= S3C2410_RTCYEAR; i += 0x4) {
-               tmp = readb(s3c_rtc_base + i);
+       s3c_rtc_gettime(NULL, &rtc_tm);
+
+       if (rtc_valid_tm(&rtc_tm)) {
+               rtc_tm.tm_year  = 100;
+               rtc_tm.tm_mon   = 0;
+               rtc_tm.tm_mday  = 1;
+               rtc_tm.tm_hour  = 0;
+               rtc_tm.tm_min   = 0;
+               rtc_tm.tm_sec   = 0;
+
+               s3c_rtc_settime(NULL, &rtc_tm);
 
-               if ((tmp & 0xf) > 0x9 || ((tmp >> 4) & 0xf) > 0x9)
-                       writeb(0, s3c_rtc_base + i);
+               dev_warn(&pdev->dev, "invalid RTC time, initializing to a default\n");
        }
 
        if (s3c_rtc_cpu_type == TYPE_S3C64XX)
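
The probe path now validates the clock with rtc_valid_tm() instead of scrubbing individual BCD registers, and resets an invalid clock to a sane default. Since tm_year counts from 1900 and tm_mon is zero-based, the values above correspond to 2000-01-01 00:00:00; a quick standalone check:

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct tm tm = { .tm_year = 100, .tm_mon = 0, .tm_mday = 1 };

	/* tm_year is years since 1900, tm_mon is 0-based */
	printf("%04d-%02d-%02d\n", tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday);
	return 0;
}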
@@ -578,7 +590,7 @@ static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state)
        /* save TICNT for anyone using periodic interrupts */
        ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT);
        if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
-               ticnt_en_save = readb(s3c_rtc_base + S3C2410_RTCCON);
+               ticnt_en_save = readw(s3c_rtc_base + S3C2410_RTCCON);
                ticnt_en_save &= S3C64XX_RTCCON_TICEN;
        }
        s3c_rtc_enable(pdev, 0);
@@ -596,8 +608,8 @@ static int s3c_rtc_resume(struct platform_device *pdev)
        s3c_rtc_enable(pdev, 1);
        writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT);
        if (s3c_rtc_cpu_type == TYPE_S3C64XX && ticnt_en_save) {
-               tmp = readb(s3c_rtc_base + S3C2410_RTCCON);
-               writeb(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON);
+               tmp = readw(s3c_rtc_base + S3C2410_RTCCON);
+               writew(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON);
        }
 
        if (device_may_wakeup(&pdev->dev))
index 42e303ff862a43199172f8479eedecfb1bc11394..0e6aa3d96a4246f7c05b0274a2295a3275d6d2ac 100644 (file)
@@ -697,9 +697,9 @@ fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
        struct inode *inode = file->f_path.dentry->d_inode;
        int fbidx = iminor(inode);
        struct fb_info *info = registered_fb[fbidx];
-       u32 *buffer, *dst;
-       u32 __iomem *src;
-       int c, i, cnt = 0, err = 0;
+       u8 *buffer, *dst;
+       u8 __iomem *src;
+       int c, cnt = 0, err = 0;
        unsigned long total_size;
 
        if (!info || ! info->screen_base)
@@ -730,7 +730,7 @@ fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
        if (!buffer)
                return -ENOMEM;
 
-       src = (u32 __iomem *) (info->screen_base + p);
+       src = (u8 __iomem *) (info->screen_base + p);
 
        if (info->fbops->fb_sync)
                info->fbops->fb_sync(info);
@@ -738,17 +738,9 @@ fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
        while (count) {
                c  = (count > PAGE_SIZE) ? PAGE_SIZE : count;
                dst = buffer;
-               for (i = c >> 2; i--; )
-                       *dst++ = fb_readl(src++);
-               if (c & 3) {
-                       u8 *dst8 = (u8 *) dst;
-                       u8 __iomem *src8 = (u8 __iomem *) src;
-
-                       for (i = c & 3; i--;)
-                               *dst8++ = fb_readb(src8++);
-
-                       src = (u32 __iomem *) src8;
-               }
+               fb_memcpy_fromfb(dst, src, c);
+               dst += c;
+               src += c;
 
                if (copy_to_user(buf, buffer, c)) {
                        err = -EFAULT;
@@ -772,9 +764,9 @@ fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
        struct inode *inode = file->f_path.dentry->d_inode;
        int fbidx = iminor(inode);
        struct fb_info *info = registered_fb[fbidx];
-       u32 *buffer, *src;
-       u32 __iomem *dst;
-       int c, i, cnt = 0, err = 0;
+       u8 *buffer, *src;
+       u8 __iomem *dst;
+       int c, cnt = 0, err = 0;
        unsigned long total_size;
 
        if (!info || !info->screen_base)
@@ -811,7 +803,7 @@ fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
        if (!buffer)
                return -ENOMEM;
 
-       dst = (u32 __iomem *) (info->screen_base + p);
+       dst = (u8 __iomem *) (info->screen_base + p);
 
        if (info->fbops->fb_sync)
                info->fbops->fb_sync(info);
@@ -825,19 +817,9 @@ fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
                        break;
                }
 
-               for (i = c >> 2; i--; )
-                       fb_writel(*src++, dst++);
-
-               if (c & 3) {
-                       u8 *src8 = (u8 *) src;
-                       u8 __iomem *dst8 = (u8 __iomem *) dst;
-
-                       for (i = c & 3; i--; )
-                               fb_writeb(*src8++, dst8++);
-
-                       dst = (u32 __iomem *) dst8;
-               }
-
+               fb_memcpy_tofb(dst, src, c);
+               dst += c;
+               src += c;
                *ppos += c;
                buf += c;
                cnt += c;
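
Both fb_read() and fb_write() now delegate to fb_memcpy_fromfb()/fb_memcpy_tofb(), letting each architecture choose the access width instead of the open-coded 32-bit loop with a byte tail. On most architectures these helpers are thin aliases in <linux/fb.h>, sketched below (the sparc variants differ):

/* Typical (non-sparc) definitions from <linux/fb.h>, shown as a sketch;
 * architectures with special framebuffer apertures override them. */
#define fb_memcpy_fromfb	memcpy_fromio
#define fb_memcpy_tofb		memcpy_toio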
@@ -877,13 +859,13 @@ fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var)
 
        if ((err = info->fbops->fb_pan_display(var, info)))
                return err;
-        info->var.xoffset = var->xoffset;
-        info->var.yoffset = var->yoffset;
-        if (var->vmode & FB_VMODE_YWRAP)
-                info->var.vmode |= FB_VMODE_YWRAP;
-        else
-                info->var.vmode &= ~FB_VMODE_YWRAP;
-        return 0;
+       info->var.xoffset = var->xoffset;
+       info->var.yoffset = var->yoffset;
+       if (var->vmode & FB_VMODE_YWRAP)
+               info->var.vmode |= FB_VMODE_YWRAP;
+       else
+               info->var.vmode &= ~FB_VMODE_YWRAP;
+       return 0;
 }
 
 static int fb_check_caps(struct fb_info *info, struct fb_var_screeninfo *var,
index ca3355e430bfd18ac5e55897fd4cab633d3772f8..933899dca33a9ff4bd4cba5674ec2880a6a278c9 100644 (file)
@@ -1143,8 +1143,10 @@ static int __devinit gbefb_probe(struct platform_device *p_dev)
                return -ENOMEM;
 
 #ifndef MODULE
-       if (fb_get_options("gbefb", &options))
-               return -ENODEV;
+       if (fb_get_options("gbefb", &options)) {
+               ret = -ENODEV;
+               goto out_release_framebuffer;
+       }
        gbefb_setup(options);
 #endif
 
index f9fa0fd002922ba91f10e8508c8093817ad8fef6..1717623aabc0d3ff8764557757605badea859c2d 100644 (file)
@@ -869,12 +869,9 @@ static int MGAG100_preinit(struct matrox_fb_info *minfo)
        minfo->capable.plnwt = minfo->devflags.accelerator == FB_ACCEL_MATROX_MGAG100
                        ? minfo->devflags.sgram : 1;
 
-#ifdef CONFIG_FB_MATROX_G
        if (minfo->devflags.g450dac) {
                minfo->outputs[0].output = &g450out;
-       } else
-#endif
-       {
+       } else {
                minfo->outputs[0].output = &m1064;
        }
        minfo->outputs[0].src = minfo->outputs[0].default_src;
index 1e3e8f19783e30f408ca82a480b92832ee437423..31b8f67477b7957b8e193f757c4821e810b685a3 100644 (file)
@@ -280,7 +280,7 @@ static int matroxfb_PLL_mavenclock(const struct matrox_pll_features2* pll,
        return fxtal * (*feed) / (*in) * ctl->den;
 }
 
-static unsigned int matroxfb_mavenclock(const struct matrox_pll_ctl* ctl,
+static int matroxfb_mavenclock(const struct matrox_pll_ctl *ctl,
                unsigned int htotal, unsigned int vtotal,
                unsigned int* in, unsigned int* feed, unsigned int* post,
                unsigned int* htotal2) {
index 2ffb34af4c59ed2c7ea342c1a627aa1b0a5f1832..87785c215a52c6e94ca9a0d799f1f25ed89c654d 100644 (file)
@@ -1590,7 +1590,7 @@ static int blizzard_init(struct omapfb_device *fbdev, int ext_mode,
        blizzard.auto_update_window.width = fbdev->panel->x_res;
        blizzard.auto_update_window.height = fbdev->panel->y_res;
        blizzard.auto_update_window.out_x = 0;
-       blizzard.auto_update_window.out_x = 0;
+       blizzard.auto_update_window.out_y = 0;
        blizzard.auto_update_window.out_width = fbdev->panel->x_res;
        blizzard.auto_update_window.out_height = fbdev->panel->y_res;
        blizzard.auto_update_window.format = 0;
index ed371c868b3ac6d7ab0c171689415092d55dc9b7..b16e6138fdd46a971ee20c542a65a32a634bf0c4 100644 (file)
@@ -181,6 +181,15 @@ void savagefb_create_i2c_busses(struct fb_info *info)
                par->chan.algo.getscl = prosavage_gpio_getscl;
                break;
        case FB_ACCEL_SAVAGE4:
+               par->chan.reg = CR_SERIAL1;
+               if (par->pcidev->revision > 1 && !(VGArCR(0xa6, par) & 0x40))
+                       par->chan.reg = CR_SERIAL2;
+               par->chan.ioaddr      = par->mmio.vbase;
+               par->chan.algo.setsda = prosavage_gpio_setsda;
+               par->chan.algo.setscl = prosavage_gpio_setscl;
+               par->chan.algo.getsda = prosavage_gpio_getsda;
+               par->chan.algo.getscl = prosavage_gpio_getscl;
+               break;
        case FB_ACCEL_SAVAGE2000:
                par->chan.reg         = 0xff20;
                par->chan.ioaddr      = par->mmio.vbase;
index d496adb0f8322f2208b7a1d9b7352ba02b3caad2..96f01ee2a412f1df7aadc330169525c5df644ed8 100644 (file)
@@ -5,5 +5,5 @@
 obj-$(CONFIG_FB_VIA) += viafb.o
 
 viafb-y        :=viafbdev.o hw.o via_i2c.o dvi.o lcd.o ioctl.o accel.o \
-       via_utility.o vt1636.o global.o tblDPASetting.o viamode.o tbl1636.o \
+       via_utility.o vt1636.o global.o tblDPASetting.o viamode.o \
        via-core.o via-gpio.o via_modesetting.o
index e44893ea590d9b1b2194a621d567b38b78a70364..3c969cdef0af90ce8ed7f1450e5cfc9e2f6cb6d5 100644 (file)
@@ -283,11 +283,12 @@ static int hw_bitblt_2(void __iomem *engine, u8 op, u32 width, u32 height,
                writel(tmp, engine + 0x1C);
        }
 
-       if (op != VIA_BITBLT_COLOR)
+       if (op == VIA_BITBLT_FILL) {
+               writel(fg_color, engine + 0x58);
+       } else if (op == VIA_BITBLT_MONO) {
                writel(fg_color, engine + 0x4C);
-
-       if (op == VIA_BITBLT_MONO)
                writel(bg_color, engine + 0x50);
+       }
 
        if (op == VIA_BITBLT_FILL)
                ge_cmd |= fill_rop << 24 | 0x00002000 | 0x00000001;
@@ -314,13 +315,11 @@ static int hw_bitblt_2(void __iomem *engine, u8 op, u32 width, u32 height,
        return 0;
 }
 
-int viafb_init_engine(struct fb_info *info)
+int viafb_setup_engine(struct fb_info *info)
 {
        struct viafb_par *viapar = info->par;
        void __iomem *engine;
-       int highest_reg, i;
-       u32 vq_start_addr, vq_end_addr, vq_start_low, vq_end_low, vq_high,
-               vq_len, chip_name = viapar->shared->chip_info.gfx_chip_name;
+       u32 chip_name = viapar->shared->chip_info.gfx_chip_name;
 
        engine = viapar->shared->vdev->engine_mmio;
        if (!engine) {
@@ -329,18 +328,6 @@ int viafb_init_engine(struct fb_info *info)
                return -ENOMEM;
        }
 
-       /* Initialize registers to reset the 2D engine */
-       switch (viapar->shared->chip_info.twod_engine) {
-       case VIA_2D_ENG_M1:
-               highest_reg = 0x5c;
-               break;
-       default:
-               highest_reg = 0x40;
-               break;
-       }
-       for (i = 0; i <= highest_reg; i += 4)
-               writel(0x0, engine + i);
-
        switch (chip_name) {
        case UNICHROME_CLE266:
        case UNICHROME_K400:
@@ -356,6 +343,7 @@ int viafb_init_engine(struct fb_info *info)
                break;
        case UNICHROME_VX800:
        case UNICHROME_VX855:
+       case UNICHROME_VX900:
                viapar->shared->hw_bitblt = hw_bitblt_2;
                break;
        default:
@@ -386,12 +374,36 @@ int viafb_init_engine(struct fb_info *info)
        viapar->shared->vdev->camera_fbmem_offset = viapar->fbmem_free;
 #endif
 
+       viafb_reset_engine(viapar);
+       return 0;
+}
+
+void viafb_reset_engine(struct viafb_par *viapar)
+{
+       void __iomem *engine = viapar->shared->vdev->engine_mmio;
+       int highest_reg, i;
+       u32 vq_start_addr, vq_end_addr, vq_start_low, vq_end_low, vq_high,
+               vq_len, chip_name = viapar->shared->chip_info.gfx_chip_name;
+
+       /* Initialize registers to reset the 2D engine */
+       switch (viapar->shared->chip_info.twod_engine) {
+       case VIA_2D_ENG_M1:
+               highest_reg = 0x5c;
+               break;
+       default:
+               highest_reg = 0x40;
+               break;
+       }
+       for (i = 0; i <= highest_reg; i += 4)
+               writel(0x0, engine + i);
+
        /* Init AGP and VQ regs */
        switch (chip_name) {
        case UNICHROME_K8M890:
        case UNICHROME_P4M900:
        case UNICHROME_VX800:
        case UNICHROME_VX855:
+       case UNICHROME_VX900:
                writel(0x00100000, engine + VIA_REG_CR_TRANSET);
                writel(0x680A0000, engine + VIA_REG_CR_TRANSPACE);
                writel(0x02000000, engine + VIA_REG_CR_TRANSPACE);
@@ -428,6 +440,7 @@ int viafb_init_engine(struct fb_info *info)
        case UNICHROME_P4M900:
        case UNICHROME_VX800:
        case UNICHROME_VX855:
+       case UNICHROME_VX900:
                vq_start_low |= 0x20000000;
                vq_end_low |= 0x20000000;
                vq_high |= 0x20000000;
@@ -473,7 +486,7 @@ int viafb_init_engine(struct fb_info *info)
        writel(0x0, engine + VIA_REG_CURSOR_ORG);
        writel(0x0, engine + VIA_REG_CURSOR_BG);
        writel(0x0, engine + VIA_REG_CURSOR_FG);
-       return 0;
+       return;
 }
 
 void viafb_show_hw_cursor(struct fb_info *info, int Status)
index 2c122d2923654a1874177699cc703a782782dd4a..79d5e10cc83575fba00d0b8adb2ec2e290d3cdc8 100644 (file)
 #define VIA_BITBLT_MONO                2
 #define VIA_BITBLT_FILL                3
 
-int viafb_init_engine(struct fb_info *info);
+int viafb_setup_engine(struct fb_info *info);
+void viafb_reset_engine(struct viafb_par *viapar);
 void viafb_show_hw_cursor(struct fb_info *info, int Status);
 void viafb_wait_engine_idle(struct fb_info *info);
 
index ef1f3de2e052824c52babc999d661ae87cf46149..48f1342897bd9b45389ba3e61cca1351fa024524 100644 (file)
@@ -71,6 +71,9 @@
 #define     UNICHROME_VX855         12
 #define     UNICHROME_VX855_DID     0x5122
 
+#define     UNICHROME_VX900         13
+#define     UNICHROME_VX900_DID     0x7122
+
 /**************************************************/
 /* Definition TMDS Trasmitter Information         */
 /**************************************************/
index 39b040bb3817f775cc45c4d73407464e366c296c..84e21b39dd0bd8137f592241e9122538d6e7cbbe 100644 (file)
 static void tmds_register_write(int index, u8 data);
 static int tmds_register_read(int index);
 static int tmds_register_read_bytes(int index, u8 *buff, int buff_len);
-static void dvi_get_panel_size_from_DDCv1(struct tmds_chip_information
-       *tmds_chip, struct tmds_setting_information *tmds_setting);
-static void dvi_get_panel_size_from_DDCv2(struct tmds_chip_information
-       *tmds_chip, struct tmds_setting_information *tmds_setting);
+static void __devinit dvi_get_panel_size_from_DDCv1(
+       struct tmds_chip_information *tmds_chip,
+       struct tmds_setting_information *tmds_setting);
+static void __devinit dvi_get_panel_size_from_DDCv2(
+       struct tmds_chip_information *tmds_chip,
+       struct tmds_setting_information *tmds_setting);
 static int viafb_dvi_query_EDID(void);
 
 static int check_tmds_chip(int device_id_subaddr, int device_id)
@@ -39,7 +41,7 @@ static int check_tmds_chip(int device_id_subaddr, int device_id)
                return FAIL;
 }
 
-void viafb_init_dvi_size(struct tmds_chip_information *tmds_chip,
+void __devinit viafb_init_dvi_size(struct tmds_chip_information *tmds_chip,
        struct tmds_setting_information *tmds_setting)
 {
        DEBUG_MSG(KERN_INFO "viafb_init_dvi_size()\n");
@@ -60,7 +62,7 @@ void viafb_init_dvi_size(struct tmds_chip_information *tmds_chip,
        return;
 }
 
-int viafb_tmds_trasmitter_identify(void)
+int __devinit viafb_tmds_trasmitter_identify(void)
 {
        unsigned char sr2a = 0, sr1e = 0, sr3e = 0;
 
@@ -208,8 +210,6 @@ void viafb_dvi_set_mode(struct VideoModeTable *mode, int mode_bpp,
                }
        }
        viafb_fill_crtc_timing(pDviTiming, mode, mode_bpp / 8, set_iga);
-       viafb_set_output_path(DEVICE_DVI, set_iga,
-                       viaparinfo->chip_info->tmds_chip_info.output_interface);
 }
 
 /* Sense DVI Connector */
@@ -313,8 +313,9 @@ static int viafb_dvi_query_EDID(void)
 }
 
 /* Get Panel Size Using EDID1 Table */
-static void dvi_get_panel_size_from_DDCv1(struct tmds_chip_information
-       *tmds_chip, struct tmds_setting_information *tmds_setting)
+static void __devinit dvi_get_panel_size_from_DDCv1(
+       struct tmds_chip_information *tmds_chip,
+       struct tmds_setting_information *tmds_setting)
 {
        int i, max_h = 0, tmp, restore;
        unsigned char rData;
@@ -418,8 +419,9 @@ static void dvi_get_panel_size_from_DDCv1(struct tmds_chip_information
 }
 
 /* Get Panel Size Using EDID2 Table */
-static void dvi_get_panel_size_from_DDCv2(struct tmds_chip_information
-       *tmds_chip, struct tmds_setting_information *tmds_setting)
+static void __devinit dvi_get_panel_size_from_DDCv2(
+       struct tmds_chip_information *tmds_chip,
+       struct tmds_setting_information *tmds_setting)
 {
        int restore;
        unsigned char R_Buffer[2];
@@ -467,26 +469,6 @@ static void dvi_get_panel_size_from_DDCv2(struct tmds_chip_information
 /* If disabling DVI, turn off the pad */
 void viafb_dvi_disable(void)
 {
-       if (viaparinfo->chip_info->
-               tmds_chip_info.output_interface == INTERFACE_DVP0)
-               viafb_write_reg(SR1E, VIASR,
-               viafb_read_reg(VIASR, SR1E) & (~0xC0));
-
-       if (viaparinfo->chip_info->
-               tmds_chip_info.output_interface == INTERFACE_DVP1)
-               viafb_write_reg(SR1E, VIASR,
-               viafb_read_reg(VIASR, SR1E) & (~0x30));
-
-       if (viaparinfo->chip_info->
-               tmds_chip_info.output_interface == INTERFACE_DFP_HIGH)
-               viafb_write_reg(SR2A, VIASR,
-               viafb_read_reg(VIASR, SR2A) & (~0x0C));
-
-       if (viaparinfo->chip_info->
-               tmds_chip_info.output_interface == INTERFACE_DFP_LOW)
-               viafb_write_reg(SR2A, VIASR,
-               viafb_read_reg(VIASR, SR2A) & (~0x03));
-
        if (viaparinfo->chip_info->
                tmds_chip_info.output_interface == INTERFACE_TMDS)
                /* Turn off TMDS power. */
@@ -494,38 +476,101 @@ void viafb_dvi_disable(void)
                viafb_read_reg(VIACR, CRD2) | 0x08);
 }
 
+static void dvi_patch_skew_dvp0(void)
+{
+       /* Reset data driving first: */
+       viafb_write_reg_mask(SR1B, VIASR, 0, BIT1);
+       viafb_write_reg_mask(SR2A, VIASR, 0, BIT4);
+
+       switch (viaparinfo->chip_info->gfx_chip_name) {
+       case UNICHROME_P4M890:
+               {
+                       if ((viaparinfo->tmds_setting_info->h_active == 1600) &&
+                               (viaparinfo->tmds_setting_info->v_active ==
+                               1200))
+                               viafb_write_reg_mask(CR96, VIACR, 0x03,
+                                              BIT0 + BIT1 + BIT2);
+                       else
+                               viafb_write_reg_mask(CR96, VIACR, 0x07,
+                                              BIT0 + BIT1 + BIT2);
+                       break;
+               }
+
+       case UNICHROME_P4M900:
+               {
+                       viafb_write_reg_mask(CR96, VIACR, 0x07,
+                                      BIT0 + BIT1 + BIT2 + BIT3);
+                       viafb_write_reg_mask(SR1B, VIASR, 0x02, BIT1);
+                       viafb_write_reg_mask(SR2A, VIASR, 0x10, BIT4);
+                       break;
+               }
+
+       default:
+               {
+                       break;
+               }
+       }
+}
+
+static void dvi_patch_skew_dvp_low(void)
+{
+       switch (viaparinfo->chip_info->gfx_chip_name) {
+       case UNICHROME_K8M890:
+               {
+                       viafb_write_reg_mask(CR99, VIACR, 0x03, BIT0 + BIT1);
+                       break;
+               }
+
+       case UNICHROME_P4M900:
+               {
+                       viafb_write_reg_mask(CR99, VIACR, 0x08,
+                                      BIT0 + BIT1 + BIT2 + BIT3);
+                       break;
+               }
+
+       case UNICHROME_P4M890:
+               {
+                       viafb_write_reg_mask(CR99, VIACR, 0x0F,
+                                      BIT0 + BIT1 + BIT2 + BIT3);
+                       break;
+               }
+
+       default:
+               {
+                       break;
+               }
+       }
+}
+
 /* If enabling DVI, turn on the pad */
 void viafb_dvi_enable(void)
 {
        u8 data;
 
-       if (viaparinfo->chip_info->
-               tmds_chip_info.output_interface == INTERFACE_DVP0) {
-               viafb_write_reg(SR1E, VIASR,
-                       viafb_read_reg(VIASR, SR1E) | 0xC0);
+       switch (viaparinfo->chip_info->tmds_chip_info.output_interface) {
+       case INTERFACE_DVP0:
+               viafb_write_reg_mask(CR6B, VIACR, 0x01, BIT0);
+               viafb_write_reg_mask(CR6C, VIACR, 0x21, BIT0 + BIT5);
+               dvi_patch_skew_dvp0();
                if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266)
                        tmds_register_write(0x88, 0x3b);
                else
                        /*clear CR91[5] to direct on display period
                           in the secondary display path */
-                       viafb_write_reg(CR91, VIACR,
-                       viafb_read_reg(VIACR, CR91) & 0xDF);
-       }
+                       via_write_reg_mask(VIACR, 0x91, 0x00, 0x20);
+               break;
 
-       if (viaparinfo->chip_info->
-               tmds_chip_info.output_interface == INTERFACE_DVP1) {
-               viafb_write_reg(SR1E, VIASR,
-                       viafb_read_reg(VIASR, SR1E) | 0x30);
+       case INTERFACE_DVP1:
+               if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266)
+                       viafb_write_reg_mask(CR93, VIACR, 0x21, BIT0 + BIT5);
 
                /*fix dvi can't be enabled with MB VT5718C4 - Al Zhang */
-               if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266) {
+               if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266)
                        tmds_register_write(0x88, 0x3b);
-               } else {
+               else
                        /*clear CR91[5] to direct on display period
                          in the secondary diplay path */
-                       viafb_write_reg(CR91, VIACR,
-                       viafb_read_reg(VIACR, CR91) & 0xDF);
-               }
+                       via_write_reg_mask(VIACR, 0x91, 0x00, 0x20);
 
                /*fix DVI cannot enable on EPIA-M board */
                if (viafb_platform_epia_dvi == 1) {
@@ -537,36 +582,40 @@ void viafb_dvi_enable(void)
                                else
                                        data = 0x37;
                                viafb_i2c_writebyte(viaparinfo->chip_info->
-                                                      tmds_chip_info.i2c_port,
-                                                   viaparinfo->chip_info->
-                                                      tmds_chip_info.tmds_chip_slave_addr,
-                                                   0x08, data);
+                                       tmds_chip_info.i2c_port,
+                                       viaparinfo->chip_info->
+                                       tmds_chip_info.tmds_chip_slave_addr,
+                                       0x08, data);
                        }
                }
-       }
+               break;
 
-       if (viaparinfo->chip_info->
-               tmds_chip_info.output_interface == INTERFACE_DFP_HIGH) {
-               viafb_write_reg(SR2A, VIASR,
-                       viafb_read_reg(VIASR, SR2A) | 0x0C);
-               viafb_write_reg(CR91, VIACR,
-                       viafb_read_reg(VIACR, CR91) & 0xDF);
-       }
+       case INTERFACE_DFP_HIGH:
+               if (viaparinfo->chip_info->gfx_chip_name != UNICHROME_CLE266)
+                       via_write_reg_mask(VIACR, CR97, 0x03, 0x03);
 
-       if (viaparinfo->chip_info->
-               tmds_chip_info.output_interface == INTERFACE_DFP_LOW) {
-               viafb_write_reg(SR2A, VIASR,
-                       viafb_read_reg(VIASR, SR2A) | 0x03);
-               viafb_write_reg(CR91, VIACR,
-                       viafb_read_reg(VIACR, CR91) & 0xDF);
-       }
-       if (viaparinfo->chip_info->
-               tmds_chip_info.output_interface == INTERFACE_TMDS) {
+               via_write_reg_mask(VIACR, 0x91, 0x00, 0x20);
+               break;
+
+       case INTERFACE_DFP_LOW:
+               if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266)
+                       break;
+
+               dvi_patch_skew_dvp_low();
+               via_write_reg_mask(VIACR, 0x91, 0x00, 0x20);
+               break;
+
+       case INTERFACE_TMDS:
                /* Turn on Display period in the panel path. */
                viafb_write_reg_mask(CR91, VIACR, 0, BIT7);
 
                /* Turn on TMDS power. */
                viafb_write_reg_mask(CRD2, VIACR, 0, BIT3);
+               break;
        }
-}
 
+       if (viaparinfo->tmds_setting_info->iga_path == IGA2) {
+               /* Disable LCD Scaling */
+               viafb_write_reg_mask(CR79, VIACR, 0x00, BIT0);
+       }
+}
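
The refactor above trades open-coded read-modify-write sequences (viafb_read_reg() followed by viafb_write_reg() with hand-ORed bits) for masked writes, so each call names exactly which bits it touches. A minimal userspace sketch of the masked-write helper, with a plain array standing in for the indexed VIACR/VIASR register banks (the array is an illustration, not the driver's real I/O path):

#include <stdio.h>
#include <stdint.h>

static uint8_t regs[256];	/* stand-in for an indexed register bank */

/* Masked read-modify-write: only the bits set in 'mask' change. */
static void write_reg_mask(uint8_t index, uint8_t data, uint8_t mask)
{
	regs[index] = (regs[index] & ~mask) | (data & mask);
}

int main(void)
{
	regs[0x91] = 0xFF;
	/* Clear CR91[5] only, as the DVI enable path above does. */
	write_reg_mask(0x91, 0x00, 0x20);
	printf("CR91 = 0x%02X\n", regs[0x91]);	/* 0xDF */
	return 0;
}
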
index 0dffcfd395f301bd186abcfd6820c417978f7f68..2c525c0c1adb157ca3cd171bba9a45e997ab4894 100644 (file)
@@ -56,8 +56,8 @@
 int viafb_dvi_sense(void);
 void viafb_dvi_disable(void);
 void viafb_dvi_enable(void);
-int viafb_tmds_trasmitter_identify(void);
-void viafb_init_dvi_size(struct tmds_chip_information *tmds_chip,
+int __devinit viafb_tmds_trasmitter_identify(void);
+void __devinit viafb_init_dvi_size(struct tmds_chip_information *tmds_chip,
        struct tmds_setting_information *tmds_setting);
 void viafb_dvi_set_mode(struct VideoModeTable *videoMode, int mode_bpp,
        int set_iga);
index 28221a062dda0e86366808995d7dfb36ecac9123..38ef5ac669539dc69191f65eea8f6d7923c5ebac 100644 (file)
@@ -48,7 +48,6 @@
 #include "via_utility.h"
 #include "vt1636.h"
 #include "tblDPASetting.h"
-#include "tbl1636.h"
 
 /* External struct*/
 
index 7dcb4d5bb9c3aa18dbb9e8517cd27e2c6a31d1c9..36d73f940d8b1e9f1affa72dc377d3e7ccf767f2 100644 (file)
@@ -718,16 +718,20 @@ static struct rgbLUT palLUT_table[] = {
                                                                     0x00}
 };
 
-static void set_crt_output_path(int set_iga);
-static void dvi_patch_skew_dvp0(void);
-static void dvi_patch_skew_dvp1(void);
-static void dvi_patch_skew_dvp_low(void);
-static void set_dvi_output_path(int set_iga, int output_interface);
-static void set_lcd_output_path(int set_iga, int output_interface);
+static struct via_device_mapping device_mapping[] = {
+       {VIA_LDVP0, "LDVP0"},
+       {VIA_LDVP1, "LDVP1"},
+       {VIA_DVP0, "DVP0"},
+       {VIA_CRT, "CRT"},
+       {VIA_DVP1, "DVP1"},
+       {VIA_LVDS1, "LVDS1"},
+       {VIA_LVDS2, "LVDS2"}
+};
+
 static void load_fix_bit_crtc_reg(void);
-static void init_gfx_chip_info(int chip_type);
-static void init_tmds_chip_info(void);
-static void init_lvds_chip_info(void);
+static void __devinit init_gfx_chip_info(int chip_type);
+static void __devinit init_tmds_chip_info(void);
+static void __devinit init_lvds_chip_info(void);
 static void device_screen_off(void);
 static void device_screen_on(void);
 static void set_display_channel(void);
@@ -755,6 +759,66 @@ void write_dac_reg(u8 index, u8 r, u8 g, u8 b)
        outb(b, LUT_DATA);
 }
 
+static u32 get_dvi_devices(int output_interface)
+{
+       switch (output_interface) {
+       case INTERFACE_DVP0:
+               return VIA_DVP0 | VIA_LDVP0;
+
+       case INTERFACE_DVP1:
+               if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266)
+                       return VIA_LDVP1;
+               else
+                       return VIA_DVP1;
+
+       case INTERFACE_DFP_HIGH:
+               if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266)
+                       return 0;
+               else
+                       return VIA_LVDS2 | VIA_DVP0;
+
+       case INTERFACE_DFP_LOW:
+               if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266)
+                       return 0;
+               else
+                       return VIA_DVP1 | VIA_LVDS1;
+
+       case INTERFACE_TMDS:
+               return VIA_LVDS1;
+       }
+
+       return 0;
+}
+
+static u32 get_lcd_devices(int output_interface)
+{
+       switch (output_interface) {
+       case INTERFACE_DVP0:
+               return VIA_DVP0;
+
+       case INTERFACE_DVP1:
+               return VIA_DVP1;
+
+       case INTERFACE_DFP_HIGH:
+               return VIA_LVDS2 | VIA_DVP0;
+
+       case INTERFACE_DFP_LOW:
+               return VIA_LVDS1 | VIA_DVP1;
+
+       case INTERFACE_DFP:
+               return VIA_LVDS1 | VIA_LVDS2;
+
+       case INTERFACE_LVDS0:
+       case INTERFACE_LVDS0LVDS1:
+               return VIA_LVDS1;
+
+       case INTERFACE_LVDS1:
+               return VIA_LVDS2;
+       }
+
+       return 0;
+}
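
get_dvi_devices() and get_lcd_devices() translate the legacy output_interface values into bitmasks of physical outputs, which is what lets the rest of this series reason about device sets instead of single paths. A compact sketch of the mapping idea, reusing the VIA_* bit values added to hw.h later in this diff (the IF_* enum here is hypothetical, for illustration only):

#include <stdio.h>
#include <stdint.h>

/* Device bits matching the VIA_* defines added to hw.h in this diff. */
#define VIA_DVP0  0x00000004
#define VIA_DVP1  0x00000020
#define VIA_LVDS1 0x00000040
#define VIA_LVDS2 0x00000080

/* Hypothetical interface enum, for illustration only. */
enum { IF_DVP0, IF_DVP1, IF_DFP_HIGH, IF_DFP_LOW };

static uint32_t lcd_devices(int output_interface)
{
	switch (output_interface) {
	case IF_DVP0:     return VIA_DVP0;
	case IF_DVP1:     return VIA_DVP1;
	case IF_DFP_HIGH: return VIA_LVDS2 | VIA_DVP0;
	case IF_DFP_LOW:  return VIA_LVDS1 | VIA_DVP1;
	}
	return 0;
}

int main(void)
{
	/* DFP_LOW drives both the LVDS1 pads and the DVP1 port. */
	printf("DFP_LOW -> 0x%02X\n", (unsigned)lcd_devices(IF_DFP_LOW));
	return 0;
}
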
+
 /*Set IGA path for each device*/
 void viafb_set_iga_path(void)
 {
@@ -821,6 +885,48 @@ void viafb_set_iga_path(void)
                        viaparinfo->tmds_setting_info->iga_path = IGA1;
                }
        }
+
+       viaparinfo->shared->iga1_devices = 0;
+       viaparinfo->shared->iga2_devices = 0;
+       if (viafb_CRT_ON) {
+               if (viaparinfo->crt_setting_info->iga_path == IGA1)
+                       viaparinfo->shared->iga1_devices |= VIA_CRT;
+               else
+                       viaparinfo->shared->iga2_devices |= VIA_CRT;
+       }
+
+       if (viafb_DVI_ON) {
+               if (viaparinfo->tmds_setting_info->iga_path == IGA1)
+                       viaparinfo->shared->iga1_devices |= get_dvi_devices(
+                               viaparinfo->chip_info->
+                               tmds_chip_info.output_interface);
+               else
+                       viaparinfo->shared->iga2_devices |= get_dvi_devices(
+                               viaparinfo->chip_info->
+                               tmds_chip_info.output_interface);
+       }
+
+       if (viafb_LCD_ON) {
+               if (viaparinfo->lvds_setting_info->iga_path == IGA1)
+                       viaparinfo->shared->iga1_devices |= get_lcd_devices(
+                               viaparinfo->chip_info->
+                               lvds_chip_info.output_interface);
+               else
+                       viaparinfo->shared->iga2_devices |= get_lcd_devices(
+                               viaparinfo->chip_info->
+                               lvds_chip_info.output_interface);
+       }
+
+       if (viafb_LCD2_ON) {
+               if (viaparinfo->lvds_setting_info2->iga_path == IGA1)
+                       viaparinfo->shared->iga1_devices |= get_lcd_devices(
+                               viaparinfo->chip_info->
+                               lvds_chip_info2.output_interface);
+               else
+                       viaparinfo->shared->iga2_devices |= get_lcd_devices(
+                               viaparinfo->chip_info->
+                               lvds_chip_info2.output_interface);
+       }
 }
 
 static void set_color_register(u8 index, u8 red, u8 green, u8 blue)
@@ -844,295 +950,266 @@ void viafb_set_secondary_color_register(u8 index, u8 red, u8 green, u8 blue)
        set_color_register(index, red, green, blue);
 }
 
-void viafb_set_output_path(int device, int set_iga, int output_interface)
+static void set_source_common(u8 index, u8 offset, u8 iga)
 {
-       switch (device) {
-       case DEVICE_CRT:
-               set_crt_output_path(set_iga);
-               break;
-       case DEVICE_DVI:
-               set_dvi_output_path(set_iga, output_interface);
+       u8 value, mask = 1 << offset;
+
+       switch (iga) {
+       case IGA1:
+               value = 0x00;
                break;
-       case DEVICE_LCD:
-               set_lcd_output_path(set_iga, output_interface);
+       case IGA2:
+               value = mask;
                break;
+       default:
+               printk(KERN_WARNING "viafb: Unsupported source: %d\n", iga);
+               return;
        }
+
+       via_write_reg_mask(VIACR, index, value, mask);
 }
 
-static void set_crt_output_path(int set_iga)
+static void set_crt_source(u8 iga)
 {
-       viafb_write_reg_mask(CR36, VIACR, 0x00, BIT4 + BIT5);
+       u8 value;
 
-       switch (set_iga) {
+       switch (iga) {
        case IGA1:
-               viafb_write_reg_mask(SR16, VIASR, 0x00, BIT6);
+               value = 0x00;
                break;
        case IGA2:
-               viafb_write_reg_mask(CR6A, VIACR, 0xC0, BIT6 + BIT7);
-               viafb_write_reg_mask(SR16, VIASR, 0x40, BIT6);
+               value = 0x40;
                break;
+       default:
+               printk(KERN_WARNING "viafb: Unsupported source: %d\n", iga);
+               return;
        }
+
+       via_write_reg_mask(VIASR, 0x16, value, 0x40);
 }
 
-static void dvi_patch_skew_dvp0(void)
+static inline void set_ldvp0_source(u8 iga)
 {
-       /* Reset data driving first: */
-       viafb_write_reg_mask(SR1B, VIASR, 0, BIT1);
-       viafb_write_reg_mask(SR2A, VIASR, 0, BIT4);
-
-       switch (viaparinfo->chip_info->gfx_chip_name) {
-       case UNICHROME_P4M890:
-               {
-                       if ((viaparinfo->tmds_setting_info->h_active == 1600) &&
-                               (viaparinfo->tmds_setting_info->v_active ==
-                               1200))
-                               viafb_write_reg_mask(CR96, VIACR, 0x03,
-                                              BIT0 + BIT1 + BIT2);
-                       else
-                               viafb_write_reg_mask(CR96, VIACR, 0x07,
-                                              BIT0 + BIT1 + BIT2);
-                       break;
-               }
+       set_source_common(0x6C, 7, iga);
+}
 
-       case UNICHROME_P4M900:
-               {
-                       viafb_write_reg_mask(CR96, VIACR, 0x07,
-                                      BIT0 + BIT1 + BIT2 + BIT3);
-                       viafb_write_reg_mask(SR1B, VIASR, 0x02, BIT1);
-                       viafb_write_reg_mask(SR2A, VIASR, 0x10, BIT4);
-                       break;
-               }
+static inline void set_ldvp1_source(u8 iga)
+{
+       set_source_common(0x93, 7, iga);
+}
 
-       default:
-               {
-                       break;
-               }
-       }
+static inline void set_dvp0_source(u8 iga)
+{
+       set_source_common(0x96, 4, iga);
 }
 
-static void dvi_patch_skew_dvp1(void)
+static inline void set_dvp1_source(u8 iga)
 {
-       switch (viaparinfo->chip_info->gfx_chip_name) {
-       case UNICHROME_CX700:
-               {
-                       break;
-               }
+       set_source_common(0x9B, 4, iga);
+}
 
-       default:
-               {
-                       break;
-               }
-       }
+static inline void set_lvds1_source(u8 iga)
+{
+       set_source_common(0x99, 4, iga);
 }
 
-static void dvi_patch_skew_dvp_low(void)
+static inline void set_lvds2_source(u8 iga)
 {
-       switch (viaparinfo->chip_info->gfx_chip_name) {
-       case UNICHROME_K8M890:
-               {
-                       viafb_write_reg_mask(CR99, VIACR, 0x03, BIT0 + BIT1);
-                       break;
-               }
+       set_source_common(0x97, 4, iga);
+}
 
-       case UNICHROME_P4M900:
-               {
-                       viafb_write_reg_mask(CR99, VIACR, 0x08,
-                                      BIT0 + BIT1 + BIT2 + BIT3);
-                       break;
-               }
+void via_set_source(u32 devices, u8 iga)
+{
+       if (devices & VIA_LDVP0)
+               set_ldvp0_source(iga);
+       if (devices & VIA_LDVP1)
+               set_ldvp1_source(iga);
+       if (devices & VIA_DVP0)
+               set_dvp0_source(iga);
+       if (devices & VIA_CRT)
+               set_crt_source(iga);
+       if (devices & VIA_DVP1)
+               set_dvp1_source(iga);
+       if (devices & VIA_LVDS1)
+               set_lvds1_source(iga);
+       if (devices & VIA_LVDS2)
+               set_lvds2_source(iga);
+}
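
All of the per-device source selection collapses into set_source_common(): each output owns one routing bit in some CR register, cleared selects IGA1 and set selects IGA2, and via_set_source() fans a single call out across every device in the mask. A userspace model of the two layers (the IGA1/IGA2 values of 1 and 2 are an assumption, matching their use elsewhere in viafb):

#include <stdio.h>
#include <stdint.h>

#define VIA_DVP1 0x20
enum { IGA1 = 1, IGA2 = 2 };	/* assumed values, for illustration */

static uint8_t cr[256];		/* stand-in for the VIACR register bank */

static void write_reg_mask(uint8_t index, uint8_t data, uint8_t mask)
{
	cr[index] = (cr[index] & ~mask) | (data & mask);
}

/* One routing bit per device: cleared selects IGA1, set selects IGA2. */
static void set_source_common(uint8_t index, uint8_t offset, uint8_t iga)
{
	uint8_t mask = 1 << offset;

	write_reg_mask(index, iga == IGA2 ? mask : 0x00, mask);
}

static void set_source(uint32_t devices, uint8_t iga)
{
	if (devices & VIA_DVP1)
		set_source_common(0x9B, 4, iga);	/* DVP1: CR9B[4] */
	/* ... the remaining six devices are handled the same way ... */
}

int main(void)
{
	set_source(VIA_DVP1, IGA2);
	printf("CR9B = 0x%02X\n", cr[0x9B]);	/* 0x10: DVP1 scans out IGA2 */
	return 0;
}
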
 
-       case UNICHROME_P4M890:
-               {
-                       viafb_write_reg_mask(CR99, VIACR, 0x0F,
-                                      BIT0 + BIT1 + BIT2 + BIT3);
-                       break;
-               }
+static void set_crt_state(u8 state)
+{
+       u8 value;
 
+       switch (state) {
+       case VIA_STATE_ON:
+               value = 0x00;
+               break;
+       case VIA_STATE_STANDBY:
+               value = 0x10;
+               break;
+       case VIA_STATE_SUSPEND:
+               value = 0x20;
+               break;
+       case VIA_STATE_OFF:
+               value = 0x30;
+               break;
        default:
-               {
-                       break;
-               }
+               return;
        }
+
+       via_write_reg_mask(VIACR, 0x36, value, 0x30);
 }
 
-static void set_dvi_output_path(int set_iga, int output_interface)
+static void set_dvp0_state(u8 state)
 {
-       switch (output_interface) {
-       case INTERFACE_DVP0:
-               viafb_write_reg_mask(CR6B, VIACR, 0x01, BIT0);
-
-               if (set_iga == IGA1) {
-                       viafb_write_reg_mask(CR96, VIACR, 0x00, BIT4);
-                       viafb_write_reg_mask(CR6C, VIACR, 0x21, BIT0 +
-                               BIT5 + BIT7);
-               } else {
-                       viafb_write_reg_mask(CR96, VIACR, 0x10, BIT4);
-                       viafb_write_reg_mask(CR6C, VIACR, 0xA1, BIT0 +
-                               BIT5 + BIT7);
-               }
-
-               viafb_write_reg_mask(SR1E, VIASR, 0xC0, BIT7 + BIT6);
+       u8 value;
 
-               dvi_patch_skew_dvp0();
+       switch (state) {
+       case VIA_STATE_ON:
+               value = 0xC0;
                break;
+       case VIA_STATE_OFF:
+               value = 0x00;
+               break;
+       default:
+               return;
+       }
 
-       case INTERFACE_DVP1:
-               if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266) {
-                       if (set_iga == IGA1)
-                               viafb_write_reg_mask(CR93, VIACR, 0x21,
-                                              BIT0 + BIT5 + BIT7);
-                       else
-                               viafb_write_reg_mask(CR93, VIACR, 0xA1,
-                                              BIT0 + BIT5 + BIT7);
-               } else {
-                       if (set_iga == IGA1)
-                               viafb_write_reg_mask(CR9B, VIACR, 0x00, BIT4);
-                       else
-                               viafb_write_reg_mask(CR9B, VIACR, 0x10, BIT4);
-               }
+       via_write_reg_mask(VIASR, 0x1E, value, 0xC0);
+}
+
+static void set_dvp1_state(u8 state)
+{
+       u8 value;
 
-               viafb_write_reg_mask(SR1E, VIASR, 0x30, BIT4 + BIT5);
-               dvi_patch_skew_dvp1();
+       switch (state) {
+       case VIA_STATE_ON:
+               value = 0x30;
                break;
-       case INTERFACE_DFP_HIGH:
-               if (viaparinfo->chip_info->gfx_chip_name != UNICHROME_CLE266) {
-                       if (set_iga == IGA1) {
-                               viafb_write_reg_mask(CR96, VIACR, 0x00, BIT4);
-                               viafb_write_reg_mask(CR97, VIACR, 0x03,
-                                              BIT0 + BIT1 + BIT4);
-                       } else {
-                               viafb_write_reg_mask(CR96, VIACR, 0x10, BIT4);
-                               viafb_write_reg_mask(CR97, VIACR, 0x13,
-                                              BIT0 + BIT1 + BIT4);
-                       }
-               }
-               viafb_write_reg_mask(SR2A, VIASR, 0x0C, BIT2 + BIT3);
+       case VIA_STATE_OFF:
+               value = 0x00;
                break;
+       default:
+               return;
+       }
 
-       case INTERFACE_DFP_LOW:
-               if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266)
-                       break;
+       via_write_reg_mask(VIASR, 0x1E, value, 0x30);
+}
 
-               if (set_iga == IGA1) {
-                       viafb_write_reg_mask(CR99, VIACR, 0x00, BIT4);
-                       viafb_write_reg_mask(CR9B, VIACR, 0x00, BIT4);
-               } else {
-                       viafb_write_reg_mask(CR99, VIACR, 0x10, BIT4);
-                       viafb_write_reg_mask(CR9B, VIACR, 0x10, BIT4);
-               }
+static void set_lvds1_state(u8 state)
+{
+       u8 value;
 
-               viafb_write_reg_mask(SR2A, VIASR, 0x03, BIT0 + BIT1);
-               dvi_patch_skew_dvp_low();
+       switch (state) {
+       case VIA_STATE_ON:
+               value = 0x03;
                break;
-
-       case INTERFACE_TMDS:
-               if (set_iga == IGA1)
-                       viafb_write_reg_mask(CR99, VIACR, 0x00, BIT4);
-               else
-                       viafb_write_reg_mask(CR99, VIACR, 0x10, BIT4);
+       case VIA_STATE_OFF:
+               value = 0x00;
                break;
+       default:
+               return;
        }
 
-       if (set_iga == IGA2) {
-               enable_second_display_channel();
-               /* Disable LCD Scaling */
-               viafb_write_reg_mask(CR79, VIACR, 0x00, BIT0);
-       }
+       via_write_reg_mask(VIASR, 0x2A, value, 0x03);
 }
 
-static void set_lcd_output_path(int set_iga, int output_interface)
+static void set_lvds2_state(u8 state)
 {
-       DEBUG_MSG(KERN_INFO
-                 "set_lcd_output_path, iga:%d,out_interface:%d\n",
-                 set_iga, output_interface);
-       switch (set_iga) {
-       case IGA1:
-               viafb_write_reg_mask(CR6B, VIACR, 0x00, BIT3);
-               viafb_write_reg_mask(CR6A, VIACR, 0x08, BIT3);
+       u8 value;
 
-               disable_second_display_channel();
+       switch (state) {
+       case VIA_STATE_ON:
+               value = 0x0C;
                break;
-
-       case IGA2:
-               viafb_write_reg_mask(CR6B, VIACR, 0x00, BIT3);
-               viafb_write_reg_mask(CR6A, VIACR, 0x08, BIT3);
-
-               enable_second_display_channel();
+       case VIA_STATE_OFF:
+               value = 0x00;
                break;
+       default:
+               return;
        }
 
-       switch (output_interface) {
-       case INTERFACE_DVP0:
-               if (set_iga == IGA1) {
-                       viafb_write_reg_mask(CR96, VIACR, 0x00, BIT4);
-               } else {
-                       viafb_write_reg(CR91, VIACR, 0x00);
-                       viafb_write_reg_mask(CR96, VIACR, 0x10, BIT4);
-               }
-               break;
-
-       case INTERFACE_DVP1:
-               if (set_iga == IGA1)
-                       viafb_write_reg_mask(CR9B, VIACR, 0x00, BIT4);
-               else {
-                       viafb_write_reg(CR91, VIACR, 0x00);
-                       viafb_write_reg_mask(CR9B, VIACR, 0x10, BIT4);
-               }
-               break;
+       via_write_reg_mask(VIASR, 0x2A, value, 0x0C);
+}
 
-       case INTERFACE_DFP_HIGH:
-               if (set_iga == IGA1)
-                       viafb_write_reg_mask(CR97, VIACR, 0x00, BIT4);
-               else {
-                       viafb_write_reg(CR91, VIACR, 0x00);
-                       viafb_write_reg_mask(CR97, VIACR, 0x10, BIT4);
-                       viafb_write_reg_mask(CR96, VIACR, 0x10, BIT4);
-               }
-               break;
+void via_set_state(u32 devices, u8 state)
+{
+       /*
+       TODO: Can we enable/disable these devices? How?
+       if (devices & VIA_LDVP0)
+       if (devices & VIA_LDVP1)
+       */
+       if (devices & VIA_DVP0)
+               set_dvp0_state(state);
+       if (devices & VIA_CRT)
+               set_crt_state(state);
+       if (devices & VIA_DVP1)
+               set_dvp1_state(state);
+       if (devices & VIA_LVDS1)
+               set_lvds1_state(state);
+       if (devices & VIA_LVDS2)
+               set_lvds2_state(state);
+}
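
via_set_state() subsumes the viafb_crt_enable()/viafb_crt_disable() pair removed further down. For the CRT the four DPMS states land in CR36[5:4]; numerically the switch above is just `state << 4`, since VIA_STATE_ON through VIA_STATE_OFF are defined as 0 through 3 in hw.h later in this diff. The digital ports only support plain on/off pad toggles. A sketch of the CRT encoding:

#include <stdio.h>
#include <stdint.h>

#define VIA_STATE_ON      0
#define VIA_STATE_STANDBY 1
#define VIA_STATE_SUSPEND 2
#define VIA_STATE_OFF     3

/* CR36[5:4] hold the CRT DPMS state: 00 on, 01 standby, 10 suspend, 11 off. */
static uint8_t crt_state_bits(uint8_t state)
{
	return (uint8_t)(state << 4) & 0x30;
}

int main(void)
{
	printf("on  -> 0x%02X\n", crt_state_bits(VIA_STATE_ON));	/* 0x00 */
	printf("off -> 0x%02X\n", crt_state_bits(VIA_STATE_OFF));	/* 0x30 */
	return 0;
}
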
 
-       case INTERFACE_DFP_LOW:
-               if (set_iga == IGA1)
-                       viafb_write_reg_mask(CR99, VIACR, 0x00, BIT4);
-               else {
-                       viafb_write_reg(CR91, VIACR, 0x00);
-                       viafb_write_reg_mask(CR99, VIACR, 0x10, BIT4);
-                       viafb_write_reg_mask(CR9B, VIACR, 0x10, BIT4);
-               }
+void via_set_sync_polarity(u32 devices, u8 polarity)
+{
+       if (polarity & ~(VIA_HSYNC_NEGATIVE | VIA_VSYNC_NEGATIVE)) {
+               printk(KERN_WARNING "viafb: Unsupported polarity: %d\n",
+                       polarity);
+               return;
+       }
 
-               break;
+       if (devices & VIA_CRT)
+               via_write_misc_reg_mask(polarity << 6, 0xC0);
+       if (devices & VIA_DVP1)
+               via_write_reg_mask(VIACR, 0x9B, polarity << 5, 0x60);
+       if (devices & VIA_LVDS1)
+               via_write_reg_mask(VIACR, 0x99, polarity << 5, 0x60);
+       if (devices & VIA_LVDS2)
+               via_write_reg_mask(VIACR, 0x97, polarity << 5, 0x60);
+}
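
via_set_sync_polarity() packs the hsync/vsync polarity into two flag bits (hsync-negative = bit 0, vsync-negative = bit 1) and shifts the pair into whichever field a given output uses: bits 7:6 of the misc register for the CRT DAC, bits 6:5 of the per-port CR for DVP1/LVDS1/LVDS2. The shift amounts in the code are exactly that packing:

#include <stdio.h>
#include <stdint.h>

#define VIA_HSYNC_NEGATIVE 0x01
#define VIA_VSYNC_NEGATIVE 0x02

int main(void)
{
	uint8_t polarity = VIA_HSYNC_NEGATIVE | VIA_VSYNC_NEGATIVE;

	/* CRT DAC: misc register bits 7:6 (mask 0xC0). */
	printf("misc bits: 0x%02X\n", (uint8_t)(polarity << 6));	/* 0xC0 */
	/* DVP1/LVDS1/LVDS2: per-port CR bits 6:5 (mask 0x60). */
	printf("CR bits:   0x%02X\n", (uint8_t)(polarity << 5));	/* 0x60 */
	return 0;
}
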
 
-       case INTERFACE_DFP:
-               if ((UNICHROME_K8M890 == viaparinfo->chip_info->gfx_chip_name)
-                   || (UNICHROME_P4M890 ==
-                   viaparinfo->chip_info->gfx_chip_name))
-                       viafb_write_reg_mask(CR97, VIACR, 0x84,
-                                      BIT7 + BIT2 + BIT1 + BIT0);
-               if (set_iga == IGA1) {
-                       viafb_write_reg_mask(CR97, VIACR, 0x00, BIT4);
-                       viafb_write_reg_mask(CR99, VIACR, 0x00, BIT4);
-               } else {
-                       viafb_write_reg(CR91, VIACR, 0x00);
-                       viafb_write_reg_mask(CR97, VIACR, 0x10, BIT4);
-                       viafb_write_reg_mask(CR99, VIACR, 0x10, BIT4);
+u32 via_parse_odev(char *input, char **end)
+{
+       char *ptr = input;
+       u32 odev = 0;
+       bool next = true;
+       int i, len;
+
+       while (next) {
+               next = false;
+               for (i = 0; i < ARRAY_SIZE(device_mapping); i++) {
+                       len = strlen(device_mapping[i].name);
+                       if (!strncmp(ptr, device_mapping[i].name, len)) {
+                               odev |= device_mapping[i].device;
+                               ptr += len;
+                               if (*ptr == ',') {
+                                       ptr++;
+                                       next = true;
+                               }
+                       }
                }
-               break;
+       }
 
-       case INTERFACE_LVDS0:
-       case INTERFACE_LVDS0LVDS1:
-               if (set_iga == IGA1)
-                       viafb_write_reg_mask(CR99, VIACR, 0x00, BIT4);
-               else
-                       viafb_write_reg_mask(CR99, VIACR, 0x10, BIT4);
+       *end = ptr;
+       return odev;
+}
 
-               break;
+void via_odev_to_seq(struct seq_file *m, u32 odev)
+{
+       int i, count = 0;
 
-       case INTERFACE_LVDS1:
-               if (set_iga == IGA1)
-                       viafb_write_reg_mask(CR97, VIACR, 0x00, BIT4);
-               else
-                       viafb_write_reg_mask(CR97, VIACR, 0x10, BIT4);
-               break;
+       for (i = 0; i < ARRAY_SIZE(device_mapping); i++) {
+               if (odev & device_mapping[i].device) {
+                       if (count > 0)
+                               seq_putc(m, ',');
+
+                       seq_puts(m, device_mapping[i].name);
+                       count++;
+               }
        }
+
+       seq_putc(m, '\n');
 }
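
via_parse_odev() scans a comma-separated device list against device_mapping[] and ORs the matching bits, restarting the table walk after each comma; via_odev_to_seq() is its inverse for /proc-style output. A standalone round trip (printf standing in for the seq_file API, and only a subset of the table, with bits matching the VIA_* defines):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Subset of device_mapping[]; bits match the VIA_* defines. */
static const struct { uint32_t device; const char *name; } map[] = {
	{0x04, "DVP0"}, {0x10, "CRT"}, {0x20, "DVP1"}, {0x40, "LVDS1"},
};

/* Same walk as via_parse_odev(): retry the table after every comma. */
static uint32_t parse_odev(const char *ptr, const char **end)
{
	uint32_t odev = 0;
	int next = 1;
	size_t i, len;

	while (next) {
		next = 0;
		for (i = 0; i < ARRAY_SIZE(map); i++) {
			len = strlen(map[i].name);
			if (!strncmp(ptr, map[i].name, len)) {
				odev |= map[i].device;
				ptr += len;
				if (*ptr == ',') {
					ptr++;
					next = 1;
				}
			}
		}
	}
	*end = ptr;
	return odev;
}

int main(void)
{
	const char *end;
	uint32_t odev = parse_odev("CRT,DVP1", &end);
	size_t i;
	int count = 0;

	/* Inverse direction, as via_odev_to_seq() does via seq_file. */
	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (odev & map[i].device)
			printf("%s%s", count++ ? "," : "", map[i].name);
	printf("\n");	/* prints: CRT,DVP1 */
	return 0;
}
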
 
 static void load_fix_bit_crtc_reg(void)
@@ -1352,6 +1429,15 @@ void viafb_load_FIFO_reg(int set_iga, int hor_active, int ver_active)
                            VX855_IGA1_DISPLAY_QUEUE_EXPIRE_NUM;
                }
 
+               if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_VX900) {
+                       iga1_fifo_max_depth = VX900_IGA1_FIFO_MAX_DEPTH;
+                       iga1_fifo_threshold = VX900_IGA1_FIFO_THRESHOLD;
+                       iga1_fifo_high_threshold =
+                           VX900_IGA1_FIFO_HIGH_THRESHOLD;
+                       iga1_display_queue_expire_num =
+                           VX900_IGA1_DISPLAY_QUEUE_EXPIRE_NUM;
+               }
+
                /* Set Display FIFO Depth Select */
                reg_value = IGA1_FIFO_DEPTH_SELECT_FORMULA(iga1_fifo_max_depth);
                viafb_load_reg_num =
@@ -1492,6 +1578,15 @@ void viafb_load_FIFO_reg(int set_iga, int hor_active, int ver_active)
                            VX855_IGA2_DISPLAY_QUEUE_EXPIRE_NUM;
                }
 
+               if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_VX900) {
+                       iga2_fifo_max_depth = VX900_IGA2_FIFO_MAX_DEPTH;
+                       iga2_fifo_threshold = VX900_IGA2_FIFO_THRESHOLD;
+                       iga2_fifo_high_threshold =
+                           VX900_IGA2_FIFO_HIGH_THRESHOLD;
+                       iga2_display_queue_expire_num =
+                           VX900_IGA2_DISPLAY_QUEUE_EXPIRE_NUM;
+               }
+
                if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_K800) {
                        /* Set Display FIFO Depth Select */
                        reg_value =
@@ -1612,6 +1707,7 @@ u32 viafb_get_clk_value(int clk)
                        break;
 
                case UNICHROME_VX855:
+               case UNICHROME_VX900:
                        value = vx855_encode_pll(pll_value[i].vx855_pll);
                        break;
                }
@@ -1645,6 +1741,7 @@ void viafb_set_vclock(u32 clk, int set_iga)
                case UNICHROME_P4M900:
                case UNICHROME_VX800:
                case UNICHROME_VX855:
+               case UNICHROME_VX900:
                        via_write_reg(VIASR, SR44, (clk & 0x0000FF));
                        via_write_reg(VIASR, SR45, (clk & 0x00FF00) >> 8);
                        via_write_reg(VIASR, SR46, (clk & 0xFF0000) >> 16);
@@ -1671,6 +1768,7 @@ void viafb_set_vclock(u32 clk, int set_iga)
                case UNICHROME_P4M900:
                case UNICHROME_VX800:
                case UNICHROME_VX855:
+               case UNICHROME_VX900:
                        via_write_reg(VIASR, SR4A, (clk & 0x0000FF));
                        via_write_reg(VIASR, SR4B, (clk & 0x00FF00) >> 8);
                        via_write_reg(VIASR, SR4C, (clk & 0xFF0000) >> 16);
@@ -1688,8 +1786,8 @@ void viafb_set_vclock(u32 clk, int set_iga)
        }
 
        if (set_iga == IGA2) {
-               viafb_write_reg_mask(SR40, VIASR, 0x01, BIT0);
-               viafb_write_reg_mask(SR40, VIASR, 0x00, BIT0);
+               viafb_write_reg_mask(SR40, VIASR, 0x04, BIT2);
+               viafb_write_reg_mask(SR40, VIASR, 0x00, BIT2);
        }
 
        /* Fire! */
@@ -1937,7 +2035,6 @@ void viafb_fill_crtc_timing(struct crt_mode_table *crt_table,
        int index = 0;
        int h_addr, v_addr;
        u32 pll_D_N;
-       u8 polarity = 0;
 
        for (i = 0; i < video_mode->mode_array; i++) {
                index = i;
@@ -1964,14 +2061,6 @@ void viafb_fill_crtc_timing(struct crt_mode_table *crt_table,
 
        h_addr = crt_reg.hor_addr;
        v_addr = crt_reg.ver_addr;
-
-       /* update polarity for CRT timing */
-       if (crt_table[index].h_sync_polarity == NEGATIVE)
-               polarity |= BIT6;
-       if (crt_table[index].v_sync_polarity == NEGATIVE)
-               polarity |= BIT7;
-       via_write_misc_reg_mask(polarity, BIT6 | BIT7);
-
        if (set_iga == IGA1) {
                viafb_unlock_crt();
                viafb_write_reg(CR09, VIACR, 0x00);     /*initial CR09=0 */
@@ -2004,7 +2093,7 @@ void viafb_fill_crtc_timing(struct crt_mode_table *crt_table,
 
 }
 
-void viafb_init_chip_info(int chip_type)
+void __devinit viafb_init_chip_info(int chip_type)
 {
        init_gfx_chip_info(chip_type);
        init_tmds_chip_info();
@@ -2071,7 +2160,7 @@ void viafb_update_device_setting(int hres, int vres,
        }
 }
 
-static void init_gfx_chip_info(int chip_type)
+static void __devinit init_gfx_chip_info(int chip_type)
 {
        u8 tmp;
 
@@ -2111,6 +2200,7 @@ static void init_gfx_chip_info(int chip_type)
        switch (viaparinfo->chip_info->gfx_chip_name) {
        case UNICHROME_VX800:
        case UNICHROME_VX855:
+       case UNICHROME_VX900:
                viaparinfo->chip_info->twod_engine = VIA_2D_ENG_M1;
                break;
        case UNICHROME_K8M890:
@@ -2123,7 +2213,7 @@ static void init_gfx_chip_info(int chip_type)
        }
 }
 
-static void init_tmds_chip_info(void)
+static void __devinit init_tmds_chip_info(void)
 {
        viafb_tmds_trasmitter_identify();
 
@@ -2168,7 +2258,7 @@ static void init_tmds_chip_info(void)
                &viaparinfo->shared->tmds_setting_info);
 }
 
-static void init_lvds_chip_info(void)
+static void __devinit init_lvds_chip_info(void)
 {
        viafb_lvds_trasmitter_identify();
        viafb_init_lcd_size();
@@ -2202,7 +2292,7 @@ static void init_lvds_chip_info(void)
                  viaparinfo->chip_info->lvds_chip_info.output_interface);
 }
 
-void viafb_init_dac(int set_iga)
+void __devinit viafb_init_dac(int set_iga)
 {
        int i;
        u8 tmp;
@@ -2275,11 +2365,24 @@ static void set_display_channel(void)
        }
 }
 
+static u8 get_sync(struct fb_info *info)
+{
+       u8 polarity = 0;
+
+       if (!(info->var.sync & FB_SYNC_HOR_HIGH_ACT))
+               polarity |= VIA_HSYNC_NEGATIVE;
+       if (!(info->var.sync & FB_SYNC_VERT_HIGH_ACT))
+               polarity |= VIA_VSYNC_NEGATIVE;
+       return polarity;
+}
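
get_sync() bridges the fbdev convention to the new polarity flags: an FB_SYNC_*_HIGH_ACT bit set in var.sync means a positive-going pulse, so its absence maps to the matching *_NEGATIVE bit. A sketch, assuming the standard linux/fb.h values FB_SYNC_HOR_HIGH_ACT = 1 and FB_SYNC_VERT_HIGH_ACT = 2 for a standalone build:

#include <stdio.h>
#include <stdint.h>

#define FB_SYNC_HOR_HIGH_ACT  1
#define FB_SYNC_VERT_HIGH_ACT 2
#define VIA_HSYNC_NEGATIVE 0x01
#define VIA_VSYNC_NEGATIVE 0x02

/* An absent "high active" flag means a negative-going sync pulse. */
static uint8_t sync_to_polarity(uint32_t sync)
{
	uint8_t polarity = 0;

	if (!(sync & FB_SYNC_HOR_HIGH_ACT))
		polarity |= VIA_HSYNC_NEGATIVE;
	if (!(sync & FB_SYNC_VERT_HIGH_ACT))
		polarity |= VIA_VSYNC_NEGATIVE;
	return polarity;
}

int main(void)
{
	/* VESA 1024x768@60-style timing: both syncs negative. */
	printf("polarity = 0x%02X\n", sync_to_polarity(0));	/* 0x03 */
	return 0;
}
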
+
 int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
        struct VideoModeTable *vmode_tbl1, int video_bpp1)
 {
        int i, j;
        int port;
+       u32 devices = viaparinfo->shared->iga1_devices
+               | viaparinfo->shared->iga2_devices;
        u8 value, index, mask;
        struct crt_mode_table *crt_timing;
        struct crt_mode_table *crt_timing1 = NULL;
@@ -2322,11 +2425,13 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
                break;
 
        case UNICHROME_VX855:
+       case UNICHROME_VX900:
                viafb_write_regx(VX855_ModeXregs, NUM_TOTAL_VX855_ModeXregs);
                break;
        }
 
        device_off();
+       via_set_state(devices, VIA_STATE_OFF);
 
        /* Fill VPIT Parameters */
        /* Write Misc Register */
@@ -2337,7 +2442,6 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
                via_write_reg(VIASR, i, VPIT.SR[i - 1]);
 
        viafb_write_reg_mask(0x15, VIASR, 0xA2, 0xA2);
-       viafb_set_iga_path();
 
        /* Write CRTC */
        viafb_fill_crtc_timing(crt_timing, vmode_tbl, video_bpp / 8, IGA1);
@@ -2377,6 +2481,13 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
        via_set_primary_color_depth(viaparinfo->depth);
        via_set_secondary_color_depth(viafb_dual_fb ? viaparinfo1->depth
                : viaparinfo->depth);
+       via_set_source(viaparinfo->shared->iga1_devices, IGA1);
+       via_set_source(viaparinfo->shared->iga2_devices, IGA2);
+       if (viaparinfo->shared->iga2_devices)
+               enable_second_display_channel();
+       else
+               disable_second_display_channel();
+
        /* Update Refresh Rate Setting */
 
        /* Clear On Screen */
@@ -2394,8 +2505,6 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
                                viaparinfo->crt_setting_info->iga_path);
                }
 
-               set_crt_output_path(viaparinfo->crt_setting_info->iga_path);
-
                /* Patch: if set_hres is not 8-pixel aligned (e.g. 1366),
                viafb_setmode rounds it up to 8-pixel alignment (1368),
                leaving several pixels (2 pixels) on the right side of
                the screen. */
@@ -2482,10 +2591,16 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
                        viafb_DeviceStatus = CRT_Device;
        }
        device_on();
+       if (!viafb_dual_fb)
+               via_set_sync_polarity(devices, get_sync(viafbinfo));
+       else {
+               via_set_sync_polarity(viaparinfo->shared->iga1_devices,
+                       get_sync(viafbinfo));
+               via_set_sync_polarity(viaparinfo->shared->iga2_devices,
+                       get_sync(viafbinfo1));
+       }
 
-       if (viafb_SAMM_ON == 1)
-               viafb_write_reg_mask(CR6A, VIACR, 0xC0, BIT6 + BIT7);
-
+       via_set_state(devices, VIA_STATE_ON);
        device_screen_on();
        return 1;
 }
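
The hunks above rework viafb_setmode() around the new helpers: the union of iga1_devices and iga2_devices is powered off up front, the CRTCs, FIFOs, and clocks are programmed, sources are routed per IGA (with the second display channel enabled only when IGA2 actually drives something), sync polarity is applied from the fb_info sync flags, and the same device set is powered back on. A condensed sketch of that ordering, with printf stubs in place of the real helpers:

#include <stdio.h>
#include <stdint.h>

#define VIA_STATE_ON  0
#define VIA_STATE_OFF 3
enum { IGA1 = 1, IGA2 = 2 };

/* printf stubs standing in for the helpers introduced in this diff */
static void via_set_state(uint32_t devs, int state)
{
	printf("set_state(0x%02X, %d)\n", (unsigned)devs, state);
}
static void via_set_source(uint32_t devs, int iga)
{
	printf("set_source(0x%02X, IGA%d)\n", (unsigned)devs, iga);
}
static void via_set_sync_polarity(uint32_t devs, uint8_t pol)
{
	printf("set_sync_polarity(0x%02X, 0x%02X)\n", (unsigned)devs, pol);
}

int main(void)
{
	uint32_t iga1_devices = 0x10;	/* e.g. VIA_CRT  */
	uint32_t iga2_devices = 0x20;	/* e.g. VIA_DVP1 */
	uint32_t devices = iga1_devices | iga2_devices;

	via_set_state(devices, VIA_STATE_OFF);	/* quiesce all outputs   */
	/* ... CRTC, FIFO and PLL programming happens here ... */
	via_set_source(iga1_devices, IGA1);	/* route scanout sources */
	via_set_source(iga2_devices, IGA2);
	via_set_sync_polarity(devices, 0x03);	/* both syncs negative   */
	via_set_state(devices, VIA_STATE_ON);	/* light the outputs up  */
	return 0;
}
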
@@ -2526,31 +2641,18 @@ int viafb_get_refresh(int hres, int vres, u32 long_refresh)
 
 static void device_off(void)
 {
-       viafb_crt_disable();
        viafb_dvi_disable();
        viafb_lcd_disable();
 }
 
 static void device_on(void)
 {
-       if (viafb_CRT_ON == 1)
-               viafb_crt_enable();
        if (viafb_DVI_ON == 1)
                viafb_dvi_enable();
        if (viafb_LCD_ON == 1)
                viafb_lcd_enable();
 }
 
-void viafb_crt_disable(void)
-{
-       viafb_write_reg_mask(CR36, VIACR, BIT5 + BIT4, BIT5 + BIT4);
-}
-
-void viafb_crt_enable(void)
-{
-       viafb_write_reg_mask(CR36, VIACR, 0x0, BIT5 + BIT4);
-}
-
 static void enable_second_display_channel(void)
 {
        /* to enable second display channel. */
@@ -2567,7 +2669,6 @@ static void disable_second_display_channel(void)
        viafb_write_reg_mask(CR6A, VIACR, BIT6, BIT6);
 }
 
-
 void viafb_set_dpa_gfx(int output_interface, struct GFX_DPA_SETTING\
                                        *p_gfx_dpa_setting)
 {
@@ -2652,4 +2753,9 @@ void viafb_fill_var_timing_info(struct fb_var_screeninfo *var, int refresh,
            crt_reg.ver_total - (crt_reg.ver_sync_start + crt_reg.ver_sync_end);
        var->lower_margin = crt_reg.ver_sync_start - crt_reg.ver_addr;
        var->vsync_len = crt_reg.ver_sync_end;
+       var->sync = 0;
+       if (crt_timing[index].h_sync_polarity == POSITIVE)
+               var->sync |= FB_SYNC_HOR_HIGH_ACT;
+       if (crt_timing[index].v_sync_polarity == POSITIVE)
+               var->sync |= FB_SYNC_VERT_HIGH_ACT;
 }
index c443998952941e0fbcd3cd197d5e367c2a4a59dc..668d534542ef31ba878bef01e9685fe525c678dd 100644 (file)
@@ -22,6 +22,8 @@
 #ifndef __HW_H__
 #define __HW_H__
 
+#include <linux/seq_file.h>
+
 #include "viamode.h"
 #include "global.h"
 #include "via_modesetting.h"
 #define viafb_write_reg(i, p, d)               via_write_reg(p, i, d)
 #define viafb_write_reg_mask(i, p, d, m)       via_write_reg_mask(p, i, d, m)
 
+/* VIA output devices */
+#define VIA_LDVP0      0x00000001
+#define VIA_LDVP1      0x00000002
+#define VIA_DVP0       0x00000004
+#define VIA_CRT                0x00000010
+#define VIA_DVP1       0x00000020
+#define VIA_LVDS1      0x00000040
+#define VIA_LVDS2      0x00000080
+
+/* VIA output device power states */
+#define VIA_STATE_ON           0
+#define VIA_STATE_STANDBY      1
+#define VIA_STATE_SUSPEND      2
+#define VIA_STATE_OFF          3
+
+/* VIA output device sync polarity */
+#define VIA_HSYNC_NEGATIVE     0x01
+#define VIA_VSYNC_NEGATIVE     0x02
+
 /***************************************************
 * Definition IGA1 Design Method of CRTC Registers *
 ****************************************************/
@@ -341,6 +362,17 @@ is reserved, so it may have problem to set 1600x1200 on IGA2. */
 #define VX855_IGA2_FIFO_HIGH_THRESHOLD          160
 #define VX855_IGA2_DISPLAY_QUEUE_EXPIRE_NUM     320
 
+/* For VT3410 */
+#define VX900_IGA1_FIFO_MAX_DEPTH               400
+#define VX900_IGA1_FIFO_THRESHOLD               320
+#define VX900_IGA1_FIFO_HIGH_THRESHOLD          320
+#define VX900_IGA1_DISPLAY_QUEUE_EXPIRE_NUM     160
+
+#define VX900_IGA2_FIFO_MAX_DEPTH               192
+#define VX900_IGA2_FIFO_THRESHOLD               160
+#define VX900_IGA2_FIFO_HIGH_THRESHOLD          160
+#define VX900_IGA2_DISPLAY_QUEUE_EXPIRE_NUM     320
+
 #define IGA1_FIFO_DEPTH_SELECT_REG_NUM          1
 #define IGA1_FIFO_THRESHOLD_REG_NUM             2
 #define IGA1_FIFO_HIGH_THRESHOLD_REG_NUM        2
@@ -858,6 +890,8 @@ struct iga2_crtc_timing {
 #define VX800_FUNCTION3     0x3353
 /* VT3409 chipset*/
 #define VX855_FUNCTION3     0x3409
+/* VT3410 chipset*/
+#define VX900_FUNCTION3     0x3410
 
 #define NUM_TOTAL_PLL_TABLE ARRAY_SIZE(pll_value)
 
@@ -873,6 +907,11 @@ struct pci_device_id_info {
        u32 chip_index;
 };
 
+struct via_device_mapping {
+       u32 device;
+       const char *name;
+};
+
 extern unsigned int viafb_second_virtual_xres;
 extern int viafb_SAMM_ON;
 extern int viafb_dual_fb;
@@ -881,9 +920,6 @@ extern int viafb_LCD_ON;
 extern int viafb_DVI_ON;
 extern int viafb_hotplug;
 
-void viafb_set_output_path(int device, int set_iga,
-       int output_interface);
-
 void viafb_fill_crtc_timing(struct crt_mode_table *crt_table,
        struct VideoModeTable *video_mode, int bpp_byte, int set_iga);
 
@@ -891,8 +927,11 @@ void viafb_set_vclock(u32 CLK, int set_iga);
 void viafb_load_reg(int timing_value, int viafb_load_reg_num,
        struct io_register *reg,
              int io_type);
-void viafb_crt_disable(void);
-void viafb_crt_enable(void);
+void via_set_source(u32 devices, u8 iga);
+void via_set_state(u32 devices, u8 state);
+void via_set_sync_polarity(u32 devices, u8 polarity);
+u32 via_parse_odev(char *input, char **end);
+void via_odev_to_seq(struct seq_file *m, u32 odev);
 void init_ad9389(void);
 /* Access I/O Function */
 void viafb_lock_crt(void);
@@ -908,8 +947,8 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
        struct VideoModeTable *vmode_tbl1, int video_bpp1);
 void viafb_fill_var_timing_info(struct fb_var_screeninfo *var, int refresh,
        struct VideoModeTable *vmode_tbl);
-void viafb_init_chip_info(int chip_type);
-void viafb_init_dac(int set_iga);
+void __devinit viafb_init_chip_info(int chip_type);
+void __devinit viafb_init_dac(int set_iga);
 int viafb_get_pixclock(int hres, int vres, int vmode_refresh);
 int viafb_get_refresh(int hres, int vres, u32 float_refresh);
 void viafb_update_device_setting(int hres, int vres, int bpp,
index 4d553d0b8d7a450b9337a9b4375856c5b304217c..ea1c51428823e3bdc14e73bfaefa42dac283b0af 100644 (file)
@@ -94,6 +94,7 @@ int viafb_ioctl_hotplug(int hres, int vres, int bpp)
                                viafb_CRT_ON = 0;
                                viafb_LCD_ON = 0;
                                viafb_DeviceStatus = DVI_Device;
+                               viafb_set_iga_path();
                                return viafb_DeviceStatus;
                        }
                        status = 1;
@@ -107,6 +108,7 @@ int viafb_ioctl_hotplug(int hres, int vres, int bpp)
                viafb_LCD_ON = 0;
 
                viafb_DeviceStatus = CRT_Device;
+               viafb_set_iga_path();
                return viafb_DeviceStatus;
        }
 
index fc25ae30c5f6663140ac519bae24495330e487c1..3425c3969806ccae2739b7ae6338bb1ccdb2d0ec 100644 (file)
 #include <linux/via-core.h>
 #include <linux/via_i2c.h>
 #include "global.h"
-#include "lcdtbl.h"
 
 #define viafb_compact_res(x, y) (((x)<<16)|(y))
 
+/* CLE266 Software Power Sequence */
+/* {Mask}, {Data}, {Delay} */
+int PowerSequenceOn[3][3] = { {0x10, 0x08, 0x06}, {0x10, 0x08, 0x06},
+       {0x19, 0x1FE, 0x01} };
+int PowerSequenceOff[3][3] = { {0x06, 0x08, 0x10}, {0x00, 0x00, 0x00},
+       {0xD2, 0x19, 0x01} };
+
 static struct _lcd_scaling_factor lcd_scaling_factor = {
        /* LCD Horizontal Scaling Factor Register */
        {LCD_HOR_SCALING_FACTOR_REG_NUM,
@@ -42,7 +48,7 @@ static struct _lcd_scaling_factor lcd_scaling_factor_CLE = {
 
 static int check_lvds_chip(int device_id_subaddr, int device_id);
 static bool lvds_identify_integratedlvds(void);
-static void fp_id_to_vindex(int panel_id);
+static void __devinit fp_id_to_vindex(int panel_id);
 static int lvds_register_read(int index);
 static void load_lcd_scaling(int set_hres, int set_vres, int panel_hres,
                      int panel_vres);
@@ -84,7 +90,7 @@ static int check_lvds_chip(int device_id_subaddr, int device_id)
                return FAIL;
 }
 
-void viafb_init_lcd_size(void)
+void __devinit viafb_init_lcd_size(void)
 {
        DEBUG_MSG(KERN_INFO "viafb_init_lcd_size()\n");
 
@@ -144,7 +150,7 @@ static bool lvds_identify_integratedlvds(void)
        return true;
 }
 
-int viafb_lvds_trasmitter_identify(void)
+int __devinit viafb_lvds_trasmitter_identify(void)
 {
        if (viafb_lvds_identify_vt1636(VIA_PORT_31)) {
                viaparinfo->chip_info->lvds_chip_info.i2c_port = VIA_PORT_31;
@@ -185,7 +191,7 @@ int viafb_lvds_trasmitter_identify(void)
        return FAIL;
 }
 
-static void fp_id_to_vindex(int panel_id)
+static void __devinit fp_id_to_vindex(int panel_id)
 {
        DEBUG_MSG(KERN_INFO "fp_get_panel_id()\n");
 
@@ -436,6 +442,7 @@ static void load_lcd_scaling(int set_hres, int set_vres, int panel_hres,
                case UNICHROME_CN750:
                case UNICHROME_VX800:
                case UNICHROME_VX855:
+               case UNICHROME_VX900:
                        reg_value =
                            K800_LCD_HOR_SCF_FORMULA(set_hres, panel_hres);
                        /* Horizontal scaling enabled */
@@ -479,6 +486,7 @@ static void load_lcd_scaling(int set_hres, int set_vres, int panel_hres,
                case UNICHROME_CN750:
                case UNICHROME_VX800:
                case UNICHROME_VX855:
+               case UNICHROME_VX900:
                        reg_value =
                            K800_LCD_VER_SCF_FORMULA(set_vres, panel_vres);
                        /* Vertical scaling enabled */
@@ -655,9 +663,6 @@ void viafb_lcd_set_mode(struct crt_mode_table *mode_crt_table,
        pll_D_N = viafb_get_clk_value(panel_crt_table[0].clk);
        DEBUG_MSG(KERN_INFO "PLL=0x%x", pll_D_N);
        viafb_set_vclock(pll_D_N, set_iga);
-
-       viafb_set_output_path(DEVICE_LCD, set_iga,
-               plvds_chip_info->output_interface);
        lcd_patch_skew(plvds_setting_info, plvds_chip_info);
 
        /* If K8M800, enable LCD Prefetch Mode. */
@@ -700,9 +705,6 @@ static void integrated_lvds_disable(struct lvds_setting_information
                viafb_write_reg_mask(CR91, VIACR, 0xC0, BIT6 + BIT7);
        }
 
-       /* Turn DFP High/Low Pad off. */
-       viafb_write_reg_mask(SR2A, VIASR, 0, BIT0 + BIT1 + BIT2 + BIT3);
-
        /* Power off LVDS channel. */
        switch (plvds_chip_info->output_interface) {
        case INTERFACE_LVDS0:
@@ -758,9 +760,6 @@ static void integrated_lvds_enable(struct lvds_setting_information
                break;
        }
 
-       /* Turn DFP High/Low pad on. */
-       viafb_write_reg_mask(SR2A, VIASR, 0x0F, BIT0 + BIT1 + BIT2 + BIT3);
-
        /* Power on LVDS channel. */
        switch (plvds_chip_info->output_interface) {
        case INTERFACE_LVDS0:
@@ -809,29 +808,48 @@ void viafb_lcd_disable(void)
                viafb_disable_lvds_vt1636(viaparinfo->lvds_setting_info,
                                    &viaparinfo->chip_info->lvds_chip_info);
        } else {
-               /* DFP-HL pad off          */
-               viafb_write_reg_mask(SR2A, VIASR, 0x00, 0x0F);
                /* Backlight off           */
                viafb_write_reg_mask(SR3D, VIASR, 0x00, 0x20);
                /* 24 bit DI data path off */
                viafb_write_reg_mask(CR91, VIACR, 0x80, 0x80);
-               /* Simultaneout disabled   */
-               viafb_write_reg_mask(CR6B, VIACR, 0x00, 0x08);
        }
 
        /* Disable expansion bit   */
        viafb_write_reg_mask(CR79, VIACR, 0x00, 0x01);
-       /* CRT path set to IGA1    */
-       viafb_write_reg_mask(SR16, VIASR, 0x00, 0x40);
        /* Simultaneous display disabled   */
        viafb_write_reg_mask(CR6B, VIACR, 0x00, 0x08);
-       /* IGA2 path disabled      */
-       viafb_write_reg_mask(CR6A, VIACR, 0x00, 0x80);
+}
 
+static void set_lcd_output_path(int set_iga, int output_interface)
+{
+       switch (output_interface) {
+       case INTERFACE_DFP:
+               if ((UNICHROME_K8M890 == viaparinfo->chip_info->gfx_chip_name)
+                   || (UNICHROME_P4M890 ==
+                   viaparinfo->chip_info->gfx_chip_name))
+                       viafb_write_reg_mask(CR97, VIACR, 0x84,
+                                      BIT7 + BIT2 + BIT1 + BIT0);
+       case INTERFACE_DVP0:
+       case INTERFACE_DVP1:
+       case INTERFACE_DFP_HIGH:
+       case INTERFACE_DFP_LOW:
+               if (set_iga == IGA2)
+                       viafb_write_reg(CR91, VIACR, 0x00);
+               break;
+       }
 }
 
 void viafb_lcd_enable(void)
 {
+       viafb_write_reg_mask(CR6B, VIACR, 0x00, BIT3);
+       viafb_write_reg_mask(CR6A, VIACR, 0x08, BIT3);
+       set_lcd_output_path(viaparinfo->lvds_setting_info->iga_path,
+               viaparinfo->chip_info->lvds_chip_info.output_interface);
+       if (viafb_LCD2_ON)
+               set_lcd_output_path(viaparinfo->lvds_setting_info2->iga_path,
+                       viaparinfo->chip_info->
+                       lvds_chip_info2.output_interface);
+
        if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266) {
                /* DI1 pad on */
                viafb_write_reg_mask(SR1E, VIASR, 0x30, 0x30);
@@ -855,39 +873,13 @@ void viafb_lcd_enable(void)
                viafb_enable_lvds_vt1636(viaparinfo->lvds_setting_info,
                                   &viaparinfo->chip_info->lvds_chip_info);
        } else {
-               /* DFP-HL pad on           */
-               viafb_write_reg_mask(SR2A, VIASR, 0x0F, 0x0F);
                /* Backlight on            */
                viafb_write_reg_mask(SR3D, VIASR, 0x20, 0x20);
                /* 24 bit DI data path on  */
                viafb_write_reg_mask(CR91, VIACR, 0x00, 0x80);
-
-               /* Set data source selection bit by iga path */
-               if (viaparinfo->lvds_setting_info->iga_path == IGA1) {
-                       /* DFP-H set to IGA1       */
-                       viafb_write_reg_mask(CR97, VIACR, 0x00, 0x10);
-                       /* DFP-L set to IGA1       */
-                       viafb_write_reg_mask(CR99, VIACR, 0x00, 0x10);
-               } else {
-                       /* DFP-H set to IGA2       */
-                       viafb_write_reg_mask(CR97, VIACR, 0x10, 0x10);
-                       /* DFP-L set to IGA2       */
-                       viafb_write_reg_mask(CR99, VIACR, 0x10, 0x10);
-               }
                /* LCD enabled             */
                viafb_write_reg_mask(CR6A, VIACR, 0x48, 0x48);
        }
-
-       if (viaparinfo->lvds_setting_info->iga_path == IGA1) {
-               /* CRT path set to IGA2    */
-               viafb_write_reg_mask(SR16, VIASR, 0x40, 0x40);
-               /* IGA2 path disabled      */
-               viafb_write_reg_mask(CR6A, VIACR, 0x00, 0x80);
-               /* IGA2 path enabled       */
-       } else {                /* IGA2 */
-               viafb_write_reg_mask(CR6A, VIACR, 0x80, 0x80);
-       }
-
 }
 
 static void lcd_powersequence_off(void)
@@ -993,7 +985,7 @@ static void check_diport_of_integrated_lvds(
                  plvds_chip_info->output_interface);
 }
 
-void viafb_init_lvds_output_interface(struct lvds_chip_information
+void __devinit viafb_init_lvds_output_interface(struct lvds_chip_information
                                *plvds_chip_info,
                                struct lvds_setting_information
                                *plvds_setting_info)
index b348efc360b822b9211ce104cd71b07531da6dc2..c7909fe29550a7bd8846c51244eb48743d3cd28a 100644 (file)
@@ -71,15 +71,15 @@ void viafb_enable_lvds_vt1636(struct lvds_setting_information
                        struct lvds_chip_information *plvds_chip_info);
 void viafb_lcd_disable(void);
 void viafb_lcd_enable(void);
-void viafb_init_lcd_size(void);
-void viafb_init_lvds_output_interface(struct lvds_chip_information
+void __devinit viafb_init_lcd_size(void);
+void __devinit viafb_init_lvds_output_interface(struct lvds_chip_information
                                *plvds_chip_info,
                                struct lvds_setting_information
                                *plvds_setting_info);
 void viafb_lcd_set_mode(struct crt_mode_table *mode_crt_table,
                  struct lvds_setting_information *plvds_setting_info,
                  struct lvds_chip_information *plvds_chip_info);
-int viafb_lvds_trasmitter_identify(void);
+int __devinit viafb_lvds_trasmitter_identify(void);
 void viafb_init_lvds_output_interface(struct lvds_chip_information
                                *plvds_chip_info,
                                struct lvds_setting_information
diff --git a/drivers/video/via/lcdtbl.h b/drivers/video/via/lcdtbl.h
deleted file mode 100644 (file)
index 6f3dd80..0000000
+++ /dev/null
@@ -1,591 +0,0 @@
-/*
- * Copyright 1998-2008 VIA Technologies, Inc. All Rights Reserved.
- * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
-
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License as published by the Free Software Foundation;
- * either version 2, or (at your option) any later version.
-
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even
- * the implied warranty of MERCHANTABILITY or FITNESS FOR
- * A PARTICULAR PURPOSE.See the GNU General Public License
- * for more details.
-
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-#ifndef __LCDTBL_H__
-#define __LCDTBL_H__
-
-#include "share.h"
-
-/* CLE266 Software Power Sequence */
-/* {Mask}, {Data}, {Delay} */
-int PowerSequenceOn[3][3] =
-    { {0x10, 0x08, 0x06}, {0x10, 0x08, 0x06}, {0x19, 0x1FE, 0x01} };
-int PowerSequenceOff[3][3] =
-    { {0x06, 0x08, 0x10}, {0x00, 0x00, 0x00}, {0xD2, 0x19, 0x01} };
-
-/* ++++++ P880 ++++++ */
-/*   Panel 1600x1200   */
-struct io_reg P880_LCD_RES_6X4_16X12[] = {
-       /*IGA2 Horizontal Total */
-       {VIACR, CR50, 0xFF, 0x73}, {VIACR, CR55, 0x0F, 0x08},
-       /*IGA2 Horizontal Blank End */
-       {VIACR, CR53, 0xFF, 0x73}, {VIACR, CR54, 0x38, 0x00},
-       {VIACR, CR5D, 0x40, 0x40},
-       /*IGA2 Horizontal Total Shadow */
-       {VIACR, CR6D, 0xFF, 0x5A}, {VIACR, CR71, 0x08, 0x00},
-       /*IGA2 Horizontal Blank End Shadow */
-       {VIACR, CR6E, 0xFF, 0x5E},
-       /*IGA2 Offset */
-       {VIACR, CR66, 0xFF, 0xD6}, {VIACR, CR67, 0x03, 0x00},
-        /*VCLK*/ {VIASR, SR44, 0xFF, 0x7D}, {VIASR, SR45, 0xFF, 0x8C},
-       {VIASR, SR46, 0xFF, 0x02}
-
-};
-
-#define NUM_TOTAL_P880_LCD_RES_6X4_16X12 ARRAY_SIZE(P880_LCD_RES_6X4_16X12)
-
-struct io_reg P880_LCD_RES_7X4_16X12[] = {
-       /*IGA2 Horizontal Total */
-       {VIACR, CR50, 0xFF, 0x67}, {VIACR, CR55, 0x0F, 0x08},
-       /*IGA2 Horizontal Blank End */
-       {VIACR, CR53, 0xFF, 0x67}, {VIACR, CR54, 0x38, 0x00},
-       {VIACR, CR5D, 0x40, 0x40},
-       /*IGA2 Horizontal Total Shadow */
-       {VIACR, CR6D, 0xFF, 0x74}, {VIACR, CR71, 0x08, 0x00},
-       /*IGA2 Horizontal Blank End Shadow */
-       {VIACR, CR6E, 0xFF, 0x78},
-       /*IGA2 Offset */
-       {VIACR, CR66, 0xFF, 0xF5}, {VIACR, CR67, 0x03, 0x00},
-        /*VCLK*/ {VIASR, SR44, 0xFF, 0x78}, {VIASR, SR45, 0xFF, 0x8C},
-       {VIASR, SR46, 0xFF, 0x01}
-
-};
-
-#define NUM_TOTAL_P880_LCD_RES_7X4_16X12 ARRAY_SIZE(P880_LCD_RES_7X4_16X12)
-
-struct io_reg P880_LCD_RES_8X6_16X12[] = {
-       /*IGA2 Horizontal Total */
-       {VIACR, CR50, 0xFF, 0x65}, {VIACR, CR55, 0x0F, 0x08},
-       /*IGA2 Horizontal Blank End */
-       {VIACR, CR53, 0xFF, 0x65}, {VIACR, CR54, 0x38, 0x00},
-       {VIACR, CR5D, 0x40, 0x40},
-       /*IGA2 Horizontal Total Shadow */
-       {VIACR, CR6D, 0xFF, 0x7F}, {VIACR, CR71, 0x08, 0x00},
-       /*IGA2 Horizontal Blank End Shadow */
-       {VIACR, CR6E, 0xFF, 0x83},
-       /*IGA2 Offset */
-       {VIACR, CR66, 0xFF, 0xE1}, {VIACR, CR67, 0x03, 0x00},
-        /*VCLK*/ {VIASR, SR44, 0xFF, 0x6D}, {VIASR, SR45, 0xFF, 0x88},
-       {VIASR, SR46, 0xFF, 0x03}
-
-};
-
-#define NUM_TOTAL_P880_LCD_RES_8X6_16X12 ARRAY_SIZE(P880_LCD_RES_8X6_16X12)
-
-struct io_reg P880_LCD_RES_10X7_16X12[] = {
-       /*IGA2 Horizontal Total */
-       {VIACR, CR50, 0xFF, 0x65}, {VIACR, CR55, 0x0F, 0x08},
-       /*IGA2 Horizontal Blank End */
-       {VIACR, CR53, 0xFF, 0x65}, {VIACR, CR54, 0x38, 0x00},
-       {VIACR, CR5D, 0x40, 0x40},
-       /*IGA2 Horizontal Total Shadow */
-       {VIACR, CR6D, 0xFF, 0xAB}, {VIACR, CR71, 0x08, 0x00},
-       /*IGA2 Horizontal Blank End Shadow */
-       {VIACR, CR6E, 0xFF, 0xAF},
-       /*IGA2 Offset */
-       {VIACR, CR66, 0xFF, 0xF0}, {VIACR, CR67, 0x03, 0x00},
-        /*VCLK*/ {VIASR, SR44, 0xFF, 0x92}, {VIASR, SR45, 0xFF, 0x88},
-       {VIASR, SR46, 0xFF, 0x03}
-
-};
-
-#define NUM_TOTAL_P880_LCD_RES_10X7_16X12 ARRAY_SIZE(P880_LCD_RES_10X7_16X12)
-
-struct io_reg P880_LCD_RES_12X10_16X12[] = {
-       /*IGA2 Horizontal Total */
-       {VIACR, CR50, 0xFF, 0x7D}, {VIACR, CR55, 0x0F, 0x08},
-       /*IGA2 Horizontal Blank End */
-       {VIACR, CR53, 0xFF, 0x7D}, {VIACR, CR54, 0x38, 0x00},
-       {VIACR, CR5D, 0x40, 0x40},
-       /*IGA2 Horizontal Total Shadow */
-       {VIACR, CR6D, 0xFF, 0xD0}, {VIACR, CR71, 0x08, 0x00},
-       /*IGA2 Horizontal Blank End Shadow */
-       {VIACR, CR6E, 0xFF, 0xD4},
-       /*IGA2 Offset */
-       {VIACR, CR66, 0xFF, 0xFA}, {VIACR, CR67, 0x03, 0x00},
-        /*VCLK*/ {VIASR, SR44, 0xFF, 0xF6}, {VIASR, SR45, 0xFF, 0x88},
-       {VIASR, SR46, 0xFF, 0x05}
-
-};
-
-#define NUM_TOTAL_P880_LCD_RES_12X10_16X12 ARRAY_SIZE(P880_LCD_RES_12X10_16X12)
-
-/*   Panel 1400x1050   */
-struct io_reg P880_LCD_RES_6X4_14X10[] = {
-       /* 640x480                          */
-       /* IGA2 Horizontal Total            */
-       {VIACR, CR50, 0xFF, 0x9D}, {VIACR, CR55, 0x0F, 0x56},
-       /* IGA2 Horizontal Blank End        */
-       {VIACR, CR53, 0xFF, 0x9D}, {VIACR, CR54, 0x38, 0x75},
-       {VIACR, CR5D, 0x40, 0x24},
-       /* IGA2 Horizontal Total Shadow     */
-       {VIACR, CR6D, 0xFF, 0x5F}, {VIACR, CR71, 0x08, 0x44},
-       /* IGA2 Horizontal Blank End Shadow */
-       {VIACR, CR6E, 0xFF, 0x63},
-       /* IGA2 Offset                      */
-       {VIACR, CR66, 0xFF, 0xB4}, {VIACR, CR67, 0x03, 0x00},
-       /* VCLK                             */
-       {VIASR, SR44, 0xFF, 0xC6}, {VIASR, SR45, 0xFF, 0x8C},
-       {VIASR, SR46, 0xFF, 0x05}
-};
-
-#define NUM_TOTAL_P880_LCD_RES_6X4_14X10 ARRAY_SIZE(P880_LCD_RES_6X4_14X10)
-
-struct io_reg P880_LCD_RES_8X6_14X10[] = {
-       /* 800x600                          */
-       /* IGA2 Horizontal Total            */
-       {VIACR, CR50, 0xFF, 0x9D}, {VIACR, CR55, 0x0F, 0x56},
-       /* IGA2 Horizontal Blank End        */
-       {VIACR, CR53, 0xFF, 0x9D}, {VIACR, CR54, 0x38, 0x75},
-       {VIACR, CR5D, 0x40, 0x24},
-       /* IGA2 Horizontal Total Shadow     */
-       {VIACR, CR6D, 0xFF, 0x7F}, {VIACR, CR71, 0x08, 0x44},
-       /* IGA2 Horizontal Blank End Shadow */
-       {VIACR, CR6E, 0xFF, 0x83},
-       /* IGA2 Offset                      */
-       {VIACR, CR66, 0xFF, 0xBE}, {VIACR, CR67, 0x03, 0x00},
-       /* VCLK                             */
-       {VIASR, SR44, 0xFF, 0x06}, {VIASR, SR45, 0xFF, 0x8D},
-       {VIASR, SR46, 0xFF, 0x05}
-};
-
-#define NUM_TOTAL_P880_LCD_RES_8X6_14X10 ARRAY_SIZE(P880_LCD_RES_8X6_14X10)
-
-/* ++++++ K400 ++++++ */
-/*   Panel 1600x1200   */
-struct io_reg K400_LCD_RES_6X4_16X12[] = {
-       /*IGA2 Horizontal Total */
-       {VIACR, CR50, 0xFF, 0x73}, {VIACR, CR55, 0x0F, 0x08},
-       /*IGA2 Horizontal Blank End */
-       {VIACR, CR53, 0xFF, 0x73}, {VIACR, CR54, 0x38, 0x00},
-       {VIACR, CR5D, 0x40, 0x40},
-       /*IGA2 Horizontal Total Shadow */
-       {VIACR, CR6D, 0xFF, 0x5A}, {VIACR, CR71, 0x08, 0x00},
-       /*IGA2 Horizontal Blank End Shadow */
-       {VIACR, CR6E, 0xFF, 0x5E},
-       /*IGA2 Offset */
-       {VIACR, CR66, 0xFF, 0xDA}, {VIACR, CR67, 0x03, 0x00},
-        /*VCLK*/ {VIASR, SR46, 0xFF, 0xC4}, {VIASR, SR47, 0xFF, 0x7F}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_6X4_16X12 ARRAY_SIZE(K400_LCD_RES_6X4_16X12)
-
-struct io_reg K400_LCD_RES_7X4_16X12[] = {
-       /*IGA2 Horizontal Total */
-       {VIACR, CR50, 0xFF, 0x67}, {VIACR, CR55, 0x0F, 0x08},
-       /*IGA2 Horizontal Blank End */
-       {VIACR, CR53, 0xFF, 0x67}, {VIACR, CR54, 0x38, 0x00},
-       {VIACR, CR5D, 0x40, 0x40},
-       /*IGA2 Horizontal Total Shadow */
-       {VIACR, CR6D, 0xFF, 0x74}, {VIACR, CR71, 0x08, 0x00},
-       /*IGA2 Horizontal Blank End Shadow */
-       {VIACR, CR6E, 0xFF, 0x78},
-       /*IGA2 Offset */
-       {VIACR, CR66, 0xFF, 0xF5}, {VIACR, CR67, 0x03, 0x00},
-        /*VCLK*/ {VIASR, SR46, 0xFF, 0x46}, {VIASR, SR47, 0xFF, 0x3D}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_7X4_16X12 ARRAY_SIZE(K400_LCD_RES_7X4_16X12)
-
-struct io_reg K400_LCD_RES_8X6_16X12[] = {
-       /*IGA2 Horizontal Total */
-       {VIACR, CR50, 0xFF, 0x65}, {VIACR, CR55, 0x0F, 0x08},
-       /*IGA2 Horizontal Blank End */
-       {VIACR, CR53, 0xFF, 0x65}, {VIACR, CR54, 0x38, 0x00},
-       {VIACR, CR5D, 0x40, 0x40},
-       /*IGA2 Horizontal Total Shadow */
-       {VIACR, CR6D, 0xFF, 0x7F}, {VIACR, CR71, 0x08, 0x00},
-       /*IGA2 Horizontal Blank End Shadow */
-       {VIACR, CR6E, 0xFF, 0x83},
-       /*IGA2 Offset */
-       {VIACR, CR66, 0xFF, 0xE1}, {VIACR, CR67, 0x03, 0x00},
-        /*VCLK*/ {VIASR, SR46, 0xFF, 0x85}, {VIASR, SR47, 0xFF, 0x6F}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_8X6_16X12 ARRAY_SIZE(K400_LCD_RES_8X6_16X12)
-
-struct io_reg K400_LCD_RES_10X7_16X12[] = {
-       /*IGA2 Horizontal Total */
-       {VIACR, CR50, 0xFF, 0x65}, {VIACR, CR55, 0x0F, 0x08},
-       /*IGA2 Horizontal Blank End */
-       {VIACR, CR53, 0xFF, 0x65}, {VIACR, CR54, 0x38, 0x00},
-       {VIACR, CR5D, 0x40, 0x40},
-       /*IGA2 Horizontal Total Shadow */
-       {VIACR, CR6D, 0xFF, 0xAB}, {VIACR, CR71, 0x08, 0x00},
-       /*IGA2 Horizontal Blank End Shadow */
-       {VIACR, CR6E, 0xFF, 0xAF},
-       /*IGA2 Offset */
-       {VIACR, CR66, 0xFF, 0xF0}, {VIACR, CR67, 0x03, 0x00},
-        /*VCLK*/ {VIASR, SR46, 0xFF, 0x45}, {VIASR, SR47, 0xFF, 0x4A}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_10X7_16X12 ARRAY_SIZE(K400_LCD_RES_10X7_16X12)
-
-struct io_reg K400_LCD_RES_12X10_16X12[] = {
-       /*IGA2 Horizontal Total */
-       {VIACR, CR50, 0xFF, 0x7D}, {VIACR, CR55, 0x0F, 0x08},
-       /*IGA2 Horizontal Blank End */
-       {VIACR, CR53, 0xFF, 0x7D}, {VIACR, CR54, 0x38, 0x00},
-       {VIACR, CR5D, 0x40, 0x40},
-       /*IGA2 Horizontal Total Shadow */
-       {VIACR, CR6D, 0xFF, 0xD0}, {VIACR, CR71, 0x08, 0x00},
-       /*IGA2 Horizontal Blank End Shadow */
-       {VIACR, CR6E, 0xFF, 0xD4},
-       /*IGA2 Offset */
-       {VIACR, CR66, 0xFF, 0xFA}, {VIACR, CR67, 0x03, 0x00},
-        /*VCLK*/ {VIASR, SR46, 0xFF, 0x47}, {VIASR, SR47, 0xFF, 0x7C}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_12X10_16X12 ARRAY_SIZE(K400_LCD_RES_12X10_16X12)
-
-/*   Panel 1400x1050   */
-struct io_reg K400_LCD_RES_6X4_14X10[] = {
-       /* 640x400                          */
-       /* IGA2 Horizontal Total            */
-       {VIACR, CR50, 0xFF, 0x9D}, {VIACR, CR55, 0x0F, 0x56},
-       /* IGA2 Horizontal Blank End        */
-       {VIACR, CR53, 0xFF, 0x9D}, {VIACR, CR54, 0x38, 0x75},
-       {VIACR, CR5D, 0x40, 0x24},
-       /* IGA2 Horizontal Total Shadow     */
-       {VIACR, CR6D, 0xFF, 0x5F}, {VIACR, CR71, 0x08, 0x44},
-       /* IGA2 Horizontal Blank End Shadow */
-       {VIACR, CR6E, 0xFF, 0x63},
-       /* IGA2 Offset                      */
-       {VIACR, CR66, 0xFF, 0xB4}, {VIACR, CR67, 0x03, 0x00},
-       /* VCLK                             */
-       {VIASR, SR46, 0xFF, 0x07}, {VIASR, SR47, 0xFF, 0x19}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_6X4_14X10 ARRAY_SIZE(K400_LCD_RES_6X4_14X10)
-
-struct io_reg K400_LCD_RES_8X6_14X10[] = {
-       /* 800x600                          */
-       /* IGA2 Horizontal Total            */
-       {VIACR, CR50, 0xFF, 0x9D}, {VIACR, CR55, 0x0F, 0x56},
-       /* IGA2 Horizontal Blank End        */
-       {VIACR, CR53, 0xFF, 0x9D}, {VIACR, CR54, 0x38, 0x75},
-       {VIACR, CR5D, 0x40, 0x24},
-       /* IGA2 Horizontal Total Shadow     */
-       {VIACR, CR6D, 0xFF, 0x7F}, {VIACR, CR71, 0x08, 0x44},
-       /* IGA2 Horizontal Blank End Shadow */
-       {VIACR, CR6E, 0xFF, 0x83},
-       /* IGA2 Offset                      */
-       {VIACR, CR66, 0xFF, 0xBE}, {VIACR, CR67, 0x03, 0x00},
-       /* VCLK                             */
-       {VIASR, SR46, 0xFF, 0x07}, {VIASR, SR47, 0xFF, 0x21}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_8X6_14X10 ARRAY_SIZE(K400_LCD_RES_8X6_14X10)
-
-struct io_reg K400_LCD_RES_10X7_14X10[] = {
-       /* 1024x768                         */
-       /* IGA2 Horizontal Total            */
-       {VIACR, CR50, 0xFF, 0x9D}, {VIACR, CR55, 0x0F, 0x56},
-       /* IGA2 Horizontal Blank End        */
-       {VIACR, CR53, 0xFF, 0x9D}, {VIACR, CR54, 0x38, 0x75},
-       {VIACR, CR5D, 0x40, 0x24},
-       /* IGA2 Horizontal Total Shadow     */
-       {VIACR, CR6D, 0xFF, 0xA3}, {VIACR, CR71, 0x08, 0x44},
-       /* IGA2 Horizontal Blank End Shadow */
-       {VIACR, CR6E, 0xFF, 0xA7},
-       /* IGA2 Offset                      */
-       {VIACR, CR66, 0xFF, 0xC3}, {VIACR, CR67, 0x03, 0x04},
-       /* VCLK                             */
-       {VIASR, SR46, 0xFF, 0x05}, {VIASR, SR47, 0xFF, 0x1E}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_10X7_14X10 ARRAY_SIZE(K400_LCD_RES_10X7_14X10)
-
-struct io_reg K400_LCD_RES_12X10_14X10[] = {
-       /* 1280x768, 1280x960, 1280x1024    */
-       /* IGA2 Horizontal Total            */
-       {VIACR, CR50, 0xFF, 0x97}, {VIACR, CR55, 0x0F, 0x56},
-       /* IGA2 Horizontal Blank End        */
-       {VIACR, CR53, 0xFF, 0x97}, {VIACR, CR54, 0x38, 0x75},
-       {VIACR, CR5D, 0x40, 0x24},
-       /* IGA2 Horizontal Total Shadow     */
-       {VIACR, CR6D, 0xFF, 0xCE}, {VIACR, CR71, 0x08, 0x44},
-       /* IGA2 Horizontal Blank End Shadow */
-       {VIACR, CR6E, 0xFF, 0xD2},
-       /* IGA2 Offset                      */
-       {VIACR, CR66, 0xFF, 0xC9}, {VIACR, CR67, 0x03, 0x04},
-       /* VCLK                             */
-       {VIASR, SR46, 0xFF, 0x84}, {VIASR, SR47, 0xFF, 0x79}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_12X10_14X10 ARRAY_SIZE(K400_LCD_RES_12X10_14X10)
-
-/* ++++++ K400 ++++++ */
-/*   Panel 1366x768   */
-struct io_reg K400_LCD_RES_6X4_1366X7[] = {
-       /* 640x400                          */
-       /* IGA2 Horizontal Total            */
-       {VIACR, CR50, 0xFF, 0x47}, {VIACR, CR55, 0x0F, 0x35},
-       /* IGA2 Horizontal Blank End        */
-       {VIACR, CR53, 0xFF, 0x47}, {VIACR, CR54, 0x38, 0x2B},
-       {VIACR, CR5D, 0x40, 0x13},
-       /* IGA2 Horizontal Total Shadow     */
-       {VIACR, CR6D, 0xFF, 0x60}, {VIACR, CR71, 0x08, 0x23},
-       /* IGA2 Horizontal Blank End Shadow */
-       {VIACR, CR6E, 0xFF, 0x64},
-       /* IGA2 Offset                      */
-       {VIACR, CR66, 0xFF, 0x8C}, {VIACR, CR67, 0x03, 0x00},
-       /* VCLK                             */
-       {VIASR, SR46, 0xFF, 0x87}, {VIASR, SR47, 0xFF, 0x4C}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_6X4_1366X7 ARRAY_SIZE(K400_LCD_RES_6X4_1366X7)
-
-struct io_reg K400_LCD_RES_7X4_1366X7[] = {
-       /* IGA2 Horizontal Total            */
-       {VIACR, CR50, 0xFF, 0x3B}, {VIACR, CR55, 0x0F, 0x35},
-       /* IGA2 Horizontal Blank End        */
-       {VIACR, CR53, 0xFF, 0x3B}, {VIACR, CR54, 0x38, 0x2B},
-       {VIACR, CR5D, 0x40, 0x13},
-       /* IGA2 Horizontal Total Shadow     */
-       {VIACR, CR6D, 0xFF, 0x71}, {VIACR, CR71, 0x08, 0x23},
-       /* IGA2 Horizontal Blank End Shadow */
-       {VIACR, CR6E, 0xFF, 0x75},
-       /* IGA2 Offset                      */
-       {VIACR, CR66, 0xFF, 0x96}, {VIACR, CR67, 0x03, 0x00},
-       /* VCLK                             */
-       {VIASR, SR46, 0xFF, 0x05}, {VIASR, SR47, 0xFF, 0x10}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_7X4_1366X7 ARRAY_SIZE(K400_LCD_RES_7X4_1366X7)
-
-struct io_reg K400_LCD_RES_8X6_1366X7[] = {
-       /* 800x600                          */
-       /* IGA2 Horizontal Total            */
-       {VIACR, CR50, 0xFF, 0x37}, {VIACR, CR55, 0x0F, 0x35},
-       /* IGA2 Horizontal Blank End        */
-       {VIACR, CR53, 0xFF, 0x37}, {VIACR, CR54, 0x38, 0x2B},
-       {VIACR, CR5D, 0x40, 0x13},
-       /* IGA2 Horizontal Total Shadow     */
-       {VIACR, CR6D, 0xFF, 0x7E}, {VIACR, CR71, 0x08, 0x23},
-       /* IGA2 Horizontal Blank End Shadow */
-       {VIACR, CR6E, 0xFF, 0x82},
-       /* IGA2 Offset                      */
-       {VIACR, CR66, 0xFF, 0x8C}, {VIACR, CR67, 0x03, 0x00},
-       /* VCLK                             */
-       {VIASR, SR46, 0xFF, 0x84}, {VIASR, SR47, 0xFF, 0xB9}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_8X6_1366X7 ARRAY_SIZE(K400_LCD_RES_8X6_1366X7)
-
-struct io_reg K400_LCD_RES_10X7_1366X7[] = {
-       /* 1024x768                         */
-       /* IGA2 Horizontal Total            */
-       {VIACR, CR50, 0xFF, 0x9D}, {VIACR, CR55, 0x0F, 0x56},
-       /* IGA2 Horizontal Blank End        */
-       {VIACR, CR53, 0xFF, 0x9D}, {VIACR, CR54, 0x38, 0x75},
-       {VIACR, CR5D, 0x40, 0x24},
-       /* IGA2 Horizontal Total Shadow     */
-       {VIACR, CR6D, 0xFF, 0xA3}, {VIACR, CR71, 0x08, 0x44},
-       /* IGA2 Horizontal Blank End Shadow */
-       {VIACR, CR6E, 0xFF, 0xA7},
-       /* IGA2 Offset                      */
-       {VIACR, CR66, 0xFF, 0xC3}, {VIACR, CR67, 0x03, 0x04},
-       /* VCLK                             */
-       {VIASR, SR46, 0xFF, 0x05}, {VIASR, SR47, 0xFF, 0x1E}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_10X7_1366X7 ARRAY_SIZE(K400_LCD_RES_10X7_1366X7)
-
-struct io_reg K400_LCD_RES_12X10_1366X7[] = {
-       /* 1280x768, 1280x960, 1280x1024    */
-       /* IGA2 Horizontal Total            */
-       {VIACR, CR50, 0xFF, 0x97}, {VIACR, CR55, 0x0F, 0x56},
-       /* IGA2 Horizontal Blank End        */
-       {VIACR, CR53, 0xFF, 0x97}, {VIACR, CR54, 0x38, 0x75},
-       {VIACR, CR5D, 0x40, 0x24},
-       /* IGA2 Horizontal Total Shadow     */
-       {VIACR, CR6D, 0xFF, 0xCE}, {VIACR, CR71, 0x08, 0x44},
-       /* IGA2 Horizontal Blank End Shadow */
-       {VIACR, CR6E, 0xFF, 0xD2},
-       /* IGA2 Offset                      */
-       {VIACR, CR66, 0xFF, 0xC9}, {VIACR, CR67, 0x03, 0x04},
-       /* VCLK                             */
-       {VIASR, SR46, 0xFF, 0x84}, {VIASR, SR47, 0xFF, 0x79}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_12X10_1366X7\
-                       ARRAY_SIZE(K400_LCD_RES_12X10_1366X7)
-
-/* ++++++ K400 ++++++ */
-/*   Panel 1280x1024   */
-struct io_reg K400_LCD_RES_6X4_12X10[] = {
-       /*IGA2 Horizontal Total */
-       {VIACR, CR50, 0xFF, 0x9D}, {VIACR, CR55, 0x0F, 0x46},
-       /*IGA2 Horizontal Blank End */
-       {VIACR, CR53, 0xFF, 0x9D}, {VIACR, CR54, 0x38, 0x74},
-       {VIACR, CR5D, 0x40, 0x1C},
-       /*IGA2 Horizontal Total Shadow */
-       {VIACR, CR6D, 0xFF, 0x5F}, {VIACR, CR71, 0x08, 0x34},
-       /*IGA2 Horizontal Blank End Shadow */
-       {VIACR, CR6E, 0xFF, 0x63},
-       /*IGA2 Offset */
-       {VIACR, CR66, 0xFF, 0xAA}, {VIACR, CR67, 0x03, 0x00},
-        /*VCLK*/ {VIASR, SR46, 0xFF, 0x07}, {VIASR, SR47, 0xFF, 0x19}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_6X4_12X10 ARRAY_SIZE(K400_LCD_RES_6X4_12X10)
-
-struct io_reg K400_LCD_RES_7X4_12X10[] = {
-       /*IGA2 Horizontal Total */
-       {VIACR, CR50, 0xFF, 0x9D}, {VIACR, CR55, 0x0F, 0x46},
-       /*IGA2 Horizontal Blank End */
-       {VIACR, CR53, 0xFF, 0x9D}, {VIACR, CR54, 0x38, 0x74},
-       {VIACR, CR5D, 0x40, 0x1C},
-       /*IGA2 Horizontal Total Shadow */
-       {VIACR, CR6D, 0xFF, 0x68}, {VIACR, CR71, 0x08, 0x34},
-       /*IGA2 Horizontal Blank End Shadow */
-       {VIACR, CR6E, 0xFF, 0x6C},
-       /*IGA2 Offset */
-       {VIACR, CR66, 0xFF, 0xA8}, {VIACR, CR67, 0x03, 0x00},
-        /*VCLK*/ {VIASR, SR46, 0xFF, 0x87}, {VIASR, SR47, 0xFF, 0xED}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_7X4_12X10 ARRAY_SIZE(K400_LCD_RES_7X4_12X10)
-
-struct io_reg K400_LCD_RES_8X6_12X10[] = {
-       /*IGA2 Horizontal Total */
-       {VIACR, CR50, 0xFF, 0x9D}, {VIACR, CR55, 0x0F, 0x46},
-       /*IGA2 Horizontal Blank End */
-       {VIACR, CR53, 0xFF, 0x9D}, {VIACR, CR54, 0x38, 0x74},
-       {VIACR, CR5D, 0x40, 0x1C},
-       /*IGA2 Horizontal Total Shadow */
-       {VIACR, CR6D, 0xFF, 0x7F}, {VIACR, CR71, 0x08, 0x34},
-       /*IGA2 Horizontal Blank End Shadow */
-       {VIACR, CR6E, 0xFF, 0x83},
-       /*IGA2 Offset */
-       {VIACR, CR66, 0xFF, 0xBE}, {VIACR, CR67, 0x03, 0x00},
-        /*VCLK*/ {VIASR, SR46, 0xFF, 0x07}, {VIASR, SR47, 0xFF, 0x21}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_8X6_12X10 ARRAY_SIZE(K400_LCD_RES_8X6_12X10)
-
-struct io_reg K400_LCD_RES_10X7_12X10[] = {
-       /*IGA2 Horizontal Total */
-       {VIACR, CR50, 0xFF, 0x9D}, {VIACR, CR55, 0x0F, 0x46},
-       /*IGA2 Horizontal Blank End */
-       {VIACR, CR53, 0xFF, 0x9D}, {VIACR, CR54, 0x38, 0x74},
-       {VIACR, CR5D, 0x40, 0x1C},
-       /*IGA2 Horizontal Total Shadow */
-       {VIACR, CR6D, 0xFF, 0xA3}, {VIACR, CR71, 0x08, 0x34},
-       /*IGA2 Horizontal Blank End Shadow */
-       {VIACR, CR6E, 0xFF, 0xA7},
-       /*IGA2 Offset */
-       {VIACR, CR66, 0xFF, 0xBE}, {VIACR, CR67, 0x03, 0x04},
-        /*VCLK*/ {VIASR, SR46, 0xFF, 0x05}, {VIASR, SR47, 0xFF, 0x1E}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_10X7_12X10 ARRAY_SIZE(K400_LCD_RES_10X7_12X10)
-
-/* ++++++ K400 ++++++ */
-/*   Panel 1024x768    */
-struct io_reg K400_LCD_RES_6X4_10X7[] = {
-       /*IGA2 Horizontal Total */
-       {VIACR, CR50, 0xFF, 0x47}, {VIACR, CR55, 0x0F, 0x35},
-       /*IGA2 Horizontal Blank End */
-       {VIACR, CR53, 0xFF, 0x47}, {VIACR, CR54, 0x38, 0x2B},
-       {VIACR, CR5D, 0x40, 0x13},
-       /*IGA2 Horizontal Total Shadow */
-       {VIACR, CR6D, 0xFF, 0x60}, {VIACR, CR71, 0x08, 0x23},
-       /*IGA2 Horizontal Blank End Shadow */
-       {VIACR, CR6E, 0xFF, 0x64},
-       /*IGA2 Offset */
-       {VIACR, CR66, 0xFF, 0x8C}, {VIACR, CR67, 0x03, 0x00},
-        /*VCLK*/ {VIASR, SR46, 0xFF, 0x87}, {VIASR, SR47, 0xFF, 0x4C}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_6X4_10X7 ARRAY_SIZE(K400_LCD_RES_6X4_10X7)
-
-struct io_reg K400_LCD_RES_7X4_10X7[] = {
-       /*IGA2 Horizontal Total */
-       {VIACR, CR50, 0xFF, 0x3B}, {VIACR, CR55, 0x0F, 0x35},
-       /*IGA2 Horizontal Blank End */
-       {VIACR, CR53, 0xFF, 0x3B}, {VIACR, CR54, 0x38, 0x2B},
-       {VIACR, CR5D, 0x40, 0x13},
-       /*IGA2 Horizontal Total Shadow */
-       {VIACR, CR6D, 0xFF, 0x71}, {VIACR, CR71, 0x08, 0x23},
-       /*IGA2 Horizontal Blank End Shadow */
-       {VIACR, CR6E, 0xFF, 0x75},
-       /*IGA2 Offset */
-       {VIACR, CR66, 0xFF, 0x96}, {VIACR, CR67, 0x03, 0x00},
-        /*VCLK*/ {VIASR, SR46, 0xFF, 0x05}, {VIASR, SR47, 0xFF, 0x10}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_7X4_10X7 ARRAY_SIZE(K400_LCD_RES_7X4_10X7)
-
-struct io_reg K400_LCD_RES_8X6_10X7[] = {
-       /*IGA2 Horizontal Total */
-       {VIACR, CR50, 0xFF, 0x37}, {VIACR, CR55, 0x0F, 0x35},
-       /*IGA2 Horizontal Blank End */
-       {VIACR, CR53, 0xFF, 0x37}, {VIACR, CR54, 0x38, 0x2B},
-       {VIACR, CR5D, 0x40, 0x13},
-       /*IGA2 Horizontal Total Shadow */
-       {VIACR, CR6D, 0xFF, 0x7E}, {VIACR, CR71, 0x08, 0x23},
-       /*IGA2 Horizontal Blank End Shadow */
-       {VIACR, CR6E, 0xFF, 0x82},
-       /*IGA2 Offset */
-       {VIACR, CR66, 0xFF, 0x8C}, {VIACR, CR67, 0x03, 0x00},
-        /*VCLK*/ {VIASR, SR46, 0xFF, 0x84}, {VIASR, SR47, 0xFF, 0xB9}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_8X6_10X7 ARRAY_SIZE(K400_LCD_RES_8X6_10X7)
-
-/* ++++++ K400 ++++++ */
-/*   Panel 800x600     */
-struct io_reg K400_LCD_RES_6X4_8X6[] = {
-       /*IGA2 Horizontal Total */
-       {VIACR, CR50, 0xFF, 0x1A}, {VIACR, CR55, 0x0F, 0x34},
-       /*IGA2 Horizontal Blank End */
-       {VIACR, CR53, 0xFF, 0x1A}, {VIACR, CR54, 0x38, 0xE3},
-       {VIACR, CR5D, 0x40, 0x12},
-       /*IGA2 Horizontal Total Shadow */
-       {VIACR, CR6D, 0xFF, 0x5F}, {VIACR, CR71, 0x08, 0x22},
-       /*IGA2 Horizontal Blank End Shadow */
-       {VIACR, CR6E, 0xFF, 0x63},
-       /*IGA2 Offset */
-       {VIACR, CR66, 0xFF, 0x6E}, {VIACR, CR67, 0x03, 0x00},
-        /*VCLK*/ {VIASR, SR46, 0xFF, 0x86}, {VIASR, SR47, 0xFF, 0xB3}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_6X4_8X6 ARRAY_SIZE(K400_LCD_RES_6X4_8X6)
-
-struct io_reg K400_LCD_RES_7X4_8X6[] = {
-       /*IGA2 Horizontal Total */
-       {VIACR, CR50, 0xFF, 0x1F}, {VIACR, CR55, 0x0F, 0x34},
-       /*IGA2 Horizontal Blank End */
-       {VIACR, CR53, 0xFF, 0x1F}, {VIACR, CR54, 0x38, 0xE3},
-       {VIACR, CR5D, 0x40, 0x12},
-       /*IGA2 Horizontal Total Shadow */
-       {VIACR, CR6D, 0xFF, 0x7F}, {VIACR, CR71, 0x08, 0x22},
-       /*IGA2 Horizontal Blank End Shadow */
-       {VIACR, CR6E, 0xFF, 0x83},
-       /*IGA2 Offset */
-       {VIACR, CR66, 0xFF, 0x78}, {VIACR, CR67, 0x03, 0x00},
-        /*VCLK*/ {VIASR, SR46, 0xFF, 0xC4}, {VIASR, SR47, 0xFF, 0x59}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_7X4_8X6 ARRAY_SIZE(K400_LCD_RES_7X4_8X6)
-
-#endif /* __LCDTBL_H__ */
diff --git a/drivers/video/via/tbl1636.c b/drivers/video/via/tbl1636.c
deleted file mode 100644 (file)
index 2d84534..0000000
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright 1998-2008 VIA Technologies, Inc. All Rights Reserved.
- * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
-
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License as published by the Free Software Foundation;
- * either version 2, or (at your option) any later version.
-
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even
- * the implied warranty of MERCHANTABILITY or FITNESS FOR
- * A PARTICULAR PURPOSE.See the GNU General Public License
- * for more details.
-
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#include "global.h"
-struct IODATA COMMON_INIT_TBL_VT1636[] = {
-/*  Index, Mask, Value */
-       /* Set panel power sequence timing */
-       {0x10, 0xC0, 0x00},
-       /* T1: VDD on - Data on. Each increment is 1 ms. (50ms = 031h) */
-       {0x0B, 0xFF, 0x40},
-       /* T2: Data on - Backlight on. Each increment is 2 ms. (210ms = 068h) */
-       {0x0C, 0xFF, 0x31},
-       /* T3: Backlight off -Data off. Each increment is 2 ms. (210ms = 068h)*/
-       {0x0D, 0xFF, 0x31},
-       /* T4: Data off - VDD off. Each increment is 1 ms. (50ms = 031h) */
-       {0x0E, 0xFF, 0x68},
-       /* T5: VDD off - VDD on. Each increment is 100 ms. (500ms = 04h) */
-       {0x0F, 0xFF, 0x68},
-       /* LVDS output power up */
-       {0x09, 0xA0, 0xA0},
-       /* turn on back light */
-       {0x10, 0x33, 0x13}
-};
-
-struct IODATA DUAL_CHANNEL_ENABLE_TBL_VT1636[] = {
-/*  Index, Mask, Value */
-       {0x08, 0xF0, 0xE0}      /* Input Data Mode Select */
-};
-
-struct IODATA SINGLE_CHANNEL_ENABLE_TBL_VT1636[] = {
-/*  Index, Mask, Value */
-       {0x08, 0xF0, 0x00}      /* Input Data Mode Select */
-};
-
-struct IODATA DITHERING_ENABLE_TBL_VT1636[] = {
-/*  Index, Mask, Value */
-       {0x0A, 0x70, 0x50}
-};
-
-struct IODATA DITHERING_DISABLE_TBL_VT1636[] = {
-/*  Index, Mask, Value */
-       {0x0A, 0x70, 0x00}
-};
-
-struct IODATA VDD_ON_TBL_VT1636[] = {
-/*  Index, Mask, Value */
-       {0x10, 0x20, 0x20}
-};
-
-struct IODATA VDD_OFF_TBL_VT1636[] = {
-/*  Index, Mask, Value */
-       {0x10, 0x20, 0x00}
-};
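
The register tables deleted here (and re-added further down as static const data in the VT1636 LVDS code) are {Index, Mask, Data} triplets: each entry describes a masked read-modify-write of one VT1636 register, so only the bits set in Mask are touched. A minimal sketch of that update rule, assuming the three u8 fields these tables initialize; the helper name is illustrative, not a driver function:

	struct IODATA { u8 Index, Mask, Data; };

	/* Bits set in Mask come from Data; all other bits are preserved. */
	static u8 apply_io_entry(u8 cur, const struct IODATA *e)
	{
		return (cur & ~e->Mask) | (e->Data & e->Mask);
	}
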
diff --git a/drivers/video/via/tbl1636.h b/drivers/video/via/tbl1636.h
deleted file mode 100644 (file)
index d906055..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright 1998-2008 VIA Technologies, Inc. All Rights Reserved.
- * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
-
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License as published by the Free Software Foundation;
- * either version 2, or (at your option) any later version.
-
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even
- * the implied warranty of MERCHANTABILITY or FITNESS FOR
- * A PARTICULAR PURPOSE.See the GNU General Public License
- * for more details.
-
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#ifndef _TBL1636_H_
-#define _TBL1636_H_
-#include "hw.h"
-
-extern struct IODATA COMMON_INIT_TBL_VT1636[8];
-extern struct IODATA DUAL_CHANNEL_ENABLE_TBL_VT1636[1];
-extern struct IODATA SINGLE_CHANNEL_ENABLE_TBL_VT1636[1];
-extern struct IODATA DITHERING_ENABLE_TBL_VT1636[1];
-extern struct IODATA DITHERING_DISABLE_TBL_VT1636[1];
-extern struct IODATA VDD_ON_TBL_VT1636[1];
-extern struct IODATA VDD_OFF_TBL_VT1636[1];
-
-#endif /* _VIA_TBL1636_H_ */
index 66f40303311176c1672f34b0793264bc4c7ee600..31e30338e89306cf7e85387d2fcc8f969e9a0ac4 100644 (file)
@@ -20,7 +20,7 @@
  * The default port config.
  */
 static struct via_port_cfg adap_configs[] = {
-       [VIA_PORT_26]   = { VIA_PORT_I2C,  VIA_MODE_OFF, VIASR, 0x26 },
+       [VIA_PORT_26]   = { VIA_PORT_I2C,  VIA_MODE_I2C, VIASR, 0x26 },
        [VIA_PORT_31]   = { VIA_PORT_I2C,  VIA_MODE_I2C, VIASR, 0x31 },
        [VIA_PORT_25]   = { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x25 },
        [VIA_PORT_2C]   = { VIA_PORT_GPIO, VIA_MODE_I2C, VIASR, 0x2c },
@@ -333,7 +333,7 @@ EXPORT_SYMBOL_GPL(viafb_dma_copy_out_sg);
 static u16 via_function3[] = {
        CLE266_FUNCTION3, KM400_FUNCTION3, CN400_FUNCTION3, CN700_FUNCTION3,
        CX700_FUNCTION3, KM800_FUNCTION3, KM890_FUNCTION3, P4M890_FUNCTION3,
-       P4M900_FUNCTION3, VX800_FUNCTION3, VX855_FUNCTION3,
+       P4M900_FUNCTION3, VX800_FUNCTION3, VX855_FUNCTION3, VX900_FUNCTION3,
 };
 
 /* Get the BIOS-configured framebuffer size from PCI configuration space
@@ -370,6 +370,7 @@ static int viafb_get_fb_size_from_pci(int chip_type)
                case P4M900_FUNCTION3:
                case VX800_FUNCTION3:
                case VX855_FUNCTION3:
+               case VX900_FUNCTION3:
                /*case CN750_FUNCTION3: */
                        offset = 0xA0;
                        break;
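
For context, viafb_get_fb_size_from_pci() reads the BIOS-configured frame-buffer size out of the graphics core's "function 3" PCI device; the hunk above simply adds the VX900's function-3 ID to the set that keeps the size register at offset 0xA0. A hedged sketch of that style of config-space read; the bus, devfn and the raw value's decoding are illustrative, not the driver's exact lookup:

	struct pci_bus *bus = pci_find_bus(0, 0);	/* domain 0, bus 0 */
	u32 raw = 0;

	if (bus)	/* slot 0, function 3: the IGP's "function 3" device */
		pci_bus_read_config_dword(bus, PCI_DEVFN(0, 3), 0xA0, &raw);
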
@@ -474,7 +475,10 @@ static int __devinit via_pci_setup_mmio(struct viafb_dev *vdev)
         * Eventually we want to move away from mapping this
         * entire region.
         */
-       vdev->fbmem_start = pci_resource_start(vdev->pdev, 0);
+       if (vdev->chip_type == UNICHROME_VX900)
+               vdev->fbmem_start = pci_resource_start(vdev->pdev, 2);
+       else
+               vdev->fbmem_start = pci_resource_start(vdev->pdev, 0);
        ret = vdev->fbmem_len = viafb_get_fb_size_from_pci(vdev->chip_type);
        if (ret < 0)
                goto out_unmap;
@@ -635,6 +639,8 @@ static struct pci_device_id via_pci_table[] __devinitdata = {
          .driver_data = UNICHROME_VX800 },
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX855_DID),
          .driver_data = UNICHROME_VX855 },
+       { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX900_DID),
+         .driver_data = UNICHROME_VX900 },
        { }
 };
 MODULE_DEVICE_TABLE(pci, via_pci_table);
@@ -644,6 +650,10 @@ static struct pci_driver via_driver = {
        .id_table       = via_pci_table,
        .probe          = via_pci_probe,
        .remove         = __devexit_p(via_pci_remove),
+#ifdef CONFIG_PM
+       .suspend        = viafb_suspend,
+       .resume         = viafb_resume,
+#endif
 };
 
 static int __init via_core_init(void)
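
The new suspend/resume hooks point at viafb_suspend() and viafb_resume(), which are added to viafbdev.c later in this same patch; viafb_suspend() only acts on PM_EVENT_SUSPEND, and both functions hold the console semaphore around the PCI state save/restore and mode reprogramming.
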
index da9e4ca94b1748ce426584c404f70d70808a7902..3844b558b7bd2456d7650fa8275d70f5d38ca8e9 100644 (file)
@@ -114,6 +114,7 @@ static void via_i2c_setsda(void *data, int state)
 
 int viafb_i2c_readbyte(u8 adap, u8 slave_addr, u8 index, u8 *pdata)
 {
+       int ret;
        u8 mm1[] = {0x00};
        struct i2c_msg msgs[2];
 
@@ -126,11 +127,18 @@ int viafb_i2c_readbyte(u8 adap, u8 slave_addr, u8 index, u8 *pdata)
        mm1[0] = index;
        msgs[0].len = 1; msgs[1].len = 1;
        msgs[0].buf = mm1; msgs[1].buf = pdata;
-       return i2c_transfer(&via_i2c_par[adap].adapter, msgs, 2);
+       ret = i2c_transfer(&via_i2c_par[adap].adapter, msgs, 2);
+       if (ret == 2)
+               ret = 0;
+       else if (ret >= 0)
+               ret = -EIO;
+
+       return ret;
 }
 
 int viafb_i2c_writebyte(u8 adap, u8 slave_addr, u8 index, u8 data)
 {
+       int ret;
        u8 msg[2] = { index, data };
        struct i2c_msg msgs;
 
@@ -140,11 +148,18 @@ int viafb_i2c_writebyte(u8 adap, u8 slave_addr, u8 index, u8 data)
        msgs.addr = slave_addr / 2;
        msgs.len = 2;
        msgs.buf = msg;
-       return i2c_transfer(&via_i2c_par[adap].adapter, &msgs, 1);
+       ret = i2c_transfer(&via_i2c_par[adap].adapter, &msgs, 1);
+       if (ret == 1)
+               ret = 0;
+       else if (ret >= 0)
+               ret = -EIO;
+
+       return ret;
 }
 
 int viafb_i2c_readbytes(u8 adap, u8 slave_addr, u8 index, u8 *buff, int buff_len)
 {
+       int ret;
        u8 mm1[] = {0x00};
        struct i2c_msg msgs[2];
 
@@ -156,7 +171,13 @@ int viafb_i2c_readbytes(u8 adap, u8 slave_addr, u8 index, u8 *buff, int buff_len
        mm1[0] = index;
        msgs[0].len = 1; msgs[1].len = buff_len;
        msgs[0].buf = mm1; msgs[1].buf = buff;
-       return i2c_transfer(&via_i2c_par[adap].adapter, msgs, 2);
+       ret = i2c_transfer(&via_i2c_par[adap].adapter, msgs, 2);
+       if (ret == 2)
+               ret = 0;
+       else if (ret >= 0)
+               ret = -EIO;
+
+       return ret;
 }
 
 /*
@@ -181,8 +202,8 @@ static int create_i2c_bus(struct i2c_adapter *adapter,
        algo->setscl = via_i2c_setscl;
        algo->getsda = via_i2c_getsda;
        algo->getscl = via_i2c_getscl;
-       algo->udelay = 40;
-       algo->timeout = 20;
+       algo->udelay = 10;
+       algo->timeout = 2;
        algo->data = adap_cfg;
 
        sprintf(adapter->name, "viafb i2c io_port idx 0x%02x",
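
i2c_transfer() returns the number of messages transferred on success (here 1 or 2), not zero, so returning it unchanged handed callers a positive value that was easy to mistake for an error. The hunks above normalize the result to the usual 0-or-negative-errno convention, so a caller can test it directly. A sketch, with adap and slave_addr as placeholder values:

	u8 val;
	int err = viafb_i2c_readbyte(adap, slave_addr, 0x00, &val);

	if (err)	/* now 0 on success, -EIO or another -errno on failure */
		return err;
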
index bdd0e4130f4e7eeaf1224de5de47d15959570b08..d298cfccd6fc24bb842f69a9c8cf9945bdc8c8ae 100644 (file)
@@ -56,6 +56,32 @@ static int viafb_pan_display(struct fb_var_screeninfo *var,
 
 static struct fb_ops viafb_ops;
 
+/* supported output devices on each IGP
+ * only CX700, VX800, VX855 and VX900 are documented
+ * VIA_CRT should be everywhere
+ * VIA_6C can be used only pre-CX700 (probably only on CLE266) as 6C is used
+ * for PLL source selection on CX700 and later
+ * K400 seems to support VIA_96, VIA_DVP1, VIA_LVDS{1,2} as in viamode.c
+ */
+static const u32 supported_odev_map[] = {
+       [UNICHROME_CLE266]      = VIA_CRT | VIA_LDVP0 | VIA_LDVP1,
+       [UNICHROME_K400]        = VIA_CRT | VIA_DVP0 | VIA_DVP1 | VIA_LVDS1
+                               | VIA_LVDS2,
+       [UNICHROME_K800]        = VIA_CRT | VIA_DVP0 | VIA_DVP1 | VIA_LVDS1
+                               | VIA_LVDS2,
+       [UNICHROME_PM800]       = VIA_CRT | VIA_DVP0 | VIA_DVP1 | VIA_LVDS1
+                               | VIA_LVDS2,
+       [UNICHROME_CN700]       = VIA_CRT | VIA_DVP0 | VIA_DVP1 | VIA_LVDS1
+                               | VIA_LVDS2,
+       [UNICHROME_CX700]       = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2,
+       [UNICHROME_CN750]       = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2,
+       [UNICHROME_K8M890]      = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2,
+       [UNICHROME_P4M890]      = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2,
+       [UNICHROME_P4M900]      = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2,
+       [UNICHROME_VX800]       = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2,
+       [UNICHROME_VX855]       = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2,
+       [UNICHROME_VX900]       = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2,
+};
 
 static void viafb_fill_var_color_info(struct fb_var_screeninfo *var, u8 depth)
 {
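
supported_odev_map[] is indexed by gfx_chip_name and holds a bitmask of VIA_* output devices, so a support query is a plain mask test. A hypothetical helper, not part of this patch:

	/* True if this IGP can drive all of the requested output devices. */
	static bool via_odev_supported(u32 gfx_chip_name, u32 devices)
	{
		return (supported_odev_map[gfx_chip_name] & devices) == devices;
	}
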
@@ -332,22 +358,22 @@ static int viafb_blank(int blank_mode, struct fb_info *info)
        case FB_BLANK_UNBLANK:
                /* Screen: On, HSync: On, VSync: On */
                /* control CRT monitor power management */
-               viafb_write_reg_mask(CR36, VIACR, 0x00, BIT4 + BIT5);
+               via_set_state(VIA_CRT, VIA_STATE_ON);
                break;
        case FB_BLANK_HSYNC_SUSPEND:
                /* Screen: Off, HSync: Off, VSync: On */
                /* control CRT monitor power management */
-               viafb_write_reg_mask(CR36, VIACR, 0x10, BIT4 + BIT5);
+               via_set_state(VIA_CRT, VIA_STATE_STANDBY);
                break;
        case FB_BLANK_VSYNC_SUSPEND:
                /* Screen: Off, HSync: On, VSync: Off */
                /* control CRT monitor power management */
-               viafb_write_reg_mask(CR36, VIACR, 0x20, BIT4 + BIT5);
+               via_set_state(VIA_CRT, VIA_STATE_SUSPEND);
                break;
        case FB_BLANK_POWERDOWN:
                /* Screen: Off, HSync: Off, VSync: Off */
                /* control CRT monitor power management */
-               viafb_write_reg_mask(CR36, VIACR, 0x30, BIT4 + BIT5);
+               via_set_state(VIA_CRT, VIA_STATE_OFF);
                break;
        }
 
@@ -457,7 +483,7 @@ static int viafb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
                if (copy_from_user(&gpu32, argp, sizeof(gpu32)))
                        return -EFAULT;
                if (gpu32 & CRT_Device)
-                       viafb_crt_enable();
+                       via_set_state(VIA_CRT, VIA_STATE_ON);
                if (gpu32 & DVI_Device)
                        viafb_dvi_enable();
                if (gpu32 & LCD_Device)
@@ -467,7 +493,7 @@ static int viafb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
                if (copy_from_user(&gpu32, argp, sizeof(gpu32)))
                        return -EFAULT;
                if (gpu32 & CRT_Device)
-                       viafb_crt_disable();
+                       via_set_state(VIA_CRT, VIA_STATE_OFF);
                if (gpu32 & DVI_Device)
                        viafb_dvi_disable();
                if (gpu32 & LCD_Device)
@@ -787,7 +813,8 @@ static int viafb_cursor(struct fb_info *info, struct fb_cursor *cursor)
                bg_color = cursor->image.bg_color;
                if (chip_name == UNICHROME_CX700 ||
                        chip_name == UNICHROME_VX800 ||
-                       chip_name == UNICHROME_VX855) {
+                       chip_name == UNICHROME_VX855 ||
+                       chip_name == UNICHROME_VX900) {
                        fg_color =
                                ((info->cmap.red[fg_color] & 0xFFC0) << 14) |
                                ((info->cmap.green[fg_color] & 0xFFC0) << 4) |
@@ -961,7 +988,7 @@ static void retrieve_device_setting(struct viafb_ioctl_setting
        setting_info->lcd_attributes.lcd_mode = viafb_lcd_mode;
 }
 
-static int parse_active_dev(void)
+static int __init parse_active_dev(void)
 {
        viafb_CRT_ON = STATE_OFF;
        viafb_DVI_ON = STATE_OFF;
@@ -1031,7 +1058,7 @@ static int parse_active_dev(void)
        return 0;
 }
 
-static int parse_port(char *opt_str, int *output_interface)
+static int __devinit parse_port(char *opt_str, int *output_interface)
 {
        if (!strncmp(opt_str, "DVP0", 4))
                *output_interface = INTERFACE_DVP0;
@@ -1048,7 +1075,7 @@ static int parse_port(char *opt_str, int *output_interface)
        return 0;
 }
 
-static void parse_lcd_port(void)
+static void __devinit parse_lcd_port(void)
 {
        parse_port(viafb_lcd_port, &viaparinfo->chip_info->lvds_chip_info.
                output_interface);
@@ -1061,7 +1088,7 @@ static void parse_lcd_port(void)
                  output_interface);
 }
 
-static void parse_dvi_port(void)
+static void __devinit parse_dvi_port(void)
 {
        parse_port(viafb_dvi_port, &viaparinfo->chip_info->tmds_chip_info.
                output_interface);
@@ -1431,38 +1458,196 @@ static const struct file_operations viafb_vt1636_proc_fops = {
        .write          = viafb_vt1636_proc_write,
 };
 
-static void viafb_init_proc(struct proc_dir_entry **viafb_entry)
+#endif /* CONFIG_FB_VIA_DIRECT_PROCFS */
+
+static int viafb_sup_odev_proc_show(struct seq_file *m, void *v)
 {
-       *viafb_entry = proc_mkdir("viafb", NULL);
-       if (*viafb_entry) {
-               proc_create("dvp0", 0, *viafb_entry, &viafb_dvp0_proc_fops);
-               proc_create("dvp1", 0, *viafb_entry, &viafb_dvp1_proc_fops);
-               proc_create("dfph", 0, *viafb_entry, &viafb_dfph_proc_fops);
-               proc_create("dfpl", 0, *viafb_entry, &viafb_dfpl_proc_fops);
-               if (VT1636_LVDS == viaparinfo->chip_info->lvds_chip_info.
-                       lvds_chip_name || VT1636_LVDS ==
-                   viaparinfo->chip_info->lvds_chip_info2.lvds_chip_name) {
-                       proc_create("vt1636", 0, *viafb_entry, &viafb_vt1636_proc_fops);
-               }
+       via_odev_to_seq(m, supported_odev_map[
+               viaparinfo->shared->chip_info.gfx_chip_name]);
+       return 0;
+}
+
+static int viafb_sup_odev_proc_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, viafb_sup_odev_proc_show, NULL);
+}
+
+static const struct file_operations viafb_sup_odev_proc_fops = {
+       .owner          = THIS_MODULE,
+       .open           = viafb_sup_odev_proc_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static ssize_t odev_update(const char __user *buffer, size_t count, u32 *odev)
+{
+       char buf[64], *ptr = buf;
+       u32 devices;
+       bool add, sub;
+
+       if (count < 1 || count > 63)
+               return -EINVAL;
+       if (copy_from_user(&buf[0], buffer, count))
+               return -EFAULT;
+       buf[count] = '\0';
+       add = buf[0] == '+';
+       sub = buf[0] == '-';
+       if (add || sub)
+               ptr++;
+       devices = via_parse_odev(ptr, &ptr);
+       if (*ptr == '\n')
+               ptr++;
+       if (*ptr != 0)
+               return -EINVAL;
+       if (add)
+               *odev |= devices;
+       else if (sub)
+               *odev &= ~devices;
+       else
+               *odev = devices;
+       return count;
+}
+
+static int viafb_iga1_odev_proc_show(struct seq_file *m, void *v)
+{
+       via_odev_to_seq(m, viaparinfo->shared->iga1_devices);
+       return 0;
+}
+
+static int viafb_iga1_odev_proc_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, viafb_iga1_odev_proc_show, NULL);
+}
+
+static ssize_t viafb_iga1_odev_proc_write(struct file *file,
+       const char __user *buffer, size_t count, loff_t *pos)
+{
+       u32 dev_on, dev_off, dev_old, dev_new;
+       ssize_t res;
+
+       dev_old = dev_new = viaparinfo->shared->iga1_devices;
+       res = odev_update(buffer, count, &dev_new);
+       if (res != count)
+               return res;
+       dev_off = dev_old & ~dev_new;
+       dev_on = dev_new & ~dev_old;
+       viaparinfo->shared->iga1_devices = dev_new;
+       viaparinfo->shared->iga2_devices &= ~dev_new;
+       via_set_state(dev_off, VIA_STATE_OFF);
+       via_set_source(dev_new, IGA1);
+       via_set_state(dev_on, VIA_STATE_ON);
+       return res;
+}
+
+static const struct file_operations viafb_iga1_odev_proc_fops = {
+       .owner          = THIS_MODULE,
+       .open           = viafb_iga1_odev_proc_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+       .write          = viafb_iga1_odev_proc_write,
+};
+
+static int viafb_iga2_odev_proc_show(struct seq_file *m, void *v)
+{
+       via_odev_to_seq(m, viaparinfo->shared->iga2_devices);
+       return 0;
+}
 
+static int viafb_iga2_odev_proc_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, viafb_iga2_odev_proc_show, NULL);
+}
+
+static ssize_t viafb_iga2_odev_proc_write(struct file *file,
+       const char __user *buffer, size_t count, loff_t *pos)
+{
+       u32 dev_on, dev_off, dev_old, dev_new;
+       ssize_t res;
+
+       dev_old = dev_new = viaparinfo->shared->iga2_devices;
+       res = odev_update(buffer, count, &dev_new);
+       if (res != count)
+               return res;
+       dev_off = dev_old & ~dev_new;
+       dev_on = dev_new & ~dev_old;
+       viaparinfo->shared->iga2_devices = dev_new;
+       viaparinfo->shared->iga1_devices &= ~dev_new;
+       via_set_state(dev_off, VIA_STATE_OFF);
+       via_set_source(dev_new, IGA2);
+       via_set_state(dev_on, VIA_STATE_ON);
+       return res;
+}
+
+static const struct file_operations viafb_iga2_odev_proc_fops = {
+       .owner          = THIS_MODULE,
+       .open           = viafb_iga2_odev_proc_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+       .write          = viafb_iga2_odev_proc_write,
+};
+
+#define IS_VT1636(lvds_chip)   ((lvds_chip).lvds_chip_name == VT1636_LVDS)
+static void viafb_init_proc(struct viafb_shared *shared)
+{
+       struct proc_dir_entry *iga1_entry, *iga2_entry,
+               *viafb_entry = proc_mkdir("viafb", NULL);
+
+       shared->proc_entry = viafb_entry;
+       if (viafb_entry) {
+#ifdef CONFIG_FB_VIA_DIRECT_PROCFS
+               proc_create("dvp0", 0, viafb_entry, &viafb_dvp0_proc_fops);
+               proc_create("dvp1", 0, viafb_entry, &viafb_dvp1_proc_fops);
+               proc_create("dfph", 0, viafb_entry, &viafb_dfph_proc_fops);
+               proc_create("dfpl", 0, viafb_entry, &viafb_dfpl_proc_fops);
+               if (IS_VT1636(shared->chip_info.lvds_chip_info)
+                       || IS_VT1636(shared->chip_info.lvds_chip_info2))
+                       proc_create("vt1636", 0, viafb_entry,
+                               &viafb_vt1636_proc_fops);
+#endif /* CONFIG_FB_VIA_DIRECT_PROCFS */
+
+               proc_create("supported_output_devices", 0, viafb_entry,
+                       &viafb_sup_odev_proc_fops);
+               iga1_entry = proc_mkdir("iga1", viafb_entry);
+               shared->iga1_proc_entry = iga1_entry;
+               proc_create("output_devices", 0, iga1_entry,
+                       &viafb_iga1_odev_proc_fops);
+               iga2_entry = proc_mkdir("iga2", viafb_entry);
+               shared->iga2_proc_entry = iga2_entry;
+               proc_create("output_devices", 0, iga2_entry,
+                       &viafb_iga2_odev_proc_fops);
        }
 }
-static void viafb_remove_proc(struct proc_dir_entry *viafb_entry)
+static void viafb_remove_proc(struct viafb_shared *shared)
 {
-       struct chip_information *chip_info = &viaparinfo->shared->chip_info;
+       struct proc_dir_entry *viafb_entry = shared->proc_entry,
+               *iga1_entry = shared->iga1_proc_entry,
+               *iga2_entry = shared->iga2_proc_entry;
 
+       if (!viafb_entry)
+               return;
+
+       remove_proc_entry("output_devices", iga2_entry);
+       remove_proc_entry("iga2", viafb_entry);
+       remove_proc_entry("output_devices", iga1_entry);
+       remove_proc_entry("iga1", viafb_entry);
+       remove_proc_entry("supported_output_devices", viafb_entry);
+
+#ifdef CONFIG_FB_VIA_DIRECT_PROCFS
        remove_proc_entry("dvp0", viafb_entry);/* parent dir */
        remove_proc_entry("dvp1", viafb_entry);
        remove_proc_entry("dfph", viafb_entry);
        remove_proc_entry("dfpl", viafb_entry);
-       if (chip_info->lvds_chip_info.lvds_chip_name == VT1636_LVDS
-               || chip_info->lvds_chip_info2.lvds_chip_name == VT1636_LVDS)
+       if (IS_VT1636(shared->chip_info.lvds_chip_info)
+               || IS_VT1636(shared->chip_info.lvds_chip_info2))
                remove_proc_entry("vt1636", viafb_entry);
+#endif /* CONFIG_FB_VIA_DIRECT_PROCFS */
 
        remove_proc_entry("viafb", NULL);
 }
-
-#endif /* CONFIG_FB_VIA_DIRECT_PROCFS */
+#undef IS_VT1636
 
 static int parse_mode(const char *str, u32 *xres, u32 *yres)
 {
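
Taken together, the proc changes replace the old flat layout with /proc/viafb/supported_output_devices plus per-IGA directories, each holding an output_devices file. odev_update() accepts a device list (in whatever names via_parse_odev() understands), optionally prefixed with '+' or '-': a bare list reassigns exactly those devices to the IGA being written, while '+' adds to and '-' removes from the current set. Assigning a device to one IGA silently removes it from the other, and the affected outputs are powered off and back on around the source switch.
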
@@ -1486,6 +1671,47 @@ static int parse_mode(const char *str, u32 *xres, u32 *yres)
 }
 
 
+#ifdef CONFIG_PM
+int viafb_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+       if (state.event == PM_EVENT_SUSPEND) {
+               acquire_console_sem();
+               fb_set_suspend(viafbinfo, 1);
+
+               viafb_sync(viafbinfo);
+
+               pci_save_state(pdev);
+               pci_disable_device(pdev);
+               pci_set_power_state(pdev, pci_choose_state(pdev, state));
+               release_console_sem();
+       }
+
+       return 0;
+}
+
+int viafb_resume(struct pci_dev *pdev)
+{
+       acquire_console_sem();
+       pci_set_power_state(pdev, PCI_D0);
+       pci_restore_state(pdev);
+       if (pci_enable_device(pdev))
+               goto fail;
+       pci_set_master(pdev);
+       if (viaparinfo->shared->vdev->engine_mmio)
+               viafb_reset_engine(viaparinfo);
+       viafb_set_par(viafbinfo);
+       if (viafb_dual_fb)
+               viafb_set_par(viafbinfo1);
+       fb_set_suspend(viafbinfo, 0);
+
+fail:
+       release_console_sem();
+       return 0;
+}
+
+#endif
+
+
 int __devinit via_fb_pci_probe(struct viafb_dev *vdev)
 {
        u32 default_xres, default_yres;
@@ -1544,7 +1770,7 @@ int __devinit via_fb_pci_probe(struct viafb_dev *vdev)
        viafbinfo->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
 
        viafbinfo->pseudo_palette = pseudo_pal;
-       if (viafb_accel && !viafb_init_engine(viafbinfo)) {
+       if (viafb_accel && !viafb_setup_engine(viafbinfo)) {
                viafbinfo->flags |= FBINFO_HWACCEL_COPYAREA |
                        FBINFO_HWACCEL_FILLRECT |  FBINFO_HWACCEL_IMAGEBLIT;
                default_var.accel_flags = FB_ACCELF_TEXT;
@@ -1671,9 +1897,7 @@ int __devinit via_fb_pci_probe(struct viafb_dev *vdev)
                  viafbinfo->node, viafbinfo->fix.id, default_var.xres,
                  default_var.yres, default_var.bits_per_pixel);
 
-#ifdef CONFIG_FB_VIA_DIRECT_PROCFS
-       viafb_init_proc(&viaparinfo->shared->proc_entry);
-#endif
+       viafb_init_proc(viaparinfo->shared);
        viafb_init_dac(IGA2);
        return 0;
 
@@ -1700,9 +1924,7 @@ void __devexit via_fb_pci_remove(struct pci_dev *pdev)
        unregister_framebuffer(viafbinfo);
        if (viafb_dual_fb)
                unregister_framebuffer(viafbinfo1);
-#ifdef CONFIG_FB_VIA_DIRECT_PROCFS
-       viafb_remove_proc(viaparinfo->shared->proc_entry);
-#endif
+       viafb_remove_proc(viaparinfo->shared);
        framebuffer_release(viafbinfo);
        if (viafb_dual_fb)
                framebuffer_release(viafbinfo1);
index 52a35fabba91dfbbb8e35c79e6dc79ab96915ba9..4960e3da6645a8d51e73f50754a71536cc15e327 100644 (file)
 #define VIAFB_NUM_I2C          5
 
 struct viafb_shared {
+       u32 iga1_devices;
+       u32 iga2_devices;
+
        struct proc_dir_entry *proc_entry;      /*viafb proc entry */
+       struct proc_dir_entry *iga1_proc_entry;
+       struct proc_dir_entry *iga2_proc_entry;
        struct viafb_dev *vdev;                 /* Global dev info */
 
        /* All the information will be needed to set engine */
@@ -103,4 +108,6 @@ void via_fb_pci_remove(struct pci_dev *pdev);
 /* Temporary */
 int viafb_init(void);
 void viafb_exit(void);
+int viafb_suspend(struct pci_dev *pdev, pm_message_t state);
+int viafb_resume(struct pci_dev *pdev);
 #endif /* __VIAFBDEV_H__ */
index d65bf1aee87c1868db5b1127afb95af5cf2426dd..60e4192c2b34845368d42d7c159dcfd6ff297bba 100644 (file)
 #include <linux/via_i2c.h>
 #include "global.h"
 
+static const struct IODATA common_init_data[] = {
+/*  Index, Mask, Value */
+       /* Set panel power sequence timing */
+       {0x10, 0xC0, 0x00},
+       /* T1: VDD on - Data on. Each increment is 1 ms. (50ms = 031h) */
+       {0x0B, 0xFF, 0x40},
+       /* T2: Data on - Backlight on. Each increment is 2 ms. (210ms = 068h) */
+       {0x0C, 0xFF, 0x31},
+       /* T3: Backlight off - Data off. Each increment is 2 ms. (210ms = 068h) */
+       {0x0D, 0xFF, 0x31},
+       /* T4: Data off - VDD off. Each increment is 1 ms. (50ms = 031h) */
+       {0x0E, 0xFF, 0x68},
+       /* T5: VDD off - VDD on. Each increment is 100 ms. (500ms = 04h) */
+       {0x0F, 0xFF, 0x68},
+       /* LVDS output power up */
+       {0x09, 0xA0, 0xA0},
+       /* turn on back light */
+       {0x10, 0x33, 0x13}
+};
+
+/* Index, Mask, Value */
+static const struct IODATA dual_channel_enable_data = {0x08, 0xF0, 0xE0};
+static const struct IODATA single_channel_enable_data = {0x08, 0xF0, 0x00};
+static const struct IODATA dithering_enable_data = {0x0A, 0x70, 0x50};
+static const struct IODATA dithering_disable_data = {0x0A, 0x70, 0x00};
+static const struct IODATA vdd_on_data = {0x10, 0x20, 0x20};
+static const struct IODATA vdd_off_data = {0x10, 0x20, 0x00};
+
 u8 viafb_gpio_i2c_read_lvds(struct lvds_setting_information
        *plvds_setting_info, struct lvds_chip_information *plvds_chip_info,
        u8 index)
@@ -55,108 +83,41 @@ void viafb_init_lvds_vt1636(struct lvds_setting_information
        int reg_num, i;
 
        /* Common settings: */
-       reg_num = ARRAY_SIZE(COMMON_INIT_TBL_VT1636);
-
-       for (i = 0; i < reg_num; i++) {
+       reg_num = ARRAY_SIZE(common_init_data);
+       for (i = 0; i < reg_num; i++)
                viafb_gpio_i2c_write_mask_lvds(plvds_setting_info,
-                                        plvds_chip_info,
-                                        COMMON_INIT_TBL_VT1636[i]);
-       }
+                       plvds_chip_info, common_init_data[i]);
 
        /* Input Data Mode Select */
-       if (plvds_setting_info->device_lcd_dualedge) {
+       if (plvds_setting_info->device_lcd_dualedge)
                viafb_gpio_i2c_write_mask_lvds(plvds_setting_info,
-                                        plvds_chip_info,
-                                        DUAL_CHANNEL_ENABLE_TBL_VT1636[0]);
-       } else {
+                       plvds_chip_info, dual_channel_enable_data);
+       else
                viafb_gpio_i2c_write_mask_lvds(plvds_setting_info,
-                                        plvds_chip_info,
-                                        SINGLE_CHANNEL_ENABLE_TBL_VT1636[0]);
-       }
+                       plvds_chip_info, single_channel_enable_data);
 
-       if (plvds_setting_info->LCDDithering) {
+       if (plvds_setting_info->LCDDithering)
                viafb_gpio_i2c_write_mask_lvds(plvds_setting_info,
-                                        plvds_chip_info,
-                                        DITHERING_ENABLE_TBL_VT1636[0]);
-       } else {
+                       plvds_chip_info, dithering_enable_data);
+       else
                viafb_gpio_i2c_write_mask_lvds(plvds_setting_info,
-                                        plvds_chip_info,
-                                        DITHERING_DISABLE_TBL_VT1636[0]);
-       }
+                       plvds_chip_info, dithering_disable_data);
 }
 
 void viafb_enable_lvds_vt1636(struct lvds_setting_information
                        *plvds_setting_info,
                        struct lvds_chip_information *plvds_chip_info)
 {
-
        viafb_gpio_i2c_write_mask_lvds(plvds_setting_info, plvds_chip_info,
-                                VDD_ON_TBL_VT1636[0]);
-
-       /* Pad on: */
-       switch (plvds_chip_info->output_interface) {
-       case INTERFACE_DVP0:
-               {
-                       viafb_write_reg_mask(SR1E, VIASR, 0xC0, 0xC0);
-                       break;
-               }
-
-       case INTERFACE_DVP1:
-               {
-                       viafb_write_reg_mask(SR1E, VIASR, 0x30, 0x30);
-                       break;
-               }
-
-       case INTERFACE_DFP_LOW:
-               {
-                       viafb_write_reg_mask(SR2A, VIASR, 0x03, 0x03);
-                       break;
-               }
-
-       case INTERFACE_DFP_HIGH:
-               {
-                       viafb_write_reg_mask(SR2A, VIASR, 0x03, 0x0C);
-                       break;
-               }
-
-       }
+               vdd_on_data);
 }
 
 void viafb_disable_lvds_vt1636(struct lvds_setting_information
                         *plvds_setting_info,
                         struct lvds_chip_information *plvds_chip_info)
 {
-
        viafb_gpio_i2c_write_mask_lvds(plvds_setting_info, plvds_chip_info,
-                                VDD_OFF_TBL_VT1636[0]);
-
-       /* Pad off: */
-       switch (plvds_chip_info->output_interface) {
-       case INTERFACE_DVP0:
-               {
-                       viafb_write_reg_mask(SR1E, VIASR, 0x00, 0xC0);
-                       break;
-               }
-
-       case INTERFACE_DVP1:
-               {
-                       viafb_write_reg_mask(SR1E, VIASR, 0x00, 0x30);
-                       break;
-               }
-
-       case INTERFACE_DFP_LOW:
-               {
-                       viafb_write_reg_mask(SR2A, VIASR, 0x00, 0x03);
-                       break;
-               }
-
-       case INTERFACE_DFP_HIGH:
-               {
-                       viafb_write_reg_mask(SR2A, VIASR, 0x00, 0x0C);
-                       break;
-               }
-
-       }
+               vdd_off_data);
 }
 
 bool viafb_lvds_identify_vt1636(u8 i2c_adapter)
index 2839e281cd6593b8f52b6a632e83b90078454ae6..b7b5014ff7140262cbbcbe32d9530cb2936105e3 100644 (file)
@@ -517,10 +517,10 @@ static W1_MASTER_ATTR_RO(max_slave_count, S_IRUGO);
 static W1_MASTER_ATTR_RO(attempts, S_IRUGO);
 static W1_MASTER_ATTR_RO(timeout, S_IRUGO);
 static W1_MASTER_ATTR_RO(pointer, S_IRUGO);
-static W1_MASTER_ATTR_RW(search, S_IRUGO | S_IWUGO);
-static W1_MASTER_ATTR_RW(pullup, S_IRUGO | S_IWUGO);
-static W1_MASTER_ATTR_RW(add, S_IRUGO | S_IWUGO);
-static W1_MASTER_ATTR_RW(remove, S_IRUGO | S_IWUGO);
+static W1_MASTER_ATTR_RW(search, S_IRUGO | S_IWUSR | S_IWGRP);
+static W1_MASTER_ATTR_RW(pullup, S_IRUGO | S_IWUSR | S_IWGRP);
+static W1_MASTER_ATTR_RW(add, S_IRUGO | S_IWUSR | S_IWGRP);
+static W1_MASTER_ATTR_RW(remove, S_IRUGO | S_IWUSR | S_IWGRP);
 
 static struct attribute *w1_master_default_attrs[] = {
        &w1_master_attribute_name.attr,
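
S_IWUGO is the combined owner/group/other write mask, so these attributes were created as world-writable 0666 sysfs files; S_IRUGO | S_IWUSR | S_IWGRP tightens them to 0664: still world-readable, but writable only by the owner and group.
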
index b5e582bd769dd974fcb671adfac389f6c72d02e1..97673c955484463e4181d4e3f689742bbe558a06 100644 (file)
@@ -53,7 +53,6 @@ config EXPORTFS
 config FILE_LOCKING
        bool "Enable POSIX file locking API" if EMBEDDED
        default y
-       select BKL # while lockd still uses it.
        help
          This option enables standard file locking support, required
           for filesystems like NFS and for the flock() system
index bb4cc5b8abc85f6895306c7740a837e218e050a7..79e2ca7973b7a2b503a7f7fc168f716a5f6c0284 100644 (file)
@@ -42,7 +42,7 @@ config BINFMT_ELF_FDPIC
 
 config CORE_DUMP_DEFAULT_ELF_HEADERS
        bool "Write ELF core dumps with partial segments"
-       default n
+       default y
        depends on BINFMT_ELF && ELF_CORE
        help
          ELF core dump files describe each memory mapping of the crashed
@@ -60,7 +60,7 @@ config CORE_DUMP_DEFAULT_ELF_HEADERS
          inherited.  See Documentation/filesystems/proc.txt for details.
 
          This config option changes the default setting of coredump_filter
-         seen at boot time.  If unsure, say N.
+         seen at boot time.  If unsure, say Y.
 
 config BINFMT_FLAT
        bool "Kernel support for flat binaries"
index 256bb7bb102a0d2221fe4b9b8d2b533949d069c1..8cf07242067de739fb4c39a9bf607c855a785101 100644 (file)
@@ -77,9 +77,6 @@
 /* Maximum number of nesting allowed inside epoll sets */
 #define EP_MAX_NESTS 4
 
-/* Maximum msec timeout value storeable in a long int */
-#define EP_MAX_MSTIMEO min(1000ULL * MAX_SCHEDULE_TIMEOUT / HZ, (LONG_MAX - 999ULL) / HZ)
-
 #define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))
 
 #define EP_UNACTIVE_PTR ((void *) -1L)
@@ -1117,18 +1114,22 @@ static int ep_send_events(struct eventpoll *ep,
 static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
                   int maxevents, long timeout)
 {
-       int res, eavail;
+       int res, eavail, timed_out = 0;
        unsigned long flags;
-       long jtimeout;
+       long slack;
        wait_queue_t wait;
-
-       /*
-        * Calculate the timeout by checking for the "infinite" value (-1)
-        * and the overflow condition. The passed timeout is in milliseconds,
-        * that why (t * HZ) / 1000.
-        */
-       jtimeout = (timeout < 0 || timeout >= EP_MAX_MSTIMEO) ?
-               MAX_SCHEDULE_TIMEOUT : (timeout * HZ + 999) / 1000;
+       struct timespec end_time;
+       ktime_t expires, *to = NULL;
+
+       if (timeout > 0) {
+               ktime_get_ts(&end_time);
+               timespec_add_ns(&end_time, (u64)timeout * NSEC_PER_MSEC);
+               slack = select_estimate_accuracy(&end_time);
+               to = &expires;
+               *to = timespec_to_ktime(end_time);
+       } else if (timeout == 0) {
+               timed_out = 1;
+       }
 
 retry:
        spin_lock_irqsave(&ep->lock, flags);
@@ -1150,7 +1151,7 @@ retry:
                         * to TASK_INTERRUPTIBLE before doing the checks.
                         */
                        set_current_state(TASK_INTERRUPTIBLE);
-                       if (!list_empty(&ep->rdllist) || !jtimeout)
+                       if (!list_empty(&ep->rdllist) || timed_out)
                                break;
                        if (signal_pending(current)) {
                                res = -EINTR;
@@ -1158,7 +1159,9 @@ retry:
                        }
 
                        spin_unlock_irqrestore(&ep->lock, flags);
-                       jtimeout = schedule_timeout(jtimeout);
+                       if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS))
+                               timed_out = 1;
+
                        spin_lock_irqsave(&ep->lock, flags);
                }
                __remove_wait_queue(&ep->wq, &wait);
@@ -1176,7 +1179,7 @@ retry:
         * more luck.
         */
        if (!res && eavail &&
-           !(res = ep_send_events(ep, events, maxevents)) && jtimeout)
+           !(res = ep_send_events(ep, events, maxevents)) && !timed_out)
                goto retry;
 
        return res;
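
The net effect: the old code rounded the millisecond timeout up to whole jiffies with (timeout * HZ + 999) / 1000, so at HZ=250 a 1 ms epoll_wait() timeout stretched to a full 4 ms jiffy; ep_poll() now arms an absolute high-resolution timer with select-style slack instead. The conversion used above, in isolation:

	struct timespec end_time;
	ktime_t expires;

	ktime_get_ts(&end_time);	/* current monotonic time */
	timespec_add_ns(&end_time, (u64)timeout * NSEC_PER_MSEC);
	expires = timespec_to_ktime(end_time);	/* absolute deadline */
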
index 3aa75b8888a14589f268f54a8a7b57c490cbcf9c..99d33a1371e9aeaf7298c4548ed18a634b9f2427 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -66,6 +66,12 @@ char core_pattern[CORENAME_MAX_SIZE] = "core";
 unsigned int core_pipe_limit;
 int suid_dumpable = 0;
 
+struct core_name {
+       char *corename;
+       int used, size;
+};
+static atomic_t call_count = ATOMIC_INIT(1);
+
 /* The maximal length of core_pattern is also specified in sysctl.c */
 
 static LIST_HEAD(formats);
@@ -1003,7 +1009,7 @@ int flush_old_exec(struct linux_binprm * bprm)
 
        bprm->mm = NULL;                /* We're using it now */
 
-       current->flags &= ~PF_RANDOMIZE;
+       current->flags &= ~(PF_RANDOMIZE | PF_KTHREAD);
        flush_thread();
        current->personality &= ~bprm->per_clear;
 
@@ -1083,14 +1089,14 @@ EXPORT_SYMBOL(setup_new_exec);
  */
 int prepare_bprm_creds(struct linux_binprm *bprm)
 {
-       if (mutex_lock_interruptible(&current->cred_guard_mutex))
+       if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
                return -ERESTARTNOINTR;
 
        bprm->cred = prepare_exec_creds();
        if (likely(bprm->cred))
                return 0;
 
-       mutex_unlock(&current->cred_guard_mutex);
+       mutex_unlock(&current->signal->cred_guard_mutex);
        return -ENOMEM;
 }
 
@@ -1098,7 +1104,7 @@ void free_bprm(struct linux_binprm *bprm)
 {
        free_arg_pages(bprm);
        if (bprm->cred) {
-               mutex_unlock(&current->cred_guard_mutex);
+               mutex_unlock(&current->signal->cred_guard_mutex);
                abort_creds(bprm->cred);
        }
        kfree(bprm);
@@ -1119,13 +1125,13 @@ void install_exec_creds(struct linux_binprm *bprm)
         * credentials; any time after this it may be unlocked.
         */
        security_bprm_committed_creds(bprm);
-       mutex_unlock(&current->cred_guard_mutex);
+       mutex_unlock(&current->signal->cred_guard_mutex);
 }
 EXPORT_SYMBOL(install_exec_creds);
 
 /*
  * determine how safe it is to execute the proposed program
- * - the caller must hold current->cred_guard_mutex to protect against
+ * - the caller must hold ->cred_guard_mutex to protect against
  *   PTRACE_ATTACH
  */
 int check_unsafe_exec(struct linux_binprm *bprm)
@@ -1406,7 +1412,6 @@ int do_execve(const char * filename,
        if (retval < 0)
                goto out;
 
-       current->flags &= ~PF_KTHREAD;
        retval = search_binary_handler(bprm,regs);
        if (retval < 0)
                goto out;
@@ -1459,127 +1464,148 @@ void set_binfmt(struct linux_binfmt *new)
 
 EXPORT_SYMBOL(set_binfmt);
 
+static int expand_corename(struct core_name *cn)
+{
+       char *old_corename = cn->corename;
+
+       cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
+       cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
+
+       if (!cn->corename) {
+               kfree(old_corename);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static int cn_printf(struct core_name *cn, const char *fmt, ...)
+{
+       char *cur;
+       int need;
+       int ret;
+       va_list arg;
+
+       va_start(arg, fmt);
+       need = vsnprintf(NULL, 0, fmt, arg);
+       va_end(arg);
+
+       if (likely(need < cn->size - cn->used - 1))
+               goto out_printf;
+
+       ret = expand_corename(cn);
+       if (ret)
+               goto expand_fail;
+
+out_printf:
+       cur = cn->corename + cn->used;
+       va_start(arg, fmt);
+       vsnprintf(cur, need + 1, fmt, arg);
+       va_end(arg);
+       cn->used += need;
+       return 0;
+
+expand_fail:
+       return ret;
+}
+
 /* format_corename will inspect the pattern parameter, and output a
  * name into corename, which must have space for at least
  * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
  */
-static int format_corename(char *corename, long signr)
+static int format_corename(struct core_name *cn, long signr)
 {
        const struct cred *cred = current_cred();
        const char *pat_ptr = core_pattern;
        int ispipe = (*pat_ptr == '|');
-       char *out_ptr = corename;
-       char *const out_end = corename + CORENAME_MAX_SIZE;
-       int rc;
        int pid_in_pattern = 0;
+       int err = 0;
+
+       cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
+       cn->corename = kmalloc(cn->size, GFP_KERNEL);
+       cn->used = 0;
+
+       if (!cn->corename)
+               return -ENOMEM;
 
        /* Repeat as long as we have more pattern to process and more output
           space */
        while (*pat_ptr) {
                if (*pat_ptr != '%') {
-                       if (out_ptr == out_end)
+                       if (*pat_ptr == 0)
                                goto out;
-                       *out_ptr++ = *pat_ptr++;
+                       err = cn_printf(cn, "%c", *pat_ptr++);
                } else {
                        switch (*++pat_ptr) {
+                       /* single % at the end, drop that */
                        case 0:
                                goto out;
                        /* Double percent, output one percent */
                        case '%':
-                               if (out_ptr == out_end)
-                                       goto out;
-                               *out_ptr++ = '%';
+                               err = cn_printf(cn, "%c", '%');
                                break;
                        /* pid */
                        case 'p':
                                pid_in_pattern = 1;
-                               rc = snprintf(out_ptr, out_end - out_ptr,
-                                             "%d", task_tgid_vnr(current));
-                               if (rc > out_end - out_ptr)
-                                       goto out;
-                               out_ptr += rc;
+                               err = cn_printf(cn, "%d",
+                                             task_tgid_vnr(current));
                                break;
                        /* uid */
                        case 'u':
-                               rc = snprintf(out_ptr, out_end - out_ptr,
-                                             "%d", cred->uid);
-                               if (rc > out_end - out_ptr)
-                                       goto out;
-                               out_ptr += rc;
+                               err = cn_printf(cn, "%d", cred->uid);
                                break;
                        /* gid */
                        case 'g':
-                               rc = snprintf(out_ptr, out_end - out_ptr,
-                                             "%d", cred->gid);
-                               if (rc > out_end - out_ptr)
-                                       goto out;
-                               out_ptr += rc;
+                               err = cn_printf(cn, "%d", cred->gid);
                                break;
                        /* signal that caused the coredump */
                        case 's':
-                               rc = snprintf(out_ptr, out_end - out_ptr,
-                                             "%ld", signr);
-                               if (rc > out_end - out_ptr)
-                                       goto out;
-                               out_ptr += rc;
+                               err = cn_printf(cn, "%ld", signr);
                                break;
                        /* UNIX time of coredump */
                        case 't': {
                                struct timeval tv;
                                do_gettimeofday(&tv);
-                               rc = snprintf(out_ptr, out_end - out_ptr,
-                                             "%lu", tv.tv_sec);
-                               if (rc > out_end - out_ptr)
-                                       goto out;
-                               out_ptr += rc;
+                               err = cn_printf(cn, "%lu", tv.tv_sec);
                                break;
                        }
                        /* hostname */
                        case 'h':
                                down_read(&uts_sem);
-                               rc = snprintf(out_ptr, out_end - out_ptr,
-                                             "%s", utsname()->nodename);
+                               err = cn_printf(cn, "%s",
+                                             utsname()->nodename);
                                up_read(&uts_sem);
-                               if (rc > out_end - out_ptr)
-                                       goto out;
-                               out_ptr += rc;
                                break;
                        /* executable */
                        case 'e':
-                               rc = snprintf(out_ptr, out_end - out_ptr,
-                                             "%s", current->comm);
-                               if (rc > out_end - out_ptr)
-                                       goto out;
-                               out_ptr += rc;
+                               err = cn_printf(cn, "%s", current->comm);
                                break;
                        /* core limit size */
                        case 'c':
-                               rc = snprintf(out_ptr, out_end - out_ptr,
-                                             "%lu", rlimit(RLIMIT_CORE));
-                               if (rc > out_end - out_ptr)
-                                       goto out;
-                               out_ptr += rc;
+                               err = cn_printf(cn, "%lu",
+                                             rlimit(RLIMIT_CORE));
                                break;
                        default:
                                break;
                        }
                        ++pat_ptr;
                }
+
+               if (err)
+                       return err;
        }
+
        /* Backward compatibility with core_uses_pid:
         *
         * If core_pattern does not include a %p (as is the default)
         * and core_uses_pid is set, then .%pid will be appended to
         * the filename. Do not do this for piped commands. */
        if (!ispipe && !pid_in_pattern && core_uses_pid) {
-               rc = snprintf(out_ptr, out_end - out_ptr,
-                             ".%d", task_tgid_vnr(current));
-               if (rc > out_end - out_ptr)
-                       goto out;
-               out_ptr += rc;
+               err = cn_printf(cn, ".%d", task_tgid_vnr(current));
+               if (err)
+                       return err;
        }
 out:
-       *out_ptr = 0;
        return ispipe;
 }
 
@@ -1856,7 +1882,7 @@ static int umh_pipe_setup(struct subprocess_info *info)
 void do_coredump(long signr, int exit_code, struct pt_regs *regs)
 {
        struct core_state core_state;
-       char corename[CORENAME_MAX_SIZE + 1];
+       struct core_name cn;
        struct mm_struct *mm = current->mm;
        struct linux_binfmt * binfmt;
        const struct cred *old_cred;
@@ -1911,7 +1937,13 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
         */
        clear_thread_flag(TIF_SIGPENDING);
 
-       ispipe = format_corename(corename, signr);
+       ispipe = format_corename(&cn, signr);
+
+       if (ispipe == -ENOMEM) {
+               printk(KERN_WARNING "format_corename failed\n");
+               printk(KERN_WARNING "Aborting core\n");
+               goto fail_corename;
+       }
 
        if (ispipe) {
                int dump_count;
@@ -1948,7 +1980,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
                        goto fail_dropcount;
                }
 
-               helper_argv = argv_split(GFP_KERNEL, corename+1, NULL);
+               helper_argv = argv_split(GFP_KERNEL, cn.corename+1, NULL);
                if (!helper_argv) {
                        printk(KERN_WARNING "%s failed to allocate memory\n",
                               __func__);
@@ -1961,7 +1993,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
                argv_free(helper_argv);
                if (retval) {
                        printk(KERN_INFO "Core dump to %s pipe failed\n",
-                              corename);
+                              cn.corename);
                        goto close_fail;
                }
        } else {
@@ -1970,7 +2002,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
                if (cprm.limit < binfmt->min_coredump)
                        goto fail_unlock;
 
-               cprm.file = filp_open(corename,
+               cprm.file = filp_open(cn.corename,
                                 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
                                 0600);
                if (IS_ERR(cprm.file))
@@ -2012,6 +2044,8 @@ fail_dropcount:
        if (ispipe)
                atomic_dec(&core_dump_count);
 fail_unlock:
+       kfree(cn.corename);
+fail_corename:
        coredump_finish(mm);
        revert_creds(old_cred);
 fail_creds:
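
A note on the corename changes above: format_corename() no longer writes into a fixed CORENAME_MAX_SIZE buffer; it fills a heap-allocated struct core_name that cn_printf() sizes with a counting vsnprintf(NULL, 0, ...) pass before printing, growing the buffer through expand_corename()/krealloc() when the measured length will not fit. A minimal userspace sketch of that measure-grow-print pattern (buf and buf_printf are made-up names; the kernel version grows in CORENAME_MAX_SIZE multiples rather than doubling):

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

struct buf { char *s; int used, size; };

static int buf_printf(struct buf *b, const char *fmt, ...)
{
        va_list ap;
        char *n;
        int need, nsize;

        va_start(ap, fmt);
        need = vsnprintf(NULL, 0, fmt, ap);     /* counting pass: no output */
        va_end(ap);

        if (need >= b->size - b->used) {        /* won't fit: grow first */
                nsize = b->size ? b->size * 2 : 64;
                while (need >= nsize - b->used)
                        nsize *= 2;
                n = realloc(b->s, nsize);
                if (!n)
                        return -1;              /* old buffer still valid */
                b->s = n;
                b->size = nsize;
        }

        va_start(ap, fmt);                      /* second pass really prints */
        vsnprintf(b->s + b->used, need + 1, fmt, ap);
        va_end(ap);
        b->used += need;
        return 0;
}

int main(void)
{
        struct buf b = { NULL, 0, 0 };

        buf_printf(&b, "core.%d", 1234);
        buf_printf(&b, ".%s", "myprog");
        puts(b.s);                              /* prints core.1234.myprog */
        free(b.s);
        return 0;
}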
index c6c684b44ea1ca6386105496c3f5a349987fef40..0d06f4e75699cf827b7528d07f31b40223eb49b1 100644 (file)
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -646,10 +646,9 @@ find_next_usable_block(int start, struct buffer_head *bh, int maxblocks)
        return here;
 }
 
-/*
+/**
  * ext2_try_to_allocate()
  * @sb:                        superblock
- * @handle:            handle to this transaction
  * @group:             given allocation block group
  * @bitmap_bh:         bufferhead holds the block bitmap
  * @grp_goal:          given target block within the group
index 4a32511f4deda7f75b45aeb719d41407e416cb55..b3db22649426262e902f66f6c46275750c781e5b 100644 (file)
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -792,9 +792,9 @@ find_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
        if (here < 0)
                here = 0;
 
-       p = ((char *)bh->b_data) + (here >> 3);
+       p = bh->b_data + (here >> 3);
        r = memscan(p, 0, ((maxblocks + 7) >> 3) - (here >> 3));
-       next = (r - ((char *)bh->b_data)) << 3;
+       next = (r - bh->b_data) << 3;
 
        if (next < maxblocks && next >= start && ext3_test_allocatable(next, bh))
                return next;
@@ -810,8 +810,9 @@ find_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
 
 /**
  * claim_block()
+ * @lock:              the spin lock for this block group
  * @block:             the free block (group relative) to allocate
- * @bh:                        the bufferhead containts the block group bitmap
+ * @bh:                        the buffer_head contains the block group bitmap
  *
  * We think we can allocate this block in this bitmap.  Try to set the bit.
  * If that succeeds then check that nobody has allocated and then freed the
@@ -956,9 +957,11 @@ fail_access:
  *             but we will shift to the place where start_block is,
  *             then start from there, when looking for a reservable space.
  *
- *     @size: the target new reservation window size
+ *     @my_rsv: the reservation window
  *
- *     @group_first_block: the first block we consider to start
+ *     @sb: the super block
+ *
+ *     @start_block: the first block we consider to start
  *                     the real search from
  *
  *     @last_block:
@@ -1084,7 +1087,7 @@ static int find_next_reservable_window(
  *
  *     failed: we failed to find a reservation window in this group
  *
- *     @rsv: the reservation
+ *     @my_rsv: the reservation window
  *
  *     @grp_goal: The goal (group-relative).  It is where the search for a
  *             free reservable space should start from.
@@ -1273,8 +1276,8 @@ static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv,
  * @group:             given allocation block group
  * @bitmap_bh:         bufferhead holds the block bitmap
  * @grp_goal:          given target block within the group
- * @count:             target number of blocks to allocate
  * @my_rsv:            reservation window
+ * @count:             target number of blocks to allocate
  * @errp:              pointer to store the error code
  *
  * This is the main function used to allocate a new block and its reservation
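
In the first hunk of this file, the redundant (char *) casts around bh->b_data are dropped; the surrounding scan is worth spelling out. The starting bit (here) is rounded down to a byte with here >> 3, memscan() walks forward to the first fully-zero byte, and << 3 turns the resulting byte offset back into a bit number (finer-grained bit tests follow in the kernel). A standalone illustration, with a local stand-in for the kernel's memscan():

#include <stdio.h>
#include <string.h>

/* Like kernel memscan(): return pointer to first byte equal to c,
 * or the end pointer if none is found. */
static unsigned char *memscan_local(unsigned char *p, int c, size_t size)
{
        while (size-- && *p != (unsigned char)c)
                p++;
        return p;
}

int main(void)
{
        /* bitmap: first 18 bits allocated (0xff 0xff 0x03), rest free */
        unsigned char bitmap[8] = { 0xff, 0xff, 0x03, 0x00 };
        int here = 5;                           /* start searching at bit 5 */
        unsigned char *p, *r;
        int next;

        p = bitmap + (here >> 3);               /* byte containing bit 5 */
        r = memscan_local(p, 0, sizeof(bitmap) - (here >> 3));
        next = (int)(r - bitmap) << 3;          /* first bit of zero byte */

        printf("first fully-free byte starts at bit %d\n", next);  /* 24 */
        return 0;
}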
index 4ab72db3559e72c5a959b9777cab365a6ce18c87..9724aef224600c17faf288464d43cb7c5766ad3e 100644 (file)
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -570,9 +570,14 @@ got:
        ei->i_state_flags = 0;
        ext3_set_inode_state(inode, EXT3_STATE_NEW);
 
-       ei->i_extra_isize =
-               (EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) ?
-               sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE : 0;
+       /* See comment in ext3_iget for explanation */
+       if (ino >= EXT3_FIRST_INO(sb) + 1 &&
+           EXT3_INODE_SIZE(sb) > EXT3_GOOD_OLD_INODE_SIZE) {
+               ei->i_extra_isize =
+                       sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE;
+       } else {
+               ei->i_extra_isize = 0;
+       }
 
        ret = inode;
        dquot_initialize(inode);
index ad05353040a12338c1a0c84626abcfc20f96408f..a9580617edd24dc4b4e94df75265f1ce25cec8f1 100644 (file)
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -498,7 +498,7 @@ static ext3_fsblk_t ext3_find_goal(struct inode *inode, long block,
 }
 
 /**
- *     ext3_blks_to_allocate: Look up the block map and count the number
+ *     ext3_blks_to_allocate - Look up the block map and count the number
  *     of direct blocks need to be allocated for the given branch.
  *
  *     @branch: chain of indirect blocks
@@ -536,14 +536,18 @@ static int ext3_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
 }
 
 /**
- *     ext3_alloc_blocks: multiple allocate blocks needed for a branch
+ *     ext3_alloc_blocks - multiple allocate blocks needed for a branch
+ *     @handle: handle for this transaction
+ *     @inode: owner
+ *     @goal: preferred place for allocation
  *     @indirect_blks: the number of blocks need to allocate for indirect
  *                     blocks
- *
+ *     @blks:  number of blocks need to allocated for direct blocks
  *     @new_blocks: on return it will store the new block numbers for
  *     the indirect blocks(if needed) and the first direct block,
- *     @blks:  on return it will store the total number of allocated
- *             direct blocks
+ *     @err: here we store the error value
+ *
+ *     return the number of direct blocks allocated
  */
 static int ext3_alloc_blocks(handle_t *handle, struct inode *inode,
                        ext3_fsblk_t goal, int indirect_blks, int blks,
@@ -598,9 +602,11 @@ failed_out:
 
 /**
  *     ext3_alloc_branch - allocate and set up a chain of blocks.
+ *     @handle: handle for this transaction
  *     @inode: owner
  *     @indirect_blks: number of allocated indirect blocks
  *     @blks: number of allocated direct blocks
+ *     @goal: preferred place for allocation
  *     @offsets: offsets (in the blocks) to store the pointers to next.
  *     @branch: place to store the chain in.
  *
@@ -700,10 +706,9 @@ failed:
 
 /**
  * ext3_splice_branch - splice the allocated branch onto inode.
+ * @handle: handle for this transaction
  * @inode: owner
  * @block: (logical) number of block we are adding
- * @chain: chain of indirect blocks (with a missing link - see
- *     ext3_alloc_branch)
  * @where: location of missing link
  * @num:   number of indirect blocks we are adding
  * @blks:  number of direct blocks we are adding
@@ -2530,7 +2535,6 @@ void ext3_truncate(struct inode *inode)
                         */
                } else {
                        /* Shared branch grows from an indirect block */
-                       BUFFER_TRACE(partial->bh, "get_write_access");
                        ext3_free_branches(handle, inode, partial->bh,
                                        partial->p,
                                        partial->p+1, (chain+n-1) - partial);
index 0ccd7b12b73cb4fa25918a6dcdc81ac47a92dce1..e746d30b12320dff22b76d12aa41109a9111ab0e 100644 (file)
--- a/fs/ext3/resize.c
+++ b/fs/ext3/resize.c
@@ -977,7 +977,8 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
        o_blocks_count = le32_to_cpu(es->s_blocks_count);
 
        if (test_opt(sb, DEBUG))
-               printk(KERN_DEBUG "EXT3-fs: extending last group from "E3FSBLK" uto "E3FSBLK" blocks\n",
+               printk(KERN_DEBUG "EXT3-fs: extending last group from "E3FSBLK
+                      " upto "E3FSBLK" blocks\n",
                       o_blocks_count, n_blocks_count);
 
        if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
@@ -985,7 +986,7 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
 
        if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
                printk(KERN_ERR "EXT3-fs: filesystem on %s:"
-                       " too large to resize to %lu blocks safely\n",
+                       " too large to resize to "E3FSBLK" blocks safely\n",
                        sb->s_id, n_blocks_count);
                if (sizeof(sector_t) < 8)
                        ext3_warning(sb, __func__,
@@ -1065,11 +1066,11 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
        es->s_blocks_count = cpu_to_le32(o_blocks_count + add);
        ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
        mutex_unlock(&EXT3_SB(sb)->s_resize_lock);
-       ext3_debug("freeing blocks %lu through "E3FSBLK"\n", o_blocks_count,
-                  o_blocks_count + add);
+       ext3_debug("freeing blocks "E3FSBLK" through "E3FSBLK"\n",
+                  o_blocks_count, o_blocks_count + add);
        ext3_free_blocks_sb(handle, sb, o_blocks_count, add, &freed_blocks);
-       ext3_debug("freed blocks "E3FSBLK" through "E3FSBLK"\n", o_blocks_count,
-                  o_blocks_count + add);
+       ext3_debug("freed blocks "E3FSBLK" through "E3FSBLK"\n",
+                  o_blocks_count, o_blocks_count + add);
        if ((err = ext3_journal_stop(handle)))
                goto exit_put;
        if (test_opt(sb, DEBUG))
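
The printk fixes above rely on C's adjacent string-literal concatenation: E3FSBLK is ext3's format macro for ext3_fsblk_t, so splicing it between literals builds one format string at compile time. A small demo (the "%lu" expansion below is an assumption for illustration; it matches ext3_fsblk_t being an unsigned long):

#include <stdio.h>

#define E3FSBLK "%lu"   /* assumed expansion of the ext3 macro */

int main(void)
{
        unsigned long o_blocks = 1000, n_blocks = 4000;

        /* "from " E3FSBLK " upto " E3FSBLK " blocks\n" concatenates
         * into a single format string before compilation finishes: */
        printf("extending last group from " E3FSBLK " upto " E3FSBLK
               " blocks\n", o_blocks, n_blocks);
        return 0;
}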
index 37776800910670e2a675e4fd11378f3d8eb35446..db87413d3479c15ecc36223ad4b02c8c6c06f07e 100644 (file)
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -1301,9 +1301,9 @@ static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es,
                ext3_msg(sb, KERN_WARNING,
                        "warning: mounting fs with errors, "
                        "running e2fsck is recommended");
-       else if ((__s16) le16_to_cpu(es->s_max_mnt_count) >= 0 &&
+       else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
                 le16_to_cpu(es->s_mnt_count) >=
-                (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
+                       le16_to_cpu(es->s_max_mnt_count))
                ext3_msg(sb, KERN_WARNING,
                        "warning: maximal mount count reached, "
                        "running e2fsck is recommended");
@@ -1320,7 +1320,7 @@ static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es,
                    valid forever! :) */
        es->s_state &= cpu_to_le16(~EXT3_VALID_FS);
 #endif
-       if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
+       if (!le16_to_cpu(es->s_max_mnt_count))
                es->s_max_mnt_count = cpu_to_le16(EXT3_DFL_MAX_MNT_COUNT);
        le16_add_cpu(&es->s_mnt_count, 1);
        es->s_mtime = cpu_to_le32(get_seconds());
@@ -1647,7 +1647,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
         * Note: s_es must be initialized as soon as possible because
         *       some ext3 macro-instructions depend on its value
         */
-       es = (struct ext3_super_block *) (((char *)bh->b_data) + offset);
+       es = (struct ext3_super_block *) (bh->b_data + offset);
        sbi->s_es = es;
        sb->s_magic = le16_to_cpu(es->s_magic);
        if (sb->s_magic != EXT3_SUPER_MAGIC)
@@ -1758,7 +1758,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
                               "error: can't read superblock on 2nd try");
                        goto failed_mount;
                }
-               es = (struct ext3_super_block *)(((char *)bh->b_data) + offset);
+               es = (struct ext3_super_block *)(bh->b_data + offset);
                sbi->s_es = es;
                if (es->s_magic != cpu_to_le16(EXT3_SUPER_MAGIC)) {
                        ext3_msg(sb, KERN_ERR,
@@ -1857,13 +1857,13 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
        sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
                               le32_to_cpu(es->s_first_data_block) - 1)
                                       / EXT3_BLOCKS_PER_GROUP(sb)) + 1;
-       db_count = (sbi->s_groups_count + EXT3_DESC_PER_BLOCK(sb) - 1) /
-                  EXT3_DESC_PER_BLOCK(sb);
+       db_count = DIV_ROUND_UP(sbi->s_groups_count, EXT3_DESC_PER_BLOCK(sb));
        sbi->s_group_desc = kmalloc(db_count * sizeof (struct buffer_head *),
                                    GFP_KERNEL);
        if (sbi->s_group_desc == NULL) {
                ext3_msg(sb, KERN_ERR,
                        "error: not enough memory");
+               ret = -ENOMEM;
                goto failed_mount;
        }
 
@@ -1951,6 +1951,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
        }
        if (err) {
                ext3_msg(sb, KERN_ERR, "error: insufficient memory");
+               ret = err;
                goto failed_mount3;
        }
 
@@ -2159,7 +2160,7 @@ static journal_t *ext3_get_dev_journal(struct super_block *sb,
                goto out_bdev;
        }
 
-       es = (struct ext3_super_block *) (((char *)bh->b_data) + offset);
+       es = (struct ext3_super_block *) (bh->b_data + offset);
        if ((le16_to_cpu(es->s_magic) != EXT3_SUPER_MAGIC) ||
            !(le32_to_cpu(es->s_feature_incompat) &
              EXT3_FEATURE_INCOMPAT_JOURNAL_DEV)) {
@@ -2352,6 +2353,21 @@ static int ext3_commit_super(struct super_block *sb,
 
        if (!sbh)
                return error;
+
+       if (buffer_write_io_error(sbh)) {
+               /*
+                * Oh, dear.  A previous attempt to write the
+                * superblock failed.  This could happen because the
+                * USB device was yanked out.  Or it could happen to
+                * be a transient write error and maybe the block will
+                * be remapped.  Nothing we can do but to retry the
+                * write and hope for the best.
+                */
+               ext3_msg(sb, KERN_ERR, "previous I/O error to "
+                      "superblock detected");
+               clear_buffer_write_io_error(sbh);
+               set_buffer_uptodate(sbh);
+       }
        /*
         * If the file system is mounted read-only, don't update the
         * superblock write time.  This avoids updating the superblock
@@ -2368,8 +2384,15 @@ static int ext3_commit_super(struct super_block *sb,
        es->s_free_inodes_count = cpu_to_le32(ext3_count_free_inodes(sb));
        BUFFER_TRACE(sbh, "marking dirty");
        mark_buffer_dirty(sbh);
-       if (sync)
+       if (sync) {
                error = sync_dirty_buffer(sbh);
+               if (buffer_write_io_error(sbh)) {
+                       ext3_msg(sb, KERN_ERR, "I/O error while writing "
+                              "superblock");
+                       clear_buffer_write_io_error(sbh);
+                       set_buffer_uptodate(sbh);
+               }
+       }
        return error;
 }
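
One hunk earlier in this file replaces the open-coded db_count rounding with DIV_ROUND_UP(), which linux/kernel.h defines as exactly this ceiling division. A quick standalone check that the two forms agree:

#include <assert.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long groups = 515, desc_per_block = 128;
        unsigned long open_coded =
                (groups + desc_per_block - 1) / desc_per_block;

        assert(DIV_ROUND_UP(groups, desc_per_block) == open_coded);
        printf("db_count = %lu\n",
               DIV_ROUND_UP(groups, desc_per_block));   /* prints 5 */
        return 0;
}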
 
index 8867b2a1e5fe0ef2b52080b382c83c7f7f4339d4..c947e36eda6c9d568b0c00b28c778ac2216996d5 100644 (file)
--- a/fs/ext4/Makefile
+++ b/fs/ext4/Makefile
@@ -4,7 +4,7 @@
 
 obj-$(CONFIG_EXT4_FS) += ext4.o
 
-ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
+ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o page-io.o \
                ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
                ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o
 
index bd30799a43ed0c527a80b024c356eb55e457f953..14c3af26c671eca992e51841682924ebe9a95328 100644 (file)
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -171,7 +171,8 @@ unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
                 * less than the blocksize * 8 ( which is the size
                 * of bitmap ), set rest of the block bitmap to 1
                 */
-               mark_bitmap_end(group_blocks, sb->s_blocksize * 8, bh->b_data);
+               ext4_mark_bitmap_end(group_blocks, sb->s_blocksize * 8,
+                                    bh->b_data);
        }
        return free_blocks - ext4_group_used_meta_blocks(sb, block_group, gdp);
 }
@@ -489,7 +490,7 @@ error_return:
  * Check if filesystem has nblocks free & available for allocation.
  * On success return 1, return 0 on failure.
  */
-int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
+static int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
 {
        s64 free_blocks, dirty_blocks, root_blocks;
        struct percpu_counter *fbc = &sbi->s_freeblocks_counter;
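
The bitmap call in the first hunk now goes through ext4_mark_bitmap_end() (the rename is visible in the ext4.h hunk further down), which sets every bit from the end of the group's real blocks up to blocksize * 8 so that nonexistent blocks read as in use. A bit-at-a-time standalone stand-in (the in-kernel helper is more efficient, memsetting whole bytes where it can):

#include <stdio.h>

static void mark_bitmap_end_demo(int start_bit, int end_bit,
                                 unsigned char *bitmap)
{
        int i;

        for (i = start_bit; i < end_bit; i++)
                bitmap[i >> 3] |= 1u << (i & 7);    /* mark bit i in use */
}

int main(void)
{
        unsigned char bitmap[4] = { 0 };        /* 32-bit bitmap */

        mark_bitmap_end_demo(20, 32, bitmap);   /* blocks 20..31 don't exist */
        printf("%02x %02x %02x %02x\n",
               bitmap[0], bitmap[1], bitmap[2], bitmap[3]); /* 00 00 f0 ff */
        return 0;
}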
index 3db5084db9bd06c2d492cd76dd2a014e7bb07bf6..fac90f3fba80759b5e42c6902dba00c8c16d1286 100644 (file)
--- a/fs/ext4/block_validity.c
+++ b/fs/ext4/block_validity.c
@@ -29,16 +29,15 @@ struct ext4_system_zone {
 
 static struct kmem_cache *ext4_system_zone_cachep;
 
-int __init init_ext4_system_zone(void)
+int __init ext4_init_system_zone(void)
 {
-       ext4_system_zone_cachep = KMEM_CACHE(ext4_system_zone,
-                                            SLAB_RECLAIM_ACCOUNT);
+       ext4_system_zone_cachep = KMEM_CACHE(ext4_system_zone, 0);
        if (ext4_system_zone_cachep == NULL)
                return -ENOMEM;
        return 0;
 }
 
-void exit_ext4_system_zone(void)
+void ext4_exit_system_zone(void)
 {
        kmem_cache_destroy(ext4_system_zone_cachep);
 }
index 374510f72baaaa02b3f8768ee4b5d8261ab1080c..ece76fb6a40cefb559ee4db138dfe28c7ef3b348 100644 (file)
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -39,7 +39,7 @@ static int ext4_release_dir(struct inode *inode,
                                struct file *filp);
 
 const struct file_operations ext4_dir_operations = {
-       .llseek         = generic_file_llseek,
+       .llseek         = ext4_llseek,
        .read           = generic_read_dir,
        .readdir        = ext4_readdir,         /* we take BKL. needed?*/
        .unlocked_ioctl = ext4_ioctl,
index 889ec9d5e6adfe5b62acf19b29fd37a7faf69f08..8b5dd6369f82c19d5ba28dea07018e4fb13a025f 100644 (file)
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -168,7 +168,20 @@ struct mpage_da_data {
        int pages_written;
        int retval;
 };
-#define        EXT4_IO_UNWRITTEN       0x1
+
+/*
+ * Flags for ext4_io_end->flags
+ */
+#define        EXT4_IO_END_UNWRITTEN   0x0001
+#define EXT4_IO_END_ERROR      0x0002
+
+struct ext4_io_page {
+       struct page     *p_page;
+       int             p_count;
+};
+
+#define MAX_IO_PAGES 128
+
 typedef struct ext4_io_end {
        struct list_head        list;           /* per-file finished IO list */
        struct inode            *inode;         /* file being written to */
@@ -179,8 +192,18 @@ typedef struct ext4_io_end {
        struct work_struct      work;           /* data work queue */
        struct kiocb            *iocb;          /* iocb struct for AIO */
        int                     result;         /* error value for AIO */
+       int                     num_io_pages;
+       struct ext4_io_page     *pages[MAX_IO_PAGES];
 } ext4_io_end_t;
 
+struct ext4_io_submit {
+       int                     io_op;
+       struct bio              *io_bio;
+       ext4_io_end_t           *io_end;
+       struct ext4_io_page     *io_page;
+       sector_t                io_next_block;
+};
+
 /*
  * Special inodes numbers
  */
@@ -205,6 +228,7 @@ typedef struct ext4_io_end {
 #define EXT4_MIN_BLOCK_SIZE            1024
 #define        EXT4_MAX_BLOCK_SIZE             65536
 #define EXT4_MIN_BLOCK_LOG_SIZE                10
+#define EXT4_MAX_BLOCK_LOG_SIZE                16
 #ifdef __KERNEL__
 # define EXT4_BLOCK_SIZE(s)            ((s)->s_blocksize)
 #else
@@ -889,6 +913,7 @@ struct ext4_inode_info {
 #define EXT4_MOUNT_DATA_ERR_ABORT      0x10000000 /* Abort on file data write */
 #define EXT4_MOUNT_BLOCK_VALIDITY      0x20000000 /* Block validity checking */
 #define EXT4_MOUNT_DISCARD             0x40000000 /* Issue DISCARD requests */
+#define EXT4_MOUNT_INIT_INODE_TABLE    0x80000000 /* Initialize uninitialized itables */
 
 #define clear_opt(o, opt)              o &= ~EXT4_MOUNT_##opt
 #define set_opt(o, opt)                        o |= EXT4_MOUNT_##opt
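
The new EXT4_MOUNT_INIT_INODE_TABLE bit (0x80000000, the last free bit in this word) joins the s_mount_opt bitmask driven by the set_opt/clear_opt macros shown in the context lines above. A standalone demo of the pattern (flag values copied from the diff; the extra parentheses and the test macro are additions for the demo):

#include <stdio.h>

#define EXT4_MOUNT_DISCARD          0x40000000
#define EXT4_MOUNT_INIT_INODE_TABLE 0x80000000  /* new in this patch */

#define clear_opt(o, opt)     ((o) &= ~EXT4_MOUNT_##opt)
#define set_opt(o, opt)       ((o) |= EXT4_MOUNT_##opt)
#define test_opt_demo(o, opt) (((o) & EXT4_MOUNT_##opt) != 0)

int main(void)
{
        unsigned int s_mount_opt = 0;

        set_opt(s_mount_opt, INIT_INODE_TABLE);
        set_opt(s_mount_opt, DISCARD);
        clear_opt(s_mount_opt, DISCARD);

        printf("lazy itable init: %d, discard: %d\n",
               test_opt_demo(s_mount_opt, INIT_INODE_TABLE),
               test_opt_demo(s_mount_opt, DISCARD));       /* 1, 0 */
        return 0;
}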
@@ -1087,7 +1112,6 @@ struct ext4_sb_info {
        struct completion s_kobj_unregister;
 
        /* Journaling */
-       struct inode *s_journal_inode;
        struct journal_s *s_journal;
        struct list_head s_orphan;
        struct mutex s_orphan_lock;
@@ -1120,10 +1144,7 @@ struct ext4_sb_info {
        /* for buddy allocator */
        struct ext4_group_info ***s_group_info;
        struct inode *s_buddy_cache;
-       long s_blocks_reserved;
-       spinlock_t s_reserve_lock;
        spinlock_t s_md_lock;
-       tid_t s_last_transaction;
        unsigned short *s_mb_offsets;
        unsigned int *s_mb_maxs;
 
@@ -1141,7 +1162,6 @@ struct ext4_sb_info {
        unsigned long s_mb_last_start;
 
        /* stats for buddy allocator */
-       spinlock_t s_mb_pa_lock;
        atomic_t s_bal_reqs;    /* number of reqs with len > 1 */
        atomic_t s_bal_success; /* we found long enough chunks */
        atomic_t s_bal_allocated;       /* in blocks */
@@ -1172,6 +1192,11 @@ struct ext4_sb_info {
 
        /* timer for periodic error stats printing */
        struct timer_list s_err_report;
+
+       /* Lazy inode table initialization info */
+       struct ext4_li_request *s_li_request;
+       /* Wait multiplier for lazy initialization thread */
+       unsigned int s_li_wait_mult;
 };
 
 static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
@@ -1533,7 +1558,42 @@ ext4_group_first_block_no(struct super_block *sb, ext4_group_t group_no)
 void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
                        ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp);
 
-extern struct proc_dir_entry *ext4_proc_root;
+/*
+ * Timeout and state flag for lazy initialization inode thread.
+ */
+#define EXT4_DEF_LI_WAIT_MULT                  10
+#define EXT4_DEF_LI_MAX_START_DELAY            5
+#define EXT4_LAZYINIT_QUIT                     0x0001
+#define EXT4_LAZYINIT_RUNNING                  0x0002
+
+/*
+ * Lazy inode table initialization info
+ */
+struct ext4_lazy_init {
+       unsigned long           li_state;
+
+       wait_queue_head_t       li_wait_daemon;
+       wait_queue_head_t       li_wait_task;
+       struct timer_list       li_timer;
+       struct task_struct      *li_task;
+
+       struct list_head        li_request_list;
+       struct mutex            li_list_mtx;
+};
+
+struct ext4_li_request {
+       struct super_block      *lr_super;
+       struct ext4_sb_info     *lr_sbi;
+       ext4_group_t            lr_next_group;
+       struct list_head        lr_request;
+       unsigned long           lr_next_sched;
+       unsigned long           lr_timeout;
+};
+
+struct ext4_features {
+       struct kobject f_kobj;
+       struct completion f_kobj_unregister;
+};
 
 /*
  * Function prototypes
@@ -1561,7 +1621,6 @@ extern unsigned long ext4_bg_num_gdb(struct super_block *sb,
 extern ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
                        ext4_fsblk_t goal, unsigned long *count, int *errp);
 extern int ext4_claim_free_blocks(struct ext4_sb_info *sbi, s64 nblocks);
-extern int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks);
 extern void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
                                ext4_fsblk_t block, unsigned long count);
 extern ext4_fsblk_t ext4_count_free_blocks(struct super_block *);
@@ -1605,11 +1664,9 @@ extern struct inode * ext4_orphan_get(struct super_block *, unsigned long);
 extern unsigned long ext4_count_free_inodes(struct super_block *);
 extern unsigned long ext4_count_dirs(struct super_block *);
 extern void ext4_check_inodes_bitmap(struct super_block *);
-extern unsigned ext4_init_inode_bitmap(struct super_block *sb,
-                                      struct buffer_head *bh,
-                                      ext4_group_t group,
-                                      struct ext4_group_desc *desc);
-extern void mark_bitmap_end(int start_bit, int end_bit, char *bitmap);
+extern void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap);
+extern int ext4_init_inode_table(struct super_block *sb,
+                                ext4_group_t group, int barrier);
 
 /* mballoc.c */
 extern long ext4_mb_stats;
@@ -1620,16 +1677,15 @@ extern ext4_fsblk_t ext4_mb_new_blocks(handle_t *,
                                struct ext4_allocation_request *, int *);
 extern int ext4_mb_reserve_blocks(struct super_block *, int);
 extern void ext4_discard_preallocations(struct inode *);
-extern int __init init_ext4_mballoc(void);
-extern void exit_ext4_mballoc(void);
+extern int __init ext4_init_mballoc(void);
+extern void ext4_exit_mballoc(void);
 extern void ext4_free_blocks(handle_t *handle, struct inode *inode,
                             struct buffer_head *bh, ext4_fsblk_t block,
                             unsigned long count, int flags);
 extern int ext4_mb_add_groupinfo(struct super_block *sb,
                ext4_group_t i, struct ext4_group_desc *desc);
-extern int ext4_mb_get_buddy_cache_lock(struct super_block *, ext4_group_t);
-extern void ext4_mb_put_buddy_cache_lock(struct super_block *,
-                                               ext4_group_t, int);
+extern int ext4_trim_fs(struct super_block *, struct fstrim_range *);
+
 /* inode.c */
 struct buffer_head *ext4_getblk(handle_t *, struct inode *,
                                                ext4_lblk_t, int, int *);
@@ -1657,13 +1713,11 @@ extern void ext4_get_inode_flags(struct ext4_inode_info *);
 extern int ext4_alloc_da_blocks(struct inode *inode);
 extern void ext4_set_aops(struct inode *inode);
 extern int ext4_writepage_trans_blocks(struct inode *);
-extern int ext4_meta_trans_blocks(struct inode *, int nrblocks, int idxblocks);
 extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
 extern int ext4_block_truncate_page(handle_t *handle,
                struct address_space *mapping, loff_t from);
 extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
 extern qsize_t *ext4_get_reserved_space(struct inode *inode);
-extern int flush_completed_IO(struct inode *inode);
 extern void ext4_da_update_reserve_space(struct inode *inode,
                                        int used, int quota_claim);
 /* ioctl.c */
@@ -1960,6 +2014,7 @@ extern const struct file_operations ext4_dir_operations;
 /* file.c */
 extern const struct inode_operations ext4_file_inode_operations;
 extern const struct file_operations ext4_file_operations;
+extern loff_t ext4_llseek(struct file *file, loff_t offset, int origin);
 
 /* namei.c */
 extern const struct inode_operations ext4_dir_inode_operations;
@@ -1973,8 +2028,8 @@ extern const struct inode_operations ext4_fast_symlink_inode_operations;
 /* block_validity */
 extern void ext4_release_system_zone(struct super_block *sb);
 extern int ext4_setup_system_zone(struct super_block *sb);
-extern int __init init_ext4_system_zone(void);
-extern void exit_ext4_system_zone(void);
+extern int __init ext4_init_system_zone(void);
+extern void ext4_exit_system_zone(void);
 extern int ext4_data_block_valid(struct ext4_sb_info *sbi,
                                 ext4_fsblk_t start_blk,
                                 unsigned int count);
@@ -2002,6 +2057,17 @@ extern int ext4_move_extents(struct file *o_filp, struct file *d_filp,
                             __u64 start_orig, __u64 start_donor,
                             __u64 len, __u64 *moved_len);
 
+/* page-io.c */
+extern int __init ext4_init_pageio(void);
+extern void ext4_exit_pageio(void);
+extern void ext4_free_io_end(ext4_io_end_t *io);
+extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags);
+extern int ext4_end_io_nolock(ext4_io_end_t *io);
+extern void ext4_io_submit(struct ext4_io_submit *io);
+extern int ext4_bio_write_page(struct ext4_io_submit *io,
+                              struct page *page,
+                              int len,
+                              struct writeback_control *wbc);
 
 /* BH_Uninit flag: blocks are allocated but uninitialized on disk */
 enum ext4_state_bits {
index bdb6ce7e2eb48d08c1bf006e28fdc7b27a8651dd..28ce70fd9cd03762269216aa09e47ef96f79fd5b 100644 (file)
--- a/fs/ext4/ext4_extents.h
+++ b/fs/ext4/ext4_extents.h
@@ -225,11 +225,60 @@ static inline void ext4_ext_mark_initialized(struct ext4_extent *ext)
        ext->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ext));
 }
 
+/*
+ * ext4_ext_pblock:
+ * combine low and high parts of physical block number into ext4_fsblk_t
+ */
+static inline ext4_fsblk_t ext4_ext_pblock(struct ext4_extent *ex)
+{
+       ext4_fsblk_t block;
+
+       block = le32_to_cpu(ex->ee_start_lo);
+       block |= ((ext4_fsblk_t) le16_to_cpu(ex->ee_start_hi) << 31) << 1;
+       return block;
+}
+
+/*
+ * ext4_idx_pblock:
+ * combine low and high parts of a leaf physical block number into ext4_fsblk_t
+ */
+static inline ext4_fsblk_t ext4_idx_pblock(struct ext4_extent_idx *ix)
+{
+       ext4_fsblk_t block;
+
+       block = le32_to_cpu(ix->ei_leaf_lo);
+       block |= ((ext4_fsblk_t) le16_to_cpu(ix->ei_leaf_hi) << 31) << 1;
+       return block;
+}
+
+/*
+ * ext4_ext_store_pblock:
+ * stores a large physical block number into an extent struct,
+ * breaking it into parts
+ */
+static inline void ext4_ext_store_pblock(struct ext4_extent *ex,
+                                        ext4_fsblk_t pb)
+{
+       ex->ee_start_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff));
+       ex->ee_start_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) &
+                                     0xffff);
+}
+
+/*
+ * ext4_idx_store_pblock:
+ * stores a large physical block number into an index struct,
+ * breaking it into parts
+ */
+static inline void ext4_idx_store_pblock(struct ext4_extent_idx *ix,
+                                        ext4_fsblk_t pb)
+{
+       ix->ei_leaf_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff));
+       ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) &
+                                    0xffff);
+}
+
 extern int ext4_ext_calc_metadata_amount(struct inode *inode,
                                         sector_t lblocks);
-extern ext4_fsblk_t ext_pblock(struct ext4_extent *ex);
-extern ext4_fsblk_t idx_pblock(struct ext4_extent_idx *);
-extern void ext4_ext_store_pblock(struct ext4_extent *, ext4_fsblk_t);
 extern int ext4_extent_tree_init(handle_t *, struct inode *);
 extern int ext4_ext_calc_credits_for_single_extent(struct inode *inode,
                                                   int num,
@@ -237,19 +286,9 @@ extern int ext4_ext_calc_credits_for_single_extent(struct inode *inode,
 extern int ext4_can_extents_be_merged(struct inode *inode,
                                      struct ext4_extent *ex1,
                                      struct ext4_extent *ex2);
-extern int ext4_ext_try_to_merge(struct inode *inode,
-                                struct ext4_ext_path *path,
-                                struct ext4_extent *);
-extern unsigned int ext4_ext_check_overlap(struct inode *, struct ext4_extent *, struct ext4_ext_path *);
 extern int ext4_ext_insert_extent(handle_t *, struct inode *, struct ext4_ext_path *, struct ext4_extent *, int);
-extern int ext4_ext_walk_space(struct inode *, ext4_lblk_t, ext4_lblk_t,
-                                                       ext_prepare_callback, void *);
 extern struct ext4_ext_path *ext4_ext_find_extent(struct inode *, ext4_lblk_t,
                                                        struct ext4_ext_path *);
-extern int ext4_ext_search_left(struct inode *, struct ext4_ext_path *,
-                                               ext4_lblk_t *, ext4_fsblk_t *);
-extern int ext4_ext_search_right(struct inode *, struct ext4_ext_path *,
-                                               ext4_lblk_t *, ext4_fsblk_t *);
 extern void ext4_ext_drop_refs(struct ext4_ext_path *);
 extern int ext4_ext_check_inode(struct inode *inode);
 #endif /* _EXT4_EXTENTS */
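
The inline helpers added above pack a 48-bit physical block number into a 32-bit low field and a 16-bit high field; the load path ORs in (hi << 31) << 1, which, since the 64-bit cast is applied before the shifts, is equivalent to a single 32-bit shift. A standalone round-trip check, with plain uint32_t/uint16_t fields standing in for the on-disk __le32/__le16:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t pb = 0x123456789abcULL;        /* a 48-bit block number */
        uint32_t lo;
        uint16_t hi;
        uint64_t back;

        /* store: break into low 32 bits and high 16 bits, as the
         * *_store_pblock() helpers do */
        lo = (uint32_t)(pb & 0xffffffff);
        hi = (uint16_t)(((pb >> 31) >> 1) & 0xffff);

        /* load: recombine exactly as ext4_ext_pblock() does */
        back = lo | (((uint64_t)hi << 31) << 1);

        assert(back == pb);
        printf("pb=%llx back=%llx\n",
               (unsigned long long)pb, (unsigned long long)back);
        return 0;
}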
index 06328d3e5717fd368ce7872fe0aa3db879cf6817..0554c48cb1fddbc97bc81c3443218b6a9e97fca8 100644 (file)
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
 #include "ext4_jbd2.h"
 #include "ext4_extents.h"
 
-
-/*
- * ext_pblock:
- * combine low and high parts of physical block number into ext4_fsblk_t
- */
-ext4_fsblk_t ext_pblock(struct ext4_extent *ex)
-{
-       ext4_fsblk_t block;
-
-       block = le32_to_cpu(ex->ee_start_lo);
-       block |= ((ext4_fsblk_t) le16_to_cpu(ex->ee_start_hi) << 31) << 1;
-       return block;
-}
-
-/*
- * idx_pblock:
- * combine low and high parts of a leaf physical block number into ext4_fsblk_t
- */
-ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix)
-{
-       ext4_fsblk_t block;
-
-       block = le32_to_cpu(ix->ei_leaf_lo);
-       block |= ((ext4_fsblk_t) le16_to_cpu(ix->ei_leaf_hi) << 31) << 1;
-       return block;
-}
-
-/*
- * ext4_ext_store_pblock:
- * stores a large physical block number into an extent struct,
- * breaking it into parts
- */
-void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb)
-{
-       ex->ee_start_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff));
-       ex->ee_start_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
-}
-
-/*
- * ext4_idx_store_pblock:
- * stores a large physical block number into an index struct,
- * breaking it into parts
- */
-static void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb)
-{
-       ix->ei_leaf_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff));
-       ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
-}
-
 static int ext4_ext_truncate_extend_restart(handle_t *handle,
                                            struct inode *inode,
                                            int needed)
@@ -169,7 +120,8 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
                /* try to predict block placement */
                ex = path[depth].p_ext;
                if (ex)
-                       return ext_pblock(ex)+(block-le32_to_cpu(ex->ee_block));
+                       return (ext4_ext_pblock(ex) +
+                               (block - le32_to_cpu(ex->ee_block)));
 
                /* it looks like index is empty;
                 * try to find starting block from index itself */
@@ -354,7 +306,7 @@ ext4_ext_max_entries(struct inode *inode, int depth)
 
 static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
 {
-       ext4_fsblk_t block = ext_pblock(ext);
+       ext4_fsblk_t block = ext4_ext_pblock(ext);
        int len = ext4_ext_get_actual_len(ext);
 
        return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
@@ -363,7 +315,7 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
 static int ext4_valid_extent_idx(struct inode *inode,
                                struct ext4_extent_idx *ext_idx)
 {
-       ext4_fsblk_t block = idx_pblock(ext_idx);
+       ext4_fsblk_t block = ext4_idx_pblock(ext_idx);
 
        return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
 }
@@ -463,13 +415,13 @@ static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
        for (k = 0; k <= l; k++, path++) {
                if (path->p_idx) {
                  ext_debug("  %d->%llu", le32_to_cpu(path->p_idx->ei_block),
-                           idx_pblock(path->p_idx));
+                           ext4_idx_pblock(path->p_idx));
                } else if (path->p_ext) {
                        ext_debug("  %d:[%d]%d:%llu ",
                                  le32_to_cpu(path->p_ext->ee_block),
                                  ext4_ext_is_uninitialized(path->p_ext),
                                  ext4_ext_get_actual_len(path->p_ext),
-                                 ext_pblock(path->p_ext));
+                                 ext4_ext_pblock(path->p_ext));
                } else
                        ext_debug("  []");
        }
@@ -494,7 +446,7 @@ static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
        for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
                ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
                          ext4_ext_is_uninitialized(ex),
-                         ext4_ext_get_actual_len(ex), ext_pblock(ex));
+                         ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
        }
        ext_debug("\n");
 }
@@ -545,7 +497,7 @@ ext4_ext_binsearch_idx(struct inode *inode,
 
        path->p_idx = l - 1;
        ext_debug("  -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
-                 idx_pblock(path->p_idx));
+                 ext4_idx_pblock(path->p_idx));
 
 #ifdef CHECK_BINSEARCH
        {
@@ -614,7 +566,7 @@ ext4_ext_binsearch(struct inode *inode,
        path->p_ext = l - 1;
        ext_debug("  -> %d:%llu:[%d]%d ",
                        le32_to_cpu(path->p_ext->ee_block),
-                       ext_pblock(path->p_ext),
+                       ext4_ext_pblock(path->p_ext),
                        ext4_ext_is_uninitialized(path->p_ext),
                        ext4_ext_get_actual_len(path->p_ext));
 
@@ -682,7 +634,7 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
                          ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
 
                ext4_ext_binsearch_idx(inode, path + ppos, block);
-               path[ppos].p_block = idx_pblock(path[ppos].p_idx);
+               path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
                path[ppos].p_depth = i;
                path[ppos].p_ext = NULL;
 
@@ -721,7 +673,7 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
        ext4_ext_binsearch(inode, path + ppos, block);
        /* if not an empty leaf */
        if (path[ppos].p_ext)
-               path[ppos].p_block = ext_pblock(path[ppos].p_ext);
+               path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);
 
        ext4_ext_show_path(inode, path);
 
@@ -739,9 +691,9 @@ err:
  * insert new index [@logical;@ptr] into the block at @curp;
  * check where to insert: before @curp or after @curp
  */
-int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
-                               struct ext4_ext_path *curp,
-                               int logical, ext4_fsblk_t ptr)
+static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
+                                struct ext4_ext_path *curp,
+                                int logical, ext4_fsblk_t ptr)
 {
        struct ext4_extent_idx *ix;
        int len, err;
@@ -917,7 +869,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
                        EXT_MAX_EXTENT(path[depth].p_hdr)) {
                ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
                                le32_to_cpu(path[depth].p_ext->ee_block),
-                               ext_pblock(path[depth].p_ext),
+                               ext4_ext_pblock(path[depth].p_ext),
                                ext4_ext_is_uninitialized(path[depth].p_ext),
                                ext4_ext_get_actual_len(path[depth].p_ext),
                                newblock);
@@ -1007,7 +959,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
                while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
                        ext_debug("%d: move %d:%llu in new index %llu\n", i,
                                        le32_to_cpu(path[i].p_idx->ei_block),
-                                       idx_pblock(path[i].p_idx),
+                                       ext4_idx_pblock(path[i].p_idx),
                                        newblock);
                        /*memmove(++fidx, path[i].p_idx++,
                                        sizeof(struct ext4_extent_idx));
@@ -1146,7 +1098,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
        ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
                  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
                  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
-                 idx_pblock(EXT_FIRST_INDEX(neh)));
+                 ext4_idx_pblock(EXT_FIRST_INDEX(neh)));
 
        neh->eh_depth = cpu_to_le16(path->p_depth + 1);
        err = ext4_ext_dirty(handle, inode, curp);
@@ -1232,9 +1184,9 @@ out:
  * returns 0 at @phys
  * return value contains 0 (success) or error code
  */
-int
-ext4_ext_search_left(struct inode *inode, struct ext4_ext_path *path,
-                       ext4_lblk_t *logical, ext4_fsblk_t *phys)
+static int ext4_ext_search_left(struct inode *inode,
+                               struct ext4_ext_path *path,
+                               ext4_lblk_t *logical, ext4_fsblk_t *phys)
 {
        struct ext4_extent_idx *ix;
        struct ext4_extent *ex;
@@ -1286,7 +1238,7 @@ ext4_ext_search_left(struct inode *inode, struct ext4_ext_path *path,
        }
 
        *logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
-       *phys = ext_pblock(ex) + ee_len - 1;
+       *phys = ext4_ext_pblock(ex) + ee_len - 1;
        return 0;
 }
 
@@ -1297,9 +1249,9 @@ ext4_ext_search_left(struct inode *inode, struct ext4_ext_path *path,
  * returns 0 at @phys
  * return value contains 0 (success) or error code
  */
-int
-ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path,
-                       ext4_lblk_t *logical, ext4_fsblk_t *phys)
+static int ext4_ext_search_right(struct inode *inode,
+                                struct ext4_ext_path *path,
+                                ext4_lblk_t *logical, ext4_fsblk_t *phys)
 {
        struct buffer_head *bh = NULL;
        struct ext4_extent_header *eh;
@@ -1342,7 +1294,7 @@ ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path,
                        }
                }
                *logical = le32_to_cpu(ex->ee_block);
-               *phys = ext_pblock(ex);
+               *phys = ext4_ext_pblock(ex);
                return 0;
        }
 
@@ -1357,7 +1309,7 @@ ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path,
                /* next allocated block in this leaf */
                ex++;
                *logical = le32_to_cpu(ex->ee_block);
-               *phys = ext_pblock(ex);
+               *phys = ext4_ext_pblock(ex);
                return 0;
        }
 
@@ -1376,7 +1328,7 @@ got_index:
         * follow it and find the closest allocated
         * block to the right */
        ix++;
-       block = idx_pblock(ix);
+       block = ext4_idx_pblock(ix);
        while (++depth < path->p_depth) {
                bh = sb_bread(inode->i_sb, block);
                if (bh == NULL)
@@ -1388,7 +1340,7 @@ got_index:
                        return -EIO;
                }
                ix = EXT_FIRST_INDEX(eh);
-               block = idx_pblock(ix);
+               block = ext4_idx_pblock(ix);
                put_bh(bh);
        }
 
@@ -1402,7 +1354,7 @@ got_index:
        }
        ex = EXT_FIRST_EXTENT(eh);
        *logical = le32_to_cpu(ex->ee_block);
-       *phys = ext_pblock(ex);
+       *phys = ext4_ext_pblock(ex);
        put_bh(bh);
        return 0;
 }
@@ -1573,7 +1525,7 @@ ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
                return 0;
 #endif
 
-       if (ext_pblock(ex1) + ext1_ee_len == ext_pblock(ex2))
+       if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
                return 1;
        return 0;
 }
@@ -1585,9 +1537,9 @@ ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
  * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
  * 1 if they got merged.
  */
-int ext4_ext_try_to_merge(struct inode *inode,
-                         struct ext4_ext_path *path,
-                         struct ext4_extent *ex)
+static int ext4_ext_try_to_merge(struct inode *inode,
+                                struct ext4_ext_path *path,
+                                struct ext4_extent *ex)
 {
        struct ext4_extent_header *eh;
        unsigned int depth, len;
@@ -1632,9 +1584,9 @@ int ext4_ext_try_to_merge(struct inode *inode,
  * such that there will be no overlap, and then returns 1.
  * If there is no overlap found, it returns 0.
  */
-unsigned int ext4_ext_check_overlap(struct inode *inode,
-                                   struct ext4_extent *newext,
-                                   struct ext4_ext_path *path)
+static unsigned int ext4_ext_check_overlap(struct inode *inode,
+                                          struct ext4_extent *newext,
+                                          struct ext4_ext_path *path)
 {
        ext4_lblk_t b1, b2;
        unsigned int depth, len1;
@@ -1706,11 +1658,12 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
        if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO)
                && ext4_can_extents_be_merged(inode, ex, newext)) {
                ext_debug("append [%d]%d block to %d:[%d]%d (from %llu)\n",
-                               ext4_ext_is_uninitialized(newext),
-                               ext4_ext_get_actual_len(newext),
-                               le32_to_cpu(ex->ee_block),
-                               ext4_ext_is_uninitialized(ex),
-                               ext4_ext_get_actual_len(ex), ext_pblock(ex));
+                         ext4_ext_is_uninitialized(newext),
+                         ext4_ext_get_actual_len(newext),
+                         le32_to_cpu(ex->ee_block),
+                         ext4_ext_is_uninitialized(ex),
+                         ext4_ext_get_actual_len(ex),
+                         ext4_ext_pblock(ex));
                err = ext4_ext_get_access(handle, inode, path + depth);
                if (err)
                        return err;
@@ -1780,7 +1733,7 @@ has_space:
                /* there is no extent in this leaf, create first one */
                ext_debug("first extent in the leaf: %d:%llu:[%d]%d\n",
                                le32_to_cpu(newext->ee_block),
-                               ext_pblock(newext),
+                               ext4_ext_pblock(newext),
                                ext4_ext_is_uninitialized(newext),
                                ext4_ext_get_actual_len(newext));
                path[depth].p_ext = EXT_FIRST_EXTENT(eh);
@@ -1794,7 +1747,7 @@ has_space:
                        ext_debug("insert %d:%llu:[%d]%d after: nearest 0x%p, "
                                        "move %d from 0x%p to 0x%p\n",
                                        le32_to_cpu(newext->ee_block),
-                                       ext_pblock(newext),
+                                       ext4_ext_pblock(newext),
                                        ext4_ext_is_uninitialized(newext),
                                        ext4_ext_get_actual_len(newext),
                                        nearex, len, nearex + 1, nearex + 2);
@@ -1808,7 +1761,7 @@ has_space:
                ext_debug("insert %d:%llu:[%d]%d before: nearest 0x%p, "
                                "move %d from 0x%p to 0x%p\n",
                                le32_to_cpu(newext->ee_block),
-                               ext_pblock(newext),
+                               ext4_ext_pblock(newext),
                                ext4_ext_is_uninitialized(newext),
                                ext4_ext_get_actual_len(newext),
                                nearex, len, nearex + 1, nearex + 2);
@@ -1819,7 +1772,7 @@ has_space:
        le16_add_cpu(&eh->eh_entries, 1);
        nearex = path[depth].p_ext;
        nearex->ee_block = newext->ee_block;
-       ext4_ext_store_pblock(nearex, ext_pblock(newext));
+       ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
        nearex->ee_len = newext->ee_len;
 
 merge:
@@ -1845,9 +1798,9 @@ cleanup:
        return err;
 }
 
-int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
-                       ext4_lblk_t num, ext_prepare_callback func,
-                       void *cbdata)
+static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
+                              ext4_lblk_t num, ext_prepare_callback func,
+                              void *cbdata)
 {
        struct ext4_ext_path *path = NULL;
        struct ext4_ext_cache cbex;
@@ -1923,7 +1876,7 @@ int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
                } else {
                        cbex.ec_block = le32_to_cpu(ex->ee_block);
                        cbex.ec_len = ext4_ext_get_actual_len(ex);
-                       cbex.ec_start = ext_pblock(ex);
+                       cbex.ec_start = ext4_ext_pblock(ex);
                        cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
                }
 
@@ -2073,7 +2026,7 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
 
        /* free index block */
        path--;
-       leaf = idx_pblock(path->p_idx);
+       leaf = ext4_idx_pblock(path->p_idx);
        if (unlikely(path->p_hdr->eh_entries == 0)) {
                EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
                return -EIO;
@@ -2181,7 +2134,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
                ext4_fsblk_t start;
 
                num = le32_to_cpu(ex->ee_block) + ee_len - from;
-               start = ext_pblock(ex) + ee_len - num;
+               start = ext4_ext_pblock(ex) + ee_len - num;
                ext_debug("free last %u blocks starting %llu\n", num, start);
                ext4_free_blocks(handle, inode, 0, start, num, flags);
        } else if (from == le32_to_cpu(ex->ee_block)
@@ -2310,7 +2263,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
                        goto out;
 
                ext_debug("new extent: %u:%u:%llu\n", block, num,
-                               ext_pblock(ex));
+                               ext4_ext_pblock(ex));
                ex--;
                ex_ee_block = le32_to_cpu(ex->ee_block);
                ex_ee_len = ext4_ext_get_actual_len(ex);
@@ -2421,9 +2374,9 @@ again:
                        struct buffer_head *bh;
                        /* go to the next level */
                        ext_debug("move to level %d (block %llu)\n",
-                                 i + 1, idx_pblock(path[i].p_idx));
+                                 i + 1, ext4_idx_pblock(path[i].p_idx));
                        memset(path + i + 1, 0, sizeof(*path));
-                       bh = sb_bread(sb, idx_pblock(path[i].p_idx));
+                       bh = sb_bread(sb, ext4_idx_pblock(path[i].p_idx));
                        if (!bh) {
                                /* should we reset i_size? */
                                err = -EIO;
@@ -2535,77 +2488,21 @@ void ext4_ext_release(struct super_block *sb)
 #endif
 }
 
-static void bi_complete(struct bio *bio, int error)
-{
-       complete((struct completion *)bio->bi_private);
-}
-
 /* FIXME!! we need to try to merge to left or right after zero-out  */
 static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
 {
+       ext4_fsblk_t ee_pblock;
+       unsigned int ee_len;
        int ret;
-       struct bio *bio;
-       int blkbits, blocksize;
-       sector_t ee_pblock;
-       struct completion event;
-       unsigned int ee_len, len, done, offset;
 
-
-       blkbits   = inode->i_blkbits;
-       blocksize = inode->i_sb->s_blocksize;
        ee_len    = ext4_ext_get_actual_len(ex);
-       ee_pblock = ext_pblock(ex);
-
-       /* convert ee_pblock to 512 byte sectors */
-       ee_pblock = ee_pblock << (blkbits - 9);
-
-       while (ee_len > 0) {
-
-               if (ee_len > BIO_MAX_PAGES)
-                       len = BIO_MAX_PAGES;
-               else
-                       len = ee_len;
-
-               bio = bio_alloc(GFP_NOIO, len);
-               if (!bio)
-                       return -ENOMEM;
-
-               bio->bi_sector = ee_pblock;
-               bio->bi_bdev   = inode->i_sb->s_bdev;
-
-               done = 0;
-               offset = 0;
-               while (done < len) {
-                       ret = bio_add_page(bio, ZERO_PAGE(0),
-                                                       blocksize, offset);
-                       if (ret != blocksize) {
-                               /*
-                                * We can't add any more pages because of
-                                * hardware limitations.  Start a new bio.
-                                */
-                               break;
-                       }
-                       done++;
-                       offset += blocksize;
-                       if (offset >= PAGE_CACHE_SIZE)
-                               offset = 0;
-               }
+       ee_pblock = ext4_ext_pblock(ex);
 
-               init_completion(&event);
-               bio->bi_private = &event;
-               bio->bi_end_io = bi_complete;
-               submit_bio(WRITE, bio);
-               wait_for_completion(&event);
+       ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
+       if (ret > 0)
+               ret = 0;
 
-               if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
-                       bio_put(bio);
-                       return -EIO;
-               }
-               bio_put(bio);
-               ee_len    -= done;
-               ee_pblock += done  << (blkbits - 9);
-       }
-       return 0;
+       return ret;
 }
 
 #define EXT4_EXT_ZERO_LEN 7
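The hunk above replaces some sixty lines of hand-built bio submission with a single call to sb_issue_zeroout(), which zeroes a block range and waits for completion itself (so the bi_complete()/completion plumbing becomes dead code). For context, the helper also performs the block-to-sector shift the deleted code did by hand; roughly, as a sketch of the 2.6.37-era inline from include/linux/blkdev.h rather than a verbatim quote:

    static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
                                       sector_t nr_blocks, gfp_t gfp_mask)
    {
            /* convert filesystem blocks to 512-byte sectors, then hand the
             * range to the block layer, which zeroes it and waits */
            return blkdev_issue_zeroout(sb->s_bdev,
                                        block << (sb->s_blocksize_bits - 9),
                                        nr_blocks << (sb->s_blocksize_bits - 9),
                                        gfp_mask);
    }
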
@@ -2651,12 +2548,12 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
        ee_block = le32_to_cpu(ex->ee_block);
        ee_len = ext4_ext_get_actual_len(ex);
        allocated = ee_len - (map->m_lblk - ee_block);
-       newblock = map->m_lblk - ee_block + ext_pblock(ex);
+       newblock = map->m_lblk - ee_block + ext4_ext_pblock(ex);
 
        ex2 = ex;
        orig_ex.ee_block = ex->ee_block;
        orig_ex.ee_len   = cpu_to_le16(ee_len);
-       ext4_ext_store_pblock(&orig_ex, ext_pblock(ex));
+       ext4_ext_store_pblock(&orig_ex, ext4_ext_pblock(ex));
 
        /*
         * It is safe to convert extent to initialized via explicit
@@ -2675,7 +2572,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
                /* update the extent length and mark as initialized */
                ex->ee_block = orig_ex.ee_block;
                ex->ee_len   = orig_ex.ee_len;
-               ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
+               ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
                ext4_ext_dirty(handle, inode, path + depth);
                /* zeroed the full extent */
                return allocated;
@@ -2710,7 +2607,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
                        ex->ee_block = orig_ex.ee_block;
                        ex->ee_len   = cpu_to_le16(ee_len - allocated);
                        ext4_ext_mark_uninitialized(ex);
-                       ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
+                       ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
                        ext4_ext_dirty(handle, inode, path + depth);
 
                        ex3 = &newex;
@@ -2725,7 +2622,8 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
                                        goto fix_extent_len;
                                ex->ee_block = orig_ex.ee_block;
                                ex->ee_len   = orig_ex.ee_len;
-                               ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
+                               ext4_ext_store_pblock(ex,
+                                       ext4_ext_pblock(&orig_ex));
                                ext4_ext_dirty(handle, inode, path + depth);
                                /* blocks available from map->m_lblk */
                                return allocated;
@@ -2782,7 +2680,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
                        /* update the extent length and mark as initialized */
                        ex->ee_block = orig_ex.ee_block;
                        ex->ee_len   = orig_ex.ee_len;
-                       ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
+                       ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
                        ext4_ext_dirty(handle, inode, path + depth);
                        /* zeroed the full extent */
                        /* blocks available from map->m_lblk */
@@ -2833,7 +2731,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
                        /* update the extent length and mark as initialized */
                        ex->ee_block = orig_ex.ee_block;
                        ex->ee_len   = orig_ex.ee_len;
-                       ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
+                       ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
                        ext4_ext_dirty(handle, inode, path + depth);
                        /* zero out the first half */
                        /* blocks available from map->m_lblk */
@@ -2902,7 +2800,7 @@ insert:
                /* update the extent length and mark as initialized */
                ex->ee_block = orig_ex.ee_block;
                ex->ee_len   = orig_ex.ee_len;
-               ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
+               ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
                ext4_ext_dirty(handle, inode, path + depth);
                /* zero out the first half */
                return allocated;
@@ -2915,7 +2813,7 @@ out:
 fix_extent_len:
        ex->ee_block = orig_ex.ee_block;
        ex->ee_len   = orig_ex.ee_len;
-       ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
+       ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
        ext4_ext_mark_uninitialized(ex);
        ext4_ext_dirty(handle, inode, path + depth);
        return err;
@@ -2973,12 +2871,12 @@ static int ext4_split_unwritten_extents(handle_t *handle,
        ee_block = le32_to_cpu(ex->ee_block);
        ee_len = ext4_ext_get_actual_len(ex);
        allocated = ee_len - (map->m_lblk - ee_block);
-       newblock = map->m_lblk - ee_block + ext_pblock(ex);
+       newblock = map->m_lblk - ee_block + ext4_ext_pblock(ex);
 
        ex2 = ex;
        orig_ex.ee_block = ex->ee_block;
        orig_ex.ee_len   = cpu_to_le16(ee_len);
-       ext4_ext_store_pblock(&orig_ex, ext_pblock(ex));
+       ext4_ext_store_pblock(&orig_ex, ext4_ext_pblock(ex));
 
        /*
         * It is safe to convert extent to initialized via explicit
@@ -3027,7 +2925,7 @@ static int ext4_split_unwritten_extents(handle_t *handle,
                        /* update the extent length and mark as initialized */
                        ex->ee_block = orig_ex.ee_block;
                        ex->ee_len   = orig_ex.ee_len;
-                       ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
+                       ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
                        ext4_ext_dirty(handle, inode, path + depth);
                        /* zeroed the full extent */
                        /* blocks available from map->m_lblk */
@@ -3099,7 +2997,7 @@ insert:
                /* update the extent length and mark as initialized */
                ex->ee_block = orig_ex.ee_block;
                ex->ee_len   = orig_ex.ee_len;
-               ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
+               ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
                ext4_ext_dirty(handle, inode, path + depth);
                /* zero out the first half */
                return allocated;
@@ -3112,7 +3010,7 @@ out:
 fix_extent_len:
        ex->ee_block = orig_ex.ee_block;
        ex->ee_len   = orig_ex.ee_len;
-       ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
+       ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
        ext4_ext_mark_uninitialized(ex);
        ext4_ext_dirty(handle, inode, path + depth);
        return err;
@@ -3180,6 +3078,57 @@ static void unmap_underlying_metadata_blocks(struct block_device *bdev,
                 unmap_underlying_metadata(bdev, block + i);
 }
 
+/*
+ * Handle EOFBLOCKS_FL flag, clearing it if necessary
+ */
+static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
+                             struct ext4_map_blocks *map,
+                             struct ext4_ext_path *path,
+                             unsigned int len)
+{
+       int i, depth;
+       struct ext4_extent_header *eh;
+       struct ext4_extent *ex, *last_ex;
+
+       if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
+               return 0;
+
+       depth = ext_depth(inode);
+       eh = path[depth].p_hdr;
+       ex = path[depth].p_ext;
+
+       if (unlikely(!eh->eh_entries)) {
+               EXT4_ERROR_INODE(inode, "eh->eh_entries == 0 and "
+                                "EOFBLOCKS_FL set");
+               return -EIO;
+       }
+       last_ex = EXT_LAST_EXTENT(eh);
+       /*
+        * We should clear the EOFBLOCKS_FL flag if we are writing the
+        * last block in the last extent in the file.  We test this by
+        * first checking to see if the caller of
+        * ext4_ext_get_blocks() was interested in the last block (or
+        * a block beyond the last block) in the current extent.  If
+        * this turns out to be false, we can bail out from this
+        * function immediately.
+        */
+       if (map->m_lblk + len < le32_to_cpu(last_ex->ee_block) +
+           ext4_ext_get_actual_len(last_ex))
+               return 0;
+       /*
+        * If the caller does appear to be planning to write at or
+        * beyond the end of the current extent, we then test to see
+        * if the current extent is the last extent in the file, by
+        * checking to make sure it was reached via the rightmost node
+        * at each level of the tree.
+        */
+       for (i = depth-1; i >= 0; i--)
+               if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
+                       return 0;
+       ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
+       return ext4_mark_inode_dirty(handle, inode);
+}
+
 static int
 ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
                        struct ext4_map_blocks *map,
@@ -3206,7 +3155,7 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
                 * completed
                 */
                if (io)
-                       io->flag = EXT4_IO_UNWRITTEN;
+                       io->flag = EXT4_IO_END_UNWRITTEN;
                else
                        ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
                if (ext4_should_dioread_nolock(inode))
@@ -3217,8 +3166,12 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
        if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
                ret = ext4_convert_unwritten_extents_endio(handle, inode,
                                                        path);
-               if (ret >= 0)
+               if (ret >= 0) {
                        ext4_update_inode_fsync_trans(handle, inode, 1);
+                       err = check_eofblocks_fl(handle, inode, map, path,
+                                                map->m_len);
+               } else
+                       err = ret;
                goto out2;
        }
        /* buffered IO case */
@@ -3244,8 +3197,13 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
 
        /* buffered write, writepage time, convert*/
        ret = ext4_ext_convert_to_initialized(handle, inode, map, path);
-       if (ret >= 0)
+       if (ret >= 0) {
                ext4_update_inode_fsync_trans(handle, inode, 1);
+               err = check_eofblocks_fl(handle, inode, map, path, map->m_len);
+               if (err < 0)
+                       goto out2;
+       }
+
 out:
        if (ret <= 0) {
                err = ret;
@@ -3292,6 +3250,7 @@ out2:
        }
        return err ? err : allocated;
 }
+
 /*
  * Block allocation/map/preallocation routine for extents based files
  *
@@ -3315,9 +3274,9 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 {
        struct ext4_ext_path *path = NULL;
        struct ext4_extent_header *eh;
-       struct ext4_extent newex, *ex, *last_ex;
+       struct ext4_extent newex, *ex;
        ext4_fsblk_t newblock;
-       int i, err = 0, depth, ret, cache_type;
+       int err = 0, depth, ret, cache_type;
        unsigned int allocated = 0;
        struct ext4_allocation_request ar;
        ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
@@ -3341,7 +3300,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
                        /* block is already allocated */
                        newblock = map->m_lblk
                                   - le32_to_cpu(newex.ee_block)
-                                  + ext_pblock(&newex);
+                                  + ext4_ext_pblock(&newex);
                        /* number of remaining blocks in the extent */
                        allocated = ext4_ext_get_actual_len(&newex) -
                                (map->m_lblk - le32_to_cpu(newex.ee_block));
@@ -3379,7 +3338,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
        ex = path[depth].p_ext;
        if (ex) {
                ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
-               ext4_fsblk_t ee_start = ext_pblock(ex);
+               ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
                unsigned short ee_len;
 
                /*
@@ -3488,7 +3447,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
                 */
                if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
                        if (io)
-                               io->flag = EXT4_IO_UNWRITTEN;
+                               io->flag = EXT4_IO_END_UNWRITTEN;
                        else
                                ext4_set_inode_state(inode,
                                                     EXT4_STATE_DIO_UNWRITTEN);
@@ -3497,44 +3456,23 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
                        map->m_flags |= EXT4_MAP_UNINIT;
        }
 
-       if (unlikely(ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))) {
-               if (unlikely(!eh->eh_entries)) {
-                       EXT4_ERROR_INODE(inode,
-                                        "eh->eh_entries == 0 and "
-                                        "EOFBLOCKS_FL set");
-                       err = -EIO;
-                       goto out2;
-               }
-               last_ex = EXT_LAST_EXTENT(eh);
-               /*
-                * If the current leaf block was reached by looking at
-                * the last index block all the way down the tree, and
-                * we are extending the inode beyond the last extent
-                * in the current leaf block, then clear the
-                * EOFBLOCKS_FL flag.
-                */
-               for (i = depth-1; i >= 0; i--) {
-                       if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
-                               break;
-               }
-               if ((i < 0) &&
-                   (map->m_lblk + ar.len > le32_to_cpu(last_ex->ee_block) +
-                    ext4_ext_get_actual_len(last_ex)))
-                       ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
-       }
+       err = check_eofblocks_fl(handle, inode, map, path, ar.len);
+       if (err)
+               goto out2;
+
        err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
        if (err) {
                /* free data blocks we just allocated */
                /* not a good idea to call discard here directly,
                 * but otherwise we'd need to call it every free() */
                ext4_discard_preallocations(inode);
-               ext4_free_blocks(handle, inode, 0, ext_pblock(&newex),
+               ext4_free_blocks(handle, inode, 0, ext4_ext_pblock(&newex),
                                 ext4_ext_get_actual_len(&newex), 0);
                goto out2;
        }
 
        /* previous routine could use block we allocated */
-       newblock = ext_pblock(&newex);
+       newblock = ext4_ext_pblock(&newex);
        allocated = ext4_ext_get_actual_len(&newex);
        if (allocated > map->m_len)
                allocated = map->m_len;
@@ -3729,7 +3667,7 @@ retry:
                        printk(KERN_ERR "%s: ext4_ext_map_blocks "
                                    "returned error inode#%lu, block=%u, "
                                    "max_blocks=%u", __func__,
-                                   inode->i_ino, block, max_blocks);
+                                   inode->i_ino, map.m_lblk, max_blocks);
 #endif
                        ext4_mark_inode_dirty(handle, inode);
                        ret2 = ext4_journal_stop(handle);
index ee92b66d45589c4ec97d6b7b2e5d93d871df5320..5a5c55ddceef2171e133867969f41dec431f2c39 100644 (file)
@@ -130,8 +130,50 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
        return dquot_file_open(inode, filp);
 }
 
+/*
+ * ext4_llseek() copied from generic_file_llseek() to handle both
+ * block-mapped and extent-mapped maxbytes values. It should
+ * otherwise be identical to generic_file_llseek().
+ */
+loff_t ext4_llseek(struct file *file, loff_t offset, int origin)
+{
+       struct inode *inode = file->f_mapping->host;
+       loff_t maxbytes;
+
+       if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
+               maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
+       else
+               maxbytes = inode->i_sb->s_maxbytes;
+       mutex_lock(&inode->i_mutex);
+       switch (origin) {
+       case SEEK_END:
+               offset += inode->i_size;
+               break;
+       case SEEK_CUR:
+               if (offset == 0) {
+                       mutex_unlock(&inode->i_mutex);
+                       return file->f_pos;
+               }
+               offset += file->f_pos;
+               break;
+       }
+
+       if (offset < 0 || offset > maxbytes) {
+               mutex_unlock(&inode->i_mutex);
+               return -EINVAL;
+       }
+
+       if (offset != file->f_pos) {
+               file->f_pos = offset;
+               file->f_version = 0;
+       }
+       mutex_unlock(&inode->i_mutex);
+
+       return offset;
+}
+
 const struct file_operations ext4_file_operations = {
-       .llseek         = generic_file_llseek,
+       .llseek         = ext4_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
        .aio_read       = generic_file_aio_read,
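
The user-visible effect of the .llseek switch: on a block-mapped (non-extent) file, seeking past s_bitmap_maxbytes now fails with EINVAL instead of being checked only against the much larger extent-format s_maxbytes. A small userspace illustration — the path is hypothetical and the ~2 TiB limit assumes 4 KiB blocks:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/mnt/ext4/blockmapped-file", O_RDONLY);
            if (fd < 0)
                    return 1;
            /* 32 TiB is beyond the block-mapped limit (~2 TiB with 4 KiB
             * blocks), so ext4_llseek() makes this fail with EINVAL. */
            if (lseek(fd, (off_t)1 << 45, SEEK_SET) == (off_t)-1)
                    perror("lseek");        /* lseek: Invalid argument */
            close(fd);
            return 0;
    }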
index 3f3ff5ee8f9d620b70c4d4f3adf119cde0b343af..c1a7bc923cf6084c84f32b91294ec5feecf8b92c 100644 (file)
 
 #include <trace/events/ext4.h>
 
+static void dump_completed_IO(struct inode *inode)
+{
+#ifdef EXT4_DEBUG
+       struct list_head *cur, *before, *after;
+       ext4_io_end_t *io, *io0, *io1;
+       unsigned long flags;
+
+       if (list_empty(&EXT4_I(inode)->i_completed_io_list)) {
+               ext4_debug("inode %lu completed_io list is empty\n", inode->i_ino);
+               return;
+       }
+
+       ext4_debug("Dump inode %lu completed_io list\n", inode->i_ino);
+       spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
+       list_for_each_entry(io, &EXT4_I(inode)->i_completed_io_list, list) {
+               cur = &io->list;
+               before = cur->prev;
+               io0 = container_of(before, ext4_io_end_t, list);
+               after = cur->next;
+               io1 = container_of(after, ext4_io_end_t, list);
+
+               ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
+                           io, inode->i_ino, io0, io1);
+       }
+       spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);
+#endif
+}
+
+/*
+ * This function is called from ext4_sync_file().
+ *
+ * When IO is completed, the work to convert unwritten extents to
+ * written is queued on a workqueue but may not get scheduled
+ * immediately. When fsync is called, we need to ensure the
+ * conversion is complete before fsync returns.
+ * The inode keeps a list of pending/completed IO that may need
+ * the conversion. This function walks through the list and converts
+ * the unwritten extents of each completed IO to written.
+ * The function returns 0 once every completed IO has been converted,
+ * or the first conversion error encountered.
+ */
+static int flush_completed_IO(struct inode *inode)
+{
+       ext4_io_end_t *io;
+       struct ext4_inode_info *ei = EXT4_I(inode);
+       unsigned long flags;
+       int ret = 0;
+       int ret2 = 0;
+
+       if (list_empty(&ei->i_completed_io_list))
+               return ret;
+
+       dump_completed_IO(inode);
+       spin_lock_irqsave(&ei->i_completed_io_lock, flags);
+       while (!list_empty(&ei->i_completed_io_list)) {
+               io = list_entry(ei->i_completed_io_list.next,
+                               ext4_io_end_t, list);
+               /*
+                * Call ext4_end_io_nolock() to convert completed
+                * IO to written.
+                *
+                * When ext4_sync_file() is called, the workqueue may
+                * already be about to flush the work corresponding to
+                * this io structure, and it will be upset if it finds
+                * that the io structure for the work it is about to run
+                * has been freed.
+                *
+                * Thus we need to keep the io structure valid here even
+                * after the conversion has finished. The io structure
+                * carries a flag to avoid converting twice, from both
+                * fsync and the background workqueue.
+                */
+               spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
+               ret = ext4_end_io_nolock(io);
+               spin_lock_irqsave(&ei->i_completed_io_lock, flags);
+               if (ret < 0)
+                       ret2 = ret;
+               else
+                       list_del_init(&io->list);
+       }
+       spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
+       return (ret2 < 0) ? ret2 : 0;
+}
+
 /*
  * If we're not journaling and this is a just-created file, we have to
  * sync our parent directory (if it was freshly created) since
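
As its comment notes, flush_completed_IO() is called from ext4_sync_file(); a minimal sketch of that call site — simplified, since the real fsync path goes on to handle the journal commit and barrier:

    /* Sketch only: fsync drains the completed-IO list first, so every
     * unwritten-to-written conversion is done before fsync returns. */
    static int ext4_sync_file_sketch(struct inode *inode)
    {
            int ret = flush_completed_IO(inode);

            if (ret < 0)
                    return ret;
            /* ... force a journal commit / issue a cache flush as needed ... */
            return 0;
    }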
index 45853e0d1f218a673809fb23522b7bdc5966d9e0..1ce240a23ebb84963ce2126b78f126657fd7d6df 100644 (file)
@@ -50,7 +50,7 @@
  * need to use it within a single byte (to ensure we get endianness right).
  * We can use memset for the rest of the bitmap as there are no other users.
  */
-void mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
+void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
 {
        int i;
 
@@ -65,9 +65,10 @@ void mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
 }
 
 /* Initializes an uninitialized inode bitmap */
-unsigned ext4_init_inode_bitmap(struct super_block *sb, struct buffer_head *bh,
-                               ext4_group_t block_group,
-                               struct ext4_group_desc *gdp)
+static unsigned ext4_init_inode_bitmap(struct super_block *sb,
+                                      struct buffer_head *bh,
+                                      ext4_group_t block_group,
+                                      struct ext4_group_desc *gdp)
 {
        struct ext4_sb_info *sbi = EXT4_SB(sb);
 
@@ -85,7 +86,7 @@ unsigned ext4_init_inode_bitmap(struct super_block *sb, struct buffer_head *bh,
        }
 
        memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
-       mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
+       ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
                        bh->b_data);
 
        return EXT4_INODES_PER_GROUP(sb);
@@ -107,6 +108,7 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
        desc = ext4_get_group_desc(sb, block_group, NULL);
        if (!desc)
                return NULL;
+
        bitmap_blk = ext4_inode_bitmap(sb, desc);
        bh = sb_getblk(sb, bitmap_blk);
        if (unlikely(!bh)) {
@@ -123,6 +125,7 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
                unlock_buffer(bh);
                return bh;
        }
+
        ext4_lock_group(sb, block_group);
        if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
                ext4_init_inode_bitmap(sb, bh, block_group, desc);
@@ -133,6 +136,7 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
                return bh;
        }
        ext4_unlock_group(sb, block_group);
+
        if (buffer_uptodate(bh)) {
                /*
                 * if not uninit if bh is uptodate,
@@ -411,8 +415,8 @@ struct orlov_stats {
  * for a particular block group or flex_bg.  If flex_size is 1, then g
  * is a block group number; otherwise it is flex_bg number.
  */
-void get_orlov_stats(struct super_block *sb, ext4_group_t g,
-                      int flex_size, struct orlov_stats *stats)
+static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
+                           int flex_size, struct orlov_stats *stats)
 {
        struct ext4_group_desc *desc;
        struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups;
@@ -712,8 +716,17 @@ static int ext4_claim_inode(struct super_block *sb,
 {
        int free = 0, retval = 0, count;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
+       struct ext4_group_info *grp = ext4_get_group_info(sb, group);
        struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);
 
+       /*
+        * We have to be sure that new inode allocation does not race with
+        * inode table initialization; otherwise we may end up
+        * allocating and writing a new inode right before
+        * sb_issue_zeroout() runs, which would then overwrite our new
+        * inode with zeroes. So we take alloc_sem to prevent that.
+        */
+       down_read(&grp->alloc_sem);
        ext4_lock_group(sb, group);
        if (ext4_set_bit(ino, inode_bitmap_bh->b_data)) {
                /* not a free inode */
@@ -724,6 +737,7 @@ static int ext4_claim_inode(struct super_block *sb,
        if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
                        ino > EXT4_INODES_PER_GROUP(sb)) {
                ext4_unlock_group(sb, group);
+               up_read(&grp->alloc_sem);
                ext4_error(sb, "reserved inode or inode > inodes count - "
                           "block_group = %u, inode=%lu", group,
                           ino + group * EXT4_INODES_PER_GROUP(sb));
@@ -772,6 +786,7 @@ static int ext4_claim_inode(struct super_block *sb,
        gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
 err_ret:
        ext4_unlock_group(sb, group);
+       up_read(&grp->alloc_sem);
        return retval;
 }
 
@@ -1205,3 +1220,109 @@ unsigned long ext4_count_dirs(struct super_block * sb)
        }
        return count;
 }
+
+/*
+ * Zeroes the not-yet-zeroed part of an inode table - i.e. writes zeroes
+ * through the whole remainder of the table. Must be called without any
+ * spinlock held. On an active filesystem the only caller is the
+ * ext4lazyinit thread, so we do not need any special locks; however, we
+ * have to prevent inode allocation from the current group, so we take
+ * the alloc_sem lock to block ext4_claim_inode() until we are finished.
+ */
+int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
+                         int barrier)
+{
+       struct ext4_group_info *grp = ext4_get_group_info(sb, group);
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       struct ext4_group_desc *gdp = NULL;
+       struct buffer_head *group_desc_bh;
+       handle_t *handle;
+       ext4_fsblk_t blk;
+       int num, ret = 0, used_blks = 0;
+
+       /* This should not happen, but check it just to be sure */
+       if (sb->s_flags & MS_RDONLY) {
+               ret = 1;
+               goto out;
+       }
+
+       gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
+       if (!gdp)
+               goto out;
+
+       /*
+        * We do not need to lock this, because we are the only one
+        * handling this flag.
+        */
+       if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
+               goto out;
+
+       handle = ext4_journal_start_sb(sb, 1);
+       if (IS_ERR(handle)) {
+               ret = PTR_ERR(handle);
+               goto out;
+       }
+
+       down_write(&grp->alloc_sem);
+       /*
+        * If inode bitmap was already initialized there may be some
+        * used inodes so we need to skip blocks with used inodes in
+        * inode table.
+        */
+       if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)))
+               used_blks = DIV_ROUND_UP((EXT4_INODES_PER_GROUP(sb) -
+                           ext4_itable_unused_count(sb, gdp)),
+                           sbi->s_inodes_per_block);
+
+       if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) {
+               ext4_error(sb, "Something is wrong with group %u\n"
+                          "Used itable blocks: %d"
+                          "itable unused count: %u\n",
+                          group, used_blks,
+                          ext4_itable_unused_count(sb, gdp));
+               ret = 1;
+               goto out;
+       }
+
+       blk = ext4_inode_table(sb, gdp) + used_blks;
+       num = sbi->s_itb_per_group - used_blks;
+
+       BUFFER_TRACE(group_desc_bh, "get_write_access");
+       ret = ext4_journal_get_write_access(handle,
+                                           group_desc_bh);
+       if (ret)
+               goto err_out;
+
+       /*
+        * Skip zeroout if the inode table is full. But we set the ZEROED
+        * flag anyway, because obviously, when it is full it does not need
+        * further zeroing.
+        */
+       if (unlikely(num == 0))
+               goto skip_zeroout;
+
+       ext4_debug("going to zero out inode table in group %d\n",
+                  group);
+       ret = sb_issue_zeroout(sb, blk, num, GFP_NOFS);
+       if (ret < 0)
+               goto err_out;
+       if (barrier)
+               blkdev_issue_flush(sb->s_bdev, GFP_NOFS, NULL);
+
+skip_zeroout:
+       ext4_lock_group(sb, group);
+       gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED);
+       gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
+       ext4_unlock_group(sb, group);
+
+       BUFFER_TRACE(group_desc_bh,
+                    "call ext4_handle_dirty_metadata");
+       ret = ext4_handle_dirty_metadata(handle, NULL,
+                                        group_desc_bh);
+
+err_out:
+       up_write(&grp->alloc_sem);
+       ext4_journal_stop(handle);
+out:
+       return ret;
+}
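
A quick arithmetic check of the used_blks computation above: with 4 KiB blocks and 256-byte inodes there are 16 inodes per inode-table block, so if a group has 8192 inodes and bg_itable_unused reports 8000, then used_blks = DIV_ROUND_UP(8192 - 8000, 16) = 12; zeroing starts 12 blocks into the table and covers the remaining s_itb_per_group - 12 blocks.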
index 49635ef236f84d40b0090a78a1a96860fae1fb6b..2d6c6c8c036df496f0b44e549d800d26494abdef 100644 (file)
@@ -60,6 +60,12 @@ static inline int ext4_begin_ordered_truncate(struct inode *inode,
 }
 
 static void ext4_invalidatepage(struct page *page, unsigned long offset);
+static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
+                                  struct buffer_head *bh_result, int create);
+static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
+static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
+static int __ext4_journalled_writepage(struct page *page, unsigned int len);
+static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
 
 /*
  * Test whether an inode is a fast symlink.
@@ -755,6 +761,11 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
                 * parent to disk.
                 */
                bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
+               if (unlikely(!bh)) {
+                       err = -EIO;
+                       goto failed;
+               }
+
                branch[n].bh = bh;
                lock_buffer(bh);
                BUFFER_TRACE(bh, "call get_create_access");
@@ -1207,8 +1218,10 @@ static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
                                break;
                        idx++;
                        num++;
-                       if (num >= max_pages)
+                       if (num >= max_pages) {
+                               done = 1;
                                break;
+                       }
                }
                pagevec_release(&pvec);
        }
@@ -1995,16 +2008,23 @@ static void ext4_da_page_release_reservation(struct page *page,
  *
  * As pages are already locked by write_cache_pages(), we can't use it
  */
-static int mpage_da_submit_io(struct mpage_da_data *mpd)
+static int mpage_da_submit_io(struct mpage_da_data *mpd,
+                             struct ext4_map_blocks *map)
 {
-       long pages_skipped;
        struct pagevec pvec;
        unsigned long index, end;
        int ret = 0, err, nr_pages, i;
        struct inode *inode = mpd->inode;
        struct address_space *mapping = inode->i_mapping;
+       loff_t size = i_size_read(inode);
+       unsigned int len, block_start;
+       struct buffer_head *bh, *page_bufs = NULL;
+       int journal_data = ext4_should_journal_data(inode);
+       sector_t pblock = 0, cur_logical = 0;
+       struct ext4_io_submit io_submit;
 
        BUG_ON(mpd->next_page <= mpd->first_page);
+       memset(&io_submit, 0, sizeof(io_submit));
        /*
         * We need to start from the first_page to the next_page - 1
         * to make sure we also write the mapped dirty buffer_heads.
@@ -2020,122 +2040,108 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd)
                if (nr_pages == 0)
                        break;
                for (i = 0; i < nr_pages; i++) {
+                       int commit_write = 0, redirty_page = 0;
                        struct page *page = pvec.pages[i];
 
                        index = page->index;
                        if (index > end)
                                break;
+
+                       if (index == size >> PAGE_CACHE_SHIFT)
+                               len = size & ~PAGE_CACHE_MASK;
+                       else
+                               len = PAGE_CACHE_SIZE;
+                       if (map) {
+                               cur_logical = index << (PAGE_CACHE_SHIFT -
+                                                       inode->i_blkbits);
+                               pblock = map->m_pblk + (cur_logical -
+                                                       map->m_lblk);
+                       }
                        index++;
 
                        BUG_ON(!PageLocked(page));
                        BUG_ON(PageWriteback(page));
 
-                       pages_skipped = mpd->wbc->pages_skipped;
-                       err = mapping->a_ops->writepage(page, mpd->wbc);
-                       if (!err && (pages_skipped == mpd->wbc->pages_skipped))
-                               /*
-                                * have successfully written the page
-                                * without skipping the same
-                                */
-                               mpd->pages_written++;
                        /*
-                        * In error case, we have to continue because
-                        * remaining pages are still locked
-                        * XXX: unlock and re-dirty them?
+                        * If the page does not have buffers (for
+                        * whatever reason), try to create them using
+                        * __block_write_begin.  If this fails,
+                        * redirty the page and move on.
                         */
-                       if (ret == 0)
-                               ret = err;
-               }
-               pagevec_release(&pvec);
-       }
-       return ret;
-}
-
-/*
- * mpage_put_bnr_to_bhs - walk blocks and assign them actual numbers
- *
- * the function goes through all passed space and put actual disk
- * block numbers into buffer heads, dropping BH_Delay and BH_Unwritten
- */
-static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd,
-                                struct ext4_map_blocks *map)
-{
-       struct inode *inode = mpd->inode;
-       struct address_space *mapping = inode->i_mapping;
-       int blocks = map->m_len;
-       sector_t pblock = map->m_pblk, cur_logical;
-       struct buffer_head *head, *bh;
-       pgoff_t index, end;
-       struct pagevec pvec;
-       int nr_pages, i;
-
-       index = map->m_lblk >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
-       end = (map->m_lblk + blocks - 1) >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
-       cur_logical = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
-
-       pagevec_init(&pvec, 0);
-
-       while (index <= end) {
-               /* XXX: optimize tail */
-               nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
-               if (nr_pages == 0)
-                       break;
-               for (i = 0; i < nr_pages; i++) {
-                       struct page *page = pvec.pages[i];
-
-                       index = page->index;
-                       if (index > end)
-                               break;
-                       index++;
-
-                       BUG_ON(!PageLocked(page));
-                       BUG_ON(PageWriteback(page));
-                       BUG_ON(!page_has_buffers(page));
-
-                       bh = page_buffers(page);
-                       head = bh;
-
-                       /* skip blocks out of the range */
-                       do {
-                               if (cur_logical >= map->m_lblk)
-                                       break;
-                               cur_logical++;
-                       } while ((bh = bh->b_this_page) != head);
+                       if (!page_has_buffers(page)) {
+                               if (__block_write_begin(page, 0, len,
+                                               noalloc_get_block_write)) {
+                               redirty_page:
+                                       redirty_page_for_writepage(mpd->wbc,
+                                                                  page);
+                                       unlock_page(page);
+                                       continue;
+                               }
+                               commit_write = 1;
+                       }
 
+                       bh = page_bufs = page_buffers(page);
+                       block_start = 0;
                        do {
-                               if (cur_logical >= map->m_lblk + blocks)
-                                       break;
-
-                               if (buffer_delay(bh) || buffer_unwritten(bh)) {
-
-                                       BUG_ON(bh->b_bdev != inode->i_sb->s_bdev);
-
+                               if (!bh)
+                                       goto redirty_page;
+                               if (map && (cur_logical >= map->m_lblk) &&
+                                   (cur_logical <= (map->m_lblk +
+                                                    (map->m_len - 1)))) {
                                        if (buffer_delay(bh)) {
                                                clear_buffer_delay(bh);
                                                bh->b_blocknr = pblock;
-                                       } else {
-                                               /*
-                                                * unwritten already should have
-                                                * blocknr assigned. Verify that
-                                                */
-                                               clear_buffer_unwritten(bh);
-                                               BUG_ON(bh->b_blocknr != pblock);
                                        }
+                                       if (buffer_unwritten(bh) ||
+                                           buffer_mapped(bh))
+                                               BUG_ON(bh->b_blocknr != pblock);
+                                       if (map->m_flags & EXT4_MAP_UNINIT)
+                                               set_buffer_uninit(bh);
+                                       clear_buffer_unwritten(bh);
+                               }
 
-                               } else if (buffer_mapped(bh))
-                                       BUG_ON(bh->b_blocknr != pblock);
-
-                               if (map->m_flags & EXT4_MAP_UNINIT)
-                                       set_buffer_uninit(bh);
+                               /* redirty page if block allocation undone */
+                               if (buffer_delay(bh) || buffer_unwritten(bh))
+                                       redirty_page = 1;
+                               bh = bh->b_this_page;
+                               block_start += bh->b_size;
                                cur_logical++;
                                pblock++;
-                       } while ((bh = bh->b_this_page) != head);
+                       } while (bh != page_bufs);
+
+                       if (redirty_page)
+                               goto redirty_page;
+
+                       if (commit_write)
+                               /* mark the buffer_heads as dirty & uptodate */
+                               block_commit_write(page, 0, len);
+
+                       /*
+                        * Delalloc doesn't support data journalling,
+                        * but eventually maybe we'll lift this
+                        * restriction.
+                        */
+                       if (unlikely(journal_data && PageChecked(page)))
+                               err = __ext4_journalled_writepage(page, len);
+                       else
+                               err = ext4_bio_write_page(&io_submit, page,
+                                                         len, mpd->wbc);
+
+                       if (!err)
+                               mpd->pages_written++;
+                       /*
+                        * In the error case, we have to continue because
+                        * the remaining pages are still locked
+                        */
+                       if (ret == 0)
+                               ret = err;
                }
                pagevec_release(&pvec);
        }
+       ext4_io_submit(&io_submit);
+       return ret;
 }
 
-
 static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd,
                                        sector_t logical, long blk_cnt)
 {
@@ -2187,35 +2193,32 @@ static void ext4_print_free_blocks(struct inode *inode)
 }
 
 /*
- * mpage_da_map_blocks - go through given space
+ * mpage_da_map_and_submit - go through the given space, map the
+ *       blocks if necessary, and then submit them for I/O
  *
  * @mpd - bh describing space
  *
  * The function skips space we know is already mapped to disk blocks.
  *
  */
-static int mpage_da_map_blocks(struct mpage_da_data *mpd)
+static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
 {
        int err, blks, get_blocks_flags;
-       struct ext4_map_blocks map;
+       struct ext4_map_blocks map, *mapp = NULL;
        sector_t next = mpd->b_blocknr;
        unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits;
        loff_t disksize = EXT4_I(mpd->inode)->i_disksize;
        handle_t *handle = NULL;
 
        /*
-        * We consider only non-mapped and non-allocated blocks
-        */
-       if ((mpd->b_state  & (1 << BH_Mapped)) &&
-               !(mpd->b_state & (1 << BH_Delay)) &&
-               !(mpd->b_state & (1 << BH_Unwritten)))
-               return 0;
-
-       /*
-        * If we didn't accumulate anything to write simply return
+        * If the blocks are mapped already, or we couldn't accumulate
+        * any blocks, then proceed immediately to the submission stage.
         */
-       if (!mpd->b_size)
-               return 0;
+       if ((mpd->b_size == 0) ||
+           ((mpd->b_state  & (1 << BH_Mapped)) &&
+            !(mpd->b_state & (1 << BH_Delay)) &&
+            !(mpd->b_state & (1 << BH_Unwritten))))
+               goto submit_io;
 
        handle = ext4_journal_current_handle();
        BUG_ON(!handle);
@@ -2252,17 +2255,18 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
 
                err = blks;
                /*
-                * If get block returns with error we simply
-                * return. Later writepage will redirty the page and
-                * writepages will find the dirty page again
+                * If get block returns EAGAIN or ENOSPC and there
+                * appear to be free blocks, we will call
+                * ext4_writepage() for all of the pages, which will
+                * just redirty them.
                 */
                if (err == -EAGAIN)
-                       return 0;
+                       goto submit_io;
 
                if (err == -ENOSPC &&
                    ext4_count_free_blocks(sb)) {
                        mpd->retval = err;
-                       return 0;
+                       goto submit_io;
                }
 
                /*
@@ -2287,10 +2291,11 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
                /* invalidate all the pages */
                ext4_da_block_invalidatepages(mpd, next,
                                mpd->b_size >> mpd->inode->i_blkbits);
-               return err;
+               return;
        }
        BUG_ON(blks == 0);
 
+       mapp = &map;
        if (map.m_flags & EXT4_MAP_NEW) {
                struct block_device *bdev = mpd->inode->i_sb->s_bdev;
                int i;
@@ -2299,18 +2304,11 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
                        unmap_underlying_metadata(bdev, map.m_pblk + i);
        }
 
-       /*
-        * If blocks are delayed marked, we need to
-        * put actual blocknr and drop delayed bit
-        */
-       if ((mpd->b_state & (1 << BH_Delay)) ||
-           (mpd->b_state & (1 << BH_Unwritten)))
-               mpage_put_bnr_to_bhs(mpd, &map);
-
        if (ext4_should_order_data(mpd->inode)) {
                err = ext4_jbd2_file_inode(handle, mpd->inode);
                if (err)
-                       return err;
+                       /* This only happens if the journal is aborted */
+                       return;
        }
 
        /*
@@ -2321,10 +2319,16 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
                disksize = i_size_read(mpd->inode);
        if (disksize > EXT4_I(mpd->inode)->i_disksize) {
                ext4_update_i_disksize(mpd->inode, disksize);
-               return ext4_mark_inode_dirty(handle, mpd->inode);
+               err = ext4_mark_inode_dirty(handle, mpd->inode);
+               if (err)
+                       ext4_error(mpd->inode->i_sb,
+                                  "Failed to mark inode %lu dirty",
+                                  mpd->inode->i_ino);
        }
 
-       return 0;
+submit_io:
+       mpage_da_submit_io(mpd, mapp);
+       mpd->io_done = 1;
 }
 
 #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \
@@ -2401,9 +2405,7 @@ flush_it:
         * We couldn't merge the block to our extent, so we
         * need to flush current  extent and start new one
         */
-       if (mpage_da_map_blocks(mpd) == 0)
-               mpage_da_submit_io(mpd);
-       mpd->io_done = 1;
+       mpage_da_map_and_submit(mpd);
        return;
 }
 
@@ -2422,9 +2424,9 @@ static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
  * The function finds extents of pages and scan them for all blocks.
  */
 static int __mpage_da_writepage(struct page *page,
-                               struct writeback_control *wbc, void *data)
+                               struct writeback_control *wbc,
+                               struct mpage_da_data *mpd)
 {
-       struct mpage_da_data *mpd = data;
        struct inode *inode = mpd->inode;
        struct buffer_head *bh, *head;
        sector_t logical;
@@ -2435,15 +2437,13 @@ static int __mpage_da_writepage(struct page *page,
        if (mpd->next_page != page->index) {
                /*
                 * Nope, we can't. So, we map non-allocated blocks
-                * and start IO on them using writepage()
+                * and start IO on them
                 */
                if (mpd->next_page != mpd->first_page) {
-                       if (mpage_da_map_blocks(mpd) == 0)
-                               mpage_da_submit_io(mpd);
+                       mpage_da_map_and_submit(mpd);
                        /*
                         * skip rest of the page in the page_vec
                         */
-                       mpd->io_done = 1;
                        redirty_page_for_writepage(wbc, page);
                        unlock_page(page);
                        return MPAGE_DA_EXTENT_TAIL;
@@ -2622,6 +2622,7 @@ static int __ext4_journalled_writepage(struct page *page,
        int ret = 0;
        int err;
 
+       ClearPageChecked(page);
        page_bufs = page_buffers(page);
        BUG_ON(!page_bufs);
        walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one);
@@ -2699,7 +2700,7 @@ static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
 static int ext4_writepage(struct page *page,
                          struct writeback_control *wbc)
 {
-       int ret = 0;
+       int ret = 0, commit_write = 0;
        loff_t size;
        unsigned int len;
        struct buffer_head *page_bufs = NULL;
@@ -2712,71 +2713,46 @@ static int ext4_writepage(struct page *page,
        else
                len = PAGE_CACHE_SIZE;
 
-       if (page_has_buffers(page)) {
-               page_bufs = page_buffers(page);
-               if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
-                                       ext4_bh_delay_or_unwritten)) {
-                       /*
-                        * We don't want to do  block allocation
-                        * So redirty the page and return
-                        * We may reach here when we do a journal commit
-                        * via journal_submit_inode_data_buffers.
-                        * If we don't have mapping block we just ignore
-                        * them. We can also reach here via shrink_page_list
-                        */
+       /*
+        * If the page does not have buffers (for whatever reason),
+        * try to create them using __block_write_begin.  If this
+        * fails, redirty the page and move on.
+        */
+       if (!page_has_buffers(page)) {
+               if (__block_write_begin(page, 0, len,
+                                       noalloc_get_block_write)) {
+               redirty_page:
                        redirty_page_for_writepage(wbc, page);
                        unlock_page(page);
                        return 0;
                }
-       } else {
+               commit_write = 1;
+       }
+       page_bufs = page_buffers(page);
+       if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
+                             ext4_bh_delay_or_unwritten)) {
                /*
-                * The test for page_has_buffers() is subtle:
-                * We know the page is dirty but it lost buffers. That means
-                * that at some moment in time after write_begin()/write_end()
-                * has been called all buffers have been clean and thus they
-                * must have been written at least once. So they are all
-                * mapped and we can happily proceed with mapping them
-                * and writing the page.
-                *
-                * Try to initialize the buffer_heads and check whether
-                * all are mapped and non delay. We don't want to
-                * do block allocation here.
+                * We don't want to do block allocation, so redirty
+                * the page and return.  We may reach here when we do
+                * a journal commit via
+                * journal_submit_inode_data_buffers.  If we don't
+                * have mapping blocks we just ignore them.  We can
+                * also reach here via shrink_page_list.
                 */
-               ret = __block_write_begin(page, 0, len,
-                                         noalloc_get_block_write);
-               if (!ret) {
-                       page_bufs = page_buffers(page);
-                       /* check whether all are mapped and non delay */
-                       if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
-                                               ext4_bh_delay_or_unwritten)) {
-                               redirty_page_for_writepage(wbc, page);
-                               unlock_page(page);
-                               return 0;
-                       }
-               } else {
-                       /*
-                        * We can't do block allocation here
-                        * so just redity the page and unlock
-                        * and return
-                        */
-                       redirty_page_for_writepage(wbc, page);
-                       unlock_page(page);
-                       return 0;
-               }
+               goto redirty_page;
+       }
+       if (commit_write)
                /* now mark the buffer_heads as dirty and uptodate */
                block_commit_write(page, 0, len);
-       }
 
-       if (PageChecked(page) && ext4_should_journal_data(inode)) {
+       if (PageChecked(page) && ext4_should_journal_data(inode))
                /*
                 * It's mmapped pagecache.  Add buffers and journal it.  There
                 * doesn't seem much point in redirtying the page here.
                 */
-               ClearPageChecked(page);
                return __ext4_journalled_writepage(page, len);
-       }
 
-       if (page_bufs && buffer_uninit(page_bufs)) {
+       if (buffer_uninit(page_bufs)) {
                ext4_set_bh_endio(page_bufs, inode);
                ret = block_write_full_page_endio(page, noalloc_get_block_write,
                                            wbc, ext4_end_io_buffer_write);
@@ -2823,25 +2799,32 @@ static int ext4_da_writepages_trans_blocks(struct inode *inode)
  */
 static int write_cache_pages_da(struct address_space *mapping,
                                struct writeback_control *wbc,
-                               struct mpage_da_data *mpd)
+                               struct mpage_da_data *mpd,
+                               pgoff_t *done_index)
 {
        int ret = 0;
        int done = 0;
        struct pagevec pvec;
-       int nr_pages;
+       unsigned nr_pages;
        pgoff_t index;
        pgoff_t end;            /* Inclusive */
        long nr_to_write = wbc->nr_to_write;
+       int tag;
 
        pagevec_init(&pvec, 0);
        index = wbc->range_start >> PAGE_CACHE_SHIFT;
        end = wbc->range_end >> PAGE_CACHE_SHIFT;
 
+       if (wbc->sync_mode == WB_SYNC_ALL)
+               tag = PAGECACHE_TAG_TOWRITE;
+       else
+               tag = PAGECACHE_TAG_DIRTY;
+
+       *done_index = index;
        while (!done && (index <= end)) {
                int i;
 
-               nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
-                             PAGECACHE_TAG_DIRTY,
+               nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
                              min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
                if (nr_pages == 0)
                        break;
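The tag selection above uses the writeback livelock-avoidance scheme from mm/page-writeback.c: under WB_SYNC_ALL the range is first re-tagged from DIRTY to TOWRITE (see the retry path later in this patch), and the lookup then walks the TOWRITE tag, so pages dirtied while this pass runs cannot be found again by it. A minimal sketch of the idiom, with the per-page work elided:

    if (wbc->sync_mode == WB_SYNC_ALL)
            tag_pages_for_writeback(mapping, index, end);
    while ((nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
                                          tag, PAGEVEC_SIZE))) {
            /* lock, revalidate and write out each page, recording
             * page->index + 1 in *done_index as we go */
            pagevec_release(&pvec);
    }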
@@ -2861,6 +2844,8 @@ static int write_cache_pages_da(struct address_space *mapping,
                                break;
                        }
 
+                       *done_index = page->index + 1;
+
                        lock_page(page);
 
                        /*
@@ -2946,6 +2931,8 @@ static int ext4_da_writepages(struct address_space *mapping,
        long desired_nr_to_write, nr_to_writebump = 0;
        loff_t range_start = wbc->range_start;
        struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
+       pgoff_t done_index = 0;
+       pgoff_t end;
 
        trace_ext4_da_writepages(inode, wbc);
 
@@ -2981,8 +2968,11 @@ static int ext4_da_writepages(struct address_space *mapping,
                wbc->range_start = index << PAGE_CACHE_SHIFT;
                wbc->range_end  = LLONG_MAX;
                wbc->range_cyclic = 0;
-       } else
+               end = -1;
+       } else {
                index = wbc->range_start >> PAGE_CACHE_SHIFT;
+               end = wbc->range_end >> PAGE_CACHE_SHIFT;
+       }
 
        /*
         * This works around two forms of stupidity.  The first is in
@@ -3001,9 +2991,12 @@ static int ext4_da_writepages(struct address_space *mapping,
         * sbi->max_writeback_mb_bump whichever is smaller.
         */
        max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT);
-       if (!range_cyclic && range_whole)
-               desired_nr_to_write = wbc->nr_to_write * 8;
-       else
+       if (!range_cyclic && range_whole) {
+               if (wbc->nr_to_write == LONG_MAX)
+                       desired_nr_to_write = wbc->nr_to_write;
+               else
+                       desired_nr_to_write = wbc->nr_to_write * 8;
+       } else
                desired_nr_to_write = ext4_num_dirty_pages(inode, index,
                                                           max_pages);
        if (desired_nr_to_write > max_pages)
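Worked numbers for the cap applied at the end of this hunk (illustrative, assuming 4 KiB pages, so PAGE_CACHE_SHIFT is 12, and the default s_max_writeback_mb_bump of 128): max_pages = 128 << (20 - 12) = 32768 pages, i.e. 128 MiB. A typical nr_to_write of 1024 bumped eightfold gives 8192, well under the cap, while the new LONG_MAX special case avoids the signed overflow that multiplying LONG_MAX by 8 would cause.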
@@ -3020,6 +3013,9 @@ static int ext4_da_writepages(struct address_space *mapping,
        pages_skipped = wbc->pages_skipped;
 
 retry:
+       if (wbc->sync_mode == WB_SYNC_ALL)
+               tag_pages_for_writeback(mapping, index, end);
+
        while (!ret && wbc->nr_to_write > 0) {
 
                /*
@@ -3058,16 +3054,14 @@ retry:
                mpd.io_done = 0;
                mpd.pages_written = 0;
                mpd.retval = 0;
-               ret = write_cache_pages_da(mapping, wbc, &mpd);
+               ret = write_cache_pages_da(mapping, wbc, &mpd, &done_index);
                /*
                 * If we have a contiguous extent of pages and we
                 * haven't done the I/O yet, map the blocks and submit
                 * them for I/O.
                 */
                if (!mpd.io_done && mpd.next_page != mpd.first_page) {
-                       if (mpage_da_map_blocks(&mpd) == 0)
-                               mpage_da_submit_io(&mpd);
-                       mpd.io_done = 1;
+                       mpage_da_map_and_submit(&mpd);
                        ret = MPAGE_DA_EXTENT_TAIL;
                }
                trace_ext4_da_write_pages(inode, &mpd);
@@ -3114,14 +3108,13 @@ retry:
                         __func__, wbc->nr_to_write, ret);
 
        /* Update index */
-       index += pages_written;
        wbc->range_cyclic = range_cyclic;
        if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
                /*
                 * set the writeback_index so that range_cyclic
                 * mode will write it back later
                 */
-               mapping->writeback_index = index;
+               mapping->writeback_index = done_index;
 
 out_writepages:
        wbc->nr_to_write -= nr_to_writebump;
@@ -3456,15 +3449,6 @@ ext4_readpages(struct file *file, struct address_space *mapping,
        return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
 }
 
-static void ext4_free_io_end(ext4_io_end_t *io)
-{
-       BUG_ON(!io);
-       if (io->page)
-               put_page(io->page);
-       iput(io->inode);
-       kfree(io);
-}
-
 static void ext4_invalidatepage_free_endio(struct page *page, unsigned long offset)
 {
        struct buffer_head *head, *bh;
@@ -3641,173 +3625,6 @@ static int ext4_get_block_write(struct inode *inode, sector_t iblock,
                               EXT4_GET_BLOCKS_IO_CREATE_EXT);
 }
 
-static void dump_completed_IO(struct inode * inode)
-{
-#ifdef EXT4_DEBUG
-       struct list_head *cur, *before, *after;
-       ext4_io_end_t *io, *io0, *io1;
-       unsigned long flags;
-
-       if (list_empty(&EXT4_I(inode)->i_completed_io_list)){
-               ext4_debug("inode %lu completed_io list is empty\n", inode->i_ino);
-               return;
-       }
-
-       ext4_debug("Dump inode %lu completed_io list \n", inode->i_ino);
-       spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
-       list_for_each_entry(io, &EXT4_I(inode)->i_completed_io_list, list){
-               cur = &io->list;
-               before = cur->prev;
-               io0 = container_of(before, ext4_io_end_t, list);
-               after = cur->next;
-               io1 = container_of(after, ext4_io_end_t, list);
-
-               ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
-                           io, inode->i_ino, io0, io1);
-       }
-       spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);
-#endif
-}
-
-/*
- * check a range of space and convert unwritten extents to written.
- */
-static int ext4_end_io_nolock(ext4_io_end_t *io)
-{
-       struct inode *inode = io->inode;
-       loff_t offset = io->offset;
-       ssize_t size = io->size;
-       int ret = 0;
-
-       ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p,"
-                  "list->prev 0x%p\n",
-                  io, inode->i_ino, io->list.next, io->list.prev);
-
-       if (list_empty(&io->list))
-               return ret;
-
-       if (io->flag != EXT4_IO_UNWRITTEN)
-               return ret;
-
-       ret = ext4_convert_unwritten_extents(inode, offset, size);
-       if (ret < 0) {
-               printk(KERN_EMERG "%s: failed to convert unwritten"
-                       "extents to written extents, error is %d"
-                       " io is still on inode %lu aio dio list\n",
-                       __func__, ret, inode->i_ino);
-               return ret;
-       }
-
-       if (io->iocb)
-               aio_complete(io->iocb, io->result, 0);
-       /* clear the DIO AIO unwritten flag */
-       io->flag = 0;
-       return ret;
-}
-
-/*
- * work on completed aio dio IO, to convert unwritten extents to extents
- */
-static void ext4_end_io_work(struct work_struct *work)
-{
-       ext4_io_end_t           *io = container_of(work, ext4_io_end_t, work);
-       struct inode            *inode = io->inode;
-       struct ext4_inode_info  *ei = EXT4_I(inode);
-       unsigned long           flags;
-       int                     ret;
-
-       mutex_lock(&inode->i_mutex);
-       ret = ext4_end_io_nolock(io);
-       if (ret < 0) {
-               mutex_unlock(&inode->i_mutex);
-               return;
-       }
-
-       spin_lock_irqsave(&ei->i_completed_io_lock, flags);
-       if (!list_empty(&io->list))
-               list_del_init(&io->list);
-       spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
-       mutex_unlock(&inode->i_mutex);
-       ext4_free_io_end(io);
-}
-
-/*
- * This function is called from ext4_sync_file().
- *
- * When IO is completed, the work to convert unwritten extents to
- * written is queued on workqueue but may not get immediately
- * scheduled. When fsync is called, we need to ensure the
- * conversion is complete before fsync returns.
- * The inode keeps track of a list of pending/completed IO that
- * might needs to do the conversion. This function walks through
- * the list and convert the related unwritten extents for completed IO
- * to written.
- * The function return the number of pending IOs on success.
- */
-int flush_completed_IO(struct inode *inode)
-{
-       ext4_io_end_t *io;
-       struct ext4_inode_info *ei = EXT4_I(inode);
-       unsigned long flags;
-       int ret = 0;
-       int ret2 = 0;
-
-       if (list_empty(&ei->i_completed_io_list))
-               return ret;
-
-       dump_completed_IO(inode);
-       spin_lock_irqsave(&ei->i_completed_io_lock, flags);
-       while (!list_empty(&ei->i_completed_io_list)){
-               io = list_entry(ei->i_completed_io_list.next,
-                               ext4_io_end_t, list);
-               /*
-                * Calling ext4_end_io_nolock() to convert completed
-                * IO to written.
-                *
-                * When ext4_sync_file() is called, run_queue() may already
-                * about to flush the work corresponding to this io structure.
-                * It will be upset if it founds the io structure related
-                * to the work-to-be schedule is freed.
-                *
-                * Thus we need to keep the io structure still valid here after
-                * convertion finished. The io structure has a flag to
-                * avoid double converting from both fsync and background work
-                * queue work.
-                */
-               spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
-               ret = ext4_end_io_nolock(io);
-               spin_lock_irqsave(&ei->i_completed_io_lock, flags);
-               if (ret < 0)
-                       ret2 = ret;
-               else
-                       list_del_init(&io->list);
-       }
-       spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
-       return (ret2 < 0) ? ret2 : 0;
-}
-
-static ext4_io_end_t *ext4_init_io_end (struct inode *inode, gfp_t flags)
-{
-       ext4_io_end_t *io = NULL;
-
-       io = kmalloc(sizeof(*io), flags);
-
-       if (io) {
-               igrab(inode);
-               io->inode = inode;
-               io->flag = 0;
-               io->offset = 0;
-               io->size = 0;
-               io->page = NULL;
-               io->iocb = NULL;
-               io->result = 0;
-               INIT_WORK(&io->work, ext4_end_io_work);
-               INIT_LIST_HEAD(&io->list);
-       }
-
-       return io;
-}
-
 static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
                            ssize_t size, void *private, int ret,
                            bool is_async)
@@ -3827,7 +3644,7 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
                  size);
 
        /* if not aio dio with unwritten extents, just free io and return */
-       if (io_end->flag != EXT4_IO_UNWRITTEN){
+       if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
                ext4_free_io_end(io_end);
                iocb->private = NULL;
 out:
@@ -3844,14 +3661,14 @@ out:
        }
        wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;
 
-       /* queue the work to convert unwritten extents to written */
-       queue_work(wq, &io_end->work);
-
        /* Add the io_end to per-inode completed aio dio list*/
        ei = EXT4_I(io_end->inode);
        spin_lock_irqsave(&ei->i_completed_io_lock, flags);
        list_add_tail(&io_end->list, &ei->i_completed_io_list);
        spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
+
+       /* queue the work to convert unwritten extents to written */
+       queue_work(wq, &io_end->work);
        iocb->private = NULL;
 }
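The reordering in this hunk is the substantive fix: a queued work item may begin running as soon as queue_work() returns, and ext4_end_io_work() (removed above, presumably now living elsewhere in this series) deletes the io_end from i_completed_io_list when it completes, so queuing the work before the list_add_tail() left a window in which the worker could run against an io_end not yet on the list. Queuing only after the insertion closes that window.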
 
@@ -3872,7 +3689,7 @@ static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate)
                goto out;
        }
 
-       io_end->flag = EXT4_IO_UNWRITTEN;
+       io_end->flag = EXT4_IO_END_UNWRITTEN;
        inode = io_end->inode;
 
        /* Add the io_end to per-inode completed io list*/
@@ -5463,6 +5280,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
 {
        struct inode *inode = dentry->d_inode;
        int error, rc = 0;
+       int orphan = 0;
        const unsigned int ia_valid = attr->ia_valid;
 
        error = inode_change_ok(inode, attr);
@@ -5518,8 +5336,10 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
                        error = PTR_ERR(handle);
                        goto err_out;
                }
-
-               error = ext4_orphan_add(handle, inode);
+               if (ext4_handle_valid(handle)) {
+                       error = ext4_orphan_add(handle, inode);
+                       orphan = 1;
+               }
                EXT4_I(inode)->i_disksize = attr->ia_size;
                rc = ext4_mark_inode_dirty(handle, inode);
                if (!error)
@@ -5537,6 +5357,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
                                        goto err_out;
                                }
                                ext4_orphan_del(handle, inode);
+                               orphan = 0;
                                ext4_journal_stop(handle);
                                goto err_out;
                        }
@@ -5559,7 +5380,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
         * If the call to ext4_truncate failed to get a transaction handle at
         * all, we need to clean up the in-core orphan list manually.
         */
-       if (inode->i_nlink)
+       if (orphan && inode->i_nlink)
                ext4_orphan_del(NULL, inode);
 
        if (!rc && (ia_valid & ATTR_MODE))
@@ -5642,7 +5463,7 @@ static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
  *
  * Also account for superblock, inode, quota and xattr blocks
  */
-int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
+static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
 {
        ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
        int gdpblocks;
index 42f77b1dc72d810f48deb583baeb0f984c6fbc61..c58eba34724a4281f1cb8bb405ab607ef09b49ff 100644 (file)
 static struct kmem_cache *ext4_pspace_cachep;
 static struct kmem_cache *ext4_ac_cachep;
 static struct kmem_cache *ext4_free_ext_cachep;
+
+/* We create slab caches for groupinfo data structures based on the
+ * superblock block size.  There will be one cache, shared by all
+ * mounted filesystems, for each unique s_blocksize_bits. */
+#define NR_GRPINFO_CACHES      \
+       (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE + 1)
+static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];
+
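Plugging in the block-size limits from ext4.h (EXT4_MIN_BLOCK_LOG_SIZE is 10 and EXT4_MAX_BLOCK_LOG_SIZE is 16, i.e. 1 KiB through 64 KiB blocks), NR_GRPINFO_CACHES works out to 7. A standalone sketch of the index arithmetic that get_groupinfo_cache() uses further down:

    #define EXT4_MIN_BLOCK_LOG_SIZE 10              /* 1 KiB  */
    #define EXT4_MAX_BLOCK_LOG_SIZE 16              /* 64 KiB */
    #define NR_GRPINFO_CACHES \
            (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE + 1)

    /* a 4 KiB filesystem has s_blocksize_bits == 12 */
    int cache_index = 12 - EXT4_MIN_BLOCK_LOG_SIZE; /* slot 2 of 7 */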
 static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
                                        ext4_group_t group);
 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
@@ -938,6 +946,85 @@ out:
        return err;
 }
 
+/*
+ * Lock the group_info alloc_sem of all the groups
+ * belonging to the same buddy cache page.  This
+ * makes sure no other parallel operation on the
+ * buddy cache can happen while we hold the buddy
+ * cache lock.
+ */
+static int ext4_mb_get_buddy_cache_lock(struct super_block *sb,
+                                       ext4_group_t group)
+{
+       int i;
+       int block, pnum;
+       int blocks_per_page;
+       int groups_per_page;
+       ext4_group_t ngroups = ext4_get_groups_count(sb);
+       ext4_group_t first_group;
+       struct ext4_group_info *grp;
+
+       blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
+       /*
+        * the buddy cache inode stores the block bitmap
+        * and buddy information in consecutive blocks.
+        * So for each group we need two blocks.
+        */
+       block = group * 2;
+       pnum = block / blocks_per_page;
+       first_group = pnum * blocks_per_page / 2;
+
+       groups_per_page = blocks_per_page >> 1;
+       if (groups_per_page == 0)
+               groups_per_page = 1;
+       /* read all groups the page covers into the cache */
+       for (i = 0; i < groups_per_page; i++) {
+
+               if ((first_group + i) >= ngroups)
+                       break;
+               grp = ext4_get_group_info(sb, first_group + i);
+               /* take each group's write allocation
+                * semaphore.  This makes sure there is
+                * no block allocation going on in any
+                * of those groups.
+                */
+               down_write_nested(&grp->alloc_sem, i);
+       }
+       return i;
+}
+
+static void ext4_mb_put_buddy_cache_lock(struct super_block *sb,
+                                        ext4_group_t group, int locked_group)
+{
+       int i;
+       int block, pnum;
+       int blocks_per_page;
+       ext4_group_t first_group;
+       struct ext4_group_info *grp;
+
+       blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
+       /*
+        * the buddy cache inode stores the block bitmap
+        * and buddy information in consecutive blocks.
+        * So for each group we need two blocks.
+        */
+       block = group * 2;
+       pnum = block / blocks_per_page;
+       first_group = pnum * blocks_per_page / 2;
+       /* release locks on all the groups */
+       for (i = 0; i < locked_group; i++) {
+
+               grp = ext4_get_group_info(sb, first_group + i);
+               /* release each group's write allocation
+                * semaphore.  This re-enables block
+                * allocation in any of those
+                * groups.
+                */
+               up_write(&grp->alloc_sem);
+       }
+
+}
+
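Worked numbers for the group-to-page mapping used by both helpers above (illustrative: 4 KiB pages and 1 KiB blocks, so blocks_per_page == 4): group 5 keeps its block bitmap and buddy data in blocks 10 and 11 of the buddy cache inode; those fall on page pnum = 10 / 4 = 2, whose first covered group is 2 * 4 / 2 = 4, and the page covers groups_per_page = 4 / 2 = 2 groups, namely 4 and 5, so both get locked.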
 /*
  * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
  * block group lock of all groups for this page; do not hold the BG lock when
@@ -1915,84 +2002,6 @@ static int ext4_mb_good_group(struct ext4_allocation_context *ac,
        return 0;
 }
 
-/*
- * lock the group_info alloc_sem of all the groups
- * belonging to the same buddy cache page. This
- * make sure other parallel operation on the buddy
- * cache doesn't happen  whild holding the buddy cache
- * lock
- */
-int ext4_mb_get_buddy_cache_lock(struct super_block *sb, ext4_group_t group)
-{
-       int i;
-       int block, pnum;
-       int blocks_per_page;
-       int groups_per_page;
-       ext4_group_t ngroups = ext4_get_groups_count(sb);
-       ext4_group_t first_group;
-       struct ext4_group_info *grp;
-
-       blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
-       /*
-        * the buddy cache inode stores the block bitmap
-        * and buddy information in consecutive blocks.
-        * So for each group we need two blocks.
-        */
-       block = group * 2;
-       pnum = block / blocks_per_page;
-       first_group = pnum * blocks_per_page / 2;
-
-       groups_per_page = blocks_per_page >> 1;
-       if (groups_per_page == 0)
-               groups_per_page = 1;
-       /* read all groups the page covers into the cache */
-       for (i = 0; i < groups_per_page; i++) {
-
-               if ((first_group + i) >= ngroups)
-                       break;
-               grp = ext4_get_group_info(sb, first_group + i);
-               /* take all groups write allocation
-                * semaphore. This make sure there is
-                * no block allocation going on in any
-                * of that groups
-                */
-               down_write_nested(&grp->alloc_sem, i);
-       }
-       return i;
-}
-
-void ext4_mb_put_buddy_cache_lock(struct super_block *sb,
-                                       ext4_group_t group, int locked_group)
-{
-       int i;
-       int block, pnum;
-       int blocks_per_page;
-       ext4_group_t first_group;
-       struct ext4_group_info *grp;
-
-       blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
-       /*
-        * the buddy cache inode stores the block bitmap
-        * and buddy information in consecutive blocks.
-        * So for each group we need two blocks.
-        */
-       block = group * 2;
-       pnum = block / blocks_per_page;
-       first_group = pnum * blocks_per_page / 2;
-       /* release locks on all the groups */
-       for (i = 0; i < locked_group; i++) {
-
-               grp = ext4_get_group_info(sb, first_group + i);
-               /* take all groups write allocation
-                * semaphore. This make sure there is
-                * no block allocation going on in any
-                * of that groups
-                */
-               up_write(&grp->alloc_sem);
-       }
-
-}
-
 static noinline_for_stack int
 ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
 {
@@ -2233,15 +2242,24 @@ static const struct file_operations ext4_mb_seq_groups_fops = {
        .release        = seq_release,
 };
 
+static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
+{
+       int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
+       struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index];
+
+       BUG_ON(!cachep);
+       return cachep;
+}
 
 /* Create and initialize ext4_group_info data for the given group. */
 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
                          struct ext4_group_desc *desc)
 {
-       int i, len;
+       int i;
        int metalen = 0;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_group_info **meta_group_info;
+       struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
 
        /*
         * First check if this group is the first of a reserved block.
@@ -2261,22 +2279,16 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
                        meta_group_info;
        }
 
-       /*
-        * calculate needed size. if change bb_counters size,
-        * don't forget about ext4_mb_generate_buddy()
-        */
-       len = offsetof(typeof(**meta_group_info),
-                      bb_counters[sb->s_blocksize_bits + 2]);
-
        meta_group_info =
                sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
        i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
 
-       meta_group_info[i] = kzalloc(len, GFP_KERNEL);
+       meta_group_info[i] = kmem_cache_alloc(cachep, GFP_KERNEL);
        if (meta_group_info[i] == NULL) {
                printk(KERN_ERR "EXT4-fs: can't allocate buddy mem\n");
                goto exit_group_info;
        }
+       memset(meta_group_info[i], 0, kmem_cache_size(cachep));
        set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
                &(meta_group_info[i]->bb_state));
 
@@ -2331,6 +2343,7 @@ static int ext4_mb_init_backend(struct super_block *sb)
        int num_meta_group_infos_max;
        int array_size;
        struct ext4_group_desc *desc;
+       struct kmem_cache *cachep;
 
        /* This is the number of blocks used by GDT */
        num_meta_group_infos = (ngroups + EXT4_DESC_PER_BLOCK(sb) -
@@ -2389,8 +2402,9 @@ static int ext4_mb_init_backend(struct super_block *sb)
        return 0;
 
 err_freebuddy:
+       cachep = get_groupinfo_cache(sb->s_blocksize_bits);
        while (i-- > 0)
-               kfree(ext4_get_group_info(sb, i));
+               kmem_cache_free(cachep, ext4_get_group_info(sb, i));
        i = num_meta_group_infos;
        while (i-- > 0)
                kfree(sbi->s_group_info[i]);
@@ -2407,19 +2421,48 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
        unsigned offset;
        unsigned max;
        int ret;
+       int cache_index;
+       struct kmem_cache *cachep;
+       char *namep = NULL;
 
        i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets);
 
        sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
        if (sbi->s_mb_offsets == NULL) {
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto out;
        }
 
        i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_maxs);
        sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
        if (sbi->s_mb_maxs == NULL) {
-               kfree(sbi->s_mb_offsets);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       cache_index = sb->s_blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
+       cachep = ext4_groupinfo_caches[cache_index];
+       if (!cachep) {
+               char name[32];
+               int len = offsetof(struct ext4_group_info,
+                                       bb_counters[sb->s_blocksize_bits + 2]);
+
+               sprintf(name, "ext4_groupinfo_%d", sb->s_blocksize_bits);
+               namep = kstrdup(name, GFP_KERNEL);
+               if (!namep) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+
+               /* The name string (retrieved via kmem_cache_name())
+                * must be freed when we destroy the slab cache */
+               cachep = kmem_cache_create(namep, len, 0,
+                                            SLAB_RECLAIM_ACCOUNT, NULL);
+               if (!cachep) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+               ext4_groupinfo_caches[cache_index] = cachep;
        }
 
        /* order 0 is regular bitmap */
@@ -2440,9 +2483,7 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
        /* init file for buddy data */
        ret = ext4_mb_init_backend(sb);
        if (ret != 0) {
-               kfree(sbi->s_mb_offsets);
-               kfree(sbi->s_mb_maxs);
-               return ret;
+               goto out;
        }
 
        spin_lock_init(&sbi->s_md_lock);
@@ -2457,9 +2498,8 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
 
        sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
        if (sbi->s_locality_groups == NULL) {
-               kfree(sbi->s_mb_offsets);
-               kfree(sbi->s_mb_maxs);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto out;
        }
        for_each_possible_cpu(i) {
                struct ext4_locality_group *lg;
@@ -2476,7 +2516,13 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
 
        if (sbi->s_journal)
                sbi->s_journal->j_commit_callback = release_blocks_on_commit;
-       return 0;
+out:
+       if (ret) {
+               kfree(sbi->s_mb_offsets);
+               kfree(sbi->s_mb_maxs);
+               kfree(namep);
+       }
+       return ret;
 }
 
 /* need to called with the ext4 group lock held */
@@ -2504,6 +2550,7 @@ int ext4_mb_release(struct super_block *sb)
        int num_meta_group_infos;
        struct ext4_group_info *grinfo;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
+       struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
 
        if (sbi->s_group_info) {
                for (i = 0; i < ngroups; i++) {
@@ -2514,7 +2561,7 @@ int ext4_mb_release(struct super_block *sb)
                        ext4_lock_group(sb, i);
                        ext4_mb_cleanup_pa(grinfo);
                        ext4_unlock_group(sb, i);
-                       kfree(grinfo);
+                       kmem_cache_free(cachep, grinfo);
                }
                num_meta_group_infos = (ngroups +
                                EXT4_DESC_PER_BLOCK(sb) - 1) >>
@@ -2558,7 +2605,7 @@ int ext4_mb_release(struct super_block *sb)
        return 0;
 }
 
-static inline void ext4_issue_discard(struct super_block *sb,
+static inline int ext4_issue_discard(struct super_block *sb,
                ext4_group_t block_group, ext4_grpblk_t block, int count)
 {
        int ret;
@@ -2568,10 +2615,11 @@ static inline void ext4_issue_discard(struct super_block *sb,
        trace_ext4_discard_blocks(sb,
                        (unsigned long long) discard_block, count);
        ret = sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
-       if (ret == EOPNOTSUPP) {
+       if (ret == -EOPNOTSUPP) {
                ext4_warning(sb, "discard not supported, disabling");
                clear_opt(EXT4_SB(sb)->s_mount_opt, DISCARD);
        }
+       return ret;
 }
 
 /*
@@ -2659,28 +2707,22 @@ static void ext4_remove_debugfs_entry(void)
 
 #endif
 
-int __init init_ext4_mballoc(void)
+int __init ext4_init_mballoc(void)
 {
-       ext4_pspace_cachep =
-               kmem_cache_create("ext4_prealloc_space",
-                                    sizeof(struct ext4_prealloc_space),
-                                    0, SLAB_RECLAIM_ACCOUNT, NULL);
+       ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
+                                       SLAB_RECLAIM_ACCOUNT);
        if (ext4_pspace_cachep == NULL)
                return -ENOMEM;
 
-       ext4_ac_cachep =
-               kmem_cache_create("ext4_alloc_context",
-                                    sizeof(struct ext4_allocation_context),
-                                    0, SLAB_RECLAIM_ACCOUNT, NULL);
+       ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
+                                   SLAB_RECLAIM_ACCOUNT);
        if (ext4_ac_cachep == NULL) {
                kmem_cache_destroy(ext4_pspace_cachep);
                return -ENOMEM;
        }
 
-       ext4_free_ext_cachep =
-               kmem_cache_create("ext4_free_block_extents",
-                                    sizeof(struct ext4_free_data),
-                                    0, SLAB_RECLAIM_ACCOUNT, NULL);
+       ext4_free_ext_cachep = KMEM_CACHE(ext4_free_data,
+                                         SLAB_RECLAIM_ACCOUNT);
        if (ext4_free_ext_cachep == NULL) {
                kmem_cache_destroy(ext4_pspace_cachep);
                kmem_cache_destroy(ext4_ac_cachep);
@@ -2690,8 +2732,9 @@ int __init init_ext4_mballoc(void)
        return 0;
 }
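The KMEM_CACHE() convenience macro used above comes from include/linux/slab.h; to a close approximation it expands as sketched below, which is why the three open-coded kmem_cache_create() calls collapse so neatly and why the cache names change from hand-written strings to struct names:

    #define KMEM_CACHE(__struct, __flags)                                \
            kmem_cache_create(#__struct, sizeof(struct __struct),        \
                              __alignof__(struct __struct), (__flags), NULL)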
 
-void exit_ext4_mballoc(void)
+void ext4_exit_mballoc(void)
 {
+       int i;
        /*
         * Wait for completion of call_rcu()'s on ext4_pspace_cachep
         * before destroying the slab cache.
@@ -2700,6 +2743,15 @@ void exit_ext4_mballoc(void)
        kmem_cache_destroy(ext4_pspace_cachep);
        kmem_cache_destroy(ext4_ac_cachep);
        kmem_cache_destroy(ext4_free_ext_cachep);
+
+       for (i = 0; i < NR_GRPINFO_CACHES; i++) {
+               struct kmem_cache *cachep = ext4_groupinfo_caches[i];
+               if (cachep) {
+                       char *name = (char *)kmem_cache_name(cachep);
+                       kmem_cache_destroy(cachep);
+                       kfree(name);
+               }
+       }
        ext4_remove_debugfs_entry();
 }
 
@@ -3536,8 +3588,7 @@ static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
  */
 static noinline_for_stack int
 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
-                       struct ext4_prealloc_space *pa,
-                       struct ext4_allocation_context *ac)
+                       struct ext4_prealloc_space *pa)
 {
        struct super_block *sb = e4b->bd_sb;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
@@ -3555,11 +3606,6 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
        BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
        end = bit + pa->pa_len;
 
-       if (ac) {
-               ac->ac_sb = sb;
-               ac->ac_inode = pa->pa_inode;
-       }
-
        while (bit < end) {
                bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
                if (bit >= end)
@@ -3570,16 +3616,9 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
                         (unsigned) next - bit, (unsigned) group);
                free += next - bit;
 
-               if (ac) {
-                       ac->ac_b_ex.fe_group = group;
-                       ac->ac_b_ex.fe_start = bit;
-                       ac->ac_b_ex.fe_len = next - bit;
-                       ac->ac_b_ex.fe_logical = 0;
-                       trace_ext4_mballoc_discard(ac);
-               }
-
-               trace_ext4_mb_release_inode_pa(sb, ac, pa, grp_blk_start + bit,
-                                              next - bit);
+               trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
+               trace_ext4_mb_release_inode_pa(sb, pa->pa_inode, pa,
+                                              grp_blk_start + bit, next - bit);
                mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
                bit = next + 1;
        }
@@ -3602,29 +3641,19 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
 
 static noinline_for_stack int
 ext4_mb_release_group_pa(struct ext4_buddy *e4b,
-                               struct ext4_prealloc_space *pa,
-                               struct ext4_allocation_context *ac)
+                               struct ext4_prealloc_space *pa)
 {
        struct super_block *sb = e4b->bd_sb;
        ext4_group_t group;
        ext4_grpblk_t bit;
 
-       trace_ext4_mb_release_group_pa(sb, ac, pa);
+       trace_ext4_mb_release_group_pa(sb, pa);
        BUG_ON(pa->pa_deleted == 0);
        ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
        BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
        mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
        atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
-
-       if (ac) {
-               ac->ac_sb = sb;
-               ac->ac_inode = NULL;
-               ac->ac_b_ex.fe_group = group;
-               ac->ac_b_ex.fe_start = bit;
-               ac->ac_b_ex.fe_len = pa->pa_len;
-               ac->ac_b_ex.fe_logical = 0;
-               trace_ext4_mballoc_discard(ac);
-       }
+       trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
 
        return 0;
 }
@@ -3645,7 +3674,6 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
        struct ext4_group_info *grp = ext4_get_group_info(sb, group);
        struct buffer_head *bitmap_bh = NULL;
        struct ext4_prealloc_space *pa, *tmp;
-       struct ext4_allocation_context *ac;
        struct list_head list;
        struct ext4_buddy e4b;
        int err;
@@ -3674,9 +3702,6 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
                needed = EXT4_BLOCKS_PER_GROUP(sb) + 1;
 
        INIT_LIST_HEAD(&list);
-       ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
-       if (ac)
-               ac->ac_sb = sb;
 repeat:
        ext4_lock_group(sb, group);
        list_for_each_entry_safe(pa, tmp,
@@ -3731,9 +3756,9 @@ repeat:
                spin_unlock(pa->pa_obj_lock);
 
                if (pa->pa_type == MB_GROUP_PA)
-                       ext4_mb_release_group_pa(&e4b, pa, ac);
+                       ext4_mb_release_group_pa(&e4b, pa);
                else
-                       ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac);
+                       ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
 
                list_del(&pa->u.pa_tmp_list);
                call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
@@ -3741,8 +3766,6 @@ repeat:
 
 out:
        ext4_unlock_group(sb, group);
-       if (ac)
-               kmem_cache_free(ext4_ac_cachep, ac);
        ext4_mb_unload_buddy(&e4b);
        put_bh(bitmap_bh);
        return free;
@@ -3763,7 +3786,6 @@ void ext4_discard_preallocations(struct inode *inode)
        struct super_block *sb = inode->i_sb;
        struct buffer_head *bitmap_bh = NULL;
        struct ext4_prealloc_space *pa, *tmp;
-       struct ext4_allocation_context *ac;
        ext4_group_t group = 0;
        struct list_head list;
        struct ext4_buddy e4b;
@@ -3779,11 +3801,6 @@ void ext4_discard_preallocations(struct inode *inode)
 
        INIT_LIST_HEAD(&list);
 
-       ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
-       if (ac) {
-               ac->ac_sb = sb;
-               ac->ac_inode = inode;
-       }
 repeat:
        /* first, collect all pa's in the inode */
        spin_lock(&ei->i_prealloc_lock);
@@ -3853,7 +3870,7 @@ repeat:
 
                ext4_lock_group(sb, group);
                list_del(&pa->pa_group_list);
-               ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac);
+               ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
                ext4_unlock_group(sb, group);
 
                ext4_mb_unload_buddy(&e4b);
@@ -3862,8 +3879,6 @@ repeat:
                list_del(&pa->u.pa_tmp_list);
                call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
        }
-       if (ac)
-               kmem_cache_free(ext4_ac_cachep, ac);
 }
 
 /*
@@ -4061,14 +4076,10 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
        struct ext4_buddy e4b;
        struct list_head discard_list;
        struct ext4_prealloc_space *pa, *tmp;
-       struct ext4_allocation_context *ac;
 
        mb_debug(1, "discard locality group preallocation\n");
 
        INIT_LIST_HEAD(&discard_list);
-       ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
-       if (ac)
-               ac->ac_sb = sb;
 
        spin_lock(&lg->lg_prealloc_lock);
        list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
@@ -4120,15 +4131,13 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
                }
                ext4_lock_group(sb, group);
                list_del(&pa->pa_group_list);
-               ext4_mb_release_group_pa(&e4b, pa, ac);
+               ext4_mb_release_group_pa(&e4b, pa);
                ext4_unlock_group(sb, group);
 
                ext4_mb_unload_buddy(&e4b);
                list_del(&pa->u.pa_tmp_list);
                call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
        }
-       if (ac)
-               kmem_cache_free(ext4_ac_cachep, ac);
 }
 
 /*
@@ -4492,7 +4501,6 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
 {
        struct buffer_head *bitmap_bh = NULL;
        struct super_block *sb = inode->i_sb;
-       struct ext4_allocation_context *ac = NULL;
        struct ext4_group_desc *gdp;
        unsigned long freed = 0;
        unsigned int overflow;
@@ -4532,6 +4540,8 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
                        if (!bh)
                                tbh = sb_find_get_block(inode->i_sb,
                                                        block + i);
+                       if (unlikely(!tbh))
+                               continue;
                        ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
                                    inode, tbh, block + i);
                }
@@ -4547,12 +4557,6 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
        if (!ext4_should_writeback_data(inode))
                flags |= EXT4_FREE_BLOCKS_METADATA;
 
-       ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
-       if (ac) {
-               ac->ac_inode = inode;
-               ac->ac_sb = sb;
-       }
-
 do_more:
        overflow = 0;
        ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
@@ -4610,12 +4614,7 @@ do_more:
                        BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
        }
 #endif
-       if (ac) {
-               ac->ac_b_ex.fe_group = block_group;
-               ac->ac_b_ex.fe_start = bit;
-               ac->ac_b_ex.fe_len = count;
-               trace_ext4_mballoc_free(ac);
-       }
+       trace_ext4_mballoc_free(sb, inode, block_group, bit, count);
 
        err = ext4_mb_load_buddy(sb, block_group, &e4b);
        if (err)
@@ -4641,12 +4640,12 @@ do_more:
                 * with group lock held. generate_buddy look at
                 * them with group lock_held
                 */
+               if (test_opt(sb, DISCARD))
+                       ext4_issue_discard(sb, block_group, bit, count);
                ext4_lock_group(sb, block_group);
                mb_clear_bits(bitmap_bh->b_data, bit, count);
                mb_free_blocks(inode, &e4b, bit, count);
                ext4_mb_return_to_preallocation(inode, &e4b, block, count);
-               if (test_opt(sb, DISCARD))
-                       ext4_issue_discard(sb, block_group, bit, count);
        }
 
        ret = ext4_free_blks_count(sb, gdp) + count;
@@ -4686,7 +4685,190 @@ error_return:
                dquot_free_block(inode, freed);
        brelse(bitmap_bh);
        ext4_std_error(sb, err);
-       if (ac)
-               kmem_cache_free(ext4_ac_cachep, ac);
        return;
 }
+
+/**
+ * ext4_trim_extent -- function to TRIM one single free extent in the group
+ * @sb:                super block for the file system
+ * @start:     starting block of the free extent in the alloc. group
+ * @count:     number of blocks to TRIM
+ * @group:     alloc. group we are working with
+ * @e4b:       ext4 buddy for the group
+ *
+ * Trim "count" blocks starting at "start" in the "group".  To assure that no
+ * one will allocate those blocks, mark them as used in the buddy bitmap.
+ * This must be called under the group lock.
+ */
+static int ext4_trim_extent(struct super_block *sb, int start, int count,
+               ext4_group_t group, struct ext4_buddy *e4b)
+{
+       struct ext4_free_extent ex;
+       int ret = 0;
+
+       assert_spin_locked(ext4_group_lock_ptr(sb, group));
+
+       ex.fe_start = start;
+       ex.fe_group = group;
+       ex.fe_len = count;
+
+       /*
+        * Mark blocks used, so no one can reuse them while
+        * being trimmed.
+        */
+       mb_mark_used(e4b, &ex);
+       ext4_unlock_group(sb, group);
+
+       ret = ext4_issue_discard(sb, group, start, count);
+       if (ret)
+               ext4_std_error(sb, ret);
+
+       ext4_lock_group(sb, group);
+       mb_free_blocks(NULL, e4b, start, ex.fe_len);
+       return ret;
+}
+
+/**
+ * ext4_trim_all_free -- function to trim all free space in alloc. group
+ * @sb:                        super block for file system
+ * @e4b:               ext4 buddy
+ * @start:             first group block to examine
+ * @max:               last group block to examine
+ * @minblocks:         minimum extent block count
+ *
+ * ext4_trim_all_free walks through the group's buddy bitmap searching for
+ * free extents.  When a free extent of at least @minblocks is found, it is
+ * marked as used in the group buddy bitmap, ext4_trim_extent is called to
+ * issue a TRIM command on it, and the extent is then freed again in the
+ * buddy bitmap.  This is repeated until the whole group has been scanned.
+ */
+ext4_grpblk_t ext4_trim_all_free(struct super_block *sb, struct ext4_buddy *e4b,
+               ext4_grpblk_t start, ext4_grpblk_t max, ext4_grpblk_t minblocks)
+{
+       void *bitmap;
+       ext4_grpblk_t next, count = 0;
+       ext4_group_t group;
+       int ret = 0;
+
+       BUG_ON(e4b == NULL);
+
+       bitmap = e4b->bd_bitmap;
+       group = e4b->bd_group;
+       start = (e4b->bd_info->bb_first_free > start) ?
+               e4b->bd_info->bb_first_free : start;
+       ext4_lock_group(sb, group);
+
+       while (start < max) {
+               start = mb_find_next_zero_bit(bitmap, max, start);
+               if (start >= max)
+                       break;
+               next = mb_find_next_bit(bitmap, max, start);
+
+               if ((next - start) >= minblocks) {
+                       ret = ext4_trim_extent(sb, start,
+                               next - start, group, e4b);
+                       if (ret < 0)
+                               break;
+                       count += next - start;
+               }
+               start = next + 1;
+
+               if (fatal_signal_pending(current)) {
+                       count = -ERESTARTSYS;
+                       break;
+               }
+
+               if (need_resched()) {
+                       ext4_unlock_group(sb, group);
+                       cond_resched();
+                       ext4_lock_group(sb, group);
+               }
+
+               if ((e4b->bd_info->bb_free - count) < minblocks)
+                       break;
+       }
+       ext4_unlock_group(sb, group);
+
+       ext4_debug("trimmed %d blocks in the group %d\n",
+               count, group);
+
+       if (ret < 0)
+               count = ret;
+
+       return count;
+}
+
+/**
+ * ext4_trim_fs() -- trim ioctl handler
+ * @sb:                        superblock for filesystem
+ * @range:             fstrim_range structure
+ *
+ * start:      first byte to trim
+ * len:                number of bytes to trim from start
+ * minlen:     minimum extent length in bytes
+ *
+ * ext4_trim_fs goes through all the allocation groups containing bytes from
+ * start to start+len.  For each such group, ext4_trim_all_free is invoked
+ * to trim all free space.
+ */
+int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
+{
+       struct ext4_buddy e4b;
+       ext4_group_t first_group, last_group;
+       ext4_group_t group, ngroups = ext4_get_groups_count(sb);
+       ext4_grpblk_t cnt = 0, first_block, last_block;
+       uint64_t start, len, minlen, trimmed;
+       int ret = 0;
+
+       start = range->start >> sb->s_blocksize_bits;
+       len = range->len >> sb->s_blocksize_bits;
+       minlen = range->minlen >> sb->s_blocksize_bits;
+       trimmed = 0;
+
+       if (unlikely(minlen > EXT4_BLOCKS_PER_GROUP(sb)))
+               return -EINVAL;
+
+       /* Determine first and last group to examine based on start and len */
+       ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
+                                    &first_group, &first_block);
+       ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) (start + len),
+                                    &last_group, &last_block);
+       last_group = (last_group > ngroups - 1) ? ngroups - 1 : last_group;
+       last_block = EXT4_BLOCKS_PER_GROUP(sb);
+
+       if (first_group > last_group)
+               return -EINVAL;
+
+       for (group = first_group; group <= last_group; group++) {
+               ret = ext4_mb_load_buddy(sb, group, &e4b);
+               if (ret) {
+                       ext4_error(sb, "Error in loading buddy "
+                                       "information for %u", group);
+                       break;
+               }
+
+               if (len >= EXT4_BLOCKS_PER_GROUP(sb))
+                       len -= (EXT4_BLOCKS_PER_GROUP(sb) - first_block);
+               else
+                       last_block = len;
+
+               if (e4b.bd_info->bb_free >= minlen) {
+                       cnt = ext4_trim_all_free(sb, &e4b, first_block,
+                                               last_block, minlen);
+                       if (cnt < 0) {
+                               ret = cnt;
+                               ext4_mb_unload_buddy(&e4b);
+                               break;
+                       }
+               }
+               ext4_mb_unload_buddy(&e4b);
+               trimmed += cnt;
+               first_block = 0;
+       }
+       range->len = trimmed * sb->s_blocksize;
+
+       return ret;
+}
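A minimal userspace sketch of how ext4_trim_fs() is ultimately driven (assuming the FITRIM ioctl wiring added elsewhere in this series; all values are in bytes, as the conversions at the top of ext4_trim_fs() expect, and "/mnt" is a hypothetical mount point):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>           /* FITRIM, struct fstrim_range */

    int main(void)
    {
            struct fstrim_range range = {
                    .start  = 0,
                    .len    = ~0ULL,        /* whole filesystem */
                    .minlen = 64 * 1024,    /* skip extents under 64 KiB */
            };
            int fd = open("/mnt", O_RDONLY);

            if (fd < 0 || ioctl(fd, FITRIM, &range) < 0) {
                    perror("FITRIM");
                    return 1;
            }
            /* on return, range.len holds the number of bytes trimmed */
            printf("trimmed %llu bytes\n", (unsigned long long)range.len);
            return 0;
    }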
index 1765c2c50a9b9b6c699c134ea44423700d59883c..25f3a974b725d30492f8b4db74f49285ea077db3 100644 (file)
@@ -412,7 +412,7 @@ static int free_ext_idx(handle_t *handle, struct inode *inode,
        struct buffer_head *bh;
        struct ext4_extent_header *eh;
 
-       block = idx_pblock(ix);
+       block = ext4_idx_pblock(ix);
        bh = sb_bread(inode->i_sb, block);
        if (!bh)
                return -EIO;
index 5f1ed9fc913c207d5bcc99f3ce63413a4a931ad8..b9f3e7862f13834b2c166b21a1b96d3e36dde428 100644 (file)
@@ -85,7 +85,7 @@ mext_next_extent(struct inode *inode, struct ext4_ext_path *path,
        if (EXT_LAST_EXTENT(path[ppos].p_hdr) > path[ppos].p_ext) {
                /* leaf block */
                *extent = ++path[ppos].p_ext;
-               path[ppos].p_block = ext_pblock(path[ppos].p_ext);
+               path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);
                return 0;
        }
 
@@ -96,7 +96,7 @@ mext_next_extent(struct inode *inode, struct ext4_ext_path *path,
 
                        /* index block */
                        path[ppos].p_idx++;
-                       path[ppos].p_block = idx_pblock(path[ppos].p_idx);
+                       path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
                        if (path[ppos+1].p_bh)
                                brelse(path[ppos+1].p_bh);
                        path[ppos+1].p_bh =
@@ -111,7 +111,7 @@ mext_next_extent(struct inode *inode, struct ext4_ext_path *path,
                                path[cur_ppos].p_idx =
                                        EXT_FIRST_INDEX(path[cur_ppos].p_hdr);
                                path[cur_ppos].p_block =
-                                       idx_pblock(path[cur_ppos].p_idx);
+                                       ext4_idx_pblock(path[cur_ppos].p_idx);
                                if (path[cur_ppos+1].p_bh)
                                        brelse(path[cur_ppos+1].p_bh);
                                path[cur_ppos+1].p_bh = sb_bread(inode->i_sb,
@@ -133,7 +133,7 @@ mext_next_extent(struct inode *inode, struct ext4_ext_path *path,
                        path[leaf_ppos].p_ext = *extent =
                                EXT_FIRST_EXTENT(path[leaf_ppos].p_hdr);
                        path[leaf_ppos].p_block =
-                                       ext_pblock(path[leaf_ppos].p_ext);
+                                       ext4_ext_pblock(path[leaf_ppos].p_ext);
                        return 0;
                }
        }
@@ -249,7 +249,7 @@ mext_insert_across_blocks(handle_t *handle, struct inode *orig_inode,
                         */
                        o_end->ee_block = end_ext->ee_block;
                        o_end->ee_len = end_ext->ee_len;
-                       ext4_ext_store_pblock(o_end, ext_pblock(end_ext));
+                       ext4_ext_store_pblock(o_end, ext4_ext_pblock(end_ext));
                }
 
                o_start->ee_len = start_ext->ee_len;
@@ -276,7 +276,7 @@ mext_insert_across_blocks(handle_t *handle, struct inode *orig_inode,
                 */
                o_end->ee_block = end_ext->ee_block;
                o_end->ee_len = end_ext->ee_len;
-               ext4_ext_store_pblock(o_end, ext_pblock(end_ext));
+               ext4_ext_store_pblock(o_end, ext4_ext_pblock(end_ext));
 
                /*
                 * Set 0 to the extent block if new_ext was
@@ -361,7 +361,7 @@ mext_insert_inside_block(struct ext4_extent *o_start,
        /* Insert new entry */
        if (new_ext->ee_len) {
                o_start[i] = *new_ext;
-               ext4_ext_store_pblock(&o_start[i++], ext_pblock(new_ext));
+               ext4_ext_store_pblock(&o_start[i++], ext4_ext_pblock(new_ext));
        }
 
        /* Insert end entry */
@@ -488,7 +488,7 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode,
        start_ext.ee_len = end_ext.ee_len = 0;
 
        new_ext.ee_block = cpu_to_le32(*from);
-       ext4_ext_store_pblock(&new_ext, ext_pblock(dext));
+       ext4_ext_store_pblock(&new_ext, ext4_ext_pblock(dext));
        new_ext.ee_len = dext->ee_len;
        new_ext_alen = ext4_ext_get_actual_len(&new_ext);
        new_ext_end = le32_to_cpu(new_ext.ee_block) + new_ext_alen - 1;
@@ -553,7 +553,7 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode,
                copy_extent_status(oext, &end_ext);
                end_ext_alen = ext4_ext_get_actual_len(&end_ext);
                ext4_ext_store_pblock(&end_ext,
-                       (ext_pblock(o_end) + oext_alen - end_ext_alen));
+                       (ext4_ext_pblock(o_end) + oext_alen - end_ext_alen));
                end_ext.ee_block =
                        cpu_to_le32(le32_to_cpu(o_end->ee_block) +
                        oext_alen - end_ext_alen);
@@ -604,7 +604,7 @@ mext_calc_swap_extents(struct ext4_extent *tmp_dext,
        /* When tmp_dext is too large, pick up the target range. */
        diff = donor_off - le32_to_cpu(tmp_dext->ee_block);
 
-       ext4_ext_store_pblock(tmp_dext, ext_pblock(tmp_dext) + diff);
+       ext4_ext_store_pblock(tmp_dext, ext4_ext_pblock(tmp_dext) + diff);
        tmp_dext->ee_block =
                        cpu_to_le32(le32_to_cpu(tmp_dext->ee_block) + diff);
        tmp_dext->ee_len = cpu_to_le16(le16_to_cpu(tmp_dext->ee_len) - diff);
@@ -613,7 +613,7 @@ mext_calc_swap_extents(struct ext4_extent *tmp_dext,
                tmp_dext->ee_len = cpu_to_le16(max_count);
 
        orig_diff = orig_off - le32_to_cpu(tmp_oext->ee_block);
-       ext4_ext_store_pblock(tmp_oext, ext_pblock(tmp_oext) + orig_diff);
+       ext4_ext_store_pblock(tmp_oext, ext4_ext_pblock(tmp_oext) + orig_diff);
 
        /* Adjust extent length if donor extent is larger than orig */
        if (ext4_ext_get_actual_len(tmp_dext) >
index bd39885b599854329922fa443a016e405aeac1f9..92203b8a099f076ebbc0d794b3356289096c663f 100644 (file)
@@ -856,6 +856,7 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
        struct buffer_head *bh_use[NAMEI_RA_SIZE];
        struct buffer_head *bh, *ret = NULL;
        ext4_lblk_t start, block, b;
+       const u8 *name = d_name->name;
        int ra_max = 0;         /* Number of bh's in the readahead
                                   buffer, bh_use[] */
        int ra_ptr = 0;         /* Current index into readahead
@@ -870,6 +871,16 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
        namelen = d_name->len;
        if (namelen > EXT4_NAME_LEN)
                return NULL;
+       if ((namelen <= 2) && (name[0] == '.') &&
+           (name[1] == '.' || name[1] == '\0')) {
+               /*
+                * "." or ".." will only be in the first block
+                * NFS may look up ".."; "." should be handled by the VFS
+                */
+               block = start = 0;
+               nblocks = 1;
+               goto restart;
+       }
        if (is_dx(dir)) {
                bh = ext4_dx_find_entry(dir, d_name, res_dir, &err);
                /*
@@ -960,55 +971,35 @@ cleanup_and_exit:
 static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct qstr *d_name,
                       struct ext4_dir_entry_2 **res_dir, int *err)
 {
-       struct super_block * sb;
+       struct super_block * sb = dir->i_sb;
        struct dx_hash_info     hinfo;
-       u32 hash;
        struct dx_frame frames[2], *frame;
-       struct ext4_dir_entry_2 *de, *top;
        struct buffer_head *bh;
        ext4_lblk_t block;
        int retval;
-       int namelen = d_name->len;
-       const u8 *name = d_name->name;
 
-       sb = dir->i_sb;
-       /* NFS may look up ".." - look at dx_root directory block */
-       if (namelen > 2 || name[0] != '.'||(name[1] != '.' && name[1] != '\0')){
-               if (!(frame = dx_probe(d_name, dir, &hinfo, frames, err)))
-                       return NULL;
-       } else {
-               frame = frames;
-               frame->bh = NULL;                       /* for dx_release() */
-               frame->at = (struct dx_entry *)frames;  /* hack for zero entry*/
-               dx_set_block(frame->at, 0);             /* dx_root block is 0 */
-       }
-       hash = hinfo.hash;
+       if (!(frame = dx_probe(d_name, dir, &hinfo, frames, err)))
+               return NULL;
        do {
                block = dx_get_block(frame->at);
-               if (!(bh = ext4_bread (NULL,dir, block, 0, err)))
+               if (!(bh = ext4_bread(NULL, dir, block, 0, err)))
                        goto errout;
-               de = (struct ext4_dir_entry_2 *) bh->b_data;
-               top = (struct ext4_dir_entry_2 *) ((char *) de + sb->s_blocksize -
-                                      EXT4_DIR_REC_LEN(0));
-               for (; de < top; de = ext4_next_entry(de, sb->s_blocksize)) {
-                       int off = (block << EXT4_BLOCK_SIZE_BITS(sb))
-                                 + ((char *) de - bh->b_data);
-
-                       if (!ext4_check_dir_entry(dir, de, bh, off)) {
-                               brelse(bh);
-                               *err = ERR_BAD_DX_DIR;
-                               goto errout;
-                       }
 
-                       if (ext4_match(namelen, name, de)) {
-                               *res_dir = de;
-                               dx_release(frames);
-                               return bh;
-                       }
+               retval = search_dirblock(bh, dir, d_name,
+                                        block << EXT4_BLOCK_SIZE_BITS(sb),
+                                        res_dir);
+               if (retval == 1) {      /* Success! */
+                       dx_release(frames);
+                       return bh;
                }
                brelse(bh);
+               if (retval == -1) {
+                       *err = ERR_BAD_DX_DIR;
+                       goto errout;
+               }
+
                /* Check to see if we should continue to search */
-               retval = ext4_htree_next_block(dir, hash, frame,
+               retval = ext4_htree_next_block(dir, hinfo.hash, frame,
                                               frames, NULL);
                if (retval < 0) {
                        ext4_warning(sb,
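The namei.c hunks above add a fast path that sends "." and ".." lookups straight to the first directory block (both entries always live there, and NFS may legitimately look up ".."), and fold the per-block scan of ext4_dx_find_entry() into search_dirblock(), whose return convention is 1 for a match, 0 for a miss, and -1 for a corrupt entry. The fast-path test, written out as a standalone predicate for clarity (the helper name is hypothetical; note the '\0', not '0', comparison):

/* Sketch: matches exactly "." and "..".  namelen <= 2 plus a leading
 * '.' narrows it to ".", "..", or a NUL-terminated "." -- all names
 * guaranteed to sit in the first directory block. */
static int is_dot_or_dotdot(const u8 *name, int namelen)
{
	return namelen <= 2 && name[0] == '.' &&
	       (name[1] == '.' || name[1] == '\0');
}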
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
new file mode 100644 (file)
index 0000000..46a7d6a
--- /dev/null
@@ -0,0 +1,430 @@
+/*
+ * linux/fs/ext4/page-io.c
+ *
+ * This contains the new page_io functions for ext4
+ *
+ * Written by Theodore Ts'o, 2010.
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/time.h>
+#include <linux/jbd2.h>
+#include <linux/highuid.h>
+#include <linux/pagemap.h>
+#include <linux/quotaops.h>
+#include <linux/string.h>
+#include <linux/buffer_head.h>
+#include <linux/writeback.h>
+#include <linux/pagevec.h>
+#include <linux/mpage.h>
+#include <linux/namei.h>
+#include <linux/uio.h>
+#include <linux/bio.h>
+#include <linux/workqueue.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "ext4_jbd2.h"
+#include "xattr.h"
+#include "acl.h"
+#include "ext4_extents.h"
+
+static struct kmem_cache *io_page_cachep, *io_end_cachep;
+
+int __init ext4_init_pageio(void)
+{
+       io_page_cachep = KMEM_CACHE(ext4_io_page, SLAB_RECLAIM_ACCOUNT);
+       if (io_page_cachep == NULL)
+               return -ENOMEM;
+       io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
+       if (io_end_cachep == NULL) {
+               kmem_cache_destroy(io_page_cachep);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+void ext4_exit_pageio(void)
+{
+       kmem_cache_destroy(io_end_cachep);
+       kmem_cache_destroy(io_page_cachep);
+}
+
+void ext4_free_io_end(ext4_io_end_t *io)
+{
+       int i;
+
+       BUG_ON(!io);
+       if (io->page)
+               put_page(io->page);
+       for (i = 0; i < io->num_io_pages; i++) {
+               if (--io->pages[i]->p_count == 0) {
+                       struct page *page = io->pages[i]->p_page;
+
+                       end_page_writeback(page);
+                       put_page(page);
+                       kmem_cache_free(io_page_cachep, io->pages[i]);
+               }
+       }
+       io->num_io_pages = 0;
+       iput(io->inode);
+       kmem_cache_free(io_end_cachep, io);
+}
+
+/*
+ * check a range of space and convert unwritten extents to written.
+ */
+int ext4_end_io_nolock(ext4_io_end_t *io)
+{
+       struct inode *inode = io->inode;
+       loff_t offset = io->offset;
+       ssize_t size = io->size;
+       int ret = 0;
+
+       ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p,"
+                  "list->prev 0x%p\n",
+                  io, inode->i_ino, io->list.next, io->list.prev);
+
+       if (list_empty(&io->list))
+               return ret;
+
+       if (!(io->flag & EXT4_IO_END_UNWRITTEN))
+               return ret;
+
+       ret = ext4_convert_unwritten_extents(inode, offset, size);
+       if (ret < 0) {
+               printk(KERN_EMERG "%s: failed to convert unwritten "
+                       "extents to written extents, error is %d "
+                       "io is still on inode %lu aio dio list\n",
+                      __func__, ret, inode->i_ino);
+               return ret;
+       }
+
+       if (io->iocb)
+               aio_complete(io->iocb, io->result, 0);
+       /* clear the DIO AIO unwritten flag */
+       io->flag &= ~EXT4_IO_END_UNWRITTEN;
+       return ret;
+}
+
+/*
+ * work on completed aio dio IO, to convert unwritten extents to extents
+ */
+static void ext4_end_io_work(struct work_struct *work)
+{
+       ext4_io_end_t           *io = container_of(work, ext4_io_end_t, work);
+       struct inode            *inode = io->inode;
+       struct ext4_inode_info  *ei = EXT4_I(inode);
+       unsigned long           flags;
+       int                     ret;
+
+       mutex_lock(&inode->i_mutex);
+       ret = ext4_end_io_nolock(io);
+       if (ret < 0) {
+               mutex_unlock(&inode->i_mutex);
+               return;
+       }
+
+       spin_lock_irqsave(&ei->i_completed_io_lock, flags);
+       if (!list_empty(&io->list))
+               list_del_init(&io->list);
+       spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
+       mutex_unlock(&inode->i_mutex);
+       ext4_free_io_end(io);
+}
+
+ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
+{
+       ext4_io_end_t *io = NULL;
+
+       io = kmem_cache_alloc(io_end_cachep, flags);
+       if (io) {
+               memset(io, 0, sizeof(*io));
+               io->inode = igrab(inode);
+               BUG_ON(!io->inode);
+               INIT_WORK(&io->work, ext4_end_io_work);
+               INIT_LIST_HEAD(&io->list);
+       }
+       return io;
+}
+
+/*
+ * Print a buffer I/O error compatible with the one in fs/buffer.c.  This
+ * provides compatibility with dmesg scrapers that look for a specific
+ * buffer I/O error message.  We really need a unified error reporting
+ * structure to userspace ala Digital Unix's uerf system, but it's
+ * probably not going to happen in my lifetime, due to LKML politics...
+ */
+static void buffer_io_error(struct buffer_head *bh)
+{
+       char b[BDEVNAME_SIZE];
+       printk(KERN_ERR "Buffer I/O error on device %s, logical block %llu\n",
+                       bdevname(bh->b_bdev, b),
+                       (unsigned long long)bh->b_blocknr);
+}
+
+static void ext4_end_bio(struct bio *bio, int error)
+{
+       ext4_io_end_t *io_end = bio->bi_private;
+       struct workqueue_struct *wq;
+       struct inode *inode;
+       unsigned long flags;
+       ext4_fsblk_t err_block;
+       int i;
+
+       BUG_ON(!io_end);
+       inode = io_end->inode;
+       bio->bi_private = NULL;
+       bio->bi_end_io = NULL;
+       if (test_bit(BIO_UPTODATE, &bio->bi_flags))
+               error = 0;
+       err_block = bio->bi_sector >> (inode->i_blkbits - 9);
+       bio_put(bio);
+
+       if (!(inode->i_sb->s_flags & MS_ACTIVE)) {
+               pr_err("sb umounted, discard end_io request for inode %lu\n",
+                       io_end->inode->i_ino);
+               ext4_free_io_end(io_end);
+               return;
+       }
+
+       if (error) {
+               io_end->flag |= EXT4_IO_END_ERROR;
+               ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
+                            "(offset %llu size %ld starting block %llu)",
+                            inode->i_ino,
+                            (unsigned long long) io_end->offset,
+                            (long) io_end->size,
+                            (unsigned long long) err_block);
+       }
+
+       for (i = 0; i < io_end->num_io_pages; i++) {
+               struct page *page = io_end->pages[i]->p_page;
+               struct buffer_head *bh, *head;
+               int partial_write = 0;
+
+               head = page_buffers(page);
+               if (error)
+                       SetPageError(page);
+               BUG_ON(!head);
+               if (head->b_size == PAGE_CACHE_SIZE)
+                       clear_buffer_dirty(head);
+               else {
+                       loff_t offset;
+                       loff_t io_end_offset = io_end->offset + io_end->size;
+
+                       offset = (sector_t) page->index << PAGE_CACHE_SHIFT;
+                       bh = head;
+                       do {
+                               if ((offset >= io_end->offset) &&
+                                   (offset+bh->b_size <= io_end_offset)) {
+                                       if (error)
+                                               buffer_io_error(bh);
+
+                                       clear_buffer_dirty(bh);
+                               }
+                               if (buffer_delay(bh))
+                                       partial_write = 1;
+                               else if (!buffer_mapped(bh))
+                                       clear_buffer_dirty(bh);
+                               else if (buffer_dirty(bh))
+                                       partial_write = 1;
+                               offset += bh->b_size;
+                               bh = bh->b_this_page;
+                       } while (bh != head);
+               }
+
+               if (--io_end->pages[i]->p_count == 0) {
+                       struct page *page = io_end->pages[i]->p_page;
+
+                       end_page_writeback(page);
+                       put_page(page);
+                       kmem_cache_free(io_page_cachep, io_end->pages[i]);
+               }
+
+               /*
+                * If this is a partial write which happened to make
+                * all buffers uptodate then we can optimize away a
+                * bogus readpage() for the next read(). Here we
+                * 'discover' whether the page went uptodate as a
+                * result of this (potentially partial) write.
+                */
+               if (!partial_write)
+                       SetPageUptodate(page);
+       }
+
+       io_end->num_io_pages = 0;
+
+       /* Add the io_end to per-inode completed io list*/
+       spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
+       list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
+       spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);
+
+       wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq;
+       /* queue the work to convert unwritten extents to written */
+       queue_work(wq, &io_end->work);
+}
+
+void ext4_io_submit(struct ext4_io_submit *io)
+{
+       struct bio *bio = io->io_bio;
+
+       if (bio) {
+               bio_get(io->io_bio);
+               submit_bio(io->io_op, io->io_bio);
+               BUG_ON(bio_flagged(io->io_bio, BIO_EOPNOTSUPP));
+               bio_put(io->io_bio);
+       }
+       io->io_bio = NULL;
+       io->io_op = 0;
+       io->io_end = NULL;
+}
+
+static int io_submit_init(struct ext4_io_submit *io,
+                         struct inode *inode,
+                         struct writeback_control *wbc,
+                         struct buffer_head *bh)
+{
+       ext4_io_end_t *io_end;
+       struct page *page = bh->b_page;
+       int nvecs = bio_get_nr_vecs(bh->b_bdev);
+       struct bio *bio;
+
+       io_end = ext4_init_io_end(inode, GFP_NOFS);
+       if (!io_end)
+               return -ENOMEM;
+       do {
+               bio = bio_alloc(GFP_NOIO, nvecs);
+               nvecs >>= 1;
+       } while (bio == NULL);
+
+       bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+       bio->bi_bdev = bh->b_bdev;
+       bio->bi_private = io->io_end = io_end;
+       bio->bi_end_io = ext4_end_bio;
+
+       io_end->inode = inode;
+       io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);
+
+       io->io_bio = bio;
+       io->io_op = (wbc->sync_mode == WB_SYNC_ALL ?
+                       WRITE_SYNC_PLUG : WRITE);
+       io->io_next_block = bh->b_blocknr;
+       return 0;
+}
+
+static int io_submit_add_bh(struct ext4_io_submit *io,
+                           struct ext4_io_page *io_page,
+                           struct inode *inode,
+                           struct writeback_control *wbc,
+                           struct buffer_head *bh)
+{
+       ext4_io_end_t *io_end;
+       int ret;
+
+       if (buffer_new(bh)) {
+               clear_buffer_new(bh);
+               unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
+       }
+
+       if (!buffer_mapped(bh) || buffer_delay(bh)) {
+               if (!buffer_mapped(bh))
+                       clear_buffer_dirty(bh);
+               if (io->io_bio)
+                       ext4_io_submit(io);
+               return 0;
+       }
+
+       if (io->io_bio && bh->b_blocknr != io->io_next_block) {
+submit_and_retry:
+               ext4_io_submit(io);
+       }
+       if (io->io_bio == NULL) {
+               ret = io_submit_init(io, inode, wbc, bh);
+               if (ret)
+                       return ret;
+       }
+       io_end = io->io_end;
+       if ((io_end->num_io_pages >= MAX_IO_PAGES) &&
+           (io_end->pages[io_end->num_io_pages-1] != io_page))
+               goto submit_and_retry;
+       if (buffer_uninit(bh))
+               io->io_end->flag |= EXT4_IO_END_UNWRITTEN;
+       io->io_end->size += bh->b_size;
+       io->io_next_block++;
+       ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
+       if (ret != bh->b_size)
+               goto submit_and_retry;
+       if ((io_end->num_io_pages == 0) ||
+           (io_end->pages[io_end->num_io_pages-1] != io_page)) {
+               io_end->pages[io_end->num_io_pages++] = io_page;
+               io_page->p_count++;
+       }
+       return 0;
+}
+
+int ext4_bio_write_page(struct ext4_io_submit *io,
+                       struct page *page,
+                       int len,
+                       struct writeback_control *wbc)
+{
+       struct inode *inode = page->mapping->host;
+       unsigned block_start, block_end, blocksize;
+       struct ext4_io_page *io_page;
+       struct buffer_head *bh, *head;
+       int ret = 0;
+
+       blocksize = 1 << inode->i_blkbits;
+
+       BUG_ON(PageWriteback(page));
+       set_page_writeback(page);
+       ClearPageError(page);
+
+       io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
+       if (!io_page) {
+               set_page_dirty(page);
+               unlock_page(page);
+               return -ENOMEM;
+       }
+       io_page->p_page = page;
+       io_page->p_count = 0;
+       get_page(page);
+
+       for (bh = head = page_buffers(page), block_start = 0;
+            bh != head || !block_start;
+            block_start = block_end, bh = bh->b_this_page) {
+               block_end = block_start + blocksize;
+               if (block_start >= len) {
+                       clear_buffer_dirty(bh);
+                       set_buffer_uptodate(bh);
+                       continue;
+               }
+               ret = io_submit_add_bh(io, io_page, inode, wbc, bh);
+               if (ret) {
+                       /*
+                        * We only get here on ENOMEM.  Not much else
+                        * we can do but mark the page as dirty, and
+                        * better luck next time.
+                        */
+                       set_page_dirty(page);
+                       break;
+               }
+       }
+       unlock_page(page);
+       /*
+        * If the page was truncated before we could do the writeback,
+        * or we had a memory allocation error while trying to write
+        * the first buffer head, we won't have submitted any pages for
+        * I/O.  In that case we need to make sure we've cleared the
+        * PageWriteback bit from the page to prevent the system from
+        * wedging later on.
+        */
+       if (io_page->p_count == 0) {
+               put_page(page);
+               end_page_writeback(page);
+               kmem_cache_free(io_page_cachep, io_page);
+       }
+       return ret;
+}
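The new page-io.c accumulates physically contiguous buffer_heads into a single bio -- io_submit_add_bh() flushes and restarts the bio whenever the next block is discontiguous, the io_end's page array fills, or bio_add_page() runs out of room -- and defers unwritten-extent conversion to the per-sb workqueue. A hedged sketch of how a writeback caller is expected to drive the API (the function name is hypothetical and error handling is abbreviated):

/* Sketch: one ext4_io_submit context batches many pages, so pages with
 * contiguous blocks share a bio; the trailing ext4_io_submit() flushes
 * whatever bio is still being built (a no-op when io_bio is NULL). */
static int sketch_write_pages(struct page **pages, int nr, int len,
			      struct writeback_control *wbc)
{
	struct ext4_io_submit io;
	int i, ret = 0;

	memset(&io, 0, sizeof(io));
	for (i = 0; i < nr; i++) {
		ret = ext4_bio_write_page(&io, pages[i], len, wbc);
		if (ret)
			break;
	}
	ext4_io_submit(&io);
	return ret;
}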
index ca5c8aa00a2fe10a621348913a0c85908c6e7f6e..dc963929de652cb997550e38338855823832e53c 100644 (file)
@@ -226,23 +226,13 @@ static int setup_new_group_blocks(struct super_block *sb,
        }
 
        /* Zero out all of the reserved backup group descriptor table blocks */
-       for (i = 0, bit = gdblocks + 1, block = start + bit;
-            i < reserved_gdb; i++, block++, bit++) {
-               struct buffer_head *gdb;
-
-               ext4_debug("clear reserved block %#04llx (+%d)\n", block, bit);
-
-               if ((err = extend_or_restart_transaction(handle, 1, bh)))
-                       goto exit_bh;
+       ext4_debug("clear reserved GDT blocks %#04llx -> %#04llx\n",
+                       block, sbi->s_itb_per_group);
+       err = sb_issue_zeroout(sb, gdblocks + start + 1, reserved_gdb,
+                              GFP_NOFS);
+       if (err)
+               goto exit_bh;
 
-               if (IS_ERR(gdb = bclean(handle, sb, block))) {
-                       err = PTR_ERR(gdb);
-                       goto exit_bh;
-               }
-               ext4_handle_dirty_metadata(handle, NULL, gdb);
-               ext4_set_bit(bit, bh->b_data);
-               brelse(gdb);
-       }
        ext4_debug("mark block bitmap %#04llx (+%llu)\n", input->block_bitmap,
                   input->block_bitmap - start);
        ext4_set_bit(input->block_bitmap - start, bh->b_data);
@@ -251,28 +241,18 @@ static int setup_new_group_blocks(struct super_block *sb,
        ext4_set_bit(input->inode_bitmap - start, bh->b_data);
 
        /* Zero out all of the inode table blocks */
-       for (i = 0, block = input->inode_table, bit = block - start;
-            i < sbi->s_itb_per_group; i++, bit++, block++) {
-               struct buffer_head *it;
-
-               ext4_debug("clear inode block %#04llx (+%d)\n", block, bit);
-
-               if ((err = extend_or_restart_transaction(handle, 1, bh)))
-                       goto exit_bh;
-
-               if (IS_ERR(it = bclean(handle, sb, block))) {
-                       err = PTR_ERR(it);
-                       goto exit_bh;
-               }
-               ext4_handle_dirty_metadata(handle, NULL, it);
-               brelse(it);
-               ext4_set_bit(bit, bh->b_data);
-       }
+       block = input->inode_table;
+       ext4_debug("clear inode table blocks %#04llx -> %#04llx\n",
+                       block, sbi->s_itb_per_group);
+       err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group, GFP_NOFS);
+       if (err)
+               goto exit_bh;
 
        if ((err = extend_or_restart_transaction(handle, 2, bh)))
                goto exit_bh;
 
-       mark_bitmap_end(input->blocks_count, sb->s_blocksize * 8, bh->b_data);
+       ext4_mark_bitmap_end(input->blocks_count, sb->s_blocksize * 8,
+                            bh->b_data);
        ext4_handle_dirty_metadata(handle, NULL, bh);
        brelse(bh);
        /* Mark unused entries in inode bitmap used */
@@ -283,8 +263,8 @@ static int setup_new_group_blocks(struct super_block *sb,
                goto exit_journal;
        }
 
-       mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
-                       bh->b_data);
+       ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
+                            bh->b_data);
        ext4_handle_dirty_metadata(handle, NULL, bh);
 exit_bh:
        brelse(bh);
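The resize.c hunks replace two journaled block-at-a-time bclean() loops with sb_issue_zeroout(), which zeroes a whole range at the block layer in one request. Bypassing the journal is safe here because the new group's blocks are not reachable until the resize transaction commits. Roughly, the wrapper does the following (a sketch, with the caveat that the exact blkdev_issue_zeroout() flags changed across releases in this period):

/* Sketch: convert filesystem blocks to 512-byte sectors and let the
 * block layer zero the range (using discard/write-same if available). */
static inline int sketch_sb_issue_zeroout(struct super_block *sb,
					  sector_t block, sector_t nr_blocks,
					  gfp_t gfp_mask)
{
	return blkdev_issue_zeroout(sb->s_bdev,
				    block << (sb->s_blocksize_bits - 9),
				    nr_blocks << (sb->s_blocksize_bits - 9),
				    gfp_mask);
}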
index 8ecc1e590303841b0c5f13d07568082230acdf4d..0348ce0665929f45933bb04fa538d2c18fe863f8 100644 (file)
@@ -40,6 +40,9 @@
 #include <linux/crc16.h>
 #include <asm/uaccess.h>
 
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+
 #include "ext4.h"
 #include "ext4_jbd2.h"
 #include "xattr.h"
 #define CREATE_TRACE_POINTS
 #include <trace/events/ext4.h>
 
-struct proc_dir_entry *ext4_proc_root;
+static struct proc_dir_entry *ext4_proc_root;
 static struct kset *ext4_kset;
+struct ext4_lazy_init *ext4_li_info;
+struct mutex ext4_li_mtx;
+struct ext4_features *ext4_feat;
 
 static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
                             unsigned long journal_devnum);
@@ -69,6 +75,8 @@ static void ext4_write_super(struct super_block *sb);
 static int ext4_freeze(struct super_block *sb);
 static int ext4_get_sb(struct file_system_type *fs_type, int flags,
                       const char *dev_name, void *data, struct vfsmount *mnt);
+static void ext4_destroy_lazyinit_thread(void);
+static void ext4_unregister_li_request(struct super_block *sb);
 
 #if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
 static struct file_system_type ext3_fs_type = {
@@ -701,6 +709,7 @@ static void ext4_put_super(struct super_block *sb)
        struct ext4_super_block *es = sbi->s_es;
        int i, err;
 
+       ext4_unregister_li_request(sb);
        dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
 
        flush_workqueue(sbi->dio_unwritten_wq);
@@ -717,6 +726,7 @@ static void ext4_put_super(struct super_block *sb)
                        ext4_abort(sb, "Couldn't clean up the journal");
        }
 
+       del_timer(&sbi->s_err_report);
        ext4_release_system_zone(sb);
        ext4_mb_release(sb);
        ext4_ext_release(sb);
@@ -1042,6 +1052,12 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
            !(def_mount_opts & EXT4_DEFM_BLOCK_VALIDITY))
                seq_puts(seq, ",block_validity");
 
+       if (!test_opt(sb, INIT_INODE_TABLE))
+               seq_puts(seq, ",noinit_inode_table");
+       else if (sbi->s_li_wait_mult)
+               seq_printf(seq, ",init_inode_table=%u",
+                          (unsigned) sbi->s_li_wait_mult);
+
        ext4_show_quota_options(seq, sb);
 
        return 0;
@@ -1170,6 +1186,7 @@ static const struct super_operations ext4_sops = {
        .quota_write    = ext4_quota_write,
 #endif
        .bdev_try_to_free_page = bdev_try_to_free_page,
+       .trim_fs        = ext4_trim_fs
 };
 
 static const struct super_operations ext4_nojournal_sops = {
@@ -1216,6 +1233,7 @@ enum {
        Opt_inode_readahead_blks, Opt_journal_ioprio,
        Opt_dioread_nolock, Opt_dioread_lock,
        Opt_discard, Opt_nodiscard,
+       Opt_init_inode_table, Opt_noinit_inode_table,
 };
 
 static const match_table_t tokens = {
@@ -1286,6 +1304,9 @@ static const match_table_t tokens = {
        {Opt_dioread_lock, "dioread_lock"},
        {Opt_discard, "discard"},
        {Opt_nodiscard, "nodiscard"},
+       {Opt_init_inode_table, "init_itable=%u"},
+       {Opt_init_inode_table, "init_itable"},
+       {Opt_noinit_inode_table, "noinit_itable"},
        {Opt_err, NULL},
 };
 
@@ -1756,6 +1777,20 @@ set_qf_format:
                case Opt_dioread_lock:
                        clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
                        break;
+               case Opt_init_inode_table:
+                       set_opt(sbi->s_mount_opt, INIT_INODE_TABLE);
+                       if (args[0].from) {
+                               if (match_int(&args[0], &option))
+                                       return 0;
+                       } else
+                               option = EXT4_DEF_LI_WAIT_MULT;
+                       if (option < 0)
+                               return 0;
+                       sbi->s_li_wait_mult = option;
+                       break;
+               case Opt_noinit_inode_table:
+                       clear_opt(sbi->s_mount_opt, INIT_INODE_TABLE);
+                       break;
                default:
                        ext4_msg(sb, KERN_ERR,
                               "Unrecognized mount option \"%s\" "
@@ -1939,7 +1974,8 @@ int ext4_group_desc_csum_verify(struct ext4_sb_info *sbi, __u32 block_group,
 }
 
 /* Called at mount-time, super-block is locked */
-static int ext4_check_descriptors(struct super_block *sb)
+static int ext4_check_descriptors(struct super_block *sb,
+                                 ext4_group_t *first_not_zeroed)
 {
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
@@ -1948,7 +1984,7 @@ static int ext4_check_descriptors(struct super_block *sb)
        ext4_fsblk_t inode_bitmap;
        ext4_fsblk_t inode_table;
        int flexbg_flag = 0;
-       ext4_group_t i;
+       ext4_group_t i, grp = sbi->s_groups_count;
 
        if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
                flexbg_flag = 1;
@@ -1964,6 +2000,10 @@ static int ext4_check_descriptors(struct super_block *sb)
                        last_block = first_block +
                                (EXT4_BLOCKS_PER_GROUP(sb) - 1);
 
+               if ((grp == sbi->s_groups_count) &&
+                  !(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
+                       grp = i;
+
                block_bitmap = ext4_block_bitmap(sb, gdp);
                if (block_bitmap < first_block || block_bitmap > last_block) {
                        ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
@@ -2001,6 +2041,8 @@ static int ext4_check_descriptors(struct super_block *sb)
                if (!flexbg_flag)
                        first_block += EXT4_BLOCKS_PER_GROUP(sb);
        }
+       if (NULL != first_not_zeroed)
+               *first_not_zeroed = grp;
 
        ext4_free_blocks_count_set(sbi->s_es, ext4_count_free_blocks(sb));
        sbi->s_es->s_free_inodes_count =cpu_to_le32(ext4_count_free_inodes(sb));
@@ -2373,6 +2415,7 @@ static struct ext4_attr ext4_attr_##_name = {                     \
 #define EXT4_ATTR(name, mode, show, store) \
 static struct ext4_attr ext4_attr_##name = __ATTR(name, mode, show, store)
 
+#define EXT4_INFO_ATTR(name) EXT4_ATTR(name, 0444, NULL, NULL)
 #define EXT4_RO_ATTR(name) EXT4_ATTR(name, 0444, name##_show, NULL)
 #define EXT4_RW_ATTR(name) EXT4_ATTR(name, 0644, name##_show, name##_store)
 #define EXT4_RW_ATTR_SBI_UI(name, elname)      \
@@ -2409,6 +2452,16 @@ static struct attribute *ext4_attrs[] = {
        NULL,
 };
 
+/* Features this copy of ext4 supports */
+EXT4_INFO_ATTR(lazy_itable_init);
+EXT4_INFO_ATTR(batched_discard);
+
+static struct attribute *ext4_feat_attrs[] = {
+       ATTR_LIST(lazy_itable_init),
+       ATTR_LIST(batched_discard),
+       NULL,
+};
+
 static ssize_t ext4_attr_show(struct kobject *kobj,
                              struct attribute *attr, char *buf)
 {
@@ -2437,7 +2490,6 @@ static void ext4_sb_release(struct kobject *kobj)
        complete(&sbi->s_kobj_unregister);
 }
 
-
 static const struct sysfs_ops ext4_attr_ops = {
        .show   = ext4_attr_show,
        .store  = ext4_attr_store,
@@ -2449,6 +2501,17 @@ static struct kobj_type ext4_ktype = {
        .release        = ext4_sb_release,
 };
 
+static void ext4_feat_release(struct kobject *kobj)
+{
+       complete(&ext4_feat->f_kobj_unregister);
+}
+
+static struct kobj_type ext4_feat_ktype = {
+       .default_attrs  = ext4_feat_attrs,
+       .sysfs_ops      = &ext4_attr_ops,
+       .release        = ext4_feat_release,
+};
+
 /*
  * Check whether this filesystem can be mounted based on
  * the features present and the RDONLY/RDWR mount requested.
@@ -2539,6 +2602,372 @@ static void print_daily_error_info(unsigned long arg)
        mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ);  /* Once a day */
 }
 
+static void ext4_lazyinode_timeout(unsigned long data)
+{
+       struct task_struct *p = (struct task_struct *)data;
+       wake_up_process(p);
+}
+
+/* Find next suitable group and run ext4_init_inode_table */
+static int ext4_run_li_request(struct ext4_li_request *elr)
+{
+       struct ext4_group_desc *gdp = NULL;
+       ext4_group_t group, ngroups;
+       struct super_block *sb;
+       unsigned long timeout = 0;
+       int ret = 0;
+
+       sb = elr->lr_super;
+       ngroups = EXT4_SB(sb)->s_groups_count;
+
+       for (group = elr->lr_next_group; group < ngroups; group++) {
+               gdp = ext4_get_group_desc(sb, group, NULL);
+               if (!gdp) {
+                       ret = 1;
+                       break;
+               }
+
+               if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
+                       break;
+       }
+
+       if (group == ngroups)
+               ret = 1;
+
+       if (!ret) {
+               timeout = jiffies;
+               ret = ext4_init_inode_table(sb, group,
+                                           elr->lr_timeout ? 0 : 1);
+               if (elr->lr_timeout == 0) {
+                       timeout = jiffies - timeout;
+                       if (elr->lr_sbi->s_li_wait_mult)
+                               timeout *= elr->lr_sbi->s_li_wait_mult;
+                       else
+                               timeout *= 20;
+                       elr->lr_timeout = timeout;
+               }
+               elr->lr_next_sched = jiffies + elr->lr_timeout;
+               elr->lr_next_group = group + 1;
+       }
+
+       return ret;
+}
+
+/*
+ * Remove lr_request from the request list and free the
+ * request structure. Should be called with li_list_mtx held.
+ */
+static void ext4_remove_li_request(struct ext4_li_request *elr)
+{
+       struct ext4_sb_info *sbi;
+
+       if (!elr)
+               return;
+
+       sbi = elr->lr_sbi;
+
+       list_del(&elr->lr_request);
+       sbi->s_li_request = NULL;
+       kfree(elr);
+}
+
+static void ext4_unregister_li_request(struct super_block *sb)
+{
+       struct ext4_li_request *elr = EXT4_SB(sb)->s_li_request;
+
+       if (!ext4_li_info)
+               return;
+
+       mutex_lock(&ext4_li_info->li_list_mtx);
+       ext4_remove_li_request(elr);
+       mutex_unlock(&ext4_li_info->li_list_mtx);
+}
+
+/*
+ * This is the function where the ext4lazyinit thread lives. It walks
+ * through the request list searching for the next scheduled filesystem.
+ * When such a fs is found, run the lazy initialization request
+ * (ext4_run_li_request) and keep track of the time spent in this
+ * function. Based on that time we compute the next schedule time of
+ * the request. When walking through the list is complete, compute the
+ * next wakeup time and put the thread to sleep.
+ */
+static int ext4_lazyinit_thread(void *arg)
+{
+       struct ext4_lazy_init *eli = (struct ext4_lazy_init *)arg;
+       struct list_head *pos, *n;
+       struct ext4_li_request *elr;
+       unsigned long next_wakeup;
+       DEFINE_WAIT(wait);
+       int ret = 0;
+
+       BUG_ON(NULL == eli);
+
+       eli->li_timer.data = (unsigned long)current;
+       eli->li_timer.function = ext4_lazyinode_timeout;
+
+       eli->li_task = current;
+       wake_up(&eli->li_wait_task);
+
+cont_thread:
+       while (true) {
+               next_wakeup = MAX_JIFFY_OFFSET;
+
+               mutex_lock(&eli->li_list_mtx);
+               if (list_empty(&eli->li_request_list)) {
+                       mutex_unlock(&eli->li_list_mtx);
+                       goto exit_thread;
+               }
+
+               list_for_each_safe(pos, n, &eli->li_request_list) {
+                       elr = list_entry(pos, struct ext4_li_request,
+                                        lr_request);
+
+                       if (time_after_eq(jiffies, elr->lr_next_sched))
+                               ret = ext4_run_li_request(elr);
+
+                       if (ret) {
+                               ret = 0;
+                               ext4_remove_li_request(elr);
+                               continue;
+                       }
+
+                       if (time_before(elr->lr_next_sched, next_wakeup))
+                               next_wakeup = elr->lr_next_sched;
+               }
+               mutex_unlock(&eli->li_list_mtx);
+
+               if (freezing(current))
+                       refrigerator();
+
+               if (time_after_eq(jiffies, next_wakeup)) {
+                       cond_resched();
+                       continue;
+               }
+
+               eli->li_timer.expires = next_wakeup;
+               add_timer(&eli->li_timer);
+               prepare_to_wait(&eli->li_wait_daemon, &wait,
+                               TASK_INTERRUPTIBLE);
+               if (time_before(jiffies, next_wakeup))
+                       schedule();
+               finish_wait(&eli->li_wait_daemon, &wait);
+       }
+
+exit_thread:
+       /*
+        * It looks like the request list is empty, but we need
+        * to check it under the li_list_mtx lock, to prevent any
+        * additions into it, and of course we should lock ext4_li_mtx
+        * to atomically free the list and ext4_li_info, because at
+        * this point another ext4 filesystem could be registering
+        * new one.
+        */
+       mutex_lock(&ext4_li_mtx);
+       mutex_lock(&eli->li_list_mtx);
+       if (!list_empty(&eli->li_request_list)) {
+               mutex_unlock(&eli->li_list_mtx);
+               mutex_unlock(&ext4_li_mtx);
+               goto cont_thread;
+       }
+       mutex_unlock(&eli->li_list_mtx);
+       del_timer_sync(&ext4_li_info->li_timer);
+       eli->li_task = NULL;
+       wake_up(&eli->li_wait_task);
+
+       kfree(ext4_li_info);
+       ext4_li_info = NULL;
+       mutex_unlock(&ext4_li_mtx);
+
+       return 0;
+}
+
+static void ext4_clear_request_list(void)
+{
+       struct list_head *pos, *n;
+       struct ext4_li_request *elr;
+
+       mutex_lock(&ext4_li_info->li_list_mtx);
+       if (list_empty(&ext4_li_info->li_request_list)) {
+               mutex_unlock(&ext4_li_info->li_list_mtx);
+               return;
+       }
+
+       list_for_each_safe(pos, n, &ext4_li_info->li_request_list) {
+               elr = list_entry(pos, struct ext4_li_request,
+                                lr_request);
+               ext4_remove_li_request(elr);
+       }
+       mutex_unlock(&ext4_li_info->li_list_mtx);
+}
+
+static int ext4_run_lazyinit_thread(void)
+{
+       struct task_struct *t;
+
+       t = kthread_run(ext4_lazyinit_thread, ext4_li_info, "ext4lazyinit");
+       if (IS_ERR(t)) {
+               int err = PTR_ERR(t);
+               ext4_clear_request_list();
+               del_timer_sync(&ext4_li_info->li_timer);
+               kfree(ext4_li_info);
+               ext4_li_info = NULL;
+               printk(KERN_CRIT "EXT4: error %d creating inode table "
+                                "initialization thread\n",
+                                err);
+               return err;
+       }
+       ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING;
+
+       wait_event(ext4_li_info->li_wait_task, ext4_li_info->li_task != NULL);
+       return 0;
+}
+
+/*
+ * Check whether it makes sense to run the itable init thread or not.
+ * If there is at least one uninitialized inode table, return the
+ * corresponding group number; otherwise the loop runs through all
+ * groups and the total number of groups is returned.
+ */
+static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
+{
+       ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
+       struct ext4_group_desc *gdp = NULL;
+
+       for (group = 0; group < ngroups; group++) {
+               gdp = ext4_get_group_desc(sb, group, NULL);
+               if (!gdp)
+                       continue;
+
+               if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
+                       break;
+       }
+
+       return group;
+}
+
+static int ext4_li_info_new(void)
+{
+       struct ext4_lazy_init *eli = NULL;
+
+       eli = kzalloc(sizeof(*eli), GFP_KERNEL);
+       if (!eli)
+               return -ENOMEM;
+
+       eli->li_task = NULL;
+       INIT_LIST_HEAD(&eli->li_request_list);
+       mutex_init(&eli->li_list_mtx);
+
+       init_waitqueue_head(&eli->li_wait_daemon);
+       init_waitqueue_head(&eli->li_wait_task);
+       init_timer(&eli->li_timer);
+       eli->li_state |= EXT4_LAZYINIT_QUIT;
+
+       ext4_li_info = eli;
+
+       return 0;
+}
+
+static struct ext4_li_request *ext4_li_request_new(struct super_block *sb,
+                                           ext4_group_t start)
+{
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       struct ext4_li_request *elr;
+       unsigned long rnd;
+
+       elr = kzalloc(sizeof(*elr), GFP_KERNEL);
+       if (!elr)
+               return NULL;
+
+       elr->lr_super = sb;
+       elr->lr_sbi = sbi;
+       elr->lr_next_group = start;
+
+       /*
+        * Randomize first schedule time of the request to
+        * spread the inode table initialization requests
+        * better.
+        */
+       get_random_bytes(&rnd, sizeof(rnd));
+       elr->lr_next_sched = jiffies + (unsigned long)rnd %
+                            (EXT4_DEF_LI_MAX_START_DELAY * HZ);
+
+       return elr;
+}
+
+static int ext4_register_li_request(struct super_block *sb,
+                                   ext4_group_t first_not_zeroed)
+{
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       struct ext4_li_request *elr;
+       ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
+       int ret = 0;
+
+       if (sbi->s_li_request != NULL)
+               return 0;
+
+       if (first_not_zeroed == ngroups ||
+           (sb->s_flags & MS_RDONLY) ||
+           !test_opt(sb, INIT_INODE_TABLE)) {
+               sbi->s_li_request = NULL;
+               return 0;
+       }
+
+       elr = ext4_li_request_new(sb, first_not_zeroed);
+       if (!elr)
+               return -ENOMEM;
+
+       mutex_lock(&ext4_li_mtx);
+
+       if (NULL == ext4_li_info) {
+               ret = ext4_li_info_new();
+               if (ret)
+                       goto out;
+       }
+
+       mutex_lock(&ext4_li_info->li_list_mtx);
+       list_add(&elr->lr_request, &ext4_li_info->li_request_list);
+       mutex_unlock(&ext4_li_info->li_list_mtx);
+
+       sbi->s_li_request = elr;
+
+       if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) {
+               ret = ext4_run_lazyinit_thread();
+               if (ret)
+                       goto out;
+       }
+out:
+       mutex_unlock(&ext4_li_mtx);
+       if (ret)
+               kfree(elr);
+       return ret;
+}
+
+/*
+ * We do not need to lock anything since this is called on
+ * module unload.
+ */
+static void ext4_destroy_lazyinit_thread(void)
+{
+       /*
+        * If thread exited earlier
+        * there's nothing to be done.
+        */
+       if (!ext4_li_info)
+               return;
+
+       ext4_clear_request_list();
+
+       while (ext4_li_info->li_task) {
+               wake_up(&ext4_li_info->li_wait_daemon);
+               wait_event(ext4_li_info->li_wait_task,
+                          ext4_li_info->li_task == NULL);
+       }
+}
+
 static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                                __releases(kernel_lock)
                                __acquires(kernel_lock)
@@ -2564,6 +2993,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        __u64 blocks_count;
        int err;
        unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
+       ext4_group_t first_not_zeroed;
 
        sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
        if (!sbi)
@@ -2624,6 +3054,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 
        /* Set defaults before we parse the mount options */
        def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
+       set_opt(sbi->s_mount_opt, INIT_INODE_TABLE);
        if (def_mount_opts & EXT4_DEFM_DEBUG)
                set_opt(sbi->s_mount_opt, DEBUG);
        if (def_mount_opts & EXT4_DEFM_BSDGROUPS) {
@@ -2901,7 +3332,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                        goto failed_mount2;
                }
        }
-       if (!ext4_check_descriptors(sb)) {
+       if (!ext4_check_descriptors(sb, &first_not_zeroed)) {
                ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
                goto failed_mount2;
        }
@@ -3122,6 +3553,10 @@ no_journal:
                goto failed_mount4;
        }
 
+       err = ext4_register_li_request(sb, first_not_zeroed);
+       if (err)
+               goto failed_mount4;
+
        sbi->s_kobj.kset = ext4_kset;
        init_completion(&sbi->s_kobj_unregister);
        err = kobject_init_and_add(&sbi->s_kobj, &ext4_ktype, NULL,
@@ -3461,7 +3896,7 @@ static int ext4_load_journal(struct super_block *sb,
        EXT4_SB(sb)->s_journal = journal;
        ext4_clear_journal_err(sb, es);
 
-       if (journal_devnum &&
+       if (!really_read_only && journal_devnum &&
            journal_devnum != le32_to_cpu(es->s_journal_dev)) {
                es->s_journal_dev = cpu_to_le32(journal_devnum);
 
@@ -3514,9 +3949,12 @@ static int ext4_commit_super(struct super_block *sb, int sync)
        else
                es->s_kbytes_written =
                        cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
-       ext4_free_blocks_count_set(es, percpu_counter_sum_positive(
+       if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeblocks_counter))
+               ext4_free_blocks_count_set(es, percpu_counter_sum_positive(
                                        &EXT4_SB(sb)->s_freeblocks_counter));
-       es->s_free_inodes_count = cpu_to_le32(percpu_counter_sum_positive(
+       if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeinodes_counter))
+               es->s_free_inodes_count =
+                       cpu_to_le32(percpu_counter_sum_positive(
                                        &EXT4_SB(sb)->s_freeinodes_counter));
        sb->s_dirt = 0;
        BUFFER_TRACE(sbh, "marking dirty");
@@ -3835,6 +4273,19 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
                        enable_quota = 1;
                }
        }
+
+       /*
+        * Reinitialize lazy itable initialization thread based on
+        * current settings
+        */
+       if ((sb->s_flags & MS_RDONLY) || !test_opt(sb, INIT_INODE_TABLE))
+               ext4_unregister_li_request(sb);
+       else {
+               ext4_group_t first_not_zeroed;
+               first_not_zeroed = ext4_has_uninit_itable(sb);
+               ext4_register_li_request(sb, first_not_zeroed);
+       }
+
        ext4_setup_system_zone(sb);
        if (sbi->s_journal == NULL)
                ext4_commit_super(sb, 1);
@@ -4276,23 +4727,53 @@ static struct file_system_type ext4_fs_type = {
        .fs_flags       = FS_REQUIRES_DEV,
 };
 
-static int __init init_ext4_fs(void)
+int __init ext4_init_feat_adverts(void)
+{
+       struct ext4_features *ef;
+       int ret = -ENOMEM;
+
+       ef = kzalloc(sizeof(struct ext4_features), GFP_KERNEL);
+       if (!ef)
+               goto out;
+
+       ef->f_kobj.kset = ext4_kset;
+       init_completion(&ef->f_kobj_unregister);
+       ret = kobject_init_and_add(&ef->f_kobj, &ext4_feat_ktype, NULL,
+                                  "features");
+       if (ret) {
+               kfree(ef);
+               goto out;
+       }
+
+       ext4_feat = ef;
+       ret = 0;
+out:
+       return ret;
+}
+
+static int __init ext4_init_fs(void)
 {
        int err;
 
        ext4_check_flag_values();
-       err = init_ext4_system_zone();
+       err = ext4_init_pageio();
        if (err)
                return err;
+       err = ext4_init_system_zone();
+       if (err)
+               goto out5;
        ext4_kset = kset_create_and_add("ext4", NULL, fs_kobj);
        if (!ext4_kset)
                goto out4;
        ext4_proc_root = proc_mkdir("fs/ext4", NULL);
-       err = init_ext4_mballoc();
+
+       err = ext4_init_feat_adverts();
+       if (err)
+               goto out3;
+
+       err = ext4_init_mballoc();
        if (err)
                goto out3;
 
-       err = init_ext4_xattr();
+       err = ext4_init_xattr();
        if (err)
                goto out2;
        err = init_inodecache();
@@ -4303,38 +4784,46 @@ static int __init init_ext4_fs(void)
        err = register_filesystem(&ext4_fs_type);
        if (err)
                goto out;
+
+       ext4_li_info = NULL;
+       mutex_init(&ext4_li_mtx);
        return 0;
 out:
        unregister_as_ext2();
        unregister_as_ext3();
        destroy_inodecache();
 out1:
-       exit_ext4_xattr();
+       ext4_exit_xattr();
 out2:
-       exit_ext4_mballoc();
+       ext4_exit_mballoc();
 out3:
+       kfree(ext4_feat);
        remove_proc_entry("fs/ext4", NULL);
        kset_unregister(ext4_kset);
 out4:
-       exit_ext4_system_zone();
+       ext4_exit_system_zone();
+out5:
+       ext4_exit_pageio();
        return err;
 }
 
-static void __exit exit_ext4_fs(void)
+static void __exit ext4_exit_fs(void)
 {
+       ext4_destroy_lazyinit_thread();
        unregister_as_ext2();
        unregister_as_ext3();
        unregister_filesystem(&ext4_fs_type);
        destroy_inodecache();
-       exit_ext4_xattr();
-       exit_ext4_mballoc();
+       ext4_exit_xattr();
+       ext4_exit_mballoc();
        remove_proc_entry("fs/ext4", NULL);
        kset_unregister(ext4_kset);
-       exit_ext4_system_zone();
+       ext4_exit_system_zone();
+       ext4_exit_pageio();
 }
 
 MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
 MODULE_DESCRIPTION("Fourth Extended Filesystem");
 MODULE_LICENSE("GPL");
-module_init(init_ext4_fs)
-module_exit(exit_ext4_fs)
+module_init(ext4_init_fs)
+module_exit(ext4_exit_fs)
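The lazyinit machinery above paces itself by the measured cost of each pass: ext4_run_li_request() times one ext4_init_inode_table() call and schedules the next group s_li_wait_mult times that long in the future (set by init_itable=n, defaulting to EXT4_DEF_LI_WAIT_MULT, with 20 as the in-function fallback multiplier). As an illustrative worked example with assumed numbers: if zeroing one group's inode table takes 50 ms and the multiplier is 10, the thread sleeps 500 ms between groups, so background initialization consumes roughly 50/550 ≈ 9% of the disk's time, and a 1000-group filesystem finishes in about 1000 × 0.55 s ≈ 9 minutes.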
index 3a8cd8dff1ad7ab6aa2cff90fa9916f5141cf4f5..fa4b899da4b37243cb298ecff6b822a888262899 100644 (file)
@@ -1588,7 +1588,7 @@ static void ext4_xattr_rehash(struct ext4_xattr_header *header,
 #undef BLOCK_HASH_SHIFT
 
 int __init
-init_ext4_xattr(void)
+ext4_init_xattr(void)
 {
        ext4_xattr_cache = mb_cache_create("ext4_xattr", 6);
        if (!ext4_xattr_cache)
@@ -1597,7 +1597,7 @@ init_ext4_xattr(void)
 }
 
 void
-exit_ext4_xattr(void)
+ext4_exit_xattr(void)
 {
        if (ext4_xattr_cache)
                mb_cache_destroy(ext4_xattr_cache);
index 518e96e439052fc19681b18da745aa5063209dcd..281dd8353652454145754eec177f133f036c28f1 100644 (file)
@@ -83,8 +83,8 @@ extern void ext4_xattr_put_super(struct super_block *);
 extern int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
                            struct ext4_inode *raw_inode, handle_t *handle);
 
-extern int init_ext4_xattr(void);
-extern void exit_ext4_xattr(void);
+extern int __init ext4_init_xattr(void);
+extern void ext4_exit_xattr(void);
 
 extern const struct xattr_handler *ext4_xattr_handlers[];
 
@@ -121,14 +121,14 @@ ext4_xattr_put_super(struct super_block *sb)
 {
 }
 
-static inline int
-init_ext4_xattr(void)
+static __init inline int
+ext4_init_xattr(void)
 {
        return 0;
 }
 
 static inline void
-exit_ext4_xattr(void)
+ext4_exit_xattr(void)
 {
 }
 
index f8cc34f542c3a1cc8b4f53309ffae317ecdddf15..ecc8b3954ed6ca2558cf9a34e1b60fa3b9513290 100644 (file)
@@ -640,7 +640,7 @@ static void fasync_free_rcu(struct rcu_head *head)
  * match the state "is the filp on a fasync list".
  *
  */
-static int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
+int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
 {
        struct fasync_struct *fa, **fp;
        int result = 0;
@@ -666,21 +666,31 @@ static int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
        return result;
 }
 
+struct fasync_struct *fasync_alloc(void)
+{
+       return kmem_cache_alloc(fasync_cache, GFP_KERNEL);
+}
+
 /*
- * Add a fasync entry. Return negative on error, positive if
- * added, and zero if did nothing but change an existing one.
+ * NOTE! This can be used only for unused fasync entries:
+ * entries that actually got inserted on the fasync list
+ * need to be released by rcu - see fasync_remove_entry.
+ */
+void fasync_free(struct fasync_struct *new)
+{
+       kmem_cache_free(fasync_cache, new);
+}
+
+/*
+ * Insert a new entry into the fasync list.  Return the pointer to the
+ * old one if we didn't use the new one.
  *
  * NOTE! It is very important that the FASYNC flag always
  * match the state "is the filp on a fasync list".
  */
-static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
+struct fasync_struct *fasync_insert_entry(int fd, struct file *filp, struct fasync_struct **fapp, struct fasync_struct *new)
 {
-       struct fasync_struct *new, *fa, **fp;
-       int result = 0;
-
-       new = kmem_cache_alloc(fasync_cache, GFP_KERNEL);
-       if (!new)
-               return -ENOMEM;
+       struct fasync_struct *fa, **fp;
 
        spin_lock(&filp->f_lock);
        spin_lock(&fasync_lock);
@@ -691,8 +701,6 @@ static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fa
                spin_lock_irq(&fa->fa_lock);
                fa->fa_fd = fd;
                spin_unlock_irq(&fa->fa_lock);
-
-               kmem_cache_free(fasync_cache, new);
                goto out;
        }
 
@@ -702,13 +710,39 @@ static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fa
        new->fa_fd = fd;
        new->fa_next = *fapp;
        rcu_assign_pointer(*fapp, new);
-       result = 1;
        filp->f_flags |= FASYNC;
 
 out:
        spin_unlock(&fasync_lock);
        spin_unlock(&filp->f_lock);
-       return result;
+       return fa;
+}
+
+/*
+ * Add a fasync entry. Return negative on error, positive if
+ * added, and zero if did nothing but change an existing one.
+ */
+static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
+{
+       struct fasync_struct *new;
+
+       new = fasync_alloc();
+       if (!new)
+               return -ENOMEM;
+
+       /*
+        * fasync_insert_entry() returns the old (updated) entry if
+        * it existed.
+        *
+        * So free the (unused) new entry and return 0 to let the
+        * caller know that we didn't add any new fasync entries.
+        */
+       if (fasync_insert_entry(fd, filp, fapp, new)) {
+               fasync_free(new);
+               return 0;
+       }
+
+       return 1;
 }
 
 /*
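The fcntl.c change splits the old fasync_add_entry() into fasync_alloc(), fasync_insert_entry(), and fasync_free(), so a caller that must hold a spinlock can pre-allocate with GFP_KERNEL outside the lock and insert atomically inside it. The intended caller pattern, sketched (the surrounding lock and list head are hypothetical stand-ins for the caller's own):

/* Sketch: allocate before taking the caller's lock, insert under it,
 * and free the pre-allocated entry if fasync_insert_entry() returned
 * an existing entry that it updated instead. */
struct fasync_struct *new = fasync_alloc();

if (!new)
	return -ENOMEM;
spin_lock(&caller_lock);
if (fasync_insert_entry(fd, filp, &fa_list, new))
	fasync_free(new);
spin_unlock(&caller_lock);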
index b98664275f02460c01c5fb2b01d1ddf933671b08..6e07696308dc17a2c8ce26f852b04550d070175f 100644 (file)
@@ -1334,12 +1334,7 @@ out_finish:
 
 static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
 {
-       int i;
-
-       for (i = 0; i < req->num_pages; i++) {
-               struct page *page = req->pages[i];
-               page_cache_release(page);
-       }
+       release_pages(req->pages, req->num_pages, 0);
 }
 
 static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
index f855ea4fc88895a13b53b075b9a3fb0dbcb53d8b..e92fdbb3bc3aebd5b52221d2b0e5ec4094a37fe9 100644 (file)
@@ -530,6 +530,41 @@ static int ioctl_fsthaw(struct file *filp)
        return thaw_super(sb);
 }
 
+static int ioctl_fstrim(struct file *filp, void __user *argp)
+{
+       struct super_block *sb = filp->f_path.dentry->d_inode->i_sb;
+       struct fstrim_range range;
+       int ret = 0;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       /* If the filesystem doesn't support the trim feature, return. */
+       if (sb->s_op->trim_fs == NULL)
+               return -EOPNOTSUPP;
+
+       /* If a blockdevice-backed filesystem isn't specified, return EINVAL. */
+       if (sb->s_bdev == NULL)
+               return -EINVAL;
+
+       if (argp == NULL) {
+               range.start = 0;
+               range.len = ULLONG_MAX;
+               range.minlen = 0;
+       } else if (copy_from_user(&range, argp, sizeof(range)))
+               return -EFAULT;
+
+       ret = sb->s_op->trim_fs(sb, &range);
+       if (ret < 0)
+               return ret;
+
+       if ((argp != NULL) &&
+           (copy_to_user(argp, &range, sizeof(range))))
+               return -EFAULT;
+
+       return 0;
+}
+
 /*
  * When you add any new common ioctls to the switches above and below
  * please update compat_sys_ioctl() too.
@@ -580,6 +615,10 @@ int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
                error = ioctl_fsthaw(filp);
                break;
 
+       case FITRIM:
+               error = ioctl_fstrim(filp, argp);
+               break;
+
        case FS_IOC_FIEMAP:
                return ioctl_fiemap(filp, arg);
 
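The new FITRIM ioctl takes a struct fstrim_range -- start, len, and minlen, all in bytes -- and hands it to the filesystem's ->trim_fs hook; a NULL argument means "trim the whole filesystem", and the possibly updated range is copied back on success. A sketch of issuing it from userspace (the mount point is arbitrary; FITRIM and struct fstrim_range come from the uapi side of this series):

#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FITRIM, struct fstrim_range */

int main(void)
{
	/* Trim everything the filesystem considers trimmable. */
	struct fstrim_range range = { .start = 0, .len = ULLONG_MAX,
				      .minlen = 0 };
	int fd = open("/mnt", O_RDONLY);

	if (fd < 0 || ioctl(fd, FITRIM, &range) < 0)
		perror("FITRIM");
	return 0;
}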
index 60c2b944d76267bbd40358f7384dfe3fc9240bd3..79cf7f616bbe8fdb9f84a90e92bf4f6717ae22cb 100644 (file)
@@ -543,6 +543,34 @@ static unsigned int isofs_get_last_session(struct super_block *sb, s32 session)
        return vol_desc_start;
 }
 
+/*
+ * Check if the root directory is empty (has fewer than 3 directory records).
+ *
+ * Used to detect broken CDs where the ISO root directory is empty but the
+ * Joliet root directory is OK. If such a CD has Rock Ridge extensions, they
+ * will be disabled (and Joliet used instead) or else no files would be visible.
+ */
+static bool rootdir_empty(struct super_block *sb, unsigned long block)
+{
+       int offset = 0, files = 0, de_len;
+       struct iso_directory_record *de;
+       struct buffer_head *bh;
+
+       bh = sb_bread(sb, block);
+       if (!bh)
+               return true;
+       while (files < 3) {
+               de = (struct iso_directory_record *) (bh->b_data + offset);
+               de_len = *(unsigned char *) de;
+               if (de_len == 0)
+                       break;
+               files++;
+               offset += de_len;
+       }
+       brelse(bh);
+       return files < 3;
+}
+
 /*
  * Initialize the superblock and read the root inode.
  *
@@ -842,6 +870,18 @@ root_found:
        if (IS_ERR(inode))
                goto out_no_root;
 
+       /*
+        * Fix for broken CDs with Rock Ridge and empty ISO root directory but
+        * correct Joliet root directory.
+        */
+       if (sbi->s_rock == 1 && joliet_level &&
+                               rootdir_empty(s, sbi->s_firstdatazone)) {
+               printk(KERN_NOTICE
+                       "ISOFS: primary root directory is empty. "
+                       "Disabling Rock Ridge and switching to Joliet.\n");
+               sbi->s_rock = 0;
+       }
+
        /*
         * If this disk has both Rock Ridge and Joliet on it, then we
         * want to use Rock Ridge by default.  This can be overridden
index 05a38b9c4c0ecbe749ef73931933c0e089fe15ba..e4b87bc1fa56e0dd2ff8c77ae853367f3a529a81 100644 (file)
@@ -221,7 +221,7 @@ restart:
                        goto restart;
                }
                if (buffer_locked(bh)) {
-                       atomic_inc(&bh->b_count);
+                       get_bh(bh);
                        spin_unlock(&journal->j_list_lock);
                        jbd_unlock_bh_state(bh);
                        wait_on_buffer(bh);
@@ -283,7 +283,7 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh,
        int ret = 0;
 
        if (buffer_locked(bh)) {
-               atomic_inc(&bh->b_count);
+               get_bh(bh);
                spin_unlock(&journal->j_list_lock);
                jbd_unlock_bh_state(bh);
                wait_on_buffer(bh);
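
Both hunks swap an open-coded atomic_inc() of b_count for get_bh(). For reference, the helper in <linux/buffer_head.h> is (in kernels of this vintage) just a named wrapper around the same increment, paired with put_bh() on the release side:

static inline void get_bh(struct buffer_head *bh)
{
	atomic_inc(&bh->b_count);
}

so the change is an API/readability cleanup with no behavioural effect.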
index 85a6883c0aca265b898431eb2a2254632a8a8ef7..34a4861c14b85d493a8b653c4ce700a4c580842e 100644 (file)
@@ -587,13 +587,13 @@ void journal_commit_transaction(journal_t *journal)
                /* Bump b_count to prevent truncate from stumbling over
                    the shadowed buffer!  @@@ This can go if we ever get
                    rid of the BJ_IO/BJ_Shadow pairing of buffers. */
-               atomic_inc(&jh2bh(jh)->b_count);
+               get_bh(jh2bh(jh));
 
                /* Make a temporary IO buffer with which to write it out
                    (this will requeue both the metadata buffer and the
                    temporary IO buffer). new_bh goes on BJ_IO*/
 
-               set_bit(BH_JWrite, &jh2bh(jh)->b_state);
+               set_buffer_jwrite(jh2bh(jh));
                /*
                 * akpm: journal_write_metadata_buffer() sets
                 * new_bh->b_transaction to commit_transaction.
@@ -603,7 +603,7 @@ void journal_commit_transaction(journal_t *journal)
                JBUFFER_TRACE(jh, "ph3: write metadata");
                flags = journal_write_metadata_buffer(commit_transaction,
                                                      jh, &new_jh, blocknr);
-               set_bit(BH_JWrite, &jh2bh(new_jh)->b_state);
+               set_buffer_jwrite(jh2bh(new_jh));
                wbuf[bufs++] = jh2bh(new_jh);
 
                /* Record the new block's tag in the current descriptor
@@ -713,7 +713,7 @@ wait_for_iobuf:
                    shadowed buffer */
                jh = commit_transaction->t_shadow_list->b_tprev;
                bh = jh2bh(jh);
-               clear_bit(BH_JWrite, &bh->b_state);
+               clear_buffer_jwrite(bh);
                J_ASSERT_BH(bh, buffer_jbddirty(bh));
 
                /* The metadata is now released for reuse, but we need
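
The set_buffer_jwrite()/clear_buffer_jwrite() helpers used above are not hand-written: they come from the BUFFER_FNS() generator in <linux/buffer_head.h>, instantiated in <linux/jbd.h> as BUFFER_FNS(JWrite, jwrite). Sketched from that header (the same family also provides the buffer_write_io_error() helpers used in the next file):

#define BUFFER_FNS(bit, name)						\
static inline void set_buffer_##name(struct buffer_head *bh)		\
{									\
	set_bit(BH_##bit, &(bh)->b_state);				\
}									\
static inline void clear_buffer_##name(struct buffer_head *bh)		\
{									\
	clear_bit(BH_##bit, &(bh)->b_state);				\
}									\
static inline int buffer_##name(const struct buffer_head *bh)		\
{									\
	return test_bit(BH_##bit, &(bh)->b_state);			\
}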
index 2c4b1f109da9e6bc3bcedddd423f02cb84c6bc2f..da1b5e4ffce1200675e5cf7690e08d0545b304d8 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/poison.h>
 #include <linux/proc_fs.h>
 #include <linux/debugfs.h>
+#include <linux/ratelimit.h>
 
 #include <asm/uaccess.h>
 #include <asm/page.h>
@@ -84,6 +85,7 @@ EXPORT_SYMBOL(journal_force_commit);
 
 static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
 static void __journal_abort_soft (journal_t *journal, int errno);
+static const char *journal_dev_name(journal_t *journal, char *buffer);
 
 /*
  * Helper function used to manage commit timeouts
@@ -439,7 +441,7 @@ int __log_start_commit(journal_t *journal, tid_t target)
         */
        if (!tid_geq(journal->j_commit_request, target)) {
                /*
-                * We want a new commit: OK, mark the request and wakup the
+                * We want a new commit: OK, mark the request and wake up the
                 * commit thread.  We do _not_ do the commit ourselves.
                 */
 
@@ -950,6 +952,8 @@ int journal_create(journal_t *journal)
                if (err)
                        return err;
                bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
+               if (unlikely(!bh))
+                       return -ENOMEM;
                lock_buffer(bh);
                memset (bh->b_data, 0, journal->j_blocksize);
                BUFFER_TRACE(bh, "marking dirty");
@@ -1010,6 +1014,23 @@ void journal_update_superblock(journal_t *journal, int wait)
                goto out;
        }
 
+       if (buffer_write_io_error(bh)) {
+               char b[BDEVNAME_SIZE];
+               /*
+                * Oh, dear.  A previous attempt to write the journal
+                * superblock failed.  This could happen because the
+                * USB device was yanked out.  Or it could happen to
+                * be a transient write error and maybe the block will
+                * be remapped.  Nothing we can do but to retry the
+                * write and hope for the best.
+                */
+               printk(KERN_ERR "JBD: previous I/O error detected "
+                      "for journal superblock update for %s.\n",
+                      journal_dev_name(journal, b));
+               clear_buffer_write_io_error(bh);
+               set_buffer_uptodate(bh);
+       }
+
        spin_lock(&journal->j_state_lock);
        jbd_debug(1,"JBD: updating superblock (start %u, seq %d, errno %d)\n",
                  journal->j_tail, journal->j_tail_sequence, journal->j_errno);
@@ -1021,9 +1042,17 @@ void journal_update_superblock(journal_t *journal, int wait)
 
        BUFFER_TRACE(bh, "marking dirty");
        mark_buffer_dirty(bh);
-       if (wait)
+       if (wait) {
                sync_dirty_buffer(bh);
-       else
+               if (buffer_write_io_error(bh)) {
+                       char b[BDEVNAME_SIZE];
+                       printk(KERN_ERR "JBD: I/O error detected "
+                              "when updating journal superblock for %s.\n",
+                              journal_dev_name(journal, b));
+                       clear_buffer_write_io_error(bh);
+                       set_buffer_uptodate(bh);
+               }
+       } else
                write_dirty_buffer(bh, WRITE);
 
 out:
@@ -1719,7 +1748,6 @@ static void journal_destroy_journal_head_cache(void)
 static struct journal_head *journal_alloc_journal_head(void)
 {
        struct journal_head *ret;
-       static unsigned long last_warning;
 
 #ifdef CONFIG_JBD_DEBUG
        atomic_inc(&nr_journal_heads);
@@ -1727,11 +1755,9 @@ static struct journal_head *journal_alloc_journal_head(void)
        ret = kmem_cache_alloc(journal_head_cache, GFP_NOFS);
        if (ret == NULL) {
                jbd_debug(1, "out of memory for journal_head\n");
-               if (time_after(jiffies, last_warning + 5*HZ)) {
-                       printk(KERN_NOTICE "ENOMEM in %s, retrying.\n",
-                              __func__);
-                       last_warning = jiffies;
-               }
+               printk_ratelimited(KERN_NOTICE "ENOMEM in %s, retrying.\n",
+                                  __func__);
+
                while (ret == NULL) {
                        yield();
                        ret = kmem_cache_alloc(journal_head_cache, GFP_NOFS);
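
printk_ratelimited() replaces the hand-rolled five-second throttle deleted above. Roughly, the macro in <linux/ratelimit.h> keeps a static rate-limit state per call site (the defaults are a 5*HZ interval and a burst of 10, so behaviour stays close to the old code):

#define printk_ratelimited(fmt, ...)  ({				\
	static DEFINE_RATELIMIT_STATE(_rs,				\
				      DEFAULT_RATELIMIT_INTERVAL,	\
				      DEFAULT_RATELIMIT_BURST);		\
									\
	if (__ratelimit(&_rs))						\
		printk(fmt, ##__VA_ARGS__);				\
})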
index 81051dafebf5cadea98d14ab9deed5fabe068937..5b43e96788e6553ec0a716baa45e1a1ac509e838 100644 (file)
@@ -296,10 +296,10 @@ int journal_skip_recovery(journal_t *journal)
 #ifdef CONFIG_JBD_DEBUG
                int dropped = info.end_transaction -
                              be32_to_cpu(journal->j_superblock->s_sequence);
-#endif
                jbd_debug(1,
                          "JBD: ignoring %d transaction%s from the journal.\n",
                          dropped, (dropped == 1) ? "" : "s");
+#endif
                journal->j_transaction_sequence = ++info.end_transaction;
        }
 
index 5ae71e75a4910aeae6fe37eb1edefe70a7b210cc..846a3f314111354609b989d702ff71bd09fbbb0b 100644 (file)
@@ -293,9 +293,7 @@ handle_t *journal_start(journal_t *journal, int nblocks)
                jbd_free_handle(handle);
                current->journal_info = NULL;
                handle = ERR_PTR(err);
-               goto out;
        }
-out:
        return handle;
 }
 
@@ -528,7 +526,7 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
        transaction = handle->h_transaction;
        journal = transaction->t_journal;
 
-       jbd_debug(5, "buffer_head %p, force_copy %d\n", jh, force_copy);
+       jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);
 
        JBUFFER_TRACE(jh, "entry");
 repeat:
@@ -713,7 +711,7 @@ done:
                J_EXPECT_JH(jh, buffer_uptodate(jh2bh(jh)),
                            "Possible IO failure.\n");
                page = jh2bh(jh)->b_page;
-               offset = ((unsigned long) jh2bh(jh)->b_data) & ~PAGE_MASK;
+               offset = offset_in_page(jh2bh(jh)->b_data);
                source = kmap_atomic(page, KM_USER0);
                memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size);
                kunmap_atomic(source, KM_USER0);
index 6571a056e55d6df336056925381499ec9b937d68..6a79fd0a1a32cdd018fea9879a6026e4dcf9c8b6 100644 (file)
@@ -299,6 +299,16 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh,
                transaction->t_chp_stats.cs_forced_to_close++;
                spin_unlock(&journal->j_list_lock);
                jbd_unlock_bh_state(bh);
+               if (unlikely(journal->j_flags & JBD2_UNMOUNT))
+                       /*
+                        * The journal thread is dead; so starting and
+                        * waiting for a commit to finish will cause
+                        * us to wait for a _very_ long time.
+                        */
+                       printk(KERN_ERR "JBD2: %s: "
+                              "Waiting for Godot: block %llu\n",
+                              journal->j_devname,
+                              (unsigned long long) bh->b_blocknr);
                jbd2_log_start_commit(journal, tid);
                jbd2_log_wait_commit(journal, tid);
                ret = 1;
index bc6be8bda1cc067d3230acfbe20847b45906ee34..f3ad1598b20128bc3acaaa1bd81e7ece1e27e270 100644 (file)
@@ -26,7 +26,9 @@
 #include <linux/backing-dev.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/bitops.h>
 #include <trace/events/jbd2.h>
+#include <asm/system.h>
 
 /*
  * Default IO end handler for temporary BJ_IO buffer_heads.
@@ -201,7 +203,7 @@ static int journal_submit_data_buffers(journal_t *journal,
        spin_lock(&journal->j_list_lock);
        list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
                mapping = jinode->i_vfs_inode->i_mapping;
-               jinode->i_flags |= JI_COMMIT_RUNNING;
+               set_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
                spin_unlock(&journal->j_list_lock);
                /*
                 * submit the inode data buffers. We use writepage
@@ -216,7 +218,8 @@ static int journal_submit_data_buffers(journal_t *journal,
                spin_lock(&journal->j_list_lock);
                J_ASSERT(jinode->i_transaction == commit_transaction);
                commit_transaction->t_flushed_data_blocks = 1;
-               jinode->i_flags &= ~JI_COMMIT_RUNNING;
+               clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
+               smp_mb__after_clear_bit();
                wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
        }
        spin_unlock(&journal->j_list_lock);
@@ -237,7 +240,7 @@ static int journal_finish_inode_data_buffers(journal_t *journal,
        /* For locking, see the comment in journal_submit_data_buffers() */
        spin_lock(&journal->j_list_lock);
        list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
-               jinode->i_flags |= JI_COMMIT_RUNNING;
+               set_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
                spin_unlock(&journal->j_list_lock);
                err = filemap_fdatawait(jinode->i_vfs_inode->i_mapping);
                if (err) {
@@ -253,7 +256,8 @@ static int journal_finish_inode_data_buffers(journal_t *journal,
                                ret = err;
                }
                spin_lock(&journal->j_list_lock);
-               jinode->i_flags &= ~JI_COMMIT_RUNNING;
+               clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
+               smp_mb__after_clear_bit();
                wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
        }
 
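
Converting the i_flags updates from plain |=/&= to set_bit()/clear_bit() makes them atomic, and the added smp_mb__after_clear_bit() is what pairs the waker with sleepers on the bit waitqueue, since clear_bit() itself implies no memory barrier. The generic shape of the waker side (flag and word names illustrative):

	clear_bit(MY_FLAG, &word);
	smp_mb__after_clear_bit();	/* order the clear before waking */
	wake_up_bit(&word, MY_FLAG);

The matching sleeper, built from DEFINE_WAIT_BIT() and bit_waitqueue(), appears in the jbd2_journal_release_jbd_inode() hunk further down.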
index 262419f83d800bfb6e4bbfb0ca93e3af2c3f64be..538417c1fdbbcbb630cbcdb885c74ebe5f433844 100644 (file)
 #include <linux/log2.h>
 #include <linux/vmalloc.h>
 #include <linux/backing-dev.h>
+#include <linux/bitops.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/jbd2.h>
 
 #include <asm/uaccess.h>
 #include <asm/page.h>
+#include <asm/system.h>
 
 EXPORT_SYMBOL(jbd2_journal_extend);
 EXPORT_SYMBOL(jbd2_journal_stop);
@@ -478,7 +480,7 @@ int __jbd2_log_start_commit(journal_t *journal, tid_t target)
         */
        if (!tid_geq(journal->j_commit_request, target)) {
                /*
-                * We want a new commit: OK, mark the request and wakup the
+                * We want a new commit: OK, mark the request and wake up the
                 * commit thread.  We do _not_ do the commit ourselves.
                 */
 
@@ -2210,7 +2212,7 @@ void jbd2_journal_release_jbd_inode(journal_t *journal,
 restart:
        spin_lock(&journal->j_list_lock);
        /* Is commit writing out inode - we have to wait */
-       if (jinode->i_flags & JI_COMMIT_RUNNING) {
+       if (test_bit(__JI_COMMIT_RUNNING, &jinode->i_flags)) {
                wait_queue_head_t *wq;
                DEFINE_WAIT_BIT(wait, &jinode->i_flags, __JI_COMMIT_RUNNING);
                wq = bit_waitqueue(&jinode->i_flags, __JI_COMMIT_RUNNING);
index f3479d6e0a83a31c616ab19f260a9062a5a5a5db..6bf0a242613ec54fb3186931031262e48329f2c6 100644 (file)
@@ -156,6 +156,7 @@ alloc_transaction:
         */
 repeat:
        read_lock(&journal->j_state_lock);
+       BUG_ON(journal->j_flags & JBD2_UNMOUNT);
        if (is_journal_aborted(journal) ||
            (journal->j_errno != 0 && !(journal->j_flags & JBD2_ACK_ERR))) {
                read_unlock(&journal->j_state_lock);
index b13aabc1229894507ca05ba53159c6f081d77fcf..abfff9d7979dc60b57646b6129cdb7dd279c6041 100644 (file)
@@ -22,7 +22,6 @@
 #include <linux/in.h>
 #include <linux/uio.h>
 #include <linux/smp.h>
-#include <linux/smp_lock.h>
 #include <linux/mutex.h>
 #include <linux/kthread.h>
 #include <linux/freezer.h>
@@ -130,15 +129,6 @@ lockd(void *vrqstp)
 
        dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");
 
-       /*
-        * FIXME: it would be nice if lockd didn't spend its entire life
-        * running under the BKL. At the very least, it would be good to
-        * have someone clarify what it's intended to protect here. I've
-        * seen some handwavy posts about posix locking needing to be
-        * done under the BKL, but it's far from clear.
-        */
-       lock_kernel();
-
        if (!nlm_timeout)
                nlm_timeout = LOCKD_DFLT_TIMEO;
        nlmsvc_timeout = nlm_timeout * HZ;
@@ -195,7 +185,6 @@ lockd(void *vrqstp)
        if (nlmsvc_ops)
                nlmsvc_invalidate_all();
        nlm_shutdown_hosts();
-       unlock_kernel();
        return 0;
 }
 
index 6f1ef000975abde05f2fec833b3dfcb6fab563dc..c462d346acbda920a78ef03c153dc088b70ef7f4 100644 (file)
@@ -700,14 +700,16 @@ nlmsvc_notify_blocked(struct file_lock *fl)
        struct nlm_block        *block;
 
        dprintk("lockd: VFS unblock notification for block %p\n", fl);
+       spin_lock(&nlm_blocked_lock);
        list_for_each_entry(block, &nlm_blocked, b_list) {
                if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
-                       nlmsvc_insert_block(block, 0);
+                       nlmsvc_insert_block_locked(block, 0);
+                       spin_unlock(&nlm_blocked_lock);
                        svc_wake_up(block->b_daemon);
                        return;
                }
        }
-
+       spin_unlock(&nlm_blocked_lock);
        printk(KERN_WARNING "lockd: notification for unknown block!\n");
 }
 
index d0ef94cfb3da4d218b66a0a4046d96ab30506f3b..1ca0679c80bfcca842d277f7597d96dbe8792c35 100644 (file)
@@ -170,6 +170,7 @@ nlm_traverse_locks(struct nlm_host *host, struct nlm_file *file,
 
 again:
        file->f_locks = 0;
+       lock_flocks(); /* protects i_flock list */
        for (fl = inode->i_flock; fl; fl = fl->fl_next) {
                if (fl->fl_lmops != &nlmsvc_lock_operations)
                        continue;
@@ -181,6 +182,7 @@ again:
                if (match(lockhost, host)) {
                        struct file_lock lock = *fl;
 
+                       unlock_flocks();
                        lock.fl_type  = F_UNLCK;
                        lock.fl_start = 0;
                        lock.fl_end   = OFFSET_MAX;
@@ -192,6 +194,7 @@ again:
                        goto again;
                }
        }
+       unlock_flocks();
 
        return 0;
 }
@@ -226,10 +229,14 @@ nlm_file_inuse(struct nlm_file *file)
        if (file->f_count || !list_empty(&file->f_blocks) || file->f_shares)
                return 1;
 
+       lock_flocks();
        for (fl = inode->i_flock; fl; fl = fl->fl_next) {
-               if (fl->fl_lmops == &nlmsvc_lock_operations)
+               if (fl->fl_lmops == &nlmsvc_lock_operations) {
+                       unlock_flocks();
                        return 1;
+               }
        }
+       unlock_flocks();
        file->f_locks = 0;
        return 0;
 }
index 4de3a2666810535d1be9d12662b0544474f619c8..50ec15927aab3164a456875e6c11b1f4fa2f8bef 100644 (file)
@@ -142,6 +142,7 @@ int lease_break_time = 45;
 
 static LIST_HEAD(file_lock_list);
 static LIST_HEAD(blocked_list);
+static DEFINE_SPINLOCK(file_lock_lock);
 
 /*
  * Protects the two list heads above, plus the inode->i_flock list
@@ -149,23 +150,24 @@ static LIST_HEAD(blocked_list);
  */
 void lock_flocks(void)
 {
-       lock_kernel();
+       spin_lock(&file_lock_lock);
 }
 EXPORT_SYMBOL_GPL(lock_flocks);
 
 void unlock_flocks(void)
 {
-       unlock_kernel();
+       spin_unlock(&file_lock_lock);
 }
 EXPORT_SYMBOL_GPL(unlock_flocks);
 
 static struct kmem_cache *filelock_cache __read_mostly;
 
 /* Allocate an empty lock structure. */
-static struct file_lock *locks_alloc_lock(void)
+struct file_lock *locks_alloc_lock(void)
 {
        return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
 }
+EXPORT_SYMBOL_GPL(locks_alloc_lock);
 
 void locks_release_private(struct file_lock *fl)
 {
@@ -1365,7 +1367,6 @@ int fcntl_getlease(struct file *filp)
 int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
 {
        struct file_lock *fl, **before, **my_before = NULL, *lease;
-       struct file_lock *new_fl = NULL;
        struct dentry *dentry = filp->f_path.dentry;
        struct inode *inode = dentry->d_inode;
        int error, rdlease_count = 0, wrlease_count = 0;
@@ -1385,11 +1386,6 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
        lease = *flp;
 
        if (arg != F_UNLCK) {
-               error = -ENOMEM;
-               new_fl = locks_alloc_lock();
-               if (new_fl == NULL)
-                       goto out;
-
                error = -EAGAIN;
                if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
                        goto out;
@@ -1434,7 +1430,6 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
                goto out;
        }
 
-       error = 0;
        if (arg == F_UNLCK)
                goto out;
 
@@ -1442,15 +1437,11 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
        if (!leases_enable)
                goto out;
 
-       locks_copy_lock(new_fl, lease);
-       locks_insert_lock(before, new_fl);
-
-       *flp = new_fl;
+       locks_insert_lock(before, lease);
        return 0;
 
 out:
-       if (new_fl != NULL)
-               locks_free_lock(new_fl);
+       locks_free_lock(lease);
        return error;
 }
 EXPORT_SYMBOL(generic_setlease);
@@ -1514,26 +1505,38 @@ EXPORT_SYMBOL_GPL(vfs_setlease);
  */
 int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
 {
-       struct file_lock fl, *flp = &fl;
+       struct file_lock *fl;
+       struct fasync_struct *new;
        struct inode *inode = filp->f_path.dentry->d_inode;
        int error;
 
-       locks_init_lock(&fl);
-       error = lease_init(filp, arg, &fl);
-       if (error)
-               return error;
+       fl = lease_alloc(filp, arg);
+       if (IS_ERR(fl))
+               return PTR_ERR(fl);
 
+       new = fasync_alloc();
+       if (!new) {
+               locks_free_lock(fl);
+               return -ENOMEM;
+       }
        lock_flocks();
-
-       error = __vfs_setlease(filp, arg, &flp);
+       error = __vfs_setlease(filp, arg, &fl);
        if (error || arg == F_UNLCK)
                goto out_unlock;
 
-       error = fasync_helper(fd, filp, 1, &flp->fl_fasync);
+       /*
+        * fasync_insert_entry() returns the old entry if any.
+        * If there was no old entry, then it used 'new' and
+        * inserted it into the fasync list. Clear new so that
+        * we don't release it here.
+        */
+       if (!fasync_insert_entry(fd, filp, &fl->fl_fasync, new))
+               new = NULL;
+
        if (error < 0) {
                /* remove lease just inserted by setlease */
-               flp->fl_type = F_UNLCK | F_INPROGRESS;
-               flp->fl_break_time = jiffies - 10;
+               fl->fl_type = F_UNLCK | F_INPROGRESS;
+               fl->fl_break_time = jiffies - 10;
                time_out_leases(inode);
                goto out_unlock;
        }
@@ -1541,6 +1544,8 @@ int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
        error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
 out_unlock:
        unlock_flocks();
+       if (new)
+               fasync_free(new);
        return error;
 }
 
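
The fcntl_setlease() rework follows the usual preallocate-outside-the-lock pattern: fasync_alloc() may sleep, so it runs before lock_flocks() (now a spinlock, per the hunks above), and the unused preallocation is freed after the lock drops. A generic sketch of the pattern, with illustrative non-kernel names:

	new = kmalloc(sizeof(*new), GFP_KERNEL);	/* may sleep: do it first */
	if (!new)
		return -ENOMEM;
	spin_lock(&lock);
	if (!lookup(key)) {
		insert(key, new);
		new = NULL;				/* consumed: don't free below */
	}
	spin_unlock(&lock);
	kfree(new);					/* kfree(NULL) is a no-op */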
index fd667652c5026daf78c878941da5f950dfdb4a5b..ba306658a6db16441843d7c4244a8c0158625acd 100644 (file)
@@ -1,7 +1,6 @@
 config NFS_FS
        tristate "NFS client support"
        depends on INET && FILE_LOCKING
-       depends on BKL # fix as soon as lockd is done
        select LOCKD
        select SUNRPC
        select NFS_ACL_SUPPORT if NFS_V3_ACL
index 31a78fce4732a0d473884425a0728073bf96668e..18b3e8975fe05c2364dd507d50b364b4c13b0dfd 100644 (file)
@@ -2,7 +2,6 @@ config NFSD
        tristate "NFS server support"
        depends on INET
        depends on FILE_LOCKING
-       depends on BKL # fix as soon as lockd is done
        select LOCKD
        select SUNRPC
        select EXPORTFS
index 9019e8ec9dc861a9e3b0ff27c53bc16ab2d42cb1..56347e0ac88da33f1fd655809a90c1509f0cb389 100644 (file)
@@ -2614,7 +2614,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta
        struct nfs4_delegation *dp;
        struct nfs4_stateowner *sop = stp->st_stateowner;
        int cb_up = atomic_read(&sop->so_client->cl_cb_set);
-       struct file_lock fl, *flp = &fl;
+       struct file_lock *fl;
        int status, flag = 0;
 
        flag = NFS4_OPEN_DELEGATE_NONE;
@@ -2648,20 +2648,24 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta
                flag = NFS4_OPEN_DELEGATE_NONE;
                goto out;
        }
-       locks_init_lock(&fl);
-       fl.fl_lmops = &nfsd_lease_mng_ops;
-       fl.fl_flags = FL_LEASE;
-       fl.fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
-       fl.fl_end = OFFSET_MAX;
-       fl.fl_owner =  (fl_owner_t)dp;
-       fl.fl_file = find_readable_file(stp->st_file);
-       BUG_ON(!fl.fl_file);
-       fl.fl_pid = current->tgid;
+       status = -ENOMEM;
+       fl = locks_alloc_lock();
+       if (!fl)
+               goto out;
+       locks_init_lock(fl);
+       fl->fl_lmops = &nfsd_lease_mng_ops;
+       fl->fl_flags = FL_LEASE;
+       fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
+       fl->fl_end = OFFSET_MAX;
+       fl->fl_owner =  (fl_owner_t)dp;
+       fl->fl_file = find_readable_file(stp->st_file);
+       BUG_ON(!fl->fl_file);
+       fl->fl_pid = current->tgid;
 
        /* vfs_setlease checks to see if delegation should be handed out.
         * the lock_manager callbacks fl_mylease and fl_change are used
         */
-       if ((status = vfs_setlease(fl.fl_file, fl.fl_type, &flp))) {
+       if ((status = vfs_setlease(fl->fl_file, fl->fl_type, &fl))) {
                dprintk("NFSD: setlease failed [%d], no delegation\n", status);
                unhash_delegation(dp);
                flag = NFS4_OPEN_DELEGATE_NONE;
index 9b094c1c846542160ff6c265171467d5ef0050ff..f3d02ca461ecfcc341eb783fd15da8df486fde11 100644 (file)
@@ -226,7 +226,7 @@ struct mm_struct *mm_for_maps(struct task_struct *task)
 {
        struct mm_struct *mm;
 
-       if (mutex_lock_killable(&task->cred_guard_mutex))
+       if (mutex_lock_killable(&task->signal->cred_guard_mutex))
                return NULL;
 
        mm = get_task_mm(task);
@@ -235,7 +235,7 @@ struct mm_struct *mm_for_maps(struct task_struct *task)
                mmput(mm);
                mm = NULL;
        }
-       mutex_unlock(&task->cred_guard_mutex);
+       mutex_unlock(&task->signal->cred_guard_mutex);
 
        return mm;
 }
@@ -2354,14 +2354,14 @@ static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
                goto out_free;
 
        /* Guard against adverse ptrace interaction */
-       length = mutex_lock_interruptible(&task->cred_guard_mutex);
+       length = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
        if (length < 0)
                goto out_free;
 
        length = security_setprocattr(task,
                                      (char*)file->f_path.dentry->d_name.name,
                                      (void*)page, count);
-       mutex_unlock(&task->cred_guard_mutex);
+       mutex_unlock(&task->signal->cred_guard_mutex);
 out_free:
        free_page((unsigned long) page);
 out:
index 1807c2419f174a0a54da74723384489e08cc6325..37994737c9834fe83d541b68d80b4925010e3bd0 100644 (file)
@@ -10,13 +10,13 @@ static int show_softirqs(struct seq_file *p, void *v)
 {
        int i, j;
 
-       seq_printf(p, "                ");
+       seq_printf(p, "                    ");
        for_each_possible_cpu(i)
                seq_printf(p, "CPU%-8d", i);
        seq_printf(p, "\n");
 
        for (i = 0; i < NR_SOFTIRQS; i++) {
-               seq_printf(p, "%8s:", softirq_to_name[i]);
+               seq_printf(p, "%12s:", softirq_to_name[i]);
                for_each_possible_cpu(j)
                        seq_printf(p, " %10u", kstat_softirqs_cpu(i, j));
                seq_printf(p, "\n");
index bf31b03fc275295e576554abdbea4f5264b99a23..e15a19c93baefa87ed4fd7ebbe57a532a5c80270 100644 (file)
@@ -31,7 +31,6 @@ static int show_stat(struct seq_file *p, void *v)
        u64 sum_softirq = 0;
        unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
        struct timespec boottime;
-       unsigned int per_irq_sum;
 
        user = nice = system = idle = iowait =
                irq = softirq = steal = cputime64_zero;
@@ -52,9 +51,7 @@ static int show_stat(struct seq_file *p, void *v)
                guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
                guest_nice = cputime64_add(guest_nice,
                        kstat_cpu(i).cpustat.guest_nice);
-               for_each_irq_nr(j) {
-                       sum += kstat_irqs_cpu(j, i);
-               }
+               sum += kstat_cpu_irqs_sum(i);
                sum += arch_irq_stat_cpu(i);
 
                for (j = 0; j < NR_SOFTIRQS; j++) {
@@ -110,13 +107,8 @@ static int show_stat(struct seq_file *p, void *v)
        seq_printf(p, "intr %llu", (unsigned long long)sum);
 
        /* sum again? it could have been updated since the first pass */
-       for_each_irq_nr(j) {
-               per_irq_sum = 0;
-               for_each_possible_cpu(i)
-                       per_irq_sum += kstat_irqs_cpu(j, i);
-
-               seq_printf(p, " %u", per_irq_sum);
-       }
+       for_each_irq_nr(j)
+               seq_printf(p, " %u", kstat_irqs(j));
 
        seq_printf(p,
                "\nctxt %llu\n"
index 871e25ed006970667229eea95c56342836c53525..da6b01d70f019506face0dae73f0b78c3200d299 100644 (file)
@@ -327,6 +327,7 @@ struct mem_size_stats {
        unsigned long private_clean;
        unsigned long private_dirty;
        unsigned long referenced;
+       unsigned long anonymous;
        unsigned long swap;
        u64 pss;
 };
@@ -357,6 +358,9 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                if (!page)
                        continue;
 
+               if (PageAnon(page))
+                       mss->anonymous += PAGE_SIZE;
+
                mss->resident += PAGE_SIZE;
                /* Accumulate the size in pages that have been accessed. */
                if (pte_young(ptent) || PageReferenced(page))
@@ -410,6 +414,7 @@ static int show_smap(struct seq_file *m, void *v)
                   "Private_Clean:  %8lu kB\n"
                   "Private_Dirty:  %8lu kB\n"
                   "Referenced:     %8lu kB\n"
+                  "Anonymous:      %8lu kB\n"
                   "Swap:           %8lu kB\n"
                   "KernelPageSize: %8lu kB\n"
                   "MMUPageSize:    %8lu kB\n",
@@ -421,6 +426,7 @@ static int show_smap(struct seq_file *m, void *v)
                   mss.private_clean >> 10,
                   mss.private_dirty >> 10,
                   mss.referenced >> 10,
+                  mss.anonymous >> 10,
                   mss.swap >> 10,
                   vma_kernel_pagesize(vma) >> 10,
                   vma_mmu_pagesize(vma) >> 10);
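
With this change every /proc/<pid>/smaps entry gains an Anonymous: line between Referenced: and Swap:, counting pages for which PageAnon() is true. Representative output for one mapping (values invented for illustration):

Size:               1024 kB
Rss:                 512 kB
Pss:                 512 kB
Shared_Clean:          0 kB
Shared_Dirty:          0 kB
Private_Clean:       256 kB
Private_Dirty:       256 kB
Referenced:          512 kB
Anonymous:           256 kB
Swap:                  0 kB
KernelPageSize:        4 kB
MMUPageSize:           4 kB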
index 3e21b1e2ad3a7c63203c4bf6291ffb6f82907aba..880fd988436695344ff58134ff839382a8003689 100644 (file)
@@ -4,6 +4,7 @@
 
 config QUOTA
        bool "Quota support"
+       select QUOTACTL
        help
          If you say Y here, you will be able to set per user limits for disk
          usage (also called disk quotas). Currently, it works for the
@@ -65,8 +66,7 @@ config QFMT_V2
 
 config QUOTACTL
        bool
-       depends on XFS_QUOTA || QUOTA
-       default y
+       default n
 
 config QUOTACTL_COMPAT
        bool
index aad1316a977f10888a3a1d64e6bb4430bae4a5ef..0fed41e6efcda6e993d83fe9bbb6da970ff4bd4b 100644 (file)
@@ -1386,6 +1386,9 @@ static void __dquot_initialize(struct inode *inode, int type)
                /* Avoid races with quotaoff() */
                if (!sb_has_quota_active(sb, cnt))
                        continue;
+               /* We could race with quotaon or dqget() could have failed */
+               if (!got[cnt])
+                       continue;
                if (!inode->i_dquot[cnt]) {
                        inode->i_dquot[cnt] = got[cnt];
                        got[cnt] = NULL;
@@ -1736,6 +1739,7 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
        qsize_t rsv_space = 0;
        struct dquot *transfer_from[MAXQUOTAS] = {};
        int cnt, ret = 0;
+       char is_valid[MAXQUOTAS] = {};
        char warntype_to[MAXQUOTAS];
        char warntype_from_inodes[MAXQUOTAS], warntype_from_space[MAXQUOTAS];
 
@@ -1757,8 +1761,15 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
        space = cur_space + rsv_space;
        /* Build the transfer_from list and check the limits */
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+               /*
+                * Skip changes for the same uid or gid, or for a turned-off quota type.
+                */
                if (!transfer_to[cnt])
                        continue;
+               /* Avoid races with quotaoff() */
+               if (!sb_has_quota_active(inode->i_sb, cnt))
+                       continue;
+               is_valid[cnt] = 1;
                transfer_from[cnt] = inode->i_dquot[cnt];
                ret = check_idq(transfer_to[cnt], 1, warntype_to + cnt);
                if (ret)
@@ -1772,12 +1783,8 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
         * Finally perform the needed transfer from transfer_from to transfer_to
         */
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-               /*
-                * Skip changes for same uid or gid or for turned off quota-type.
-                */
-               if (!transfer_to[cnt])
+               if (!is_valid[cnt])
                        continue;
-
                /* Due to IO error we might not have transfer_from[] structure */
                if (transfer_from[cnt]) {
                        warntype_from_inodes[cnt] =
@@ -1801,18 +1808,19 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
 
        mark_all_dquot_dirty(transfer_from);
        mark_all_dquot_dirty(transfer_to);
-       /* Pass back references to put */
-       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
-               transfer_to[cnt] = transfer_from[cnt];
-warn:
        flush_warnings(transfer_to, warntype_to);
        flush_warnings(transfer_from, warntype_from_inodes);
        flush_warnings(transfer_from, warntype_from_space);
-       return ret;
+       /* Pass back references to put */
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+               if (is_valid[cnt])
+                       transfer_to[cnt] = transfer_from[cnt];
+       return 0;
 over_quota:
        spin_unlock(&dq_data_lock);
        up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
-       goto warn;
+       flush_warnings(transfer_to, warntype_to);
+       return ret;
 }
 EXPORT_SYMBOL(__dquot_transfer);
 
index 500a669f779016eb33a4e570805285d6d44e7af2..b7b10aa308616dea7deee300902799b12fa6541f 100644 (file)
@@ -67,7 +67,7 @@ static long __estimate_accuracy(struct timespec *tv)
        return slack;
 }
 
-static long estimate_accuracy(struct timespec *tv)
+long select_estimate_accuracy(struct timespec *tv)
 {
        unsigned long ret;
        struct timespec now;
@@ -417,7 +417,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
        }
 
        if (end_time && !timed_out)
-               slack = estimate_accuracy(end_time);
+               slack = select_estimate_accuracy(end_time);
 
        retval = 0;
        for (;;) {
@@ -769,7 +769,7 @@ static int do_poll(unsigned int nfds,  struct poll_list *list,
        }
 
        if (end_time && !timed_out)
-               slack = estimate_accuracy(end_time);
+               slack = select_estimate_accuracy(end_time);
 
        for (;;) {
                struct poll_list *walk;
index 480f28127f09f08f7d26577eec6eea7a09c68d5d..6100ec0fa1d453770f41db47e0402daea8271c64 100644 (file)
@@ -22,6 +22,7 @@ config XFS_FS
 config XFS_QUOTA
        bool "XFS Quota support"
        depends on XFS_FS
+       select QUOTACTL
        help
          If you say Y here, you will be able to set limits for disk usage on
          a per user and/or a per group basis under XFS.  XFS considers quota
index ca0f239f0e13cec272e373dabec7d4c4b6bc941c..2bcc5c7c22a6329752c666f409a1a2abefbbc430 100644 (file)
@@ -33,10 +33,10 @@ typedef u64 cputime64_t;
 
 
 /*
- * Convert cputime to milliseconds and back.
+ * Convert cputime to microseconds and back.
  */
-#define cputime_to_msecs(__ct)         jiffies_to_msecs(__ct)
-#define msecs_to_cputime(__msecs)      msecs_to_jiffies(__msecs)
+#define cputime_to_usecs(__ct)         jiffies_to_usecs(__ct)
+#define usecs_to_cputime(__usecs)      usecs_to_jiffies(__usecs)
 
 /*
  * Convert cputime to seconds and back.
index 8ca18e26d7e39fe429a8179d48f2f9f17f58a589..ff5c66080c8c947e64edd2c8db17436d98921ad7 100644 (file)
@@ -210,7 +210,7 @@ extern void gpio_unexport(unsigned gpio);
 
 #endif /* CONFIG_GPIO_SYSFS */
 
-#else  /* !CONFIG_HAVE_GPIO_LIB */
+#else  /* !CONFIG_GPIOLIB */
 
 static inline int gpio_is_valid(int number)
 {
@@ -239,7 +239,7 @@ static inline void gpio_set_value_cansleep(unsigned gpio, int value)
        gpio_set_value(gpio, value);
 }
 
-#endif /* !CONFIG_HAVE_GPIO_LIB */
+#endif /* !CONFIG_GPIOLIB */
 
 #ifndef CONFIG_GPIO_SYSFS
 
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h
new file mode 100644 (file)
index 0000000..521a0f8
--- /dev/null
@@ -0,0 +1,222 @@
+/*
+ * linux/amba/pl08x.h - ARM PrimeCell DMA Controller driver
+ *
+ * Copyright (C) 2005 ARM Ltd
+ * Copyright (C) 2010 ST-Ericsson SA
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * pl08x information required by platform code
+ *
+ * Please credit ARM.com
+ * Documentation: ARM DDI 0196D
+ *
+ */
+
+#ifndef AMBA_PL08X_H
+#define AMBA_PL08X_H
+
+/* We need sizes of structs from this header */
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+
+/**
+ * struct pl08x_channel_data - data structure to pass info between
+ * platform and PL08x driver regarding channel configuration
+ * @bus_id: name of this device channel, not just a device name since
+ * devices may have more than one channel e.g. "foo_tx"
+ * @min_signal: the minimum DMA signal number to be muxed in for this
+ * channel (for platforms supporting muxed signals). If you have
+ * static assignments, make sure this is set to the assigned signal
+ * number. The PL08x has 16 possible signals, numbered 0 through 15, so
+ * when these are not enough they often get muxed (in hardware),
+ * disabling simultaneous use of the same channel for two devices.
+ * @max_signal: the maximum DMA signal number to be muxed in for
+ * the channel. Set to the same as min_signal for
+ * devices with static assignments
+ * @muxval: a number usually used to poke into some mux register to
+ * mux in the signal to this channel
+ * @cctl_opt: default options for the channel control register
+ * @addr: source/target address in physical memory for this DMA channel,
+ * can be the address of a FIFO register for burst requests for example.
+ * This can be left undefined if the PrimeCell API is used for configuring
+ * this.
+ * @circular_buffer: whether the buffer passed in is circular and
+ * should simply wrap around to its start when the end is
+ * reached
+ * @single: the device connected to this channel will request single
+ * DMA transfers, not bursts. (Bursts are default.)
+ */
+struct pl08x_channel_data {
+       char *bus_id;
+       int min_signal;
+       int max_signal;
+       u32 muxval;
+       u32 cctl;
+       u32 ccfg;
+       dma_addr_t addr;
+       bool circular_buffer;
+       bool single;
+};
+
+/**
+ * struct pl08x_bus_data - information about the source or destination
+ * buses for a transfer
+ * @addr: current address
+ * @maxwidth: the maximum width of a transfer on this bus
+ * @buswidth: the width of this bus in bytes: 1, 2 or 4
+ * @fill_bytes: bytes required to fill to the next bus memory
+ * boundary
+ */
+struct pl08x_bus_data {
+       dma_addr_t addr;
+       u8 maxwidth;
+       u8 buswidth;
+       u32 fill_bytes;
+};
+
+/**
+ * struct pl08x_phy_chan - holder for the physical channels
+ * @id: physical index to this channel
+ * @lock: a lock to use when altering an instance of this struct
+ * @signal: the physical signal (aka channel) serving this
+ * physical channel right now
+ * @serving: the virtual channel currently being served by this
+ * physical channel
+ */
+struct pl08x_phy_chan {
+       unsigned int id;
+       void __iomem *base;
+       spinlock_t lock;
+       int signal;
+       struct pl08x_dma_chan *serving;
+       u32 csrc;
+       u32 cdst;
+       u32 clli;
+       u32 cctl;
+       u32 ccfg;
+};
+
+/**
+ * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
+ * @llis_bus: DMA memory address (physical) start for the LLIs
+ * @llis_va: virtual memory address start for the LLIs
+ */
+struct pl08x_txd {
+       struct dma_async_tx_descriptor tx;
+       struct list_head node;
+       enum dma_data_direction direction;
+       struct pl08x_bus_data srcbus;
+       struct pl08x_bus_data dstbus;
+       int len;
+       dma_addr_t llis_bus;
+       void *llis_va;
+       struct pl08x_channel_data *cd;
+       bool active;
+       /*
+        * Settings to be put into the physical channel when we
+        * trigger this txd
+        */
+       u32 csrc;
+       u32 cdst;
+       u32 clli;
+       u32 cctl;
+};
+
+/**
+ * struct pl08x_dma_chan_state - holds the PL08x specific virtual
+ * channel states
+ * @PL08X_CHAN_IDLE: the channel is idle
+ * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
+ * channel and is running a transfer on it
+ * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
+ * channel, but the transfer is currently paused
+ * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
+ * channel to become available (only pertains to memcpy channels)
+ */
+enum pl08x_dma_chan_state {
+       PL08X_CHAN_IDLE,
+       PL08X_CHAN_RUNNING,
+       PL08X_CHAN_PAUSED,
+       PL08X_CHAN_WAITING,
+};
+
+/**
+ * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
+ * @chan: wrappped abstract channel
+ * @phychan: the physical channel utilized by this channel, if there is one
+ * @tasklet: tasklet scheduled by the IRQ to handle actual work etc
+ * @name: name of channel
+ * @cd: channel platform data
+ * @runtime_addr: address for RX/TX according to the runtime config
+ * @runtime_direction: current direction of this channel according to
+ * runtime config
+ * @lc: last completed transaction on this channel
+ * @desc_list: queued transactions pending on this channel
+ * @at: active transaction on this channel
+ * @lockflags: sometimes we let a lock last between two function calls,
+ * especially prep/submit, and then we need to store the IRQ flags
+ * in the channel state, here
+ * @lock: a lock for this channel data
+ * @host: a pointer to the host (internal use)
+ * @state: whether the channel is idle, paused, running etc
+ * @slave: whether this channel is a device (slave) or for memcpy
+ * @waiting: a TX descriptor on this channel which is waiting for
+ * a physical channel to become available
+ */
+struct pl08x_dma_chan {
+       struct dma_chan chan;
+       struct pl08x_phy_chan *phychan;
+       struct tasklet_struct tasklet;
+       char *name;
+       struct pl08x_channel_data *cd;
+       dma_addr_t runtime_addr;
+       enum dma_data_direction runtime_direction;
+       atomic_t last_issued;
+       dma_cookie_t lc;
+       struct list_head desc_list;
+       struct pl08x_txd *at;
+       unsigned long lockflags;
+       spinlock_t lock;
+       void *host;
+       enum pl08x_dma_chan_state state;
+       bool slave;
+       struct pl08x_txd *waiting;
+};
+
+/**
+ * struct pl08x_platform_data - the platform configuration for the
+ * PL08x PrimeCells.
+ * @slave_channels: the channels defined for the different devices on the
+ * platform, all inclusive, including multiplexed channels. The available
+ * physical channels will be multiplexed around these signals as they
+ * are requested, just enumerate all possible channels.
+ * @get_signal: request a physical signal to be used for a DMA transfer
+ * immediately. If some multiplexing or similar is blocking use of the
+ * channel, the transfer can be denied by returning a value less than
+ * zero; otherwise the allocated signal number is returned
+ * @put_signal: indicate to the platform that this physical signal is not
+ * running any DMA transfer and multiplexing can be recycled
+ * @bus_bit_lli: bit 0 of each LLI address selects which AHB bus master
+ * the LLIs are fetched over: 0 selects master 1, 1 selects master 2
+ */
+struct pl08x_platform_data {
+       struct pl08x_channel_data *slave_channels;
+       unsigned int num_slave_channels;
+       struct pl08x_channel_data memcpy_channel;
+       int (*get_signal)(struct pl08x_dma_chan *);
+       void (*put_signal)(struct pl08x_dma_chan *);
+};
+
+#ifdef CONFIG_AMBA_PL08X
+bool pl08x_filter_id(struct dma_chan *chan, void *chan_id);
+#else
+static inline bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
+{
+       return false;
+}
+#endif
+
+#endif /* AMBA_PL08X_H */
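
A client driver would typically pair pl08x_filter_id() with the dmaengine channel allocator. A minimal sketch, assuming the platform's slave_channels table defines a channel whose bus_id is "uart0_tx":

	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, pl08x_filter_id, "uart0_tx");
	if (!chan)
		return -EBUSY;		/* no matching PL08x channel */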
diff --git a/include/linux/basic_mmio_gpio.h b/include/linux/basic_mmio_gpio.h
new file mode 100644 (file)
index 0000000..198087a
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+ * Basic memory-mapped GPIO controllers.
+ *
+ * Copyright 2008 MontaVista Software, Inc.
+ * Copyright 2008,2010 Anton Vorontsov <cbouatmailru@gmail.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __BASIC_MMIO_GPIO_H
+#define __BASIC_MMIO_GPIO_H
+
+struct bgpio_pdata {
+       int base;
+};
+
+#endif /* __BASIC_MMIO_GPIO_H */
index 646b462d04df6819e542113f94695d0d716cf721..5027a599077d89cd72aa5c09aaaa56254b971886 100644 (file)
@@ -891,6 +891,14 @@ static inline int sb_issue_discard(struct super_block *sb, sector_t block,
                                    nr_blocks << (sb->s_blocksize_bits - 9),
                                    gfp_mask, flags);
 }
+static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
+               sector_t nr_blocks, gfp_t gfp_mask)
+{
+       return blkdev_issue_zeroout(sb->s_bdev,
+                                   block << (sb->s_blocksize_bits - 9),
+                                   nr_blocks << (sb->s_blocksize_bits - 9),
+                                   gfp_mask);
+}
 
 extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
 
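
In both sb_issue_discard() and the new sb_issue_zeroout(), the << (s_blocksize_bits - 9) shifts convert filesystem blocks into the 512-byte sector units the blkdev_issue_* primitives expect. A worked example:

	/* 4 KiB blocks: s_blocksize_bits = 12, shift = 12 - 9 = 3,
	 * so one block spans 1 << 3 = 8 sectors:
	 *	block 100, 10 blocks  ->  sector 800, 80 sectors
	 */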
index 709dfb901d1124c75656fb0f7591826683bd2c5d..ed4ba111bc8d32a34ff7c6558589f1d3dc7d073f 100644 (file)
@@ -154,6 +154,10 @@ enum {
         * A thread in rmdir() is wating for this cgroup.
         */
        CGRP_WAIT_ON_RMDIR,
+       /*
+        * Clone cgroup values when creating a new child cgroup
+        */
+       CGRP_CLONE_CHILDREN,
 };
 
 /* which pidlist file are we talking about? */
index 3a779ffba60bca25ac850637e85cd6fd8ab14642..7e8ca75d2dadbcca7869bee745a6e60e2dd8f851 100644 (file)
@@ -88,12 +88,6 @@ struct cn_queue_dev {
        unsigned char name[CN_CBQ_NAMELEN];
 
        struct workqueue_struct *cn_queue;
-       /* Sent to kevent to create cn_queue only when needed */
-       struct work_struct wq_creation;
-       /* Tell if the wq_creation job is pending/completed */
-       atomic_t wq_requested;
-       /* Wait for cn_queue to be created */
-       wait_queue_head_t wq_created;
 
        struct list_head queue_list;
        spinlock_t queue_lock;
@@ -141,8 +135,6 @@ int cn_netlink_send(struct cn_msg *, u32, gfp_t);
 int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(struct cn_msg *, struct netlink_skb_parms *));
 void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id);
 
-int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work);
-
 struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *);
 void cn_queue_free_dev(struct cn_queue_dev *dev);
 
index e2106495cc11383ad9a14d7ac119400b18a8f83a..9d8688b92d8b02c46980f9a4397e4b2d0c313413 100644 (file)
@@ -64,13 +64,15 @@ enum dma_transaction_type {
        DMA_PQ_VAL,
        DMA_MEMSET,
        DMA_INTERRUPT,
+       DMA_SG,
        DMA_PRIVATE,
        DMA_ASYNC_TX,
        DMA_SLAVE,
+       DMA_CYCLIC,
 };
 
 /* last transaction type for creation of the capabilities mask */
-#define DMA_TX_TYPE_END (DMA_SLAVE + 1)
+#define DMA_TX_TYPE_END (DMA_CYCLIC + 1)
 
 
 /**
@@ -119,12 +121,15 @@ enum dma_ctrl_flags {
  * configuration data in statically from the platform). An additional
  * argument of struct dma_slave_config must be passed in with this
  * command.
+ * @FSLDMA_EXTERNAL_START: this command will put the Freescale DMA controller
+ * into external start mode.
  */
 enum dma_ctrl_cmd {
        DMA_TERMINATE_ALL,
        DMA_PAUSE,
        DMA_RESUME,
        DMA_SLAVE_CONFIG,
+       FSLDMA_EXTERNAL_START,
 };
 
 /**
@@ -316,14 +321,14 @@ struct dma_async_tx_descriptor {
        dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
        dma_async_tx_callback callback;
        void *callback_param;
-#ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
+#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
        struct dma_async_tx_descriptor *next;
        struct dma_async_tx_descriptor *parent;
        spinlock_t lock;
 #endif
 };
 
-#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
+#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
 static inline void txd_lock(struct dma_async_tx_descriptor *txd)
 {
 }
@@ -422,6 +427,9 @@ struct dma_tx_state {
  * @device_prep_dma_memset: prepares a memset operation
  * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
  * @device_prep_slave_sg: prepares a slave dma operation
+ * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
+ *     The function takes a buffer of size buf_len. The callback function will
+ *     be called after period_len bytes have been transferred.
  * @device_control: manipulate all pending operations on a channel, returns
  *     zero or error code
  * @device_tx_status: poll for transaction completion, the optional
@@ -473,11 +481,19 @@ struct dma_device {
                unsigned long flags);
        struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
                struct dma_chan *chan, unsigned long flags);
+       struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
+               struct dma_chan *chan,
+               struct scatterlist *dst_sg, unsigned int dst_nents,
+               struct scatterlist *src_sg, unsigned int src_nents,
+               unsigned long flags);
 
        struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
                struct dma_chan *chan, struct scatterlist *sgl,
                unsigned int sg_len, enum dma_data_direction direction,
                unsigned long flags);
+       struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
+               struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+               size_t period_len, enum dma_data_direction direction);
        int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                unsigned long arg);
 
@@ -487,6 +503,40 @@ struct dma_device {
        void (*device_issue_pending)(struct dma_chan *chan);
 };
 
+static inline int dmaengine_device_control(struct dma_chan *chan,
+                                          enum dma_ctrl_cmd cmd,
+                                          unsigned long arg)
+{
+       return chan->device->device_control(chan, cmd, arg);
+}
+
+static inline int dmaengine_slave_config(struct dma_chan *chan,
+                                         struct dma_slave_config *config)
+{
+       return dmaengine_device_control(chan, DMA_SLAVE_CONFIG,
+                       (unsigned long)config);
+}
+
+static inline int dmaengine_terminate_all(struct dma_chan *chan)
+{
+       return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
+}
+
+static inline int dmaengine_pause(struct dma_chan *chan)
+{
+       return dmaengine_device_control(chan, DMA_PAUSE, 0);
+}
+
+static inline int dmaengine_resume(struct dma_chan *chan)
+{
+       return dmaengine_device_control(chan, DMA_RESUME, 0);
+}
+
+static inline int dmaengine_submit(struct dma_async_tx_descriptor *desc)
+{
+       return desc->tx_submit(desc);
+}
+
 static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len)
 {
        size_t mask;
@@ -606,11 +656,11 @@ static inline void net_dmaengine_put(void)
 #ifdef CONFIG_ASYNC_TX_DMA
 #define async_dmaengine_get()  dmaengine_get()
 #define async_dmaengine_put()  dmaengine_put()
-#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
+#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
 #define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
 #else
 #define async_dma_find_channel(type) dma_find_channel(type)
-#endif /* CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH */
+#endif /* CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH */
 #else
 static inline void async_dmaengine_get(void)
 {
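
Taken together, the new dmaengine_* wrappers let slave drivers stop calling device_control() by hand. A hedged sketch of a transmit setup using them on a channel obtained earlier (the FIFO address, scatterlist and callback are illustrative):

	struct dma_slave_config cfg = {
		.direction	= DMA_TO_DEVICE,
		.dst_addr	= fifo_phys,	/* assumed device FIFO address */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 4,
	};
	struct dma_async_tx_descriptor *desc;

	dmaengine_slave_config(chan, &cfg);
	desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  DMA_TO_DEVICE,
						  DMA_PREP_INTERRUPT);
	if (desc) {
		desc->callback = tx_done;	/* assumed completion hook */
		dmaengine_submit(desc);
		dma_async_issue_pending(chan);
	}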
index f0268deca658be33a9d06d81469e45e3933ed0c3..7fca3dc4e47581ef77afc0096b36a7613a1acdb9 100644 (file)
@@ -931,6 +931,8 @@ static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
 #define fb_writel sbus_writel
 #define fb_writeq sbus_writeq
 #define fb_memset sbus_memset_io
+#define fb_memcpy_fromfb sbus_memcpy_fromio
+#define fb_memcpy_tofb sbus_memcpy_toio
 
 #elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || defined(__avr32__) || defined(__bfin__)
 
@@ -943,6 +945,8 @@ static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
 #define fb_writel __raw_writel
 #define fb_writeq __raw_writeq
 #define fb_memset memset_io
+#define fb_memcpy_fromfb memcpy_fromio
+#define fb_memcpy_tofb memcpy_toio
 
 #else
 
@@ -955,6 +959,8 @@ static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
 #define fb_writel(b,addr) (*(volatile u32 *) (addr) = (b))
 #define fb_writeq(b,addr) (*(volatile u64 *) (addr) = (b))
 #define fb_memset memset
+#define fb_memcpy_fromfb memcpy
+#define fb_memcpy_tofb memcpy
 
 #endif
 
index 240eb1d4f87645672217429ed0699cf21ce84d72..6ed7ace74b7cedf08f539c6b5c0e4412471dad2e 100644 (file)
 #define SEEK_END       2       /* seek relative to end of file */
 #define SEEK_MAX       SEEK_END
 
+struct fstrim_range {
+       uint64_t start;
+       uint64_t len;
+       uint64_t minlen;
+};
+
 /* And dynamically-tunable limits and defaults: */
 struct files_stat_struct {
        unsigned long nr_files;         /* read only */
@@ -317,6 +323,7 @@ struct inodes_stat_t {
 #define FIGETBSZ   _IO(0x00,2) /* get the block size used for bmap */
 #define FIFREEZE       _IOWR('X', 119, int)    /* Freeze */
 #define FITHAW         _IOWR('X', 120, int)    /* Thaw */
+#define FITRIM         _IOWR('X', 121, struct fstrim_range)    /* Trim */
 
 #define        FS_IOC_GETFLAGS                 _IOR('f', 1, long)
 #define        FS_IOC_SETFLAGS                 _IOW('f', 2, long)
@@ -1122,6 +1129,7 @@ extern int fcntl_getlease(struct file *filp);
 
 /* fs/locks.c */
 extern void locks_init_lock(struct file_lock *);
+extern struct file_lock * locks_alloc_lock(void);
 extern void locks_copy_lock(struct file_lock *, struct file_lock *);
 extern void __locks_copy_lock(struct file_lock *, const struct file_lock *);
 extern void locks_remove_posix(struct file *, fl_owner_t);
@@ -1310,6 +1318,11 @@ struct fasync_struct {
 
 /* SMP safe fasync helpers: */
 extern int fasync_helper(int, struct file *, int, struct fasync_struct **);
+extern struct fasync_struct *fasync_insert_entry(int, struct file *, struct fasync_struct **, struct fasync_struct *);
+extern int fasync_remove_entry(struct file *, struct fasync_struct **);
+extern struct fasync_struct *fasync_alloc(void);
+extern void fasync_free(struct fasync_struct *);
+
 /* can be called from interrupts */
 extern void kill_fasync(struct fasync_struct **, int, int);
 
@@ -1598,6 +1611,7 @@ struct super_operations {
        ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
 #endif
        int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
+       int (*trim_fs) (struct super_block *, struct fstrim_range *);
 };
 
 /*
index 8a85ec109a3a57f1dbc8bd5a01a90fb6ad093bb3..e9138198e8239d878ba04e069112e45eef0d519a 100644 (file)
@@ -37,27 +37,6 @@ extern unsigned long totalhigh_pages;
 
 void kmap_flush_unused(void);
 
-DECLARE_PER_CPU(int, __kmap_atomic_idx);
-
-static inline int kmap_atomic_idx_push(void)
-{
-       int idx = __get_cpu_var(__kmap_atomic_idx)++;
-#ifdef CONFIG_DEBUG_HIGHMEM
-       WARN_ON_ONCE(in_irq() && !irqs_disabled());
-       BUG_ON(idx > KM_TYPE_NR);
-#endif
-       return idx;
-}
-
-static inline int kmap_atomic_idx_pop(void)
-{
-       int idx = --__get_cpu_var(__kmap_atomic_idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
-       BUG_ON(idx < 0);
-#endif
-       return idx;
-}
-
 #else /* CONFIG_HIGHMEM */
 
 static inline unsigned int nr_free_highpages(void) { return 0; }
@@ -95,6 +74,36 @@ static inline void __kunmap_atomic(void *addr)
 
 #endif /* CONFIG_HIGHMEM */
 
+#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
+
+DECLARE_PER_CPU(int, __kmap_atomic_idx);
+
+static inline int kmap_atomic_idx_push(void)
+{
+       int idx = __get_cpu_var(__kmap_atomic_idx)++;
+#ifdef CONFIG_DEBUG_HIGHMEM
+       WARN_ON_ONCE(in_irq() && !irqs_disabled());
+       BUG_ON(idx > KM_TYPE_NR);
+#endif
+       return idx;
+}
+
+static inline int kmap_atomic_idx(void)
+{
+       return __get_cpu_var(__kmap_atomic_idx) - 1;
+}
+
+static inline int kmap_atomic_idx_pop(void)
+{
+       int idx = --__get_cpu_var(__kmap_atomic_idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+       BUG_ON(idx < 0);
+#endif
+       return idx;
+}
+
+#endif
+
 /*
  * Make both: kmap_atomic(page, idx) and kmap_atomic(page) work.
  */
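
Relocating the index stack below both branches makes it available to 32-bit x86 even without CONFIG_HIGHMEM. An illustrative, x86-flavoured sketch of how arch code consumes it (not the literal implementation):

    static void *kmap_atomic_sketch(struct page *page)
    {
            int type, idx;

            pagefault_disable();
            type = kmap_atomic_idx_push();
            idx = type + KM_TYPE_NR * smp_processor_id();
            /* ... install a temporary PTE at fixmap slot idx ... */
            return (void *)__fix_to_virt(FIX_KMAP_BEGIN + idx);
    }

The matching kmap_atomic_idx_pop() runs in the arch's __kunmap_atomic(), and the new kmap_atomic_idx() lets it peek at the top of the stack first.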
index 269181b8f623d50a689f8091557187245e8783e8..3c5d6b6e765c22b06f6125a606eef9b5107958aa 100644 (file)
 
 #define ADP5588_DEVICE_ID_MASK 0xF
 
+/* Configuration Register 1 */
+#define ADP5588_AUTO_INC       (1 << 7)
+#define ADP5588_GPIEM_CFG      (1 << 6)
+#define ADP5588_INT_CFG                (1 << 4)
+#define ADP5588_GPI_IEN                (1 << 1)
+
+/* Interrupt Status Register */
+#define ADP5588_GPI_INT                (1 << 1)
+#define ADP5588_KE_INT         (1 << 0)
+
+#define ADP5588_MAXGPIO                18
+#define ADP5588_BANK(offs)     ((offs) >> 3)
+#define ADP5588_BIT(offs)      (1u << ((offs) & 0x7))
+
 /* Put one of these structures in i2c_board_info platform_data */
 
 #define ADP5588_KEYMAPSIZE     80
@@ -126,9 +140,12 @@ struct adp5588_kpad_platform_data {
        const struct adp5588_gpio_platform_data *gpio_data;
 };
 
+struct i2c_client; /* forward declaration */
+
 struct adp5588_gpio_platform_data {
-       unsigned gpio_start;            /* GPIO Chip base # */
-       unsigned pullup_dis_mask;       /* Pull-Up Disable Mask */
+       int gpio_start;         /* GPIO Chip base # */
+       unsigned irq_base;      /* interrupt base # */
+       unsigned pullup_dis_mask; /* Pull-Up Disable Mask */
        int     (*setup)(struct i2c_client *client,
                                int gpio, unsigned ngpio,
                                void *context);
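
The new bank/bit helpers encode how the chip spreads its 18 GPIOs across 8-bit register banks. A small illustrative sketch of the arithmetic:

    /* Map a GPIO offset (0..ADP5588_MAXGPIO-1) to its register
     * bank and the bit mask within that bank. */
    static void adp5588_locate(unsigned offs,
                               unsigned *bank, unsigned *bit)
    {
            *bank = ADP5588_BANK(offs);     /* offs >> 3 */
            *bit  = ADP5588_BIT(offs);      /* 1u << (offs & 0x7) */
    }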
index 2fea6c8ef6babea0564ccf3b061698cacba1d14e..1f8c06ce0fa66b83760863735eaf1209908205d7 100644 (file)
@@ -29,6 +29,8 @@ extern struct fs_struct init_fs;
                .running = 0,                                           \
                .lock = __SPIN_LOCK_UNLOCKED(sig.cputimer.lock),        \
        },                                                              \
+       .cred_guard_mutex =                                             \
+                __MUTEX_INITIALIZER(sig.cred_guard_mutex),             \
 }
 
 extern struct nsproxy init_nsproxy;
@@ -145,8 +147,6 @@ extern struct cred init_cred;
        .group_leader   = &tsk,                                         \
        RCU_INIT_POINTER(.real_cred, &init_cred),                       \
        RCU_INIT_POINTER(.cred, &init_cred),                            \
-       .cred_guard_mutex =                                             \
-                __MUTEX_INITIALIZER(tsk.cred_guard_mutex),             \
        .comm           = "swapper",                                    \
        .thread         = INIT_THREAD,                                  \
        .fs             = &init_fs,                                     \
index d9d08b6269b61f53f0b45b4329b9089d037c2c38..10496bd24c5c1af69d680e76226913b389199d53 100644 (file)
 
 #include <linux/dmaengine.h>
 
-/*DMA transaction width, src and dstn width would be same
-The DMA length must be width aligned,
-for 32 bit width the length must be 32 bit (4bytes) aligned only*/
-enum intel_mid_dma_width {
-       LNW_DMA_WIDTH_8BIT = 0x0,
-       LNW_DMA_WIDTH_16BIT = 0x1,
-       LNW_DMA_WIDTH_32BIT = 0x2,
-};
+#define DMA_PREP_CIRCULAR_LIST         (1 << 10)
 
 /*DMA mode configurations*/
 enum intel_mid_dma_mode {
@@ -69,18 +62,15 @@ enum intel_mid_dma_msize {
  * @cfg_mode: DMA data transfer mode (per-per/mem-per/mem-mem)
  * @src_msize: Source DMA burst size
  * @dst_msize: Dst DMA burst size
+ * @per_addr: Peripheral address
  * @device_instance: DMA peripheral device instance, we can have multiple
  *             peripheral device connected to single DMAC
  */
 struct intel_mid_dma_slave {
-       enum dma_data_direction         dirn;
-       enum intel_mid_dma_width        src_width; /*width of DMA src txn*/
-       enum intel_mid_dma_width        dst_width; /*width of DMA dst txn*/
        enum intel_mid_dma_hs_mode      hs_mode;  /*handshaking*/
        enum intel_mid_dma_mode         cfg_mode; /*mode configuration*/
-       enum intel_mid_dma_msize        src_msize; /*size if src burst*/
-       enum intel_mid_dma_msize        dst_msize; /*size of dst burst*/
 	unsigned int		device_instance; /*0, 1 for peripheral instance*/
+       struct dma_slave_config         dma_slave;
 };
 
 #endif /*__INTEL_MID_DMA_H__*/
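
Client drivers now describe widths and burst sizes through the embedded struct dma_slave_config instead of the removed driver-private enums. A hedged sketch of the new slave setup (all values illustrative):

    #include <linux/intel_mid_dma.h>

    static void mid_dma_slave_sketch(struct intel_mid_dma_slave *mid)
    {
            mid->hs_mode  = LNW_DMA_HW_HS;
            mid->cfg_mode = LNW_DMA_PER_TO_MEM;
            mid->dma_slave.direction      = DMA_FROM_DEVICE;
            mid->dma_slave.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
            mid->dma_slave.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
            mid->dma_slave.src_maxburst   = 8;
            mid->dma_slave.dst_maxburst   = 8;
    }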
index 01b2816462517509f41a3b732ac32fef3b93da7e..79d0c4f6d0719452c20494b1439d0d695e212e90 100644 (file)
@@ -410,7 +410,7 @@ extern void open_softirq(int nr, void (*action)(struct softirq_action *));
 extern void softirq_init(void);
 static inline void __raise_softirq_irqoff(unsigned int nr)
 {
-       trace_softirq_raise((struct softirq_action *)(unsigned long)nr, NULL);
+       trace_softirq_raise(nr);
        or_softirq_pending(1UL << nr);
 }
 
index 0b52924a0cb6ac5acda48540cb93d6b45a43beaf..2ae86aa21fcee12e169489a0e1dfdb5821ba48f2 100644 (file)
@@ -395,7 +395,7 @@ struct jbd2_inode {
        struct inode *i_vfs_inode;
 
        /* Flags of inode [j_list_lock] */
-       unsigned int i_flags;
+       unsigned long i_flags;
 };
 
 struct jbd2_revoke_table_s;
index c059044bc6dc11f7944208d6877b5438aed2e1e4..ad54c846911b91a169b903f7b1f6fee24d4320a2 100644 (file)
@@ -33,6 +33,7 @@ struct kernel_stat {
 #ifndef CONFIG_GENERIC_HARDIRQS
        unsigned int irqs[NR_IRQS];
 #endif
+       unsigned long irqs_sum;
        unsigned int softirqs[NR_SOFTIRQS];
 };
 
@@ -54,6 +55,7 @@ static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
                                            struct irq_desc *desc)
 {
        kstat_this_cpu.irqs[irq]++;
+       kstat_this_cpu.irqs_sum++;
 }
 
 static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
@@ -65,8 +67,9 @@ static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
 extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
 #define kstat_irqs_this_cpu(DESC) \
        ((DESC)->kstat_irqs[smp_processor_id()])
-#define kstat_incr_irqs_this_cpu(irqno, DESC) \
-       ((DESC)->kstat_irqs[smp_processor_id()]++)
+#define kstat_incr_irqs_this_cpu(irqno, DESC) do {\
+       ((DESC)->kstat_irqs[smp_processor_id()]++);\
+       kstat_this_cpu.irqs_sum++; } while (0)
 
 #endif
 
@@ -83,6 +86,7 @@ static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
 /*
  * Number of interrupts per specific IRQ source, since bootup
  */
+#ifndef CONFIG_GENERIC_HARDIRQS
 static inline unsigned int kstat_irqs(unsigned int irq)
 {
        unsigned int sum = 0;
@@ -93,7 +97,17 @@ static inline unsigned int kstat_irqs(unsigned int irq)
 
        return sum;
 }
+#else
+extern unsigned int kstat_irqs(unsigned int irq);
+#endif
 
+/*
+ * Number of interrupts per cpu, since bootup
+ */
+static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
+{
+       return kstat_cpu(cpu).irqs_sum;
+}
 
 /*
  * Lock/unlock the current runqueue - to extract task statistics:
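
With a running irqs_sum per CPU, a total interrupt count no longer requires walking every IRQ descriptor on every CPU. A sketch of the accessor in use (function name illustrative):

    static u64 total_irqs_sketch(void)
    {
            u64 sum = 0;
            int cpu;

            for_each_possible_cpu(cpu)
                    sum += kstat_cpu_irqs_sum(cpu);
            return sum;
    }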
index c238ad2f82eae4f149e70fc7138fdd5beaa40b13..10308c6a3d1c47e67c9b89a4900fff696210b67d 100644 (file)
@@ -171,8 +171,17 @@ struct kfifo_rec_ptr_2 __STRUCT_KFIFO_PTR(unsigned char, 2, void);
        }
 
 
-/* __kfifo_must_check_helper() is temporarily disabled because it was faulty */
-#define __kfifo_must_check_helper(x) (x)
+static inline unsigned int __must_check
+__kfifo_uint_must_check_helper(unsigned int val)
+{
+       return val;
+}
+
+static inline int __must_check
+__kfifo_int_must_check_helper(int val)
+{
+       return val;
+}
 
 /**
  * kfifo_initialized - Check if the fifo is initialized
@@ -264,7 +273,7 @@ struct kfifo_rec_ptr_2 __STRUCT_KFIFO_PTR(unsigned char, 2, void);
  * @fifo: address of the fifo to be used
  */
 #define        kfifo_avail(fifo) \
-__kfifo_must_check_helper( \
+__kfifo_uint_must_check_helper( \
 ({ \
        typeof((fifo) + 1) __tmpq = (fifo); \
        const size_t __recsize = sizeof(*__tmpq->rectype); \
@@ -297,7 +306,7 @@ __kfifo_must_check_helper( \
  * This function returns the size of the next fifo record in number of bytes.
  */
 #define kfifo_peek_len(fifo) \
-__kfifo_must_check_helper( \
+__kfifo_uint_must_check_helper( \
 ({ \
        typeof((fifo) + 1) __tmp = (fifo); \
        const size_t __recsize = sizeof(*__tmp->rectype); \
@@ -320,7 +329,7 @@ __kfifo_must_check_helper( \
  * Return 0 if no error, otherwise an error code.
  */
 #define kfifo_alloc(fifo, size, gfp_mask) \
-__kfifo_must_check_helper( \
+__kfifo_int_must_check_helper( \
 ({ \
        typeof((fifo) + 1) __tmp = (fifo); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -416,7 +425,7 @@ __kfifo_must_check_helper( \
  * writer, you don't need extra locking to use these macros.
  */
 #define        kfifo_get(fifo, val) \
-__kfifo_must_check_helper( \
+__kfifo_uint_must_check_helper( \
 ({ \
        typeof((fifo) + 1) __tmp = (fifo); \
        typeof((val) + 1) __val = (val); \
@@ -457,7 +466,7 @@ __kfifo_must_check_helper( \
  * writer, you don't need extra locking to use these macros.
  */
 #define        kfifo_peek(fifo, val) \
-__kfifo_must_check_helper( \
+__kfifo_uint_must_check_helper( \
 ({ \
        typeof((fifo) + 1) __tmp = (fifo); \
        typeof((val) + 1) __val = (val); \
@@ -549,7 +558,7 @@ __kfifo_must_check_helper( \
  * writer, you don't need extra locking to use these macros.
  */
 #define        kfifo_out(fifo, buf, n) \
-__kfifo_must_check_helper( \
+__kfifo_uint_must_check_helper( \
 ({ \
        typeof((fifo) + 1) __tmp = (fifo); \
        typeof((buf) + 1) __buf = (buf); \
@@ -577,7 +586,7 @@ __kfifo_must_check_helper( \
  * copied.
  */
 #define        kfifo_out_spinlocked(fifo, buf, n, lock) \
-__kfifo_must_check_helper( \
+__kfifo_uint_must_check_helper( \
 ({ \
        unsigned long __flags; \
        unsigned int __ret; \
@@ -606,7 +615,7 @@ __kfifo_must_check_helper( \
  * writer, you don't need extra locking to use these macros.
  */
 #define        kfifo_from_user(fifo, from, len, copied) \
-__kfifo_must_check_helper( \
+__kfifo_uint_must_check_helper( \
 ({ \
        typeof((fifo) + 1) __tmp = (fifo); \
        const void __user *__from = (from); \
@@ -634,7 +643,7 @@ __kfifo_must_check_helper( \
  * writer, you don't need extra locking to use these macros.
  */
 #define        kfifo_to_user(fifo, to, len, copied) \
-__kfifo_must_check_helper( \
+__kfifo_uint_must_check_helper( \
 ({ \
        typeof((fifo) + 1) __tmp = (fifo); \
        void __user *__to = (to); \
@@ -761,7 +770,7 @@ __kfifo_must_check_helper( \
  * writer, you don't need extra locking to use these macros.
  */
 #define        kfifo_out_peek(fifo, buf, n) \
-__kfifo_must_check_helper( \
+__kfifo_uint_must_check_helper( \
 ({ \
        typeof((fifo) + 1) __tmp = (fifo); \
        typeof((buf) + 1) __buf = (buf); \
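
The typed helpers restore __must_check on the macro results, so silently dropping a kfifo return value can again trigger a compiler warning. A brief illustrative sketch:

    static DEFINE_KFIFO(fifo_sketch, unsigned char, 128);

    static void kfifo_demo(void)
    {
            unsigned char c;

            if (kfifo_get(&fifo_sketch, &c))  /* result checked */
                    pr_info("got %u\n", c);
    }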
index fcd3dda8632282c1d23882c28f0a1b4d3bb9828f..072652d94d9f5afe908f982c1acce9339bfbfc32 100644 (file)
@@ -585,15 +585,15 @@ static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
                table->ents[hash & table->mask] = RPS_NO_CPU;
 }
 
-extern struct rps_sock_flow_table *rps_sock_flow_table;
+extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
 
 /* This structure contains an instance of an RX queue. */
 struct netdev_rx_queue {
-       struct rps_map *rps_map;
-       struct rps_dev_flow_table *rps_flow_table;
-       struct kobject kobj;
-       struct netdev_rx_queue *first;
-       atomic_t count;
+       struct rps_map __rcu            *rps_map;
+       struct rps_dev_flow_table __rcu *rps_flow_table;
+       struct kobject                  kobj;
+       struct netdev_rx_queue          *first;
+       atomic_t                        count;
 } ____cacheline_aligned_in_smp;
 #endif /* CONFIG_RPS */
 
@@ -944,7 +944,7 @@ struct net_device {
        /* Protocol specific pointers */
 
 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
-       struct vlan_group       *vlgrp;         /* VLAN group */
+       struct vlan_group __rcu *vlgrp;         /* VLAN group */
 #endif
 #ifdef CONFIG_NET_DSA
        void                    *dsa_ptr;       /* dsa specific data */
@@ -952,7 +952,7 @@ struct net_device {
        void                    *atalk_ptr;     /* AppleTalk link       */
        struct in_device __rcu  *ip_ptr;        /* IPv4 specific data   */
        void                    *dn_ptr;        /* DECnet specific data */
-       void                    *ip6_ptr;       /* IPv6 specific data */
+       struct inet6_dev __rcu  *ip6_ptr;       /* IPv6 specific data */
        void                    *ec_ptr;        /* Econet specific data */
        void                    *ax25_ptr;      /* AX.25 specific data */
        struct wireless_dev     *ieee80211_ptr; /* IEEE 802.11 specific data,
@@ -1072,7 +1072,7 @@ struct net_device {
                struct pcpu_dstats __percpu     *dstats; /* dummy stats */
        };
        /* GARP */
-       struct garp_port        *garp_port;
+       struct garp_port __rcu  *garp_port;
 
        /* class/net/name entry */
        struct device           dev;
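
The __rcu annotations are for sparse: every dereference of these fields must now go through the RCU accessors. An illustrative read-side sketch (assumes CONFIG_VLAN_8021Q; function name invented):

    static void show_vlgrp_sketch(struct net_device *dev)
    {
            struct vlan_group *grp;

            rcu_read_lock();
            grp = rcu_dereference(dev->vlgrp);
            if (grp)
                    pr_info("%s has a VLAN group\n", dev->name);
            rcu_read_unlock();
    }

Writers pair this with rcu_assign_pointer(dev->vlgrp, new_grp).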
index 018db9a62ffe7b08ea64a967ab0b0a0a5caedf44..27ef6b190ea6cc5ee84c53dbcd9a38f5c09164d6 100644 (file)
 #define DEFINE_PER_CPU_READ_MOSTLY(type, name)                         \
        DEFINE_PER_CPU_SECTION(type, name, "..readmostly")
 
-/*
- * Declaration/definition used for large per-CPU variables that must be
- * aligned to something larger than the pagesize.
- */
-#define DECLARE_PER_CPU_MULTIPAGE_ALIGNED(type, name, size)            \
-       DECLARE_PER_CPU_SECTION(type, name, "..page_aligned")           \
-       __aligned(size)
-
-#define DEFINE_PER_CPU_MULTIPAGE_ALIGNED(type, name, size)             \
-       DEFINE_PER_CPU_SECTION(type, name, "..page_aligned")            \
-       __aligned(size)
-
 /*
  * Intermodule exports for per-CPU variables.  sparse forgets about
  * address space across EXPORT_SYMBOL(), change EXPORT_SYMBOL() to
index 8a7d510ffa9cb0fd11759d1224d125ff4e76a17f..46f6ba56fa9139909c04acbad98383b7ed88babb 100644 (file)
@@ -78,6 +78,11 @@ static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
        return 1;
 }
 
+static inline int percpu_counter_initialized(struct percpu_counter *fbc)
+{
+       return (fbc->counters != NULL);
+}
+
 #else
 
 struct percpu_counter {
@@ -143,6 +148,11 @@ static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
        return percpu_counter_read(fbc);
 }
 
+static inline int percpu_counter_initialized(struct percpu_counter *fbc)
+{
+       return 1;
+}
+
 #endif /* CONFIG_SMP */
 
 static inline void percpu_counter_inc(struct percpu_counter *fbc)
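
percpu_counter_initialized() gives callers a cheap way to avoid touching a counter whose percpu_counter_init() has not run (or failed). A small sketch under that assumption:

    static s64 safe_read_sketch(struct percpu_counter *fbc)
    {
            if (!percpu_counter_initialized(fbc))
                    return 0;
            return percpu_counter_read_positive(fbc);
    }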
index a6e047a04f798ed1ae0f75583ad7a2430b2c3958..7da5fa845959ab3dc9c2a393d7336a318c113aeb 100644 (file)
@@ -472,11 +472,7 @@ static inline int phy_write(struct phy_device *phydev, u32 regnum, u16 val)
 int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id);
 struct phy_device* get_phy_device(struct mii_bus *bus, int addr);
 int phy_device_register(struct phy_device *phy);
-int phy_clear_interrupt(struct phy_device *phydev);
-int phy_config_interrupt(struct phy_device *phydev, u32 interrupts);
 int phy_init_hw(struct phy_device *phydev);
-int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
-               u32 flags, phy_interface_t interface);
 struct phy_device * phy_attach(struct net_device *dev,
                const char *bus_id, u32 flags, phy_interface_t interface);
 struct phy_device *phy_find_first(struct mii_bus *bus);
@@ -492,17 +488,12 @@ void phy_start(struct phy_device *phydev);
 void phy_stop(struct phy_device *phydev);
 int phy_start_aneg(struct phy_device *phydev);
 
-void phy_sanitize_settings(struct phy_device *phydev);
 int phy_stop_interrupts(struct phy_device *phydev);
-int phy_enable_interrupts(struct phy_device *phydev);
-int phy_disable_interrupts(struct phy_device *phydev);
 
 static inline int phy_read_status(struct phy_device *phydev) {
        return phydev->drv->read_status(phydev);
 }
 
-int genphy_config_advert(struct phy_device *phydev);
-int genphy_setup_forced(struct phy_device *phydev);
 int genphy_restart_aneg(struct phy_device *phydev);
 int genphy_config_aneg(struct phy_device *phydev);
 int genphy_update_link(struct phy_device *phydev);
@@ -511,8 +502,6 @@ int genphy_suspend(struct phy_device *phydev);
 int genphy_resume(struct phy_device *phydev);
 void phy_driver_unregister(struct phy_driver *drv);
 int phy_driver_register(struct phy_driver *new_driver);
-void phy_prepare_link(struct phy_device *phydev,
-               void (*adjust_link)(struct net_device *));
 void phy_state_machine(struct work_struct *work);
 void phy_start_machine(struct phy_device *phydev,
                void (*handler)(struct net_device *));
@@ -523,7 +512,6 @@ int phy_mii_ioctl(struct phy_device *phydev,
                struct ifreq *ifr, int cmd);
 int phy_start_interrupts(struct phy_device *phydev);
 void phy_print_status(struct phy_device *phydev);
-struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id);
 void phy_device_free(struct phy_device *phydev);
 
 int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
index 600cc1fde64dbc96b9ddcae98bd79e2254ef40db..56e76af7810211b659795cb79c4d722eb3496c67 100644 (file)
@@ -73,6 +73,8 @@ extern void poll_initwait(struct poll_wqueues *pwq);
 extern void poll_freewait(struct poll_wqueues *pwq);
 extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
                                 ktime_t *expires, unsigned long slack);
+extern long select_estimate_accuracy(struct timespec *tv);
+
 
 static inline int poll_schedule(struct poll_wqueues *pwq, int state)
 {
index 4272521e29e9d5e85483a1cf7d14af96940bcb68..092a04f874a850ad66537aca4a2bfa7b7e973606 100644 (file)
 #include <linux/sched.h>               /* For struct task_struct.  */
 
 
-extern long arch_ptrace(struct task_struct *child, long request, long addr, long data);
+extern long arch_ptrace(struct task_struct *child, long request,
+                       unsigned long addr, unsigned long data);
 extern int ptrace_traceme(void);
 extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
 extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
@@ -108,7 +109,8 @@ extern int ptrace_attach(struct task_struct *tsk);
 extern int ptrace_detach(struct task_struct *, unsigned int);
 extern void ptrace_disable(struct task_struct *);
 extern int ptrace_check_attach(struct task_struct *task, int kill);
-extern int ptrace_request(struct task_struct *child, long request, long addr, long data);
+extern int ptrace_request(struct task_struct *child, long request,
+                         unsigned long addr, unsigned long data);
 extern void ptrace_notify(int exit_code);
 extern void __ptrace_link(struct task_struct *child,
                          struct task_struct *new_parent);
@@ -132,8 +134,10 @@ static inline void ptrace_unlink(struct task_struct *child)
                __ptrace_unlink(child);
 }
 
-int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data);
-int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data);
+int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
+                           unsigned long data);
+int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
+                           unsigned long data);
 
 /**
  * task_ptrace - return %PT_* flags that apply to a task
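
After the prototype change, addr and data are unsigned long end to end. An illustrative skeleton of an arch_ptrace() built on the updated generic helpers (a sketch, not any real architecture's code):

    long arch_ptrace_sketch(struct task_struct *child, long request,
                            unsigned long addr, unsigned long data)
    {
            switch (request) {
            case PTRACE_PEEKTEXT:
            case PTRACE_PEEKDATA:
                    return generic_ptrace_peekdata(child, addr, data);
            default:
                    return ptrace_request(child, request, addr, data);
            }
    }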
diff --git a/include/linux/ramoops.h b/include/linux/ramoops.h
new file mode 100644 (file)
index 0000000..0ae68a2
--- /dev/null
@@ -0,0 +1,15 @@
+#ifndef __RAMOOPS_H
+#define __RAMOOPS_H
+
+/*
+ * Ramoops platform data
+ * @mem_size   memory size for ramoops
+ * @mem_address        physical memory address to contain ramoops
+ */
+
+struct ramoops_platform_data {
+       unsigned long   mem_size;
+       unsigned long   mem_address;
+};
+
+#endif
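
A board file hands the ramoops driver a reserved memory window through this platform data. A hedged sketch (address and size are illustrative and must match a region carved out of the platform's memory map):

    #include <linux/platform_device.h>
    #include <linux/ramoops.h>

    static struct ramoops_platform_data board_ramoops_data = {
            .mem_size    = 1024 * 1024,
            .mem_address = 0x8f000000,
    };

    static struct platform_device board_ramoops_dev = {
            .name = "ramoops",
            .dev  = {
                    .platform_data = &board_ramoops_data,
            },
    };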
index 25b4f686d9189242f6e6d817b563101f7ce07262..8d3a2486544d1d91583e703b45d9b90794d2cca1 100644 (file)
@@ -62,18 +62,6 @@ enum ring_buffer_type {
 unsigned ring_buffer_event_length(struct ring_buffer_event *event);
 void *ring_buffer_event_data(struct ring_buffer_event *event);
 
-/**
- * ring_buffer_event_time_delta - return the delta timestamp of the event
- * @event: the event to get the delta timestamp of
- *
- * The delta timestamp is the 27 bit timestamp since the last event.
- */
-static inline unsigned
-ring_buffer_event_time_delta(struct ring_buffer_event *event)
-{
-       return event->time_delta;
-}
-
 /*
  * ring_buffer_discard_commit will remove an event that has not
  *   been committed yet. If this is used, then ring_buffer_unlock_commit
index bd6eb0ed34a7d743f370295390fd440f5234b2e1..0bed941f9b134e615f59b2223752812e5a0ac6e4 100644 (file)
@@ -67,6 +67,7 @@
 #define RIO_PW_MSG_SIZE                64
 
 extern struct bus_type rio_bus_type;
+extern struct device rio_bus;
 extern struct list_head rio_devices;   /* list of all devices */
 
 struct rio_mport;
@@ -98,6 +99,7 @@ union rio_pw_msg;
  * @riores: RIO resources this device owns
  * @pwcback: port-write callback function for this device
  * @destid: Network destination ID
+ * @prev: Previous RIO device connected to the current one
  */
 struct rio_dev {
        struct list_head global_list;   /* node in list of all RIO devices */
@@ -111,7 +113,7 @@ struct rio_dev {
        u16 asm_rev;
        u16 efptr;
        u32 pef;
-       u32 swpinfo;            /* Only used for switches */
+       u32 swpinfo;
        u32 src_ops;
        u32 dst_ops;
        u32 comp_tag;
@@ -124,6 +126,7 @@ struct rio_dev {
        struct resource riores[RIO_MAX_DEV_RESOURCES];
        int (*pwcback) (struct rio_dev *rdev, union rio_pw_msg *msg, int step);
        u16 destid;
+       struct rio_dev *prev;
 };
 
 #define rio_dev_g(n) list_entry(n, struct rio_dev, global_list)
@@ -174,6 +177,7 @@ enum rio_phy_type {
  * @index: Port index, unique among all port interfaces of the same type
  * @sys_size: RapidIO common transport system size
  * @phy_type: RapidIO phy type
+ * @phys_efptr: RIO port extended features pointer
  * @name: Port name string
  * @priv: Master port private data
  */
@@ -195,6 +199,7 @@ struct rio_mport {
                                 * 1 - Large size, 65536 devices.
                                 */
        enum rio_phy_type phy_type;     /* RapidIO phy type */
+       u32 phys_efptr;
        unsigned char name[40];
        void *priv;             /* Master port private data */
 };
@@ -215,9 +220,14 @@ struct rio_net {
        unsigned char id;       /* RIO network ID */
 };
 
+/* Definitions used by switch sysfs initialization callback */
+#define RIO_SW_SYSFS_CREATE    1       /* Create switch attributes */
+#define RIO_SW_SYSFS_REMOVE    0       /* Remove switch attributes */
+
 /**
  * struct rio_switch - RIO switch info
  * @node: Node in global list of switches
+ * @rdev: Associated RIO device structure
  * @switchid: Switch ID that is unique across a network
  * @hopcount: Hopcount to this switch
  * @destid: Associated destid in the path
@@ -230,9 +240,12 @@ struct rio_net {
  * @get_domain: Callback for switch-specific domain get function
  * @em_init: Callback for switch-specific error management initialization function
  * @em_handle: Callback for switch-specific error management handler function
+ * @sw_sysfs: Callback that initializes switch-specific sysfs attributes
+ * @nextdev: Array of per-port pointers to the next attached device
  */
 struct rio_switch {
        struct list_head node;
+       struct rio_dev *rdev;
        u16 switchid;
        u16 hopcount;
        u16 destid;
@@ -250,6 +263,8 @@ struct rio_switch {
                           u8 *sw_domain);
        int (*em_init) (struct rio_dev *dev);
        int (*em_handle) (struct rio_dev *dev, u8 swport);
+       int (*sw_sysfs) (struct rio_dev *dev, int create);
+       struct rio_dev *nextdev[0];
 };
 
 /* Low-level architecture-dependent routines */
index db50e1c288b7b48be7e3bbd6fad70b934f42e588..ee7b6ada188f664d760b2f3c7c4eaf1832c7eab3 100644 (file)
@@ -34,5 +34,7 @@
 #define RIO_DID_IDTCPS16               0x035b
 #define RIO_DID_IDTCPS6Q               0x035f
 #define RIO_DID_IDTCPS10Q              0x035e
+#define RIO_DID_IDTCPS1848             0x0374
+#define RIO_DID_IDTCPS1616             0x0379
 
 #endif                         /* LINUX_RIO_IDS_H */
index aedee0489fb41991b8b018b57a449a867b039434..d63dcbaea169e857f1d9f62eabdf32dd69ec9413 100644 (file)
@@ -33,6 +33,7 @@
 #define  RIO_PEF_MEMORY                        0x40000000      /* [I] MMIO */
 #define  RIO_PEF_PROCESSOR             0x20000000      /* [I] Processor */
 #define  RIO_PEF_SWITCH                        0x10000000      /* [I] Switch */
+#define  RIO_PEF_MULTIPORT             0x08000000      /* [VI, 2.1] Multiport */
 #define  RIO_PEF_INB_MBOX              0x00f00000      /* [II] Mailboxes */
 #define  RIO_PEF_INB_MBOX0             0x00800000      /* [II] Mailbox 0 */
 #define  RIO_PEF_INB_MBOX1             0x00400000      /* [II] Mailbox 1 */
@@ -51,6 +52,7 @@
 #define  RIO_SWP_INFO_PORT_TOTAL_MASK  0x0000ff00      /* [I] Total number of ports */
 #define  RIO_SWP_INFO_PORT_NUM_MASK    0x000000ff      /* [I] Maintenance transaction port number */
 #define  RIO_GET_TOTAL_PORTS(x)                ((x & RIO_SWP_INFO_PORT_TOTAL_MASK) >> 8)
+#define  RIO_GET_PORT_NUM(x)           (x & RIO_SWP_INFO_PORT_NUM_MASK)
 
 #define RIO_SRC_OPS_CAR                0x18    /* [I] Source Operations CAR */
 #define  RIO_SRC_OPS_READ              0x00008000      /* [I] Read op */
 #define RIO_COMPONENT_TAG_CSR  0x6c    /* [III] Component Tag CSR */
 
 #define RIO_STD_RTE_CONF_DESTID_SEL_CSR        0x70
+#define  RIO_STD_RTE_CONF_EXTCFGEN             0x80000000
 #define RIO_STD_RTE_CONF_PORT_SEL_CSR  0x74
 #define RIO_STD_RTE_DEFAULT_PORT       0x78
 
 #define  RIO_PORT_GEN_MASTER           0x40000000
 #define  RIO_PORT_GEN_DISCOVERED       0x20000000
 #define RIO_PORT_N_MNT_REQ_CSR(x)      (0x0040 + x*0x20)       /* 0x0002 */
+#define  RIO_MNT_REQ_CMD_RD            0x03    /* Reset-device command */
+#define  RIO_MNT_REQ_CMD_IS            0x04    /* Input-status command */
 #define RIO_PORT_N_MNT_RSP_CSR(x)      (0x0044 + x*0x20)       /* 0x0002 */
 #define  RIO_PORT_N_MNT_RSP_RVAL       0x80000000 /* Response Valid */
-#define  RIO_PORT_N_MNT_RSP_ASTAT      0x000003e0 /* ackID Status */
+#define  RIO_PORT_N_MNT_RSP_ASTAT      0x000007e0 /* ackID Status */
 #define  RIO_PORT_N_MNT_RSP_LSTAT      0x0000001f /* Link Status */
 #define RIO_PORT_N_ACK_STS_CSR(x)      (0x0048 + x*0x20)       /* 0x0002 */
 #define  RIO_PORT_N_ACK_CLEAR          0x80000000
-#define  RIO_PORT_N_ACK_INBOUND                0x1f000000
-#define  RIO_PORT_N_ACK_OUTSTAND       0x00001f00
-#define  RIO_PORT_N_ACK_OUTBOUND       0x0000001f
+#define  RIO_PORT_N_ACK_INBOUND                0x3f000000
+#define  RIO_PORT_N_ACK_OUTSTAND       0x00003f00
+#define  RIO_PORT_N_ACK_OUTBOUND       0x0000003f
 #define RIO_PORT_N_ERR_STS_CSR(x)      (0x0058 + x*0x20)
 #define  RIO_PORT_N_ERR_STS_PW_OUT_ES  0x00010000 /* Output Error-stopped */
 #define  RIO_PORT_N_ERR_STS_PW_INP_ES  0x00000100 /* Input Error-stopped */
 #define  RIO_PORT_N_ERR_STS_PORT_ERR   0x00000004
 #define  RIO_PORT_N_ERR_STS_PORT_OK    0x00000002
 #define  RIO_PORT_N_ERR_STS_PORT_UNINIT        0x00000001
-#define  RIO_PORT_N_ERR_STS_CLR_MASK   0x07120204
 #define RIO_PORT_N_CTL_CSR(x)          (0x005c + x*0x20)
 #define  RIO_PORT_N_CTL_PWIDTH         0xc0000000
 #define  RIO_PORT_N_CTL_PWIDTH_1       0x00000000
 #define RIO_EM_EFB_HEADER      0x000   /* Error Management Extensions Block Header */
 #define RIO_EM_LTL_ERR_DETECT  0x008   /* Logical/Transport Layer Error Detect CSR */
 #define RIO_EM_LTL_ERR_EN      0x00c   /* Logical/Transport Layer Error Enable CSR */
+#define  REM_LTL_ERR_ILLTRAN           0x08000000 /* Illegal Transaction decode */
+#define  REM_LTL_ERR_UNSOLR            0x00800000 /* Unsolicited Response */
+#define  REM_LTL_ERR_UNSUPTR           0x00400000 /* Unsupported Transaction */
+#define  REM_LTL_ERR_IMPSPEC           0x000000ff /* Implementation Specific */
 #define RIO_EM_LTL_HIADDR_CAP  0x010   /* Logical/Transport Layer High Address Capture CSR */
 #define RIO_EM_LTL_ADDR_CAP    0x014   /* Logical/Transport Layer Address Capture CSR */
 #define RIO_EM_LTL_DEVID_CAP   0x018   /* Logical/Transport Layer Device ID Capture CSR */
index 393ce94e54b77155227f5df48cf7e81e6b9ec09f..be7adb7588e5e5519c45caff28534cb3e2476b72 100644 (file)
@@ -626,6 +626,10 @@ struct signal_struct {
 
        int oom_adj;            /* OOM kill score adjustment (bit shift) */
        int oom_score_adj;      /* OOM kill score adjustment */
+
+       struct mutex cred_guard_mutex;  /* guard against foreign influences on
+                                        * credential calculations
+                                        * (notably ptrace) */
 };
 
 /* Context switch must be unlocked if interrupts are to be enabled */
@@ -1305,9 +1309,6 @@ struct task_struct {
                                         * credentials (COW) */
        const struct cred __rcu *cred;  /* effective (overridable) subjective task
                                         * credentials (COW) */
-       struct mutex cred_guard_mutex;  /* guard against foreign influences on
-                                        * credential calculations
-                                        * (notably. ptrace) */
        struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
 
        char comm[TASK_COMM_LEN]; /* executable name excluding path
@@ -2236,9 +2237,16 @@ static inline void task_unlock(struct task_struct *p)
        spin_unlock(&p->alloc_lock);
 }
 
-extern struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
+extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
                                                        unsigned long *flags);
 
+#define lock_task_sighand(tsk, flags)                                  \
+({     struct sighand_struct *__ss;                                    \
+       __cond_lock(&(tsk)->sighand->siglock,                           \
+                   (__ss = __lock_task_sighand(tsk, flags)));          \
+       __ss;                                                           \
+})                                                                     \
+
 static inline void unlock_task_sighand(struct task_struct *tsk,
                                                unsigned long *flags)
 {
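
Wrapping __lock_task_sighand() in a macro lets sparse track the conditionally acquired siglock via __cond_lock(). Callers look the same as before; a sketch of the usual pattern (function name illustrative):

    static int read_stats_sketch(struct task_struct *tsk)
    {
            struct sighand_struct *sighand;
            unsigned long flags;

            sighand = lock_task_sighand(tsk, &flags);
            if (!sighand)
                    return -ESRCH;  /* task is exiting */
            /* ... read fields guarded by siglock ... */
            unlock_task_sighand(tsk, &flags);
            return 0;
    }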
index cfa2d20e35f152a8bad87ec32bcd27f206214c69..6dc95cac6b3dabc390ac271cca9a0e4fb16436ac 100644 (file)
 
 extern void cpu_idle(void);
 
+typedef void (*smp_call_func_t)(void *info);
 struct call_single_data {
        struct list_head list;
-       void (*func) (void *info);
+       smp_call_func_t func;
        void *info;
        u16 flags;
        u16 priv;
@@ -24,8 +25,8 @@ struct call_single_data {
 /* total number of cpus in this system (may exceed NR_CPUS) */
 extern unsigned int total_cpus;
 
-int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
-                               int wait);
+int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
+                            int wait);
 
 #ifdef CONFIG_SMP
 
@@ -69,15 +70,15 @@ extern void smp_cpus_done(unsigned int max_cpus);
 /*
  * Call a function on all other processors
  */
-int smp_call_function(void(*func)(void *info), void *info, int wait);
+int smp_call_function(smp_call_func_t func, void *info, int wait);
 void smp_call_function_many(const struct cpumask *mask,
-                           void (*func)(void *info), void *info, bool wait);
+                           smp_call_func_t func, void *info, bool wait);
 
 void __smp_call_function_single(int cpuid, struct call_single_data *data,
                                int wait);
 
 int smp_call_function_any(const struct cpumask *mask,
-                         void (*func)(void *info), void *info, int wait);
+                         smp_call_func_t func, void *info, int wait);
 
 /*
  * Generic and arch helpers
@@ -94,7 +95,7 @@ void ipi_call_unlock_irq(void);
 /*
  * Call a function on all processors
  */
-int on_each_cpu(void (*func) (void *info), void *info, int wait);
+int on_each_cpu(smp_call_func_t func, void *info, int wait);
 
 #define MSG_ALL_BUT_SELF       0x8000  /* Assume <32768 CPU's */
 #define MSG_ALL                        0x8001
@@ -122,7 +123,7 @@ static inline void smp_send_stop(void) { }
  *     These macros fold the SMP functionality into a single CPU system
  */
 #define raw_smp_processor_id()                 0
-static inline int up_smp_call_function(void (*func)(void *), void *info)
+static inline int up_smp_call_function(smp_call_func_t func, void *info)
 {
        return 0;
 }
@@ -143,7 +144,7 @@ static inline void smp_send_reschedule(int cpu) { }
 static inline void init_call_single_data(void) { }
 
 static inline int
-smp_call_function_any(const struct cpumask *mask, void (*func)(void *info),
+smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
                      void *info, int wait)
 {
        return smp_call_function_single(0, func, info, wait);
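
The typedef mainly tidies the many repeated function-pointer spellings. A short sketch of smp_call_func_t in use (names illustrative):

    static void bump_counter(void *info)
    {
            atomic_inc((atomic_t *)info);
    }

    static void count_cpus_sketch(atomic_t *counter)
    {
            on_each_cpu(bump_counter, counter, 1); /* wait on all CPUs */
    }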
diff --git a/include/linux/spi/74x164.h b/include/linux/spi/74x164.h
new file mode 100644 (file)
index 0000000..d85c52f
--- /dev/null
@@ -0,0 +1,11 @@
+#ifndef LINUX_SPI_74X164_H
+#define LINUX_SPI_74X164_H
+
+#define GEN_74X164_DRIVER_NAME "74x164"
+
+struct gen_74x164_chip_platform_data {
+       /* number assigned to the first GPIO */
+       unsigned        base;
+};
+
+#endif
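
Boards describe the shift register as an ordinary SPI device carrying this platform data. A hedged registration sketch (bus number, chip select and GPIO base are illustrative):

    #include <linux/spi/spi.h>
    #include <linux/spi/74x164.h>

    static struct gen_74x164_chip_platform_data board_74x164 = {
            .base = 128,    /* first GPIO number to assign */
    };

    static struct spi_board_info board_spi_devs[] __initdata = {
            {
                    .modalias      = GEN_74X164_DRIVER_NAME,
                    .max_speed_hz  = 1000000,
                    .bus_num       = 1,
                    .chip_select   = 0,
                    .platform_data = &board_74x164,
            },
    };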
index 0ff2779c44d09f28e773cdb5f2014198e7be7c42..2e7d81c4e5adcf54525d409b01a04644a7e2b35d 100644 (file)
 #define MGSL_MODE_BISYNC       4
 #define MGSL_MODE_RAW          6
 #define MGSL_MODE_BASE_CLOCK    7
+#define MGSL_MODE_XSYNC         8
 
 #define MGSL_BUS_TYPE_ISA      1
 #define MGSL_BUS_TYPE_EISA     2
@@ -290,6 +291,10 @@ struct gpio_desc {
 #define MGSL_IOCSGPIO          _IOW(MGSL_MAGIC_IOC,16,struct gpio_desc)
 #define MGSL_IOCGGPIO          _IOR(MGSL_MAGIC_IOC,17,struct gpio_desc)
 #define MGSL_IOCWAITGPIO       _IOWR(MGSL_MAGIC_IOC,18,struct gpio_desc)
+#define MGSL_IOCSXSYNC         _IO(MGSL_MAGIC_IOC, 19)
+#define MGSL_IOCGXSYNC         _IO(MGSL_MAGIC_IOC, 20)
+#define MGSL_IOCSXCTRL         _IO(MGSL_MAGIC_IOC, 21)
+#define MGSL_IOCGXCTRL         _IO(MGSL_MAGIC_IOC, 22)
 
 #ifdef __KERNEL__
 /* provide 32 bit ioctl compatibility on 64 bit systems */
index e6319d18a55d93066ac49b39ecd05de5e9be0424..cacc27a0e285163d9a8727a4131ffed478b8f46c 100644 (file)
@@ -701,7 +701,8 @@ asmlinkage long sys_nfsservctl(int cmd,
 asmlinkage long sys_syslog(int type, char __user *buf, int len);
 asmlinkage long sys_uselib(const char __user *library);
 asmlinkage long sys_ni_syscall(void);
-asmlinkage long sys_ptrace(long request, long pid, long addr, long data);
+asmlinkage long sys_ptrace(long request, long pid, unsigned long addr,
+                          unsigned long data);
 
 asmlinkage long sys_add_key(const char __user *_type,
                            const char __user *_description,
index 10db0102a890b8baa7905cf2b73cded2314f7f30..3a2e66d88a32c41db9e978c700601c4c132ea54c 100644 (file)
@@ -150,7 +150,7 @@ static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step)
  *
  * Return %LSM_UNSAFE_* bits applied to an exec because of tracing.
  *
- * @task->cred_guard_mutex is held by the caller through the do_execve().
+ * @task->signal->cred_guard_mutex is held by the caller throughout do_execve().
  */
 static inline int tracehook_unsafe_exec(struct task_struct *task)
 {
index 1faa80d92f05d14fe1c769ad5736b3f6065b0841..e68b439b2860e7aa5e9490fec542fdfe245c6b2f 100644 (file)
@@ -5,7 +5,6 @@
 #include <linux/types.h>
 #include <linux/virtio_ids.h>
 #include <linux/virtio_config.h>
-#include <linux/types.h>
 
 /* The feature bitmap for virtio 9P */
 
index d5c7aaadda59a926032794a4d3a27a46f01bab3c..09eec350054d0259fb36f2e1b1447f6b5a0bde72 100644 (file)
@@ -141,6 +141,8 @@ typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
 
 int generic_writepages(struct address_space *mapping,
                       struct writeback_control *wbc);
+void tag_pages_for_writeback(struct address_space *mapping,
+                            pgoff_t start, pgoff_t end);
 int write_cache_pages(struct address_space *mapping,
                      struct writeback_control *wbc, writepage_t writepage,
                      void *data);
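
tag_pages_for_writeback() is the first half of the sync livelock-avoidance scheme: tag every dirty page in the range once, then write only tagged pages, so pages dirtied during the walk are left for the next pass. A condensed, illustrative fragment of the caller side:

    static void sync_tag_sketch(struct address_space *mapping,
                                struct writeback_control *wbc,
                                pgoff_t index, pgoff_t end)
    {
            if (wbc->sync_mode == WB_SYNC_ALL)
                    tag_pages_for_writeback(mapping, index, end);
    }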
diff --git a/include/net/caif/caif_shm.h b/include/net/caif/caif_shm.h
new file mode 100644 (file)
index 0000000..5bcce55
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
+ * Author: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef CAIF_SHM_H_
+#define CAIF_SHM_H_
+
+struct shmdev_layer {
+       u32 shm_base_addr;
+       u32 shm_total_sz;
+       u32 shm_id;
+       u32 shm_loopback;
+       void *hmbx;
+       int (*pshmdev_mbxsend) (u32 shm_id, u32 mbx_msg);
+       int (*pshmdev_mbxsetup) (void *pshmdrv_cb,
+                               struct shmdev_layer *pshm_dev, void *pshm_drv);
+       struct net_device *pshm_netdev;
+};
+
+extern int caif_shmcore_probe(struct shmdev_layer *pshm_dev);
+extern void caif_shmcore_remove(struct net_device *pshm_netdev);
+
+#endif
index a217c838ec0dc649463a999c5c4d55cf6d53585d..ffe9cb719c0e526302bb2d4635c6f402ce31c867 100644 (file)
@@ -95,7 +95,7 @@ struct dst_entry {
        unsigned long           lastuse;
        union {
                struct dst_entry *next;
-               struct rtable    *rt_next;
+               struct rtable __rcu *rt_next;
                struct rt6_info   *rt6_next;
                struct dn_route  *dn_next;
        };
index 106f3097d38452e9764e60115f88ea768d6cb15c..075f1e3a0fedf77dbe1cebb42e41ac930c304fca 100644 (file)
@@ -20,7 +20,7 @@ struct fib_rule {
        u32                     table;
        u8                      action;
        u32                     target;
-       struct fib_rule *       ctarget;
+       struct fib_rule __rcu   *ctarget;
        char                    iifname[IFNAMSIZ];
        char                    oifname[IFNAMSIZ];
        struct rcu_head         rcu;
index 825f172caba97e607fb3de28d3ba5c4009519419..f4c295984c455782aa9c5512cdc15a3621874937 100644 (file)
@@ -107,7 +107,7 @@ struct garp_applicant {
 };
 
 struct garp_port {
-       struct garp_applicant   *applicants[GARP_APPLICATION_MAX + 1];
+       struct garp_applicant __rcu     *applicants[GARP_APPLICATION_MAX + 1];
 };
 
 extern int     garp_register_application(struct garp_application *app);
index 417d0c894f29e29c096a9bf5ad7888b42d54976a..fe239bfe5f7f5082d92214bcb8d48d183473674b 100644 (file)
@@ -15,7 +15,7 @@
 
 struct inet_peer {
        /* group together avl_left,avl_right,v4daddr to speedup lookups */
-       struct inet_peer        *avl_left, *avl_right;
+       struct inet_peer __rcu  *avl_left, *avl_right;
        __be32                  v4daddr;        /* peer's address */
        __u32                   avl_height;
        struct list_head        unused;
index dbee3fe260e13877e28357c2477a66582cd1c03e..86e2b182a0c09eddab243d4b33bd00d03dc89f70 100644 (file)
@@ -59,7 +59,7 @@ struct ipcm_cookie {
 #define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
 
 struct ip_ra_chain {
-       struct ip_ra_chain      *next;
+       struct ip_ra_chain __rcu *next;
        struct sock             *sk;
        union {
                void                    (*destructor)(struct sock *);
@@ -68,7 +68,7 @@ struct ip_ra_chain {
        struct rcu_head         rcu;
 };
 
-extern struct ip_ra_chain *ip_ra_chain;
+extern struct ip_ra_chain __rcu *ip_ra_chain;
 
 /* IP flags. */
 #define IP_CE          0x8000          /* Flag: "Congestion"           */
index fc94ec568a50a54db39053b9bdcf0d862c7eba9a..fc73e667b50e4e79d506fa536b9635ac39e787c9 100644 (file)
@@ -13,7 +13,7 @@
 /* IPv6 tunnel */
 
 struct ip6_tnl {
-       struct ip6_tnl *next;   /* next tunnel in list */
+       struct ip6_tnl __rcu *next;     /* next tunnel in list */
        struct net_device *dev; /* virtual device associated with tunnel */
        struct ip6_tnl_parm parms;      /* tunnel configuration parameters */
        struct flowi fl;        /* flowi template for xmit */
index 58abbf966b0c816a3f22d0060fa0ad8aa08113ea..a32654d52730d751a4c89dd8f8fea98b57add370 100644 (file)
@@ -16,7 +16,7 @@ struct ip_tunnel_6rd_parm {
 };
 
 struct ip_tunnel {
-       struct ip_tunnel        *next;
+       struct ip_tunnel __rcu  *next;
        struct net_device       *dev;
 
        int                     err_count;      /* Number of arrived ICMP errors */
@@ -34,12 +34,12 @@ struct ip_tunnel {
 #ifdef CONFIG_IPV6_SIT_6RD
        struct ip_tunnel_6rd_parm       ip6rd;
 #endif
-       struct ip_tunnel_prl_entry      *prl;           /* potential router list */
+       struct ip_tunnel_prl_entry __rcu *prl;          /* potential router list */
        unsigned int                    prl_count;      /* # of entries in PRL */
 };
 
 struct ip_tunnel_prl_entry {
-       struct ip_tunnel_prl_entry      *next;
+       struct ip_tunnel_prl_entry __rcu *next;
        __be32                          addr;
        u16                             flags;
        struct rcu_head                 rcu_head;
index 65af9a07cf766b3fe0309149cd46fcc6495aaffb..1bf812b21fb706a8c108c1d2faeb40986f552746 100644 (file)
@@ -88,7 +88,7 @@ struct net {
 #ifdef CONFIG_WEXT_CORE
        struct sk_buff_head     wext_nlevents;
 #endif
-       struct net_generic      *gen;
+       struct net_generic __rcu        *gen;
 
        /* Note : following structs are cache line aligned */
 #ifdef CONFIG_XFRM
index f1effdd3c26522380cf01562bac86031c9c62047..dc07495bce4cb0dc7807bdaa390d62881fc7d614 100644 (file)
@@ -89,10 +89,10 @@ struct inet_protosw {
 #define INET_PROTOSW_PERMANENT 0x02  /* Permanent protocols are unremovable. */
 #define INET_PROTOSW_ICSK      0x04  /* Is this an inet_connection_sock? */
 
-extern const struct net_protocol *inet_protos[MAX_INET_PROTOS];
+extern const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS];
 
 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
-extern const struct inet6_protocol *inet6_protos[MAX_INET_PROTOS];
+extern const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS];
 #endif
 
 extern int     inet_add_protocol(const struct net_protocol *prot, unsigned char num);
index 73a4f9702a65c816c3701ab7fa9777fdbcaa2c72..c7a736228ca2dcab930f7f4a9f07d5fda2858c7c 100644 (file)
@@ -301,7 +301,7 @@ struct sock {
        const struct cred       *sk_peer_cred;
        long                    sk_rcvtimeo;
        long                    sk_sndtimeo;
-       struct sk_filter        *sk_filter;
+       struct sk_filter __rcu  *sk_filter;
        void                    *sk_protinfo;
        struct timer_list       sk_timer;
        ktime_t                 sk_stamp;
index f28d7c9b9f8d4d46978e83060e51dbbcda32bf3b..bcfb6b24b019cf398bb3674aa13cc29d5b423dc6 100644 (file)
@@ -1264,7 +1264,7 @@ struct xfrm_tunnel {
        int (*handler)(struct sk_buff *skb);
        int (*err_handler)(struct sk_buff *skb, u32 info);
 
-       struct xfrm_tunnel *next;
+       struct xfrm_tunnel __rcu *next;
        int priority;
 };
 
@@ -1272,7 +1272,7 @@ struct xfrm6_tunnel {
        int (*handler)(struct sk_buff *skb);
        int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
                           u8 type, u8 code, int offset, __be32 info);
-       struct xfrm6_tunnel *next;
+       struct xfrm6_tunnel __rcu *next;
        int priority;
 };
 
index 6bcb00645de47ffb9262c1c366fa14e82291e887..289010d3270bfa99191cfbdfa95a02564b6638bf 100644 (file)
@@ -21,7 +21,8 @@ TRACE_EVENT(ext4_free_inode,
        TP_ARGS(inode),
 
        TP_STRUCT__entry(
-               __field(        dev_t,  dev                     )
+               __field(        int,   dev_major                )
+               __field(        int,   dev_minor                )
                __field(        ino_t,  ino                     )
                __field(        umode_t, mode                   )
                __field(        uid_t,  uid                     )
@@ -30,7 +31,8 @@ TRACE_EVENT(ext4_free_inode,
        ),
 
        TP_fast_assign(
-               __entry->dev    = inode->i_sb->s_dev;
+               __entry->dev_major = MAJOR(inode->i_sb->s_dev);
+               __entry->dev_minor = MINOR(inode->i_sb->s_dev);
                __entry->ino    = inode->i_ino;
                __entry->mode   = inode->i_mode;
                __entry->uid    = inode->i_uid;
@@ -38,9 +40,10 @@ TRACE_EVENT(ext4_free_inode,
                __entry->blocks = inode->i_blocks;
        ),
 
-       TP_printk("dev %s ino %lu mode 0%o uid %u gid %u blocks %llu",
-                 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
-                 __entry->mode, __entry->uid, __entry->gid,
+       TP_printk("dev %d,%d ino %lu mode 0%o uid %u gid %u blocks %llu",
+                 __entry->dev_major, __entry->dev_minor,
+                 (unsigned long) __entry->ino, __entry->mode,
+                 __entry->uid, __entry->gid,
                  (unsigned long long) __entry->blocks)
 );
 
@@ -50,20 +53,22 @@ TRACE_EVENT(ext4_request_inode,
        TP_ARGS(dir, mode),
 
        TP_STRUCT__entry(
-               __field(        dev_t,  dev                     )
+               __field(        int,   dev_major                )
+               __field(        int,   dev_minor                )
                __field(        ino_t,  dir                     )
                __field(        umode_t, mode                   )
        ),
 
        TP_fast_assign(
-               __entry->dev    = dir->i_sb->s_dev;
+               __entry->dev_major = MAJOR(dir->i_sb->s_dev);
+               __entry->dev_minor = MINOR(dir->i_sb->s_dev);
                __entry->dir    = dir->i_ino;
                __entry->mode   = mode;
        ),
 
-       TP_printk("dev %s dir %lu mode 0%o",
-                 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->dir,
-                 __entry->mode)
+       TP_printk("dev %d,%d dir %lu mode 0%o",
+                 __entry->dev_major, __entry->dev_minor,
+                 (unsigned long) __entry->dir, __entry->mode)
 );
 
 TRACE_EVENT(ext4_allocate_inode,
@@ -72,21 +77,24 @@ TRACE_EVENT(ext4_allocate_inode,
        TP_ARGS(inode, dir, mode),
 
        TP_STRUCT__entry(
-               __field(        dev_t,  dev                     )
+               __field(        int,   dev_major                )
+               __field(        int,   dev_minor                )
                __field(        ino_t,  ino                     )
                __field(        ino_t,  dir                     )
                __field(        umode_t, mode                   )
        ),
 
        TP_fast_assign(
-               __entry->dev    = inode->i_sb->s_dev;
+               __entry->dev_major = MAJOR(inode->i_sb->s_dev);
+               __entry->dev_minor = MINOR(inode->i_sb->s_dev);
                __entry->ino    = inode->i_ino;
                __entry->dir    = dir->i_ino;
                __entry->mode   = mode;
        ),
 
-       TP_printk("dev %s ino %lu dir %lu mode 0%o",
-                 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+       TP_printk("dev %d,%d ino %lu dir %lu mode 0%o",
+                 __entry->dev_major, __entry->dev_minor,
+                 (unsigned long) __entry->ino,
                  (unsigned long) __entry->dir, __entry->mode)
 );
 
@@ -98,7 +106,8 @@ DECLARE_EVENT_CLASS(ext4__write_begin,
        TP_ARGS(inode, pos, len, flags),
 
        TP_STRUCT__entry(
-               __field(        dev_t,  dev                     )
+               __field(        int,   dev_major                )
+               __field(        int,   dev_minor                )
                __field(        ino_t,  ino                     )
                __field(        loff_t, pos                     )
                __field(        unsigned int, len               )
@@ -106,15 +115,17 @@ DECLARE_EVENT_CLASS(ext4__write_begin,
        ),
 
        TP_fast_assign(
-               __entry->dev    = inode->i_sb->s_dev;
+               __entry->dev_major = MAJOR(inode->i_sb->s_dev);
+               __entry->dev_minor = MINOR(inode->i_sb->s_dev);
                __entry->ino    = inode->i_ino;
                __entry->pos    = pos;
                __entry->len    = len;
                __entry->flags  = flags;
        ),
 
-       TP_printk("dev %s ino %lu pos %llu len %u flags %u",
-                 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+       TP_printk("dev %d,%d ino %lu pos %llu len %u flags %u",
+                 __entry->dev_major, __entry->dev_minor,
+                 (unsigned long) __entry->ino,
                  __entry->pos, __entry->len, __entry->flags)
 );
 
@@ -141,7 +152,8 @@ DECLARE_EVENT_CLASS(ext4__write_end,
        TP_ARGS(inode, pos, len, copied),
 
        TP_STRUCT__entry(
-               __field(        dev_t,  dev                     )
+               __field(        int,   dev_major                )
+               __field(        int,   dev_minor                )
                __field(        ino_t,  ino                     )
                __field(        loff_t, pos                     )
                __field(        unsigned int, len               )
@@ -149,16 +161,18 @@ DECLARE_EVENT_CLASS(ext4__write_end,
        ),
 
        TP_fast_assign(
-               __entry->dev    = inode->i_sb->s_dev;
+               __entry->dev_major = MAJOR(inode->i_sb->s_dev);
+               __entry->dev_minor = MINOR(inode->i_sb->s_dev);
                __entry->ino    = inode->i_ino;
                __entry->pos    = pos;
                __entry->len    = len;
                __entry->copied = copied;
        ),
 
-       TP_printk("dev %s ino %lu pos %llu len %u copied %u",
-                 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
-                 __entry->pos, __entry->len, __entry->copied)
+       TP_printk("dev %d,%d ino %lu pos %llu len %u copied %u",
+                 __entry->dev_major, __entry->dev_minor,
+                 (unsigned long) __entry->ino, __entry->pos,
+                 __entry->len, __entry->copied)
 );
 
 DEFINE_EVENT(ext4__write_end, ext4_ordered_write_end,
@@ -199,21 +213,23 @@ TRACE_EVENT(ext4_writepage,
        TP_ARGS(inode, page),
 
        TP_STRUCT__entry(
-               __field(        dev_t,  dev                     )
+               __field(        int,   dev_major                )
+               __field(        int,   dev_minor                )
                __field(        ino_t,  ino                     )
                __field(        pgoff_t, index                  )
 
        ),
 
        TP_fast_assign(
-               __entry->dev    = inode->i_sb->s_dev;
+               __entry->dev_major = MAJOR(inode->i_sb->s_dev);
+               __entry->dev_minor = MINOR(inode->i_sb->s_dev);
                __entry->ino    = inode->i_ino;
                __entry->index  = page->index;
        ),
 
-       TP_printk("dev %s ino %lu page_index %lu",
-                 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
-                 __entry->index)
+       TP_printk("dev %d,%d ino %lu page_index %lu",
+                 __entry->dev_major, __entry->dev_minor,
+                 (unsigned long) __entry->ino, __entry->index)
 );
 
 TRACE_EVENT(ext4_da_writepages,
@@ -222,13 +238,13 @@ TRACE_EVENT(ext4_da_writepages,
        TP_ARGS(inode, wbc),
 
        TP_STRUCT__entry(
-               __field(        dev_t,  dev                     )
+               __field(        int,   dev_major                )
+               __field(        int,   dev_minor                )
                __field(        ino_t,  ino                     )
                __field(        long,   nr_to_write             )
                __field(        long,   pages_skipped           )
                __field(        loff_t, range_start             )
                __field(        loff_t, range_end               )
-               __field(        char,   nonblocking             )
                __field(        char,   for_kupdate             )
                __field(        char,   for_reclaim             )
                __field(        char,   range_cyclic            )
@@ -236,7 +252,8 @@ TRACE_EVENT(ext4_da_writepages,
        ),
 
        TP_fast_assign(
-               __entry->dev            = inode->i_sb->s_dev;
+               __entry->dev_major      = MAJOR(inode->i_sb->s_dev);
+               __entry->dev_minor      = MINOR(inode->i_sb->s_dev);
                __entry->ino            = inode->i_ino;
                __entry->nr_to_write    = wbc->nr_to_write;
                __entry->pages_skipped  = wbc->pages_skipped;
@@ -248,11 +265,11 @@ TRACE_EVENT(ext4_da_writepages,
                __entry->writeback_index = inode->i_mapping->writeback_index;
        ),
 
-       TP_printk("dev %s ino %lu nr_to_write %ld pages_skipped %ld "
+       TP_printk("dev %d,%d ino %lu nr_to_write %ld pages_skipped %ld "
                  "range_start %llu range_end %llu "
                  "for_kupdate %d for_reclaim %d "
                  "range_cyclic %d writeback_index %lu",
-                 jbd2_dev_to_name(__entry->dev),
+                 __entry->dev_major, __entry->dev_minor,
                  (unsigned long) __entry->ino, __entry->nr_to_write,
                  __entry->pages_skipped, __entry->range_start,
                  __entry->range_end,
@@ -267,7 +284,8 @@ TRACE_EVENT(ext4_da_write_pages,
        TP_ARGS(inode, mpd),
 
        TP_STRUCT__entry(
-               __field(        dev_t,  dev                     )
+               __field(        int,   dev_major                )
+               __field(        int,   dev_minor                )
                __field(        ino_t,  ino                     )
                __field(        __u64,  b_blocknr               )
                __field(        __u32,  b_size                  )
@@ -278,7 +296,8 @@ TRACE_EVENT(ext4_da_write_pages,
        ),
 
        TP_fast_assign(
-               __entry->dev            = inode->i_sb->s_dev;
+               __entry->dev_major      = MAJOR(inode->i_sb->s_dev);
+               __entry->dev_minor      = MINOR(inode->i_sb->s_dev);
                __entry->ino            = inode->i_ino;
                __entry->b_blocknr      = mpd->b_blocknr;
                __entry->b_size         = mpd->b_size;
@@ -288,8 +307,9 @@ TRACE_EVENT(ext4_da_write_pages,
                __entry->pages_written  = mpd->pages_written;
        ),
 
-       TP_printk("dev %s ino %lu b_blocknr %llu b_size %u b_state 0x%04x first_page %lu io_done %d pages_written %d",
-                 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+       TP_printk("dev %d,%d ino %lu b_blocknr %llu b_size %u b_state 0x%04x first_page %lu io_done %d pages_written %d",
+                 __entry->dev_major, __entry->dev_minor,
+                 (unsigned long) __entry->ino,
                  __entry->b_blocknr, __entry->b_size,
                  __entry->b_state, __entry->first_page,
                  __entry->io_done, __entry->pages_written)
@@ -302,7 +322,8 @@ TRACE_EVENT(ext4_da_writepages_result,
        TP_ARGS(inode, wbc, ret, pages_written),
 
        TP_STRUCT__entry(
-               __field(        dev_t,  dev                     )
+               __field(        int,   dev_major                )
+               __field(        int,   dev_minor                )
                __field(        ino_t,  ino                     )
                __field(        int,    ret                     )
                __field(        int,    pages_written           )
@@ -312,7 +333,8 @@ TRACE_EVENT(ext4_da_writepages_result,
        ),
 
        TP_fast_assign(
-               __entry->dev            = inode->i_sb->s_dev;
+               __entry->dev_major      = MAJOR(inode->i_sb->s_dev);
+               __entry->dev_minor      = MINOR(inode->i_sb->s_dev);
                __entry->ino            = inode->i_ino;
                __entry->ret            = ret;
                __entry->pages_written  = pages_written;
@@ -321,8 +343,8 @@ TRACE_EVENT(ext4_da_writepages_result,
                __entry->writeback_index = inode->i_mapping->writeback_index;
        ),
 
-       TP_printk("dev %s ino %lu ret %d pages_written %d pages_skipped %ld more_io %d writeback_index %lu",
-                 jbd2_dev_to_name(__entry->dev),
+       TP_printk("dev %d,%d ino %lu ret %d pages_written %d pages_skipped %ld more_io %d writeback_index %lu",
+                 __entry->dev_major, __entry->dev_minor,
                  (unsigned long) __entry->ino, __entry->ret,
                  __entry->pages_written, __entry->pages_skipped,
                  __entry->more_io,
@@ -336,20 +358,23 @@ TRACE_EVENT(ext4_discard_blocks,
        TP_ARGS(sb, blk, count),
 
        TP_STRUCT__entry(
-               __field(        dev_t,  dev                     )
+               __field(        int,   dev_major                )
+               __field(        int,   dev_minor                )
                __field(        __u64,  blk                     )
                __field(        __u64,  count                   )
 
        ),
 
        TP_fast_assign(
-               __entry->dev    = sb->s_dev;
+               __entry->dev_major = MAJOR(sb->s_dev);
+               __entry->dev_minor = MINOR(sb->s_dev);
                __entry->blk    = blk;
                __entry->count  = count;
        ),
 
-       TP_printk("dev %s blk %llu count %llu",
-                 jbd2_dev_to_name(__entry->dev), __entry->blk, __entry->count)
+       TP_printk("dev %d,%d blk %llu count %llu",
+                 __entry->dev_major, __entry->dev_minor,
+                 __entry->blk, __entry->count)
 );
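
The hunks above repeat one pattern across the whole file: each tracepoint drops its dev_t field in favour of two plain ints, so TP_printk() can emit "major,minor" directly instead of calling jbd2_dev_to_name(). A minimal userspace analogue of the MAJOR()/MINOR() split, as a sketch (stat'ing "/" is just a convenient way to obtain a real device number):

    #include <stdio.h>
    #include <sys/stat.h>
    #include <sys/sysmacros.h>      /* major(), minor() */

    int main(void)
    {
            struct stat st;

            if (stat("/", &st) != 0)
                    return 1;
            /* same split the tracepoints now store as dev_major/dev_minor */
            printf("dev %d,%d\n", (int)major(st.st_dev), (int)minor(st.st_dev));
            return 0;
    }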
 
 DECLARE_EVENT_CLASS(ext4__mb_new_pa,
@@ -359,7 +384,8 @@ DECLARE_EVENT_CLASS(ext4__mb_new_pa,
        TP_ARGS(ac, pa),
 
        TP_STRUCT__entry(
-               __field(        dev_t,  dev                     )
+               __field(        int,   dev_major                )
+               __field(        int,   dev_minor                )
                __field(        ino_t,  ino                     )
                __field(        __u64,  pa_pstart               )
                __field(        __u32,  pa_len                  )
@@ -368,16 +394,18 @@ DECLARE_EVENT_CLASS(ext4__mb_new_pa,
        ),
 
        TP_fast_assign(
-               __entry->dev            = ac->ac_sb->s_dev;
+               __entry->dev_major      = MAJOR(ac->ac_sb->s_dev);
+               __entry->dev_minor      = MINOR(ac->ac_sb->s_dev);
                __entry->ino            = ac->ac_inode->i_ino;
                __entry->pa_pstart      = pa->pa_pstart;
                __entry->pa_len         = pa->pa_len;
                __entry->pa_lstart      = pa->pa_lstart;
        ),
 
-       TP_printk("dev %s ino %lu pstart %llu len %u lstart %llu",
-                 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
-                 __entry->pa_pstart, __entry->pa_len, __entry->pa_lstart)
+       TP_printk("dev %d,%d ino %lu pstart %llu len %u lstart %llu",
+                 __entry->dev_major, __entry->dev_minor,
+                 (unsigned long) __entry->ino, __entry->pa_pstart,
+                 __entry->pa_len, __entry->pa_lstart)
 );
 
 DEFINE_EVENT(ext4__mb_new_pa, ext4_mb_new_inode_pa,
@@ -398,14 +426,15 @@ DEFINE_EVENT(ext4__mb_new_pa, ext4_mb_new_group_pa,
 
 TRACE_EVENT(ext4_mb_release_inode_pa,
        TP_PROTO(struct super_block *sb,
-                struct ext4_allocation_context *ac,
+                struct inode *inode,
                 struct ext4_prealloc_space *pa,
                 unsigned long long block, unsigned int count),
 
-       TP_ARGS(sb, ac, pa, block, count),
+       TP_ARGS(sb, inode, pa, block, count),
 
        TP_STRUCT__entry(
-               __field(        dev_t,  dev                     )
+               __field(        int,   dev_major                )
+               __field(        int,   dev_minor                )
                __field(        ino_t,  ino                     )
                __field(        __u64,  block                   )
                __field(        __u32,  count                   )
@@ -413,43 +442,42 @@ TRACE_EVENT(ext4_mb_release_inode_pa,
        ),
 
        TP_fast_assign(
-               __entry->dev            = sb->s_dev;
-               __entry->ino            = (ac && ac->ac_inode) ? 
-                                               ac->ac_inode->i_ino : 0;
+               __entry->dev_major      = MAJOR(sb->s_dev);
+               __entry->dev_minor      = MINOR(sb->s_dev);
+               __entry->ino            = inode->i_ino;
                __entry->block          = block;
                __entry->count          = count;
        ),
 
-       TP_printk("dev %s ino %lu block %llu count %u",
-                 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
-                 __entry->block, __entry->count)
+       TP_printk("dev %d,%d ino %lu block %llu count %u",
+                 __entry->dev_major, __entry->dev_minor,
+                 (unsigned long) __entry->ino, __entry->block, __entry->count)
 );
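
Note that ext4_mb_release_inode_pa now takes the inode directly instead of fishing it out of a possibly-NULL allocation context, which is why the guarded ternary on ino disappears. A hypothetical call site under the new prototype (pa_inode is the inode field of struct ext4_prealloc_space; block and count are placeholders for whatever the release path has at hand):

    trace_ext4_mb_release_inode_pa(sb, pa->pa_inode, pa, block, count);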
 
 TRACE_EVENT(ext4_mb_release_group_pa,
        TP_PROTO(struct super_block *sb,
-                struct ext4_allocation_context *ac,
                 struct ext4_prealloc_space *pa),
 
-       TP_ARGS(sb, ac, pa),
+       TP_ARGS(sb, pa),
 
        TP_STRUCT__entry(
-               __field(        dev_t,  dev                     )
-               __field(        ino_t,  ino                     )
+               __field(        int,   dev_major                )
+               __field(        int,   dev_minor                )
                __field(        __u64,  pa_pstart               )
                __field(        __u32,  pa_len                  )
 
        ),
 
        TP_fast_assign(
-               __entry->dev            = sb->s_dev;
-               __entry->ino            = (ac && ac->ac_inode) ?
-                                               ac->ac_inode->i_ino : 0;
+               __entry->dev_major      = MAJOR(sb->s_dev);
+               __entry->dev_minor      = MINOR(sb->s_dev);
                __entry->pa_pstart      = pa->pa_pstart;
                __entry->pa_len         = pa->pa_len;
        ),
 
-       TP_printk("dev %s pstart %llu len %u",
-                 jbd2_dev_to_name(__entry->dev), __entry->pa_pstart, __entry->pa_len)
+       TP_printk("dev %d,%d pstart %llu len %u",
+                 __entry->dev_major, __entry->dev_minor,
+                 __entry->pa_pstart, __entry->pa_len)
 );
 
 TRACE_EVENT(ext4_discard_preallocations,
@@ -458,18 +486,21 @@ TRACE_EVENT(ext4_discard_preallocations,
        TP_ARGS(inode),
 
        TP_STRUCT__entry(
-               __field(        dev_t,  dev                     )
+               __field(        int,   dev_major                )
+               __field(        int,   dev_minor                )
                __field(        ino_t,  ino                     )
 
        ),
 
        TP_fast_assign(
-               __entry->dev    = inode->i_sb->s_dev;
+               __entry->dev_major = MAJOR(inode->i_sb->s_dev);
+               __entry->dev_minor = MINOR(inode->i_sb->s_dev);
                __entry->ino    = inode->i_ino;
        ),
 
-       TP_printk("dev %s ino %lu",
-                 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino)
+       TP_printk("dev %d,%d ino %lu",
+                 __entry->dev_major, __entry->dev_minor,
+                 (unsigned long) __entry->ino)
 );
 
 TRACE_EVENT(ext4_mb_discard_preallocations,
@@ -478,18 +509,20 @@ TRACE_EVENT(ext4_mb_discard_preallocations,
        TP_ARGS(sb, needed),
 
        TP_STRUCT__entry(
-               __field(        dev_t,  dev                     )
+               __field(        int,   dev_major                )
+               __field(        int,   dev_minor                )
                __field(        int,    needed                  )
 
        ),
 
        TP_fast_assign(
-               __entry->dev    = sb->s_dev;
+               __entry->dev_major = MAJOR(sb->s_dev);
+               __entry->dev_minor = MINOR(sb->s_dev);
                __entry->needed = needed;
        ),
 
-       TP_printk("dev %s needed %d",
-                 jbd2_dev_to_name(__entry->dev), __entry->needed)
+       TP_printk("dev %d,%d needed %d",
+                 __entry->dev_major, __entry->dev_minor, __entry->needed)
 );
 
 TRACE_EVENT(ext4_request_blocks,
@@ -498,7 +531,8 @@ TRACE_EVENT(ext4_request_blocks,
        TP_ARGS(ar),
 
        TP_STRUCT__entry(
-               __field(        dev_t,  dev                     )
+               __field(        int,   dev_major                )
+               __field(        int,   dev_minor                )
                __field(        ino_t,  ino                     )
                __field(        unsigned int, flags             )
                __field(        unsigned int, len               )
@@ -511,7 +545,8 @@ TRACE_EVENT(ext4_request_blocks,
        ),
 
        TP_fast_assign(
-               __entry->dev    = ar->inode->i_sb->s_dev;
+               __entry->dev_major = MAJOR(ar->inode->i_sb->s_dev);
+               __entry->dev_minor = MINOR(ar->inode->i_sb->s_dev);
                __entry->ino    = ar->inode->i_ino;
                __entry->flags  = ar->flags;
                __entry->len    = ar->len;
@@ -523,8 +558,9 @@ TRACE_EVENT(ext4_request_blocks,
                __entry->pright = ar->pright;
        ),
 
-       TP_printk("dev %s ino %lu flags %u len %u lblk %llu goal %llu lleft %llu lright %llu pleft %llu pright %llu ",
-                 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+       TP_printk("dev %d,%d ino %lu flags %u len %u lblk %llu goal %llu lleft %llu lright %llu pleft %llu pright %llu ",
+                 __entry->dev_major, __entry->dev_minor,
+                 (unsigned long) __entry->ino,
                  __entry->flags, __entry->len,
                  (unsigned long long) __entry->logical,
                  (unsigned long long) __entry->goal,
@@ -540,7 +576,8 @@ TRACE_EVENT(ext4_allocate_blocks,
        TP_ARGS(ar, block),
 
        TP_STRUCT__entry(
-               __field(        dev_t,  dev                     )
+               __field(        int,   dev_major                )
+               __field(        int,   dev_minor                )
                __field(        ino_t,  ino                     )
                __field(        __u64,  block                   )
                __field(        unsigned int, flags             )
@@ -554,7 +591,8 @@ TRACE_EVENT(ext4_allocate_blocks,
        ),
 
        TP_fast_assign(
-               __entry->dev    = ar->inode->i_sb->s_dev;
+               __entry->dev_major = MAJOR(ar->inode->i_sb->s_dev);
+               __entry->dev_minor = MINOR(ar->inode->i_sb->s_dev);
                __entry->ino    = ar->inode->i_ino;
                __entry->block  = block;
                __entry->flags  = ar->flags;
@@ -567,9 +605,10 @@ TRACE_EVENT(ext4_allocate_blocks,
                __entry->pright = ar->pright;
        ),
 
-       TP_printk("dev %s ino %lu flags %u len %u block %llu lblk %llu goal %llu lleft %llu lright %llu pleft %llu pright %llu ",
-                 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
-                 __entry->flags, __entry->len, __entry->block,
+       TP_printk("dev %d,%d ino %lu flags %u len %u block %llu lblk %llu goal %llu lleft %llu lright %llu pleft %llu pright %llu ",
+                 __entry->dev_major, __entry->dev_minor,
+                 (unsigned long) __entry->ino, __entry->flags,
+                 __entry->len, __entry->block,
                  (unsigned long long) __entry->logical,
                  (unsigned long long) __entry->goal,
                  (unsigned long long) __entry->lleft,
@@ -585,7 +624,8 @@ TRACE_EVENT(ext4_free_blocks,
        TP_ARGS(inode, block, count, flags),
 
        TP_STRUCT__entry(
-               __field(        dev_t,  dev                     )
+               __field(        int,   dev_major                )
+               __field(        int,   dev_minor                )
                __field(        ino_t,  ino                     )
                __field(      umode_t, mode                     )
                __field(        __u64,  block                   )
@@ -594,7 +634,8 @@ TRACE_EVENT(ext4_free_blocks,
        ),
 
        TP_fast_assign(
-               __entry->dev            = inode->i_sb->s_dev;
+               __entry->dev_major      = MAJOR(inode->i_sb->s_dev);
+               __entry->dev_minor      = MINOR(inode->i_sb->s_dev);
                __entry->ino            = inode->i_ino;
                __entry->mode           = inode->i_mode;
                __entry->block          = block;
@@ -602,8 +643,9 @@ TRACE_EVENT(ext4_free_blocks,
                __entry->flags          = flags;
        ),
 
-       TP_printk("dev %s ino %lu mode 0%o block %llu count %lu flags %d",
-                 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+       TP_printk("dev %d,%d ino %lu mode 0%o block %llu count %lu flags %d",
+                 __entry->dev_major, __entry->dev_minor,
+                 (unsigned long) __entry->ino,
                  __entry->mode, __entry->block, __entry->count,
                  __entry->flags)
 );
@@ -614,7 +656,8 @@ TRACE_EVENT(ext4_sync_file,
        TP_ARGS(file, datasync),
 
        TP_STRUCT__entry(
-               __field(        dev_t,  dev                     )
+               __field(        int,   dev_major                )
+               __field(        int,   dev_minor                )
                __field(        ino_t,  ino                     )
                __field(        ino_t,  parent                  )
                __field(        int,    datasync                )
@@ -623,14 +666,16 @@ TRACE_EVENT(ext4_sync_file,
        TP_fast_assign(
                struct dentry *dentry = file->f_path.dentry;
 
-               __entry->dev            = dentry->d_inode->i_sb->s_dev;
+               __entry->dev_major      = MAJOR(dentry->d_inode->i_sb->s_dev);
+               __entry->dev_minor      = MINOR(dentry->d_inode->i_sb->s_dev);
                __entry->ino            = dentry->d_inode->i_ino;
                __entry->datasync       = datasync;
                __entry->parent         = dentry->d_parent->d_inode->i_ino;
        ),
 
-       TP_printk("dev %s ino %ld parent %ld datasync %d ",
-                 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+       TP_printk("dev %d,%d ino %ld parent %ld datasync %d ",
+                 __entry->dev_major, __entry->dev_minor,
+                 (unsigned long) __entry->ino,
                  (unsigned long) __entry->parent, __entry->datasync)
 );
 
@@ -640,18 +685,20 @@ TRACE_EVENT(ext4_sync_fs,
        TP_ARGS(sb, wait),
 
        TP_STRUCT__entry(
-               __field(        dev_t,  dev                     )
+               __field(        int,   dev_major                )
+               __field(        int,   dev_minor                )
                __field(        int,    wait                    )
 
        ),
 
        TP_fast_assign(
-               __entry->dev    = sb->s_dev;
+               __entry->dev_major = MAJOR(sb->s_dev);
+               __entry->dev_minor = MINOR(sb->s_dev);
                __entry->wait   = wait;
        ),
 
-       TP_printk("dev %s wait %d", jbd2_dev_to_name(__entry->dev),
-                 __entry->wait)
+       TP_printk("dev %d,%d wait %d", __entry->dev_major,
+                 __entry->dev_minor, __entry->wait)
 );
 
 TRACE_EVENT(ext4_alloc_da_blocks,
@@ -660,21 +707,24 @@ TRACE_EVENT(ext4_alloc_da_blocks,
        TP_ARGS(inode),
 
        TP_STRUCT__entry(
-               __field(        dev_t,  dev                     )
+               __field(        int,   dev_major                )
+               __field(        int,   dev_minor                )
                __field(        ino_t,  ino                     )
                __field( unsigned int,  data_blocks     )
                __field( unsigned int,  meta_blocks     )
        ),
 
        TP_fast_assign(
-               __entry->dev    = inode->i_sb->s_dev;
+               __entry->dev_major = MAJOR(inode->i_sb->s_dev);
+               __entry->dev_minor = MINOR(inode->i_sb->s_dev);
                __entry->ino    = inode->i_ino;
                __entry->data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
                __entry->meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
        ),
 
-       TP_printk("dev %s ino %lu data_blocks %u meta_blocks %u",
-                 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+       TP_printk("dev %d,%d ino %lu data_blocks %u meta_blocks %u",
+                 __entry->dev_major, __entry->dev_minor,
+                 (unsigned long) __entry->ino,
                  __entry->data_blocks, __entry->meta_blocks)
 );
 
@@ -684,7 +734,8 @@ TRACE_EVENT(ext4_mballoc_alloc,
        TP_ARGS(ac),
 
        TP_STRUCT__entry(
-               __field(        dev_t,  dev                     )
+               __field(        int,   dev_major                )
+               __field(        int,   dev_minor                )
                __field(        ino_t,  ino                     )
                __field(        __u16,  found                   )
                __field(        __u16,  groups                  )
@@ -707,7 +758,8 @@ TRACE_EVENT(ext4_mballoc_alloc,
        ),
 
        TP_fast_assign(
-               __entry->dev            = ac->ac_inode->i_sb->s_dev;
+               __entry->dev_major      = MAJOR(ac->ac_inode->i_sb->s_dev);
+               __entry->dev_minor      = MINOR(ac->ac_inode->i_sb->s_dev);
                __entry->ino            = ac->ac_inode->i_ino;
                __entry->found          = ac->ac_found;
                __entry->flags          = ac->ac_flags;
@@ -729,10 +781,11 @@ TRACE_EVENT(ext4_mballoc_alloc,
                __entry->result_len     = ac->ac_f_ex.fe_len;
        ),
 
-       TP_printk("dev %s inode %lu orig %u/%d/%u@%u goal %u/%d/%u@%u "
+       TP_printk("dev %d,%d inode %lu orig %u/%d/%u@%u goal %u/%d/%u@%u "
                  "result %u/%d/%u@%u blks %u grps %u cr %u flags 0x%04x "
                  "tail %u broken %u",
-                 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+                 __entry->dev_major, __entry->dev_minor,
+                 (unsigned long) __entry->ino,
                  __entry->orig_group, __entry->orig_start,
                  __entry->orig_len, __entry->orig_logical,
                  __entry->goal_group, __entry->goal_start,
@@ -750,7 +803,8 @@ TRACE_EVENT(ext4_mballoc_prealloc,
        TP_ARGS(ac),
 
        TP_STRUCT__entry(
-               __field(        dev_t,  dev                     )
+               __field(        int,   dev_major                )
+               __field(        int,   dev_minor                )
                __field(        ino_t,  ino                     )
                __field(        __u32,  orig_logical            )
                __field(          int,  orig_start              )
@@ -763,7 +817,8 @@ TRACE_EVENT(ext4_mballoc_prealloc,
        ),
 
        TP_fast_assign(
-               __entry->dev            = ac->ac_inode->i_sb->s_dev;
+               __entry->dev_major      = MAJOR(ac->ac_inode->i_sb->s_dev);
+               __entry->dev_minor      = MINOR(ac->ac_inode->i_sb->s_dev);
                __entry->ino            = ac->ac_inode->i_ino;
                __entry->orig_logical   = ac->ac_o_ex.fe_logical;
                __entry->orig_start     = ac->ac_o_ex.fe_start;
@@ -775,8 +830,9 @@ TRACE_EVENT(ext4_mballoc_prealloc,
                __entry->result_len     = ac->ac_b_ex.fe_len;
        ),
 
-       TP_printk("dev %s inode %lu orig %u/%d/%u@%u result %u/%d/%u@%u",
-                 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+       TP_printk("dev %d,%d inode %lu orig %u/%d/%u@%u result %u/%d/%u@%u",
+                 __entry->dev_major, __entry->dev_minor,
+                 (unsigned long) __entry->ino,
                  __entry->orig_group, __entry->orig_start,
                  __entry->orig_len, __entry->orig_logical,
                  __entry->result_group, __entry->result_start,
@@ -784,46 +840,59 @@ TRACE_EVENT(ext4_mballoc_prealloc,
 );
 
 DECLARE_EVENT_CLASS(ext4__mballoc,
-       TP_PROTO(struct ext4_allocation_context *ac),
+       TP_PROTO(struct super_block *sb,
+                struct inode *inode,
+                ext4_group_t group,
+                ext4_grpblk_t start,
+                ext4_grpblk_t len),
 
-       TP_ARGS(ac),
+       TP_ARGS(sb, inode, group, start, len),
 
        TP_STRUCT__entry(
-               __field(        dev_t,  dev                     )
+               __field(        int,   dev_major                )
+               __field(        int,   dev_minor                )
                __field(        ino_t,  ino                     )
-               __field(        __u32,  result_logical          )
                __field(          int,  result_start            )
                __field(        __u32,  result_group            )
                __field(          int,  result_len              )
        ),
 
        TP_fast_assign(
-               __entry->dev            = ac->ac_inode->i_sb->s_dev;
-               __entry->ino            = ac->ac_inode->i_ino;
-               __entry->result_logical = ac->ac_b_ex.fe_logical;
-               __entry->result_start   = ac->ac_b_ex.fe_start;
-               __entry->result_group   = ac->ac_b_ex.fe_group;
-               __entry->result_len     = ac->ac_b_ex.fe_len;
+               __entry->dev_major      = MAJOR(sb->s_dev);
+               __entry->dev_minor      = MINOR(sb->s_dev);
+               __entry->ino            = inode ? inode->i_ino : 0;
+               __entry->result_start   = start;
+               __entry->result_group   = group;
+               __entry->result_len     = len;
        ),
 
-       TP_printk("dev %s inode %lu extent %u/%d/%u@%u ",
-                 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+       TP_printk("dev %d,%d inode %lu extent %u/%d/%u ",
+                 __entry->dev_major, __entry->dev_minor,
+                 (unsigned long) __entry->ino,
                  __entry->result_group, __entry->result_start,
-                 __entry->result_len, __entry->result_logical)
+                 __entry->result_len)
 );
 
 DEFINE_EVENT(ext4__mballoc, ext4_mballoc_discard,
 
-       TP_PROTO(struct ext4_allocation_context *ac),
+       TP_PROTO(struct super_block *sb,
+                struct inode *inode,
+                ext4_group_t group,
+                ext4_grpblk_t start,
+                ext4_grpblk_t len),
 
-       TP_ARGS(ac)
+       TP_ARGS(sb, inode, group, start, len)
 );
 
 DEFINE_EVENT(ext4__mballoc, ext4_mballoc_free,
 
-       TP_PROTO(struct ext4_allocation_context *ac),
+       TP_PROTO(struct super_block *sb,
+                struct inode *inode,
+                ext4_group_t group,
+                ext4_grpblk_t start,
+                ext4_grpblk_t len),
 
-       TP_ARGS(ac)
+       TP_ARGS(sb, inode, group, start, len)
 );
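
With the ext4__mballoc class reworked to take (sb, inode, group, start, len) explicitly, the discard and free events can fire even on paths that never build an ext4_allocation_context; a NULL inode simply records ino 0. A hypothetical call site (the trace_* wrapper is what DEFINE_EVENT generates; the argument names are placeholders):

    trace_ext4_mballoc_free(sb, inode, group, start, len);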
 
 TRACE_EVENT(ext4_forget,
@@ -832,7 +901,8 @@ TRACE_EVENT(ext4_forget,
        TP_ARGS(inode, is_metadata, block),
 
        TP_STRUCT__entry(
-               __field(        dev_t,  dev                     )
+               __field(        int,   dev_major                )
+               __field(        int,   dev_minor                )
                __field(        ino_t,  ino                     )
                __field(        umode_t, mode                   )
                __field(        int,    is_metadata             )
@@ -840,16 +910,18 @@ TRACE_EVENT(ext4_forget,
        ),
 
        TP_fast_assign(
-               __entry->dev    = inode->i_sb->s_dev;
+               __entry->dev_major = MAJOR(inode->i_sb->s_dev);
+               __entry->dev_minor = MINOR(inode->i_sb->s_dev);
                __entry->ino    = inode->i_ino;
                __entry->mode   = inode->i_mode;
                __entry->is_metadata = is_metadata;
                __entry->block  = block;
        ),
 
-       TP_printk("dev %s ino %lu mode 0%o is_metadata %d block %llu",
-                 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
-                 __entry->mode, __entry->is_metadata, __entry->block)
+       TP_printk("dev %d,%d ino %lu mode 0%o is_metadata %d block %llu",
+                 __entry->dev_major, __entry->dev_minor,
+                 (unsigned long) __entry->ino, __entry->mode,
+                 __entry->is_metadata, __entry->block)
 );
 
 TRACE_EVENT(ext4_da_update_reserve_space,
@@ -858,7 +930,8 @@ TRACE_EVENT(ext4_da_update_reserve_space,
        TP_ARGS(inode, used_blocks),
 
        TP_STRUCT__entry(
-               __field(        dev_t,  dev                     )
+               __field(        int,   dev_major                )
+               __field(        int,   dev_minor                )
                __field(        ino_t,  ino                     )
                __field(        umode_t, mode                   )
                __field(        __u64,  i_blocks                )
@@ -869,7 +942,8 @@ TRACE_EVENT(ext4_da_update_reserve_space,
        ),
 
        TP_fast_assign(
-               __entry->dev    = inode->i_sb->s_dev;
+               __entry->dev_major = MAJOR(inode->i_sb->s_dev);
+               __entry->dev_minor = MINOR(inode->i_sb->s_dev);
                __entry->ino    = inode->i_ino;
                __entry->mode   = inode->i_mode;
                __entry->i_blocks = inode->i_blocks;
@@ -879,9 +953,10 @@ TRACE_EVENT(ext4_da_update_reserve_space,
                __entry->allocated_meta_blocks = EXT4_I(inode)->i_allocated_meta_blocks;
        ),
 
-       TP_printk("dev %s ino %lu mode 0%o i_blocks %llu used_blocks %d reserved_data_blocks %d reserved_meta_blocks %d allocated_meta_blocks %d",
-                 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
-                 __entry->mode,  (unsigned long long) __entry->i_blocks,
+       TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu used_blocks %d reserved_data_blocks %d reserved_meta_blocks %d allocated_meta_blocks %d",
+                 __entry->dev_major, __entry->dev_minor,
+                 (unsigned long) __entry->ino, __entry->mode,
+                 (unsigned long long) __entry->i_blocks,
                  __entry->used_blocks, __entry->reserved_data_blocks,
                  __entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
 );
@@ -892,7 +967,8 @@ TRACE_EVENT(ext4_da_reserve_space,
        TP_ARGS(inode, md_needed),
 
        TP_STRUCT__entry(
-               __field(        dev_t,  dev                     )
+               __field(        int,   dev_major                )
+               __field(        int,   dev_minor                )
                __field(        ino_t,  ino                     )
                __field(        umode_t, mode                   )
                __field(        __u64,  i_blocks                )
@@ -902,7 +978,8 @@ TRACE_EVENT(ext4_da_reserve_space,
        ),
 
        TP_fast_assign(
-               __entry->dev    = inode->i_sb->s_dev;
+               __entry->dev_major = MAJOR(inode->i_sb->s_dev);
+               __entry->dev_minor = MINOR(inode->i_sb->s_dev);
                __entry->ino    = inode->i_ino;
                __entry->mode   = inode->i_mode;
                __entry->i_blocks = inode->i_blocks;
@@ -911,8 +988,9 @@ TRACE_EVENT(ext4_da_reserve_space,
                __entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
        ),
 
-       TP_printk("dev %s ino %lu mode 0%o i_blocks %llu md_needed %d reserved_data_blocks %d reserved_meta_blocks %d",
-                 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+       TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu md_needed %d reserved_data_blocks %d reserved_meta_blocks %d",
+                 __entry->dev_major, __entry->dev_minor,
+                 (unsigned long) __entry->ino,
                  __entry->mode, (unsigned long long) __entry->i_blocks,
                  __entry->md_needed, __entry->reserved_data_blocks,
                  __entry->reserved_meta_blocks)
@@ -924,7 +1002,8 @@ TRACE_EVENT(ext4_da_release_space,
        TP_ARGS(inode, freed_blocks),
 
        TP_STRUCT__entry(
-               __field(        dev_t,  dev                     )
+               __field(        int,   dev_major                )
+               __field(        int,   dev_minor                )
                __field(        ino_t,  ino                     )
                __field(        umode_t, mode                   )
                __field(        __u64,  i_blocks                )
@@ -935,7 +1014,8 @@ TRACE_EVENT(ext4_da_release_space,
        ),
 
        TP_fast_assign(
-               __entry->dev    = inode->i_sb->s_dev;
+               __entry->dev_major = MAJOR(inode->i_sb->s_dev);
+               __entry->dev_minor = MINOR(inode->i_sb->s_dev);
                __entry->ino    = inode->i_ino;
                __entry->mode   = inode->i_mode;
                __entry->i_blocks = inode->i_blocks;
@@ -945,8 +1025,9 @@ TRACE_EVENT(ext4_da_release_space,
                __entry->allocated_meta_blocks = EXT4_I(inode)->i_allocated_meta_blocks;
        ),
 
-       TP_printk("dev %s ino %lu mode 0%o i_blocks %llu freed_blocks %d reserved_data_blocks %d reserved_meta_blocks %d allocated_meta_blocks %d",
-                 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+       TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu freed_blocks %d reserved_data_blocks %d reserved_meta_blocks %d allocated_meta_blocks %d",
+                 __entry->dev_major, __entry->dev_minor,
+                 (unsigned long) __entry->ino,
                  __entry->mode, (unsigned long long) __entry->i_blocks,
                  __entry->freed_blocks, __entry->reserved_data_blocks,
                  __entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
@@ -958,18 +1039,20 @@ DECLARE_EVENT_CLASS(ext4__bitmap_load,
        TP_ARGS(sb, group),
 
        TP_STRUCT__entry(
-               __field(        dev_t,  dev                     )
+               __field(        int,   dev_major                )
+               __field(        int,   dev_minor                )
                __field(        __u32,  group                   )
 
        ),
 
        TP_fast_assign(
-               __entry->dev    = sb->s_dev;
+               __entry->dev_major = MAJOR(sb->s_dev);
+               __entry->dev_minor = MINOR(sb->s_dev);
                __entry->group  = group;
        ),
 
-       TP_printk("dev %s group %u",
-                 jbd2_dev_to_name(__entry->dev), __entry->group)
+       TP_printk("dev %d,%d group %u",
+                 __entry->dev_major, __entry->dev_minor, __entry->group)
 );
 
 DEFINE_EVENT(ext4__bitmap_load, ext4_mb_bitmap_load,
index 6fa7cbab7d932c6649e9fbd4221615b6b4d0fd8d..1c09820df58564f8d0430d996998edc6f8d893c0 100644 (file)
@@ -86,76 +86,62 @@ TRACE_EVENT(irq_handler_exit,
 
 DECLARE_EVENT_CLASS(softirq,
 
-       TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
+       TP_PROTO(unsigned int vec_nr),
 
-       TP_ARGS(h, vec),
+       TP_ARGS(vec_nr),
 
        TP_STRUCT__entry(
-               __field(        int,    vec                     )
+               __field(        unsigned int,   vec     )
        ),
 
        TP_fast_assign(
-               if (vec)
-                       __entry->vec = (int)(h - vec);
-               else
-                       __entry->vec = (int)(long)h;
+               __entry->vec = vec_nr;
        ),
 
-       TP_printk("vec=%d [action=%s]", __entry->vec,
+       TP_printk("vec=%u [action=%s]", __entry->vec,
                  show_softirq_name(__entry->vec))
 );
 
 /**
  * softirq_entry - called immediately before the softirq handler
- * @h: pointer to struct softirq_action
- * @vec: pointer to first struct softirq_action in softirq_vec array
+ * @vec_nr:  softirq vector number
  *
- * The @h parameter, contains a pointer to the struct softirq_action
- * which has a pointer to the action handler that is called. By subtracting
- * the @vec pointer from the @h pointer, we can determine the softirq
- * number. Also, when used in combination with the softirq_exit tracepoint
- * we can determine the softirq latency.
+ * When used in combination with the softirq_exit tracepoint
+ * we can determine the softirq handler runtime.
  */
 DEFINE_EVENT(softirq, softirq_entry,
 
-       TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
+       TP_PROTO(unsigned int vec_nr),
 
-       TP_ARGS(h, vec)
+       TP_ARGS(vec_nr)
 );
 
 /**
  * softirq_exit - called immediately after the softirq handler returns
- * @h: pointer to struct softirq_action
- * @vec: pointer to first struct softirq_action in softirq_vec array
+ * @vec_nr:  softirq vector number
  *
- * The @h parameter contains a pointer to the struct softirq_action
- * that has handled the softirq. By subtracting the @vec pointer from
- * the @h pointer, we can determine the softirq number. Also, when used in
- * combination with the softirq_entry tracepoint we can determine the softirq
- * latency.
+ * When used in combination with the softirq_entry tracepoint
+ * we can determine the softirq handler runtime.
  */
 DEFINE_EVENT(softirq, softirq_exit,
 
-       TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
+       TP_PROTO(unsigned int vec_nr),
 
-       TP_ARGS(h, vec)
+       TP_ARGS(vec_nr)
 );
 
 /**
  * softirq_raise - called immediately when a softirq is raised
- * @h: pointer to struct softirq_action
- * @vec: pointer to first struct softirq_action in softirq_vec array
+ * @vec_nr:  softirq vector number
  *
- * The @h parameter contains a pointer to the softirq vector number which is
- * raised. @vec is NULL and it means @h includes vector number not
- * softirq_action. When used in combination with the softirq_entry tracepoint
- * we can determine the softirq raise latency.
+ * When used in combination with the softirq_entry tracepoint
+ * we can determine the softirq raise-to-run latency.
  */
 DEFINE_EVENT(softirq, softirq_raise,
 
-       TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
+       TP_PROTO(unsigned int vec_nr),
 
-       TP_ARGS(h, vec)
+       TP_ARGS(vec_nr)
 );
 
 #endif /*  _TRACE_IRQ_H */
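
All three softirq events now take the vector number itself, so a call site computes it once as a pointer offset rather than each tracepoint re-deriving it from two softirq_action pointers. A sketch of the caller side under the new signature (softirq_vec is the kernel's handler array; the surrounding loop is assumed, not quoted from kernel/softirq.c):

    /* h iterates over softirq_vec[], so the vector is just the offset */
    unsigned int vec_nr = h - softirq_vec;

    trace_softirq_entry(vec_nr);
    h->action(h);
    trace_softirq_exit(vec_nr);
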
index bf16545cc97756d263305f17712f522b979bbf32..7447ea9305b54eeece947d812a1c87cd3a58ba1c 100644 (file)
@@ -17,17 +17,19 @@ TRACE_EVENT(jbd2_checkpoint,
        TP_ARGS(journal, result),
 
        TP_STRUCT__entry(
-               __field(        dev_t,  dev                     )
+               __field(        int,    dev_major               )
+               __field(        int,    dev_minor               )
                __field(        int,    result                  )
        ),
 
        TP_fast_assign(
-               __entry->dev            = journal->j_fs_dev->bd_dev;
+               __entry->dev_major      = MAJOR(journal->j_fs_dev->bd_dev);
+               __entry->dev_minor      = MINOR(journal->j_fs_dev->bd_dev);
                __entry->result         = result;
        ),
 
-       TP_printk("dev %s result %d",
-                 jbd2_dev_to_name(__entry->dev), __entry->result)
+       TP_printk("dev %d,%d result %d",
+                 __entry->dev_major, __entry->dev_minor, __entry->result)
 );
 
 DECLARE_EVENT_CLASS(jbd2_commit,
@@ -37,20 +39,22 @@ DECLARE_EVENT_CLASS(jbd2_commit,
        TP_ARGS(journal, commit_transaction),
 
        TP_STRUCT__entry(
-               __field(        dev_t,  dev                     )
+               __field(        int,   dev_major                )
+               __field(        int,   dev_minor                )
                __field(        char,   sync_commit               )
                __field(        int,    transaction               )
        ),
 
        TP_fast_assign(
-               __entry->dev            = journal->j_fs_dev->bd_dev;
+               __entry->dev_major      = MAJOR(journal->j_fs_dev->bd_dev);
+               __entry->dev_minor      = MINOR(journal->j_fs_dev->bd_dev);
                __entry->sync_commit = commit_transaction->t_synchronous_commit;
                __entry->transaction    = commit_transaction->t_tid;
        ),
 
-       TP_printk("dev %s transaction %d sync %d",
-                 jbd2_dev_to_name(__entry->dev), __entry->transaction,
-                 __entry->sync_commit)
+       TP_printk("dev %d,%d transaction %d sync %d",
+                 __entry->dev_major, __entry->dev_minor,
+                 __entry->transaction, __entry->sync_commit)
 );
 
 DEFINE_EVENT(jbd2_commit, jbd2_start_commit,
@@ -87,22 +91,24 @@ TRACE_EVENT(jbd2_end_commit,
        TP_ARGS(journal, commit_transaction),
 
        TP_STRUCT__entry(
-               __field(        dev_t,  dev                     )
+               __field(        int,   dev_major                )
+               __field(        int,   dev_minor                )
                __field(        char,   sync_commit               )
                __field(        int,    transaction               )
                __field(        int,    head                      )
        ),
 
        TP_fast_assign(
-               __entry->dev            = journal->j_fs_dev->bd_dev;
+               __entry->dev_major      = MAJOR(journal->j_fs_dev->bd_dev);
+               __entry->dev_minor      = MINOR(journal->j_fs_dev->bd_dev);
                __entry->sync_commit = commit_transaction->t_synchronous_commit;
                __entry->transaction    = commit_transaction->t_tid;
                __entry->head           = journal->j_tail_sequence;
        ),
 
-       TP_printk("dev %s transaction %d sync %d head %d",
-                 jbd2_dev_to_name(__entry->dev), __entry->transaction,
-                 __entry->sync_commit, __entry->head)
+       TP_printk("dev %d,%d transaction %d sync %d head %d",
+                 __entry->dev_major, __entry->dev_minor,
+                 __entry->transaction, __entry->sync_commit, __entry->head)
 );
 
 TRACE_EVENT(jbd2_submit_inode_data,
@@ -111,17 +117,20 @@ TRACE_EVENT(jbd2_submit_inode_data,
        TP_ARGS(inode),
 
        TP_STRUCT__entry(
-               __field(        dev_t,  dev                     )
+               __field(        int,   dev_major                )
+               __field(        int,   dev_minor                )
                __field(        ino_t,  ino                     )
        ),
 
        TP_fast_assign(
-               __entry->dev    = inode->i_sb->s_dev;
+               __entry->dev_major = MAJOR(inode->i_sb->s_dev);
+               __entry->dev_minor = MINOR(inode->i_sb->s_dev);
                __entry->ino    = inode->i_ino;
        ),
 
-       TP_printk("dev %s ino %lu",
-                 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino)
+       TP_printk("dev %d,%d ino %lu",
+                 __entry->dev_major, __entry->dev_minor,
+                 (unsigned long) __entry->ino)
 );
 
 TRACE_EVENT(jbd2_run_stats,
@@ -131,7 +140,8 @@ TRACE_EVENT(jbd2_run_stats,
        TP_ARGS(dev, tid, stats),
 
        TP_STRUCT__entry(
-               __field(                dev_t,  dev             )
+               __field(                  int,  dev_major       )
+               __field(                  int,  dev_minor       )
                __field(        unsigned long,  tid             )
                __field(        unsigned long,  wait            )
                __field(        unsigned long,  running         )
@@ -144,7 +154,8 @@ TRACE_EVENT(jbd2_run_stats,
        ),
 
        TP_fast_assign(
-               __entry->dev            = dev;
+               __entry->dev_major      = MAJOR(dev);
+               __entry->dev_minor      = MINOR(dev);
                __entry->tid            = tid;
                __entry->wait           = stats->rs_wait;
                __entry->running        = stats->rs_running;
@@ -156,9 +167,9 @@ TRACE_EVENT(jbd2_run_stats,
                __entry->blocks_logged  = stats->rs_blocks_logged;
        ),
 
-       TP_printk("dev %s tid %lu wait %u running %u locked %u flushing %u "
+       TP_printk("dev %d,%d tid %lu wait %u running %u locked %u flushing %u "
                  "logging %u handle_count %u blocks %u blocks_logged %u",
-                 jbd2_dev_to_name(__entry->dev), __entry->tid,
+                 __entry->dev_major, __entry->dev_minor, __entry->tid,
                  jiffies_to_msecs(__entry->wait),
                  jiffies_to_msecs(__entry->running),
                  jiffies_to_msecs(__entry->locked),
@@ -175,7 +186,8 @@ TRACE_EVENT(jbd2_checkpoint_stats,
        TP_ARGS(dev, tid, stats),
 
        TP_STRUCT__entry(
-               __field(                dev_t,  dev             )
+               __field(                  int,  dev_major       )
+               __field(                  int,  dev_minor       )
                __field(        unsigned long,  tid             )
                __field(        unsigned long,  chp_time        )
                __field(                __u32,  forced_to_close )
@@ -184,7 +196,8 @@ TRACE_EVENT(jbd2_checkpoint_stats,
        ),
 
        TP_fast_assign(
-               __entry->dev            = dev;
+               __entry->dev_major      = MAJOR(dev);
+               __entry->dev_minor      = MINOR(dev);
                __entry->tid            = tid;
                __entry->chp_time       = stats->cs_chp_time;
                __entry->forced_to_close= stats->cs_forced_to_close;
@@ -192,9 +205,9 @@ TRACE_EVENT(jbd2_checkpoint_stats,
                __entry->dropped        = stats->cs_dropped;
        ),
 
-       TP_printk("dev %s tid %lu chp_time %u forced_to_close %u "
+       TP_printk("dev %d,%d tid %lu chp_time %u forced_to_close %u "
                  "written %u dropped %u",
-                 jbd2_dev_to_name(__entry->dev), __entry->tid,
+                 __entry->dev_major, __entry->dev_minor, __entry->tid,
                  jiffies_to_msecs(__entry->chp_time),
                  __entry->forced_to_close, __entry->written, __entry->dropped)
 );
@@ -207,7 +220,8 @@ TRACE_EVENT(jbd2_cleanup_journal_tail,
        TP_ARGS(journal, first_tid, block_nr, freed),
 
        TP_STRUCT__entry(
-               __field(        dev_t,  dev                     )
+               __field(        int,   dev_major                )
+               __field(        int,   dev_minor                )
                __field(        tid_t,  tail_sequence           )
                __field(        tid_t,  first_tid               )
                __field(unsigned long,  block_nr                )
@@ -215,16 +229,18 @@ TRACE_EVENT(jbd2_cleanup_journal_tail,
        ),
 
        TP_fast_assign(
-               __entry->dev            = journal->j_fs_dev->bd_dev;
+               __entry->dev_major      = MAJOR(journal->j_fs_dev->bd_dev);
+               __entry->dev_minor      = MINOR(journal->j_fs_dev->bd_dev);
                __entry->tail_sequence  = journal->j_tail_sequence;
                __entry->first_tid      = first_tid;
                __entry->block_nr       = block_nr;
                __entry->freed          = freed;
        ),
 
-       TP_printk("dev %s from %u to %u offset %lu freed %lu",
-                 jbd2_dev_to_name(__entry->dev), __entry->tail_sequence,
-                 __entry->first_tid, __entry->block_nr, __entry->freed)
+       TP_printk("dev %d,%d from %u to %u offset %lu freed %lu",
+                 __entry->dev_major, __entry->dev_minor,
+                 __entry->tail_sequence, __entry->first_tid,
+                 __entry->block_nr, __entry->freed)
 );
 
 #endif /* _TRACE_JBD2_H */
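
Because ext4 and jbd2 now print "dev %d,%d" rather than a resolved name, a trace consumer that wants a dev_t back must recombine the pair itself. A small userspace sketch (makedev() comes from sys/sysmacros.h; the sample trace line is invented):

    #include <stdio.h>
    #include <sys/sysmacros.h>      /* makedev() */

    int main(void)
    {
            int maj, min;

            /* parse the new numeric form out of a trace line */
            if (sscanf("dev 8,1 result 0", "dev %d,%d", &maj, &min) == 2)
                    printf("dev_t = %lu\n",
                           (unsigned long)makedev(maj, min));
            return 0;
    }
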
index 3ae8ffe738eb4878d9d1fe383e36279026d396e0..88c10468db467bb862554aab55f5d4891e4b0f03 100644 (file)
@@ -518,7 +518,6 @@ if CGROUPS
 
 config CGROUP_DEBUG
        bool "Example debug cgroup subsystem"
-       depends on CGROUPS
        default n
        help
          This option enables a simple cgroup subsystem that
@@ -529,7 +528,6 @@ config CGROUP_DEBUG
 
 config CGROUP_NS
        bool "Namespace cgroup subsystem"
-       depends on CGROUPS
        help
          Provides a simple namespace cgroup subsystem to
          provide hierarchical naming of sets of namespaces,
@@ -538,21 +536,18 @@ config CGROUP_NS
 
 config CGROUP_FREEZER
        bool "Freezer cgroup subsystem"
-       depends on CGROUPS
        help
          Provides a way to freeze and unfreeze all tasks in a
          cgroup.
 
 config CGROUP_DEVICE
        bool "Device controller for cgroups"
-       depends on CGROUPS && EXPERIMENTAL
        help
          Provides a cgroup implementing whitelists for devices which
          a process in the cgroup can mknod or open.
 
 config CPUSETS
        bool "Cpuset support"
-       depends on CGROUPS
        help
          This option will let you create and manage CPUSETs which
          allow dynamically partitioning a system into sets of CPUs and
@@ -568,7 +563,6 @@ config PROC_PID_CPUSET
 
 config CGROUP_CPUACCT
        bool "Simple CPU accounting cgroup subsystem"
-       depends on CGROUPS
        help
          Provides a simple Resource Controller for monitoring the
          total CPU consumed by the tasks in a cgroup.
@@ -578,11 +572,10 @@ config RESOURCE_COUNTERS
        help
          This option enables controller independent resource accounting
          infrastructure that works with cgroups.
-       depends on CGROUPS
 
 config CGROUP_MEM_RES_CTLR
        bool "Memory Resource Controller for Control Groups"
-       depends on CGROUPS && RESOURCE_COUNTERS
+       depends on RESOURCE_COUNTERS
        select MM_OWNER
        help
          Provides a memory resource controller that manages both anonymous
@@ -623,7 +616,7 @@ config CGROUP_MEM_RES_CTLR_SWAP
 
 menuconfig CGROUP_SCHED
        bool "Group CPU scheduler"
-       depends on EXPERIMENTAL && CGROUPS
+       depends on EXPERIMENTAL
        default n
        help
          This feature lets CPU scheduler recognize task groups and control CPU
@@ -652,7 +645,7 @@ endif #CGROUP_SCHED
 
 config BLK_CGROUP
        tristate "Block IO controller"
-       depends on CGROUPS && BLOCK
+       depends on BLOCK
        default n
        ---help---
        Generic block IO controller cgroup interface. This is the common
@@ -682,6 +675,59 @@ config DEBUG_BLK_CGROUP
 
 endif # CGROUPS
 
+menuconfig NAMESPACES
+       bool "Namespaces support" if EMBEDDED
+       default !EMBEDDED
+       help
+         Provides the way to make tasks work with different objects using
+         the same id. For example same IPC id may refer to different objects
+         or same user id or pid may refer to different tasks when used in
+         different namespaces.
+
+if NAMESPACES
+
+config UTS_NS
+       bool "UTS namespace"
+       default y
+       help
+         In this namespace tasks see different info provided with the
+         uname() system call
+
+config IPC_NS
+       bool "IPC namespace"
+       depends on (SYSVIPC || POSIX_MQUEUE)
+       default y
+       help
+         In this namespace tasks work with IPC ids which correspond to
+         different IPC objects in different namespaces.
+
+config USER_NS
+       bool "User namespace (EXPERIMENTAL)"
+       depends on EXPERIMENTAL
+       default y
+       help
+         This allows containers, i.e. vservers, to use user namespaces
+         to provide different user info for different servers.
+         If unsure, say N.
+
+config PID_NS
+       bool "PID Namespaces"
+       default y
+       help
+         Support process id namespaces.  This allows having multiple
+         processes with the same pid as long as they are in different
+         pid namespaces.  This is a building block of containers.
+
+config NET_NS
+       bool "Network namespace"
+       depends on NET
+       default y
+       help
+         Allow user space to create what appear to be multiple instances
+         of the network stack.
+
+endif # NAMESPACES
+
 config MM_OWNER
        bool
 
@@ -734,57 +780,6 @@ config RELAY
 
          If unsure, say N.
 
-config NAMESPACES
-       bool "Namespaces support" if EMBEDDED
-       default !EMBEDDED
-       help
-         Provides the way to make tasks work with different objects using
-         the same id. For example same IPC id may refer to different objects
-         or same user id or pid may refer to different tasks when used in
-         different namespaces.
-
-config UTS_NS
-       bool "UTS namespace"
-       depends on NAMESPACES
-       help
-         In this namespace tasks see different info provided with the
-         uname() system call
-
-config IPC_NS
-       bool "IPC namespace"
-       depends on NAMESPACES && (SYSVIPC || POSIX_MQUEUE)
-       help
-         In this namespace tasks work with IPC ids which correspond to
-         different IPC objects in different namespaces.
-
-config USER_NS
-       bool "User namespace (EXPERIMENTAL)"
-       depends on NAMESPACES && EXPERIMENTAL
-       help
-         This allows containers, i.e. vservers, to use user namespaces
-         to provide different user info for different servers.
-         If unsure, say N.
-
-config PID_NS
-       bool "PID Namespaces (EXPERIMENTAL)"
-       default n
-       depends on NAMESPACES && EXPERIMENTAL
-       help
-         Support process id namespaces.  This allows having multiple
-         processes with the same pid as long as they are in different
-         pid namespaces.  This is a building block of containers.
-
-         Unless you want to work with an experimental feature
-         say N here.
-
-config NET_NS
-       bool "Network namespace"
-       default n
-       depends on NAMESPACES && EXPERIMENTAL && NET
-       help
-         Allow user space to create what appear to be multiple instances
-         of the network stack.
-
 config BLK_DEV_INITRD
        bool "Initial RAM filesystem and RAM disk (initramfs/initrd) support"
        depends on BROKEN || !FRV
index 9dc2c7d3c9e6de04732f79e113e73e1e6d52b368..845a28738d3a824e5c7a4cd583c28c9d793467cf 100644 (file)
@@ -241,6 +241,8 @@ long compat_sys_semctl(int first, int second, int third, void __user *uptr)
        struct semid64_ds __user *up64;
        int version = compat_ipc_parse_version(&third);
 
+       memset(&s64, 0, sizeof(s64));
+
        if (!uptr)
                return -EINVAL;
        if (get_user(pad, (u32 __user *) uptr))
@@ -421,6 +423,8 @@ long compat_sys_msgctl(int first, int second, void __user *uptr)
        int version = compat_ipc_parse_version(&second);
        void __user *p;
 
+       memset(&m64, 0, sizeof(m64));
+
        switch (second & (~IPC_64)) {
        case IPC_INFO:
        case IPC_RMID:
@@ -594,6 +598,8 @@ long compat_sys_shmctl(int first, int second, void __user *uptr)
        int err, err2;
        int version = compat_ipc_parse_version(&second);
 
+       memset(&s64, 0, sizeof(s64));
+
        switch (second & (~IPC_64)) {
        case IPC_RMID:
        case SHM_LOCK:
index d8d1e9ff4e8869ba1c9ebe6300f37dec6c62e89b..380ea4fe08e7151c71c64a39eac8a9e92a2ea7ee 100644 (file)
@@ -53,6 +53,9 @@ asmlinkage long compat_sys_mq_open(const char __user *u_name,
        void __user *p = NULL;
        if (u_attr && oflag & O_CREAT) {
                struct mq_attr attr;
+
+               memset(&attr, 0, sizeof(attr));
+
                p = compat_alloc_user_space(sizeof(attr));
                if (get_compat_mq_attr(&attr, u_attr) ||
                    copy_to_user(p, &attr, sizeof(attr)))
@@ -127,6 +130,8 @@ asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes,
        struct mq_attr __user *p = compat_alloc_user_space(2 * sizeof(*p));
        long ret;
 
+       memset(&mqstat, 0, sizeof(mqstat));
+
        if (u_mqstat) {
                if (get_compat_mq_attr(&mqstat, u_mqstat) ||
                    copy_to_user(p, &mqstat, sizeof(mqstat)))
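
The memset() additions in ipc/compat.c and ipc/compat_mq.c all plug the same leak: a struct is built on the kernel stack, only some of its members are assigned, and the whole object is then copied to userspace, so padding and unset fields would otherwise carry stale stack bytes out. A minimal userspace illustration of the hazard (the struct is invented for the demo; the final assignment stands in for copy_to_user()):

    #include <string.h>

    struct demo {
            char c;         /* 1 byte ... */
            long val;       /* ... typically 7 padding bytes before this on 64-bit */
    };

    void fill(struct demo *out)
    {
            struct demo d;

            memset(&d, 0, sizeof(d));  /* without this, padding keeps old stack data */
            d.c = 'x';
            d.val = 42;
            *out = d;                  /* stand-in for copy_to_user() */
    }
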
index 7bc46a9fe1f8709cadae67f329f57b0943403832..fd658a1c2b88d43f565baaeb3666cc583caaef28 100644 (file)
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -108,7 +108,11 @@ void __init shm_init (void)
 {
        shm_init_ns(&init_ipc_ns);
        ipc_init_proc_interface("sysvipc/shm",
-                               "       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime\n",
+#if BITS_PER_LONG <= 32
+                               "       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
+#else
+                               "       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
+#endif
                                IPC_SHM_IDS, sysvipc_shm_proc_show);
 }
 
@@ -543,6 +547,34 @@ static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminf
        }
 }
 
+/*
+ * Calculate and add used RSS and swap pages of a shm.
+ * Called with shm_ids.rw_mutex held as a reader
+ */
+static void shm_add_rss_swap(struct shmid_kernel *shp,
+       unsigned long *rss_add, unsigned long *swp_add)
+{
+       struct inode *inode;
+
+       inode = shp->shm_file->f_path.dentry->d_inode;
+
+       if (is_file_hugepages(shp->shm_file)) {
+               struct address_space *mapping = inode->i_mapping;
+               struct hstate *h = hstate_file(shp->shm_file);
+               *rss_add += pages_per_huge_page(h) * mapping->nrpages;
+       } else {
+#ifdef CONFIG_SHMEM
+               struct shmem_inode_info *info = SHMEM_I(inode);
+               spin_lock(&info->lock);
+               *rss_add += inode->i_mapping->nrpages;
+               *swp_add += info->swapped;
+               spin_unlock(&info->lock);
+#else
+               *rss_add += inode->i_mapping->nrpages;
+#endif
+       }
+}
+
 /*
  * Called with shm_ids.rw_mutex held as a reader
  */
@@ -560,30 +592,13 @@ static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
        for (total = 0, next_id = 0; total < in_use; next_id++) {
                struct kern_ipc_perm *ipc;
                struct shmid_kernel *shp;
-               struct inode *inode;
 
                ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
                if (ipc == NULL)
                        continue;
                shp = container_of(ipc, struct shmid_kernel, shm_perm);
 
-               inode = shp->shm_file->f_path.dentry->d_inode;
-
-               if (is_file_hugepages(shp->shm_file)) {
-                       struct address_space *mapping = inode->i_mapping;
-                       struct hstate *h = hstate_file(shp->shm_file);
-                       *rss += pages_per_huge_page(h) * mapping->nrpages;
-               } else {
-#ifdef CONFIG_SHMEM
-                       struct shmem_inode_info *info = SHMEM_I(inode);
-                       spin_lock(&info->lock);
-                       *rss += inode->i_mapping->nrpages;
-                       *swp += info->swapped;
-                       spin_unlock(&info->lock);
-#else
-                       *rss += inode->i_mapping->nrpages;
-#endif
-               }
+               shm_add_rss_swap(shp, rss, swp);
 
                total++;
        }
@@ -1072,6 +1087,9 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
 static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
 {
        struct shmid_kernel *shp = it;
+       unsigned long rss = 0, swp = 0;
+
+       shm_add_rss_swap(shp, &rss, &swp);
 
 #if BITS_PER_LONG <= 32
 #define SIZE_SPEC "%10lu"
@@ -1081,7 +1099,8 @@ static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
 
        return seq_printf(s,
                          "%10d %10d  %4o " SIZE_SPEC " %5u %5u  "
-                         "%5lu %5u %5u %5u %5u %10lu %10lu %10lu\n",
+                         "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
+                         SIZE_SPEC " " SIZE_SPEC "\n",
                          shp->shm_perm.key,
                          shp->shm_perm.id,
                          shp->shm_perm.mode,
@@ -1095,6 +1114,8 @@ static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
                          shp->shm_perm.cgid,
                          shp->shm_atim,
                          shp->shm_dtim,
-                         shp->shm_ctim);
+                         shp->shm_ctim,
+                         rss * PAGE_SIZE,
+                         swp * PAGE_SIZE);
 }
 #endif
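
With the two extra SIZE_SPEC columns above, every /proc/sysvipc/shm row now ends with the segment's resident and swapped sizes in bytes (rss * PAGE_SIZE and swp * PAGE_SIZE). A minimal user-space sketch that picks out just those trailing fields; the sixteen-column layout is the one produced by the patched seq_printf(), and the twelve middle fields are skipped as opaque tokens:

    #include <stdio.h>
    #include <stdlib.h>

    /* Print the trailing rss/swap byte columns that the patch appends
     * to each /proc/sysvipc/shm row; the header line is skipped. */
    int main(void)
    {
        char line[512];
        FILE *f = fopen("/proc/sysvipc/shm", "r");

        if (!f) {
            perror("fopen");
            return EXIT_FAILURE;
        }
        if (!fgets(line, sizeof(line), f))  /* header */
            goto out;
        while (fgets(line, sizeof(line), f)) {
            int key, id;
            unsigned long rss, swap;

            if (sscanf(line, "%d %d %*s %*s %*s %*s %*s %*s %*s "
                             "%*s %*s %*s %*s %*s %lu %lu",
                       &key, &id, &rss, &swap) == 4)
                printf("key %d shmid %d: rss=%lu swap=%lu bytes\n",
                       key, id, rss, swap);
        }
    out:
        fclose(f);
        return 0;
    }
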
index 9270d532ec3c7c6a5be2cf3d1eca21788036b81f..5cf366965d0ca5ec8766781367135838f3b2eac0 100644 (file)
@@ -243,6 +243,11 @@ static int notify_on_release(const struct cgroup *cgrp)
        return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
 }
 
+static int clone_children(const struct cgroup *cgrp)
+{
+       return test_bit(CGRP_CLONE_CHILDREN, &cgrp->flags);
+}
+
 /*
  * for_each_subsys() allows you to iterate on each subsystem attached to
  * an active hierarchy
@@ -1040,6 +1045,8 @@ static int cgroup_show_options(struct seq_file *seq, struct vfsmount *vfs)
                seq_puts(seq, ",noprefix");
        if (strlen(root->release_agent_path))
                seq_printf(seq, ",release_agent=%s", root->release_agent_path);
+       if (clone_children(&root->top_cgroup))
+               seq_puts(seq, ",clone_children");
        if (strlen(root->name))
                seq_printf(seq, ",name=%s", root->name);
        mutex_unlock(&cgroup_mutex);
@@ -1050,6 +1057,7 @@ struct cgroup_sb_opts {
        unsigned long subsys_bits;
        unsigned long flags;
        char *release_agent;
+       bool clone_children;
        char *name;
        /* User explicitly requested empty subsystem */
        bool none;
@@ -1066,7 +1074,8 @@ struct cgroup_sb_opts {
  */
 static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
 {
-       char *token, *o = data ?: "all";
+       char *token, *o = data;
+       bool all_ss = false, one_ss = false;
        unsigned long mask = (unsigned long)-1;
        int i;
        bool module_pin_failed = false;
@@ -1082,22 +1091,27 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
        while ((token = strsep(&o, ",")) != NULL) {
                if (!*token)
                        return -EINVAL;
-               if (!strcmp(token, "all")) {
-                       /* Add all non-disabled subsystems */
-                       opts->subsys_bits = 0;
-                       for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
-                               struct cgroup_subsys *ss = subsys[i];
-                               if (ss == NULL)
-                                       continue;
-                               if (!ss->disabled)
-                                       opts->subsys_bits |= 1ul << i;
-                       }
-               } else if (!strcmp(token, "none")) {
+               if (!strcmp(token, "none")) {
                        /* Explicitly have no subsystems */
                        opts->none = true;
-               } else if (!strcmp(token, "noprefix")) {
+                       continue;
+               }
+               if (!strcmp(token, "all")) {
+                       /* Mutually exclusive option 'all' + subsystem name */
+                       if (one_ss)
+                               return -EINVAL;
+                       all_ss = true;
+                       continue;
+               }
+               if (!strcmp(token, "noprefix")) {
                        set_bit(ROOT_NOPREFIX, &opts->flags);
-               } else if (!strncmp(token, "release_agent=", 14)) {
+                       continue;
+               }
+               if (!strcmp(token, "clone_children")) {
+                       opts->clone_children = true;
+                       continue;
+               }
+               if (!strncmp(token, "release_agent=", 14)) {
                        /* Specifying two release agents is forbidden */
                        if (opts->release_agent)
                                return -EINVAL;
@@ -1105,7 +1119,9 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
                                kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
                        if (!opts->release_agent)
                                return -ENOMEM;
-               } else if (!strncmp(token, "name=", 5)) {
+                       continue;
+               }
+               if (!strncmp(token, "name=", 5)) {
                        const char *name = token + 5;
                        /* Can't specify an empty name */
                        if (!strlen(name))
@@ -1127,20 +1143,44 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
                                              GFP_KERNEL);
                        if (!opts->name)
                                return -ENOMEM;
-               } else {
-                       struct cgroup_subsys *ss;
-                       for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
-                               ss = subsys[i];
-                               if (ss == NULL)
-                                       continue;
-                               if (!strcmp(token, ss->name)) {
-                                       if (!ss->disabled)
-                                               set_bit(i, &opts->subsys_bits);
-                                       break;
-                               }
-                       }
-                       if (i == CGROUP_SUBSYS_COUNT)
-                               return -ENOENT;
+
+                       continue;
+               }
+
+               for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
+                       struct cgroup_subsys *ss = subsys[i];
+                       if (ss == NULL)
+                               continue;
+                       if (strcmp(token, ss->name))
+                               continue;
+                       if (ss->disabled)
+                               continue;
+
+                       /* Mutually exclusive option 'all' + subsystem name */
+                       if (all_ss)
+                               return -EINVAL;
+                       set_bit(i, &opts->subsys_bits);
+                       one_ss = true;
+
+                       break;
+               }
+               if (i == CGROUP_SUBSYS_COUNT)
+                       return -ENOENT;
+       }
+
+       /*
+        * If the 'all' option was specified, select all the subsystems;
+        * otherwise, if none of 'all', 'none' or a subsystem name was
+        * specified, default to 'all'.
+        */
+       if (all_ss || (!all_ss && !one_ss && !opts->none)) {
+               for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
+                       struct cgroup_subsys *ss = subsys[i];
+                       if (ss == NULL)
+                               continue;
+                       if (ss->disabled)
+                               continue;
+                       set_bit(i, &opts->subsys_bits);
                }
        }
 
@@ -1355,6 +1395,8 @@ static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts)
                strcpy(root->release_agent_path, opts->release_agent);
        if (opts->name)
                strcpy(root->name, opts->name);
+       if (opts->clone_children)
+               set_bit(CGRP_CLONE_CHILDREN, &root->top_cgroup.flags);
        return root;
 }
 
@@ -1880,6 +1922,8 @@ static int cgroup_release_agent_write(struct cgroup *cgrp, struct cftype *cft,
                                      const char *buffer)
 {
        BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
+       if (strlen(buffer) >= PATH_MAX)
+               return -EINVAL;
        if (!cgroup_lock_live_group(cgrp))
                return -ENODEV;
        strcpy(cgrp->root->release_agent_path, buffer);
@@ -3173,6 +3217,23 @@ fail:
        return ret;
 }
 
+static u64 cgroup_clone_children_read(struct cgroup *cgrp,
+                                   struct cftype *cft)
+{
+       return clone_children(cgrp);
+}
+
+static int cgroup_clone_children_write(struct cgroup *cgrp,
+                                    struct cftype *cft,
+                                    u64 val)
+{
+       if (val)
+               set_bit(CGRP_CLONE_CHILDREN, &cgrp->flags);
+       else
+               clear_bit(CGRP_CLONE_CHILDREN, &cgrp->flags);
+       return 0;
+}
+
 /*
  * for the common functions, 'private' gives the type of file
  */
@@ -3203,6 +3264,11 @@ static struct cftype files[] = {
                .write_string = cgroup_write_event_control,
                .mode = S_IWUGO,
        },
+       {
+               .name = "cgroup.clone_children",
+               .read_u64 = cgroup_clone_children_read,
+               .write_u64 = cgroup_clone_children_write,
+       },
 };
 
 static struct cftype cft_release_agent = {
@@ -3332,6 +3398,9 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
        if (notify_on_release(parent))
                set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
 
+       if (clone_children(parent))
+               set_bit(CGRP_CLONE_CHILDREN, &cgrp->flags);
+
        for_each_subsys(root, ss) {
                struct cgroup_subsys_state *css = ss->create(ss, cgrp);
 
@@ -3346,6 +3415,8 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
                                goto err_destroy;
                }
                /* At error, ->destroy() callback has to free assigned ID. */
+               if (clone_children(parent) && ss->post_clone)
+                       ss->post_clone(ss, cgrp);
        }
 
        cgroup_lock_hierarchy(root);
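
Taken together, these cgroup.c hunks add a 'clone_children' mount option, expose the flag per cgroup as the cgroup.clone_children control file, and make cgroup_create() copy it from the parent and invoke each subsystem's ->post_clone() callback for cloned children. A hedged user-space sketch of exercising the knob; the mount point and child group name are illustrative only:

    #include <stdio.h>
    #include <sys/mount.h>
    #include <sys/stat.h>
    #include <sys/types.h>

    #define CGROOT "/mnt/cgroup"    /* illustrative mount point */

    static int write_str(const char *path, const char *val)
    {
        FILE *f = fopen(path, "w");

        if (!f)
            return -1;
        fputs(val, f);
        return fclose(f);
    }

    int main(void)
    {
        /* new mount option parsed by parse_cgroupfs_options(); with no
         * subsystem named, the parser now defaults to 'all' */
        if (mount("cgroup", CGROOT, "cgroup", 0, "clone_children") < 0) {
            perror("mount");
            return 1;
        }
        /* the same flag is a writable u64 file on every cgroup ... */
        write_str(CGROOT "/cgroup.clone_children", "1");
        /* ... and a child created while it is set inherits it, with each
         * subsystem's ->post_clone() run against the new group */
        mkdir(CGROOT "/child", 0755);
        return 0;
    }
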
index ce71ed53e88fbd1e648de49242d9fa63cc570685..e7bebb7c6c38ff7ddaefb807d7e50cd1bcc1adbc 100644 (file)
@@ -48,20 +48,19 @@ static inline struct freezer *task_freezer(struct task_struct *task)
                            struct freezer, css);
 }
 
-int cgroup_freezing_or_frozen(struct task_struct *task)
+static inline int __cgroup_freezing_or_frozen(struct task_struct *task)
 {
-       struct freezer *freezer;
-       enum freezer_state state;
+       enum freezer_state state = task_freezer(task)->state;
+       return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
+}
 
+int cgroup_freezing_or_frozen(struct task_struct *task)
+{
+       int result;
        task_lock(task);
-       freezer = task_freezer(task);
-       if (!freezer->css.cgroup->parent)
-               state = CGROUP_THAWED; /* root cgroup can't be frozen */
-       else
-               state = freezer->state;
+       result = __cgroup_freezing_or_frozen(task);
        task_unlock(task);
-
-       return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
+       return result;
 }
 
 /*
@@ -154,13 +153,6 @@ static void freezer_destroy(struct cgroup_subsys *ss,
        kfree(cgroup_freezer(cgroup));
 }
 
-/* Task is frozen or will freeze immediately when next it gets woken */
-static bool is_task_frozen_enough(struct task_struct *task)
-{
-       return frozen(task) ||
-               (task_is_stopped_or_traced(task) && freezing(task));
-}
-
 /*
  * The call to cgroup_lock() in the freezer.state write method prevents
  * a write to that file racing against an attach, and hence the
@@ -174,24 +166,25 @@ static int freezer_can_attach(struct cgroup_subsys *ss,
 
        /*
         * Anything frozen can't move or be moved to/from.
-        *
-        * Since orig_freezer->state == FROZEN means that @task has been
-        * frozen, so it's sufficient to check the latter condition.
         */
 
-       if (is_task_frozen_enough(task))
+       freezer = cgroup_freezer(new_cgroup);
+       if (freezer->state != CGROUP_THAWED)
                return -EBUSY;
 
-       freezer = cgroup_freezer(new_cgroup);
-       if (freezer->state == CGROUP_FROZEN)
+       rcu_read_lock();
+       if (__cgroup_freezing_or_frozen(task)) {
+               rcu_read_unlock();
                return -EBUSY;
+       }
+       rcu_read_unlock();
 
        if (threadgroup) {
                struct task_struct *c;
 
                rcu_read_lock();
                list_for_each_entry_rcu(c, &task->thread_group, thread_group) {
-                       if (is_task_frozen_enough(c)) {
+                       if (__cgroup_freezing_or_frozen(c)) {
                                rcu_read_unlock();
                                return -EBUSY;
                        }
@@ -236,31 +229,30 @@ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
 /*
  * caller must hold freezer->lock
  */
-static void update_freezer_state(struct cgroup *cgroup,
+static void update_if_frozen(struct cgroup *cgroup,
                                 struct freezer *freezer)
 {
        struct cgroup_iter it;
        struct task_struct *task;
        unsigned int nfrozen = 0, ntotal = 0;
+       enum freezer_state old_state = freezer->state;
 
        cgroup_iter_start(cgroup, &it);
        while ((task = cgroup_iter_next(cgroup, &it))) {
                ntotal++;
-               if (is_task_frozen_enough(task))
+               if (frozen(task))
                        nfrozen++;
        }
 
-       /*
-        * Transition to FROZEN when no new tasks can be added ensures
-        * that we never exist in the FROZEN state while there are unfrozen
-        * tasks.
-        */
-       if (nfrozen == ntotal)
-               freezer->state = CGROUP_FROZEN;
-       else if (nfrozen > 0)
-               freezer->state = CGROUP_FREEZING;
-       else
-               freezer->state = CGROUP_THAWED;
+       if (old_state == CGROUP_THAWED) {
+               BUG_ON(nfrozen > 0);
+       } else if (old_state == CGROUP_FREEZING) {
+               if (nfrozen == ntotal)
+                       freezer->state = CGROUP_FROZEN;
+       } else { /* old_state == CGROUP_FROZEN */
+               BUG_ON(nfrozen != ntotal);
+       }
+
        cgroup_iter_end(cgroup, &it);
 }
 
@@ -279,7 +271,7 @@ static int freezer_read(struct cgroup *cgroup, struct cftype *cft,
        if (state == CGROUP_FREEZING) {
                /* We change from FREEZING to FROZEN lazily if the cgroup was
                 * only partially frozen when we exited write. */
-               update_freezer_state(cgroup, freezer);
+               update_if_frozen(cgroup, freezer);
                state = freezer->state;
        }
        spin_unlock_irq(&freezer->lock);
@@ -301,7 +293,7 @@ static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
        while ((task = cgroup_iter_next(cgroup, &it))) {
                if (!freeze_task(task, true))
                        continue;
-               if (is_task_frozen_enough(task))
+               if (frozen(task))
                        continue;
                if (!freezing(task) && !freezer_should_skip(task))
                        num_cant_freeze_now++;
@@ -335,7 +327,7 @@ static int freezer_change_state(struct cgroup *cgroup,
 
        spin_lock_irq(&freezer->lock);
 
-       update_freezer_state(cgroup, freezer);
+       update_if_frozen(cgroup, freezer);
        if (goal_state == freezer->state)
                goto out;
 
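
The renamed update_if_frozen() now only promotes a cgroup from FREEZING to FROZEN once every task is frozen(); it never walks the state backwards, and the new BUG_ON()s document the combinations that can no longer occur. Because freezer_read() performs that lazy promotion, user space can simply poll freezer.state. A small sketch, assuming a freezer hierarchy mounted at an illustrative path:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    #define STATE "/mnt/freezer/mygroup/freezer.state"  /* illustrative */

    int main(void)
    {
        char buf[32];
        FILE *f = fopen(STATE, "w");

        if (!f) {
            perror("fopen");
            return 1;
        }
        fputs("FROZEN", f);
        fclose(f);

        /* each read runs update_if_frozen(), which flips the cgroup from
         * FREEZING to FROZEN once every task in it is frozen */
        for (;;) {
            f = fopen(STATE, "r");
            if (!f)
                return 1;
            if (!fgets(buf, sizeof(buf), f))
                buf[0] = '\0';
            fclose(f);
            if (!strncmp(buf, "FROZEN", 6))
                break;
            sleep(1);
        }
        puts("cgroup frozen");
        return 0;
    }
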
index 9a3e22641fe736c5d97b8c1f1f8b321886b4bcad..6a1aa004e376615fed91a1cbd4c2bc6149896957 100644 (file)
@@ -325,7 +325,7 @@ EXPORT_SYMBOL(prepare_creds);
 
 /*
  * Prepare credentials for current to perform an execve()
- * - The caller must hold current->cred_guard_mutex
+ * - The caller must hold ->cred_guard_mutex
  */
 struct cred *prepare_exec_creds(void)
 {
@@ -384,8 +384,6 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
        struct cred *new;
        int ret;
 
-       mutex_init(&p->cred_guard_mutex);
-
        if (
 #ifdef CONFIG_KEYS
                !p->cred->thread_keyring &&
index 894179a32ec163fe3347f8614626ddf7a9946cb7..b194febf5799bab766e5aef261e566902a086b28 100644 (file)
@@ -703,6 +703,8 @@ static void exit_mm(struct task_struct * tsk)
  * space.
  */
 static struct task_struct *find_new_reaper(struct task_struct *father)
+       __releases(&tasklist_lock)
+       __acquires(&tasklist_lock)
 {
        struct pid_namespace *pid_ns = task_active_pid_ns(father);
        struct task_struct *thread;
index e87aaaaf5131de2bb924c963d618a52f5de01127..3b159c5991b7561bdba253eeb479f91622a35fb9 100644 (file)
@@ -908,6 +908,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
        sig->oom_adj = current->signal->oom_adj;
        sig->oom_score_adj = current->signal->oom_score_adj;
 
+       mutex_init(&sig->cred_guard_mutex);
+
        return 0;
 }
 
index 9d917ff726759a931443e3b03693a558cc65b377..9988d03797f5660dea26f417d9002fae94fd2798 100644 (file)
@@ -393,3 +393,18 @@ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
        struct irq_desc *desc = irq_to_desc(irq);
        return desc ? desc->kstat_irqs[cpu] : 0;
 }
+
+#ifdef CONFIG_GENERIC_HARDIRQS
+unsigned int kstat_irqs(unsigned int irq)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+       int cpu;
+       int sum = 0;
+
+       if (!desc)
+               return 0;
+       for_each_possible_cpu(cpu)
+               sum += desc->kstat_irqs[cpu];
+       return sum;
+}
+#endif /* CONFIG_GENERIC_HARDIRQS */
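
The new kstat_irqs() helper is just a sum of desc->kstat_irqs[] over all possible CPUs. The same arithmetic can be sanity-checked from user space against /proc/interrupts, where each numbered row carries one count column per online CPU; a hedged sketch of that cross-check:

    #include <stdio.h>
    #include <stdlib.h>

    /* Sum the per-CPU count columns of one /proc/interrupts row,
     * the user-space analogue of the new kstat_irqs(). */
    int main(int argc, char **argv)
    {
        char line[1024];
        int irq = (argc > 1) ? atoi(argv[1]) : 0;
        FILE *f = fopen("/proc/interrupts", "r");

        if (!f)
            return 1;
        while (fgets(line, sizeof(line), f)) {
            unsigned long cnt, sum = 0;
            int this_irq, off;
            char *p = line;

            /* named rows (NMI:, LOC:, ...) fail the %d match */
            if (sscanf(p, " %d:%n", &this_irq, &off) != 1 || this_irq != irq)
                continue;
            p += off;
            while (sscanf(p, " %lu%n", &cnt, &off) == 1) {
                sum += cnt;
                p += off;
            }
            printf("irq %d: %lu interrupts\n", irq, sum);
        }
        fclose(f);
        return 0;
    }
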
index 56a891914273319e0e0a69ca911d7824db1341f8..99865c33a60d6347f48d46976e75d2baee579156 100644 (file)
@@ -74,7 +74,8 @@ static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
 /* NOTE: change this value only with kprobe_mutex held */
 static bool kprobes_all_disarmed;
 
-static DEFINE_MUTEX(kprobe_mutex);     /* Protects kprobe_table */
+/* This protects kprobe_table and optimizing_list */
+static DEFINE_MUTEX(kprobe_mutex);
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 static struct {
        spinlock_t lock ____cacheline_aligned_in_smp;
@@ -595,6 +596,7 @@ static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
 }
 
 #ifdef CONFIG_SYSCTL
+/* This should be called with kprobe_mutex locked */
 static void __kprobes optimize_all_kprobes(void)
 {
        struct hlist_head *head;
@@ -607,17 +609,16 @@ static void __kprobes optimize_all_kprobes(void)
                return;
 
        kprobes_allow_optimization = true;
-       mutex_lock(&text_mutex);
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
                hlist_for_each_entry_rcu(p, node, head, hlist)
                        if (!kprobe_disabled(p))
                                optimize_kprobe(p);
        }
-       mutex_unlock(&text_mutex);
        printk(KERN_INFO "Kprobes globally optimized\n");
 }
 
+/* This should be called with kprobe_mutex locked */
 static void __kprobes unoptimize_all_kprobes(void)
 {
        struct hlist_head *head;
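
optimize_all_kprobes() and unoptimize_all_kprobes() are reached through the kprobes-optimization sysctl handler; the hunk drops their text_mutex locking and the new comments record that the caller must hold kprobe_mutex instead. A user-space sketch of flipping the knob; the proc path assumes CONFIG_OPTPROBES and CONFIG_SYSCTL, and needs root:

    #include <stdio.h>

    /* Toggle global kprobe optimization via the debug sysctl. */
    int main(int argc, char **argv)
    {
        const char *val = (argc > 1) ? argv[1] : "1";
        FILE *f = fopen("/proc/sys/debug/kprobes-optimization", "w");

        if (!f) {
            perror("fopen");
            return 1;
        }
        fputs(val, f);
        return fclose(f) ? 1 : 0;
    }
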
index 2df46301a7a407dcde3435542e38f6944358d7c1..437a74a7524a7f592f669dbdc91a6c90503959ae 100644 (file)
@@ -2037,7 +2037,7 @@ static inline void layout_symtab(struct module *mod, struct load_info *info)
 {
 }
 
-static void add_kallsyms(struct module *mod, struct load_info *info)
+static void add_kallsyms(struct module *mod, const struct load_info *info)
 {
 }
 #endif /* CONFIG_KALLSYMS */
index 2a5dfec8efe0504fc974a9500e934c51b78a5207..2c98ad94ba0ee4b5313bf5f1add92281044e789a 100644 (file)
@@ -85,6 +85,14 @@ static struct cgroup_subsys_state *ns_create(struct cgroup_subsys *ss,
                return ERR_PTR(-EPERM);
        if (!cgroup_is_descendant(cgroup, current))
                return ERR_PTR(-EPERM);
+       if (test_bit(CGRP_CLONE_CHILDREN, &cgroup->flags)) {
+               printk("ns_cgroup can't be created when the parent's "
+                      "'clone_children' flag is set.\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       printk_once("ns_cgroup deprecated: consider using the "
+                   "'clone_children' flag without the ns_cgroup.\n");
 
        ns_cgroup = kzalloc(sizeof(*ns_cgroup), GFP_KERNEL);
        if (!ns_cgroup)
index f309e8014c7853105d1a38bc662f10164dc4d3d1..517d827f498281da50210aa76a02b4693116154a 100644 (file)
@@ -417,8 +417,8 @@ event_filter_match(struct perf_event *event)
        return event->cpu == -1 || event->cpu == smp_processor_id();
 }
 
-static int
-__event_sched_out(struct perf_event *event,
+static void
+event_sched_out(struct perf_event *event,
                  struct perf_cpu_context *cpuctx,
                  struct perf_event_context *ctx)
 {
@@ -437,13 +437,14 @@ __event_sched_out(struct perf_event *event,
        }
 
        if (event->state != PERF_EVENT_STATE_ACTIVE)
-               return 0;
+               return;
 
        event->state = PERF_EVENT_STATE_INACTIVE;
        if (event->pending_disable) {
                event->pending_disable = 0;
                event->state = PERF_EVENT_STATE_OFF;
        }
+       event->tstamp_stopped = ctx->time;
        event->pmu->del(event, 0);
        event->oncpu = -1;
 
@@ -452,19 +453,6 @@ __event_sched_out(struct perf_event *event,
        ctx->nr_active--;
        if (event->attr.exclusive || !cpuctx->active_oncpu)
                cpuctx->exclusive = 0;
-       return 1;
-}
-
-static void
-event_sched_out(struct perf_event *event,
-                 struct perf_cpu_context *cpuctx,
-                 struct perf_event_context *ctx)
-{
-       int ret;
-
-       ret = __event_sched_out(event, cpuctx, ctx);
-       if (ret)
-               event->tstamp_stopped = ctx->time;
 }
 
 static void
@@ -664,7 +652,7 @@ retry:
 }
 
 static int
-__event_sched_in(struct perf_event *event,
+event_sched_in(struct perf_event *event,
                 struct perf_cpu_context *cpuctx,
                 struct perf_event_context *ctx)
 {
@@ -684,6 +672,8 @@ __event_sched_in(struct perf_event *event,
                return -EAGAIN;
        }
 
+       event->tstamp_running += ctx->time - event->tstamp_stopped;
+
        if (!is_software_event(event))
                cpuctx->active_oncpu++;
        ctx->nr_active++;
@@ -694,35 +684,6 @@ __event_sched_in(struct perf_event *event,
        return 0;
 }
 
-static inline int
-event_sched_in(struct perf_event *event,
-                struct perf_cpu_context *cpuctx,
-                struct perf_event_context *ctx)
-{
-       int ret = __event_sched_in(event, cpuctx, ctx);
-       if (ret)
-               return ret;
-       event->tstamp_running += ctx->time - event->tstamp_stopped;
-       return 0;
-}
-
-static void
-group_commit_event_sched_in(struct perf_event *group_event,
-              struct perf_cpu_context *cpuctx,
-              struct perf_event_context *ctx)
-{
-       struct perf_event *event;
-       u64 now = ctx->time;
-
-       group_event->tstamp_running += now - group_event->tstamp_stopped;
-       /*
-        * Schedule in siblings as one group (if any):
-        */
-       list_for_each_entry(event, &group_event->sibling_list, group_entry) {
-               event->tstamp_running += now - event->tstamp_stopped;
-       }
-}
-
 static int
 group_sched_in(struct perf_event *group_event,
               struct perf_cpu_context *cpuctx,
@@ -730,19 +691,15 @@ group_sched_in(struct perf_event *group_event,
 {
        struct perf_event *event, *partial_group = NULL;
        struct pmu *pmu = group_event->pmu;
+       u64 now = ctx->time;
+       bool simulate = false;
 
        if (group_event->state == PERF_EVENT_STATE_OFF)
                return 0;
 
        pmu->start_txn(pmu);
 
-       /*
-        * use __event_sched_in() to delay updating tstamp_running
-        * until the transaction is committed. In case of failure
-        * we will keep an unmodified tstamp_running which is a
-        * requirement to get correct timing information
-        */
-       if (__event_sched_in(group_event, cpuctx, ctx)) {
+       if (event_sched_in(group_event, cpuctx, ctx)) {
                pmu->cancel_txn(pmu);
                return -EAGAIN;
        }
@@ -751,31 +708,42 @@ group_sched_in(struct perf_event *group_event,
         * Schedule in siblings as one group (if any):
         */
        list_for_each_entry(event, &group_event->sibling_list, group_entry) {
-               if (__event_sched_in(event, cpuctx, ctx)) {
+               if (event_sched_in(event, cpuctx, ctx)) {
                        partial_group = event;
                        goto group_error;
                }
        }
 
-       if (!pmu->commit_txn(pmu)) {
-               /* commit tstamp_running */
-               group_commit_event_sched_in(group_event, cpuctx, ctx);
+       if (!pmu->commit_txn(pmu))
                return 0;
-       }
+
 group_error:
        /*
         * Groups can be scheduled in as one unit only, so undo any
         * partial group before returning:
+        * The events up to the failed event are scheduled out normally;
+        * their tstamp_stopped will be updated.
         *
-        * use __event_sched_out() to avoid updating tstamp_stopped
-        * because the event never actually ran
+        * The failed events and the remaining siblings need to have
+        * their timings updated as if they had gone through event_sched_in()
+        * and event_sched_out(). This is required to get consistent timings
+        * across the group. This also takes care of the case where the group
+        * could never be scheduled by ensuring tstamp_stopped is set to mark
+        * the time the event was actually stopped, such that time delta
+        * calculation in update_event_times() is correct.
         */
        list_for_each_entry(event, &group_event->sibling_list, group_entry) {
                if (event == partial_group)
-                       break;
-               __event_sched_out(event, cpuctx, ctx);
+                       simulate = true;
+
+               if (simulate) {
+                       event->tstamp_running += now - event->tstamp_stopped;
+                       event->tstamp_stopped = now;
+               } else {
+                       event_sched_out(event, cpuctx, ctx);
+               }
        }
-       __event_sched_out(group_event, cpuctx, ctx);
+       event_sched_out(group_event, cpuctx, ctx);
 
        pmu->cancel_txn(pmu);
 
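
The 'simulate' pass is worth unpacking. update_event_times(), not shown in this hunk, computes an inactive event's total_time_running as tstamp_stopped - tstamp_running; advancing tstamp_running by (now - tstamp_stopped) and then setting tstamp_stopped = now leaves that difference, and hence the reported timing, unchanged, so the sibling looks as if it was scheduled in and straight back out at 'now'. A toy model of the arithmetic; the struct and sample values are invented for illustration:

    #include <assert.h>
    #include <stdio.h>

    struct toy_event {
        unsigned long long tstamp_running;
        unsigned long long tstamp_stopped;
    };

    int main(void)
    {
        struct toy_event ev = { .tstamp_running = 100, .tstamp_stopped = 400 };
        unsigned long long now = 700;
        unsigned long long before = ev.tstamp_stopped - ev.tstamp_running;

        /* the simulate branch from group_sched_in() above */
        ev.tstamp_running += now - ev.tstamp_stopped;
        ev.tstamp_stopped = now;

        /* the reported running time is untouched */
        assert(ev.tstamp_stopped - ev.tstamp_running == before);
        printf("running time still %llu\n", before);
        return 0;
    }
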
index f34d798ef4a25831b98090a7b85fa6d3b93d0de2..99bbaa3e5b0d4332a0d9ff7e1f057dbbecc10a50 100644 (file)
@@ -181,7 +181,7 @@ int ptrace_attach(struct task_struct *task)
         * under ptrace.
         */
        retval = -ERESTARTNOINTR;
-       if (mutex_lock_interruptible(&task->cred_guard_mutex))
+       if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
                goto out;
 
        task_lock(task);
@@ -208,7 +208,7 @@ int ptrace_attach(struct task_struct *task)
 unlock_tasklist:
        write_unlock_irq(&tasklist_lock);
 unlock_creds:
-       mutex_unlock(&task->cred_guard_mutex);
+       mutex_unlock(&task->signal->cred_guard_mutex);
 out:
        return retval;
 }
@@ -329,6 +329,8 @@ int ptrace_detach(struct task_struct *child, unsigned int data)
  * and reacquire the lock.
  */
 void exit_ptrace(struct task_struct *tracer)
+       __releases(&tasklist_lock)
+       __acquires(&tasklist_lock)
 {
        struct task_struct *p, *n;
        LIST_HEAD(ptrace_dead);
@@ -402,7 +404,7 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
        return copied;
 }
 
-static int ptrace_setoptions(struct task_struct *child, long data)
+static int ptrace_setoptions(struct task_struct *child, unsigned long data)
 {
        child->ptrace &= ~PT_TRACE_MASK;
 
@@ -481,7 +483,8 @@ static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
 #define is_sysemu_singlestep(request)  0
 #endif
 
-static int ptrace_resume(struct task_struct *child, long request, long data)
+static int ptrace_resume(struct task_struct *child, long request,
+                        unsigned long data)
 {
        if (!valid_signal(data))
                return -EIO;
@@ -558,10 +561,12 @@ static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
 #endif
 
 int ptrace_request(struct task_struct *child, long request,
-                  long addr, long data)
+                  unsigned long addr, unsigned long data)
 {
        int ret = -EIO;
        siginfo_t siginfo;
+       void __user *datavp = (void __user *) data;
+       unsigned long __user *datalp = datavp;
 
        switch (request) {
        case PTRACE_PEEKTEXT:
@@ -578,19 +583,17 @@ int ptrace_request(struct task_struct *child, long request,
                ret = ptrace_setoptions(child, data);
                break;
        case PTRACE_GETEVENTMSG:
-               ret = put_user(child->ptrace_message, (unsigned long __user *) data);
+               ret = put_user(child->ptrace_message, datalp);
                break;
 
        case PTRACE_GETSIGINFO:
                ret = ptrace_getsiginfo(child, &siginfo);
                if (!ret)
-                       ret = copy_siginfo_to_user((siginfo_t __user *) data,
-                                                  &siginfo);
+                       ret = copy_siginfo_to_user(datavp, &siginfo);
                break;
 
        case PTRACE_SETSIGINFO:
-               if (copy_from_user(&siginfo, (siginfo_t __user *) data,
-                                  sizeof siginfo))
+               if (copy_from_user(&siginfo, datavp, sizeof siginfo))
                        ret = -EFAULT;
                else
                        ret = ptrace_setsiginfo(child, &siginfo);
@@ -621,7 +624,7 @@ int ptrace_request(struct task_struct *child, long request,
                }
                mmput(mm);
 
-               ret = put_user(tmp, (unsigned long __user *) data);
+               ret = put_user(tmp, datalp);
                break;
        }
 #endif
@@ -650,7 +653,7 @@ int ptrace_request(struct task_struct *child, long request,
        case PTRACE_SETREGSET:
        {
                struct iovec kiov;
-               struct iovec __user *uiov = (struct iovec __user *) data;
+               struct iovec __user *uiov = datavp;
 
                if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
                        return -EFAULT;
@@ -691,7 +694,8 @@ static struct task_struct *ptrace_get_task_struct(pid_t pid)
 #define arch_ptrace_attach(child)      do { } while (0)
 #endif
 
-SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
+SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
+               unsigned long, data)
 {
        struct task_struct *child;
        long ret;
@@ -732,7 +736,8 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
        return ret;
 }
 
-int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
+int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
+                           unsigned long data)
 {
        unsigned long tmp;
        int copied;
@@ -743,7 +748,8 @@ int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
        return put_user(tmp, (unsigned long __user *)data);
 }
 
-int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
+int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
+                           unsigned long data)
 {
        int copied;
 
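
The ptrace hunks retype addr and data as unsigned long end to end, with datavp/datalp absorbing the repeated user-pointer casts, so both values travel as untyped pointer-sized quantities instead of signed longs. A minimal caller-side sketch of PTRACE_PEEKDATA, the request generic_ptrace_peekdata() serves; error handling is pared down:

    #include <signal.h>
    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    static long secret = 0x1234;  /* same address in parent and child */

    int main(void)
    {
        pid_t pid = fork();

        if (pid == 0) {  /* child: stop under trace */
            ptrace(PTRACE_TRACEME, 0, NULL, NULL);
            raise(SIGSTOP);
            _exit(0);
        }
        waitpid(pid, NULL, 0);  /* wait for the stop */
        /* addr arrives in the kernel as an unsigned long, so a high
         * user address is never sign-extended on the way down */
        long word = ptrace(PTRACE_PEEKDATA, pid, &secret, NULL);
        printf("peeked: %#lx\n", word);
        ptrace(PTRACE_DETACH, pid, NULL, NULL);
        return 0;
    }
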
index 7b36976e5dea84d11b73fcd5d4ae8963ce4c9aa7..9c9841cb69021fdd259510ce7f117cedf14f0ad7 100644 (file)
@@ -453,6 +453,8 @@ static struct resource * __insert_resource(struct resource *parent, struct resou
 
                if (first == parent)
                        return first;
+               if (WARN_ON(first == new))      /* duplicated insertion */
+                       return first;
 
                if ((first->start > new->start) || (first->end < new->end))
                        break;
index 919562c3d6b720d58ff246b2c412114d77c0b419..4e3cff10fdceda165e8e9f22c0e9b861dba918d8 100644 (file)
@@ -1105,7 +1105,8 @@ int zap_other_threads(struct task_struct *p)
        return count;
 }
 
-struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
+struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
+                                          unsigned long *flags)
 {
        struct sighand_struct *sighand;
 
@@ -1617,6 +1618,8 @@ static int sigkill_pending(struct task_struct *tsk)
  * is gone, we keep current->exit_code unless clear_code.
  */
 static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
+       __releases(&current->sighand->siglock)
+       __acquires(&current->sighand->siglock)
 {
        if (arch_ptrace_stop_needed(exit_code, info)) {
                /*
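
exit.c, ptrace.c and signal.c all gain the same annotation pair: __releases()/__acquires() tell sparse (make C=1) that the function drops and then retakes a lock its caller already holds, so lock-context checking balances across the call. A minimal kernel-side sketch of the pattern; the lock and function names are made up:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);

    /* Caller enters with demo_lock held; drop it around a section that
     * must run unlocked and retake it before returning. */
    static void demo_drop_and_retake(void)
        __releases(&demo_lock)
        __acquires(&demo_lock)
    {
        spin_unlock(&demo_lock);
        /* ... work that cannot run under the lock ... */
        spin_lock(&demo_lock);
    }
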
index ed6aacfcb7efb307fe313ea798e7074f2c8f4f92..12ed8b013e2d7cd8be438a808f3a6981ab4cd70a 100644 (file)
@@ -267,7 +267,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
  *
  * Returns 0 on success, else a negative status code.
  */
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
                             int wait)
 {
        struct call_single_data d = {
@@ -336,7 +336,7 @@ EXPORT_SYMBOL(smp_call_function_single);
  *     3) any other online cpu in @mask
  */
 int smp_call_function_any(const struct cpumask *mask,
-                         void (*func)(void *info), void *info, int wait)
+                         smp_call_func_t func, void *info, int wait)
 {
        unsigned int cpu;
        const struct cpumask *nodemask;
@@ -416,7 +416,7 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
  * must be disabled when calling this function.
  */
 void smp_call_function_many(const struct cpumask *mask,
-                           void (*func)(void *), void *info, bool wait)
+                           smp_call_func_t func, void *info, bool wait)
 {
        struct call_function_data *data;
        unsigned long flags;
@@ -500,7 +500,7 @@ EXPORT_SYMBOL(smp_call_function_many);
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler.
  */
-int smp_call_function(void (*func)(void *), void *info, int wait)
+int smp_call_function(smp_call_func_t func, void *info, int wait)
 {
        preempt_disable();
        smp_call_function_many(cpu_online_mask, func, info, wait);
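
These smp.c changes are purely a type cleanup: the spelled-out void (*func)(void *info) parameters become the smp_call_func_t typedef. A kernel-side sketch of a matching caller; the callback and payload are illustrative:

    #include <linux/kernel.h>
    #include <linux/smp.h>

    /* Illustrative callback: runs on the target CPU with IRQs disabled. */
    static void say_hello(void *info)
    {
        pr_info("hello from CPU %d: %s\n", smp_processor_id(),
                (const char *)info);
    }

    static void poke_cpu(int cpu)
    {
        /* say_hello decays to smp_call_func_t; wait == 1 blocks until
         * the target CPU has run it */
        smp_call_function_single(cpu, say_hello, (void *)"cross-call", 1);
    }
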
index f02a9dfa19bc80a5fd2bd1877ae7e685a7538411..18f4be0d5fe0bbf853935972d9b441e95bc61c5a 100644 (file)
@@ -229,18 +229,20 @@ restart:
 
        do {
                if (pending & 1) {
+                       unsigned int vec_nr = h - softirq_vec;
                        int prev_count = preempt_count();
-                       kstat_incr_softirqs_this_cpu(h - softirq_vec);
 
-                       trace_softirq_entry(h, softirq_vec);
+                       kstat_incr_softirqs_this_cpu(vec_nr);
+
+                       trace_softirq_entry(vec_nr);
                        h->action(h);
-                       trace_softirq_exit(h, softirq_vec);
+                       trace_softirq_exit(vec_nr);
                        if (unlikely(prev_count != preempt_count())) {
-                               printk(KERN_ERR "huh, entered softirq %td %s %p"
+                               printk(KERN_ERR "huh, entered softirq %u %s %p"
                                       " with preempt_count %08x,"
-                                      " exited with %08x?\n", h - softirq_vec,
-                                      softirq_to_name[h - softirq_vec],
-                                      h->action, prev_count, preempt_count());
+                                      " exited with %08x?\n", vec_nr,
+                                      softirq_to_name[vec_nr], h->action,
+                                      prev_count, preempt_count());
                                preempt_count() = prev_count;
                        }
 
index 11281d5792bd5b4d0eb7d5b86034ca4b2a4d256a..c8231fb1570831d78215ab9967958a1663424ed0 100644 (file)
@@ -175,22 +175,8 @@ static void send_cpu_listeners(struct sk_buff *skb,
        up_write(&listeners->sem);
 }
 
-static int fill_pid(pid_t pid, struct task_struct *tsk,
-               struct taskstats *stats)
+static void fill_stats(struct task_struct *tsk, struct taskstats *stats)
 {
-       int rc = 0;
-
-       if (!tsk) {
-               rcu_read_lock();
-               tsk = find_task_by_vpid(pid);
-               if (tsk)
-                       get_task_struct(tsk);
-               rcu_read_unlock();
-               if (!tsk)
-                       return -ESRCH;
-       } else
-               get_task_struct(tsk);
-
        memset(stats, 0, sizeof(*stats));
        /*
         * Each accounting subsystem adds calls to its functions to
@@ -209,17 +195,27 @@ static int fill_pid(pid_t pid, struct task_struct *tsk,
 
        /* fill in extended acct fields */
        xacct_add_tsk(stats, tsk);
+}
 
-       /* Define err: label here if needed */
-       put_task_struct(tsk);
-       return rc;
+static int fill_stats_for_pid(pid_t pid, struct taskstats *stats)
+{
+       struct task_struct *tsk;
 
+       rcu_read_lock();
+       tsk = find_task_by_vpid(pid);
+       if (tsk)
+               get_task_struct(tsk);
+       rcu_read_unlock();
+       if (!tsk)
+               return -ESRCH;
+       fill_stats(tsk, stats);
+       put_task_struct(tsk);
+       return 0;
 }
 
-static int fill_tgid(pid_t tgid, struct task_struct *first,
-               struct taskstats *stats)
+static int fill_stats_for_tgid(pid_t tgid, struct taskstats *stats)
 {
-       struct task_struct *tsk;
+       struct task_struct *tsk, *first;
        unsigned long flags;
        int rc = -ESRCH;
 
@@ -228,8 +224,7 @@ static int fill_tgid(pid_t tgid, struct task_struct *first,
         * leaders who are already counted with the dead tasks
         */
        rcu_read_lock();
-       if (!first)
-               first = find_task_by_vpid(tgid);
+       first = find_task_by_vpid(tgid);
 
        if (!first || !lock_task_sighand(first, &flags))
                goto out;
@@ -268,7 +263,6 @@ out:
        return rc;
 }
 
-
 static void fill_tgid_exit(struct task_struct *tsk)
 {
        unsigned long flags;
@@ -360,6 +354,12 @@ static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
        struct nlattr *na, *ret;
        int aggr;
 
+       /* If we don't pad, we end up with alignment on a 4 byte boundary.
+        * This causes lots of runtime warnings on systems requiring 8 byte
+        * alignment */
+       u32 pids[2] = { pid, 0 };
+       int pid_size = ALIGN(sizeof(pid), sizeof(long));
+
        aggr = (type == TASKSTATS_TYPE_PID)
                        ? TASKSTATS_TYPE_AGGR_PID
                        : TASKSTATS_TYPE_AGGR_TGID;
@@ -367,7 +367,7 @@ static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
        na = nla_nest_start(skb, aggr);
        if (!na)
                goto err;
-       if (nla_put(skb, type, sizeof(pid), &pid) < 0)
+       if (nla_put(skb, type, pid_size, pids) < 0)
                goto err;
        ret = nla_reserve(skb, TASKSTATS_TYPE_STATS, sizeof(struct taskstats));
        if (!ret)
@@ -424,39 +424,46 @@ err:
        return rc;
 }
 
-static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
+static int cmd_attr_register_cpumask(struct genl_info *info)
 {
-       int rc;
-       struct sk_buff *rep_skb;
-       struct taskstats *stats;
-       size_t size;
        cpumask_var_t mask;
+       int rc;
 
        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;
-
        rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask);
        if (rc < 0)
-               goto free_return_rc;
-       if (rc == 0) {
-               rc = add_del_listener(info->snd_pid, mask, REGISTER);
-               goto free_return_rc;
-       }
+               goto out;
+       rc = add_del_listener(info->snd_pid, mask, REGISTER);
+out:
+       free_cpumask_var(mask);
+       return rc;
+}
+
+static int cmd_attr_deregister_cpumask(struct genl_info *info)
+{
+       cpumask_var_t mask;
+       int rc;
 
+       if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+               return -ENOMEM;
        rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask);
        if (rc < 0)
-               goto free_return_rc;
-       if (rc == 0) {
-               rc = add_del_listener(info->snd_pid, mask, DEREGISTER);
-free_return_rc:
-               free_cpumask_var(mask);
-               return rc;
-       }
+               goto out;
+       rc = add_del_listener(info->snd_pid, mask, DEREGISTER);
+out:
        free_cpumask_var(mask);
+       return rc;
+}
+
+static int cmd_attr_pid(struct genl_info *info)
+{
+       struct taskstats *stats;
+       struct sk_buff *rep_skb;
+       size_t size;
+       u32 pid;
+       int rc;
 
-       /*
-        * Size includes space for nested attributes
-        */
        size = nla_total_size(sizeof(u32)) +
                nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
 
@@ -465,33 +472,64 @@ free_return_rc:
                return rc;
 
        rc = -EINVAL;
-       if (info->attrs[TASKSTATS_CMD_ATTR_PID]) {
-               u32 pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
-               stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid);
-               if (!stats)
-                       goto err;
-
-               rc = fill_pid(pid, NULL, stats);
-               if (rc < 0)
-                       goto err;
-       } else if (info->attrs[TASKSTATS_CMD_ATTR_TGID]) {
-               u32 tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
-               stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid);
-               if (!stats)
-                       goto err;
-
-               rc = fill_tgid(tgid, NULL, stats);
-               if (rc < 0)
-                       goto err;
-       } else
+       pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
+       stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid);
+       if (!stats)
+               goto err;
+
+       rc = fill_stats_for_pid(pid, stats);
+       if (rc < 0)
+               goto err;
+       return send_reply(rep_skb, info);
+err:
+       nlmsg_free(rep_skb);
+       return rc;
+}
+
+static int cmd_attr_tgid(struct genl_info *info)
+{
+       struct taskstats *stats;
+       struct sk_buff *rep_skb;
+       size_t size;
+       u32 tgid;
+       int rc;
+
+       size = nla_total_size(sizeof(u32)) +
+               nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
+
+       rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
+       if (rc < 0)
+               return rc;
+
+       rc = -EINVAL;
+       tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
+       stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid);
+       if (!stats)
                goto err;
 
+       rc = fill_stats_for_tgid(tgid, stats);
+       if (rc < 0)
+               goto err;
        return send_reply(rep_skb, info);
 err:
        nlmsg_free(rep_skb);
        return rc;
 }
 
+static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
+{
+       if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
+               return cmd_attr_register_cpumask(info);
+       else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
+               return cmd_attr_deregister_cpumask(info);
+       else if (info->attrs[TASKSTATS_CMD_ATTR_PID])
+               return cmd_attr_pid(info);
+       else if (info->attrs[TASKSTATS_CMD_ATTR_TGID])
+               return cmd_attr_tgid(info);
+       else
+               return -EINVAL;
+}
+
 static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
 {
        struct signal_struct *sig = tsk->signal;
@@ -555,9 +593,7 @@ void taskstats_exit(struct task_struct *tsk, int group_dead)
        if (!stats)
                goto err;
 
-       rc = fill_pid(-1, tsk, stats);
-       if (rc < 0)
-               goto err;
+       fill_stats(tsk, stats);
 
        /*
         * Doesn't matter if tsk is the leader or the last group member leaving
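
The padding fix in mk_reply() hinges on ALIGN(sizeof(pid), sizeof(long)) rounding the 4-byte u32 up to 8 on 64-bit, so the struct taskstats attribute reserved immediately after it starts on an 8-byte boundary. A quick user-space check of the arithmetic, with ALIGN() reimplemented the way the kernel defines it:

    #include <stdint.h>
    #include <stdio.h>

    /* kernel-style ALIGN(): round x up to the power-of-two boundary a */
    #define ALIGN(x, a) (((x) + ((a) - 1)) & ~((a) - 1))

    int main(void)
    {
        size_t pid_size = ALIGN(sizeof(uint32_t), sizeof(long));

        /* 4 on ILP32, 8 on LP64: the padded pids[2] payload keeps the
         * following taskstats blob 8-byte aligned on 64-bit kernels */
        printf("sizeof(u32)=%zu padded=%zu\n", sizeof(uint32_t), pid_size);
        return 0;
    }
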
index c3dab054d18e2ff43b618ad29693351d0accc872..9ed509a015d81a69cd452b3697658dd7fde9cb71 100644 (file)
@@ -224,6 +224,9 @@ enum {
        RB_LEN_TIME_STAMP = 16,
 };
 
+#define skip_time_extend(event) \
+       ((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
+
 static inline int rb_null_event(struct ring_buffer_event *event)
 {
        return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
@@ -248,8 +251,12 @@ rb_event_data_length(struct ring_buffer_event *event)
        return length + RB_EVNT_HDR_SIZE;
 }
 
-/* inline for ring buffer fast paths */
-static unsigned
+/*
+ * Return the length of the given event. Will return
+ * the length of the time extend if the event is a
+ * time extend.
+ */
+static inline unsigned
 rb_event_length(struct ring_buffer_event *event)
 {
        switch (event->type_len) {
@@ -274,13 +281,41 @@ rb_event_length(struct ring_buffer_event *event)
        return 0;
 }
 
+/*
+ * Return total length of time extend and data,
+ *   or just the event length for all other events.
+ */
+static inline unsigned
+rb_event_ts_length(struct ring_buffer_event *event)
+{
+       unsigned len = 0;
+
+       if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
+               /* time extends include the data event after it */
+               len = RB_LEN_TIME_EXTEND;
+               event = skip_time_extend(event);
+       }
+       return len + rb_event_length(event);
+}
+
 /**
  * ring_buffer_event_length - return the length of the event
  * @event: the event to get the length of
+ *
+ * Returns the size of the data load of a data event.
+ * If the event is something other than a data event, it
+ * returns the size of the event itself. With the exception
+ * of a TIME EXTEND, where it still returns the size of the
+ * data load of the data event after it.
  */
 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
 {
-       unsigned length = rb_event_length(event);
+       unsigned length;
+
+       if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
+               event = skip_time_extend(event);
+
+       length = rb_event_length(event);
        if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
                return length;
        length -= RB_EVNT_HDR_SIZE;
@@ -294,6 +329,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_length);
 static void *
 rb_event_data(struct ring_buffer_event *event)
 {
+       if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
+               event = skip_time_extend(event);
        BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
        /* If length is in len field, then array[0] has the data */
        if (event->type_len)
@@ -404,9 +441,6 @@ static inline int test_time_stamp(u64 delta)
 /* Max payload is BUF_PAGE_SIZE - header (8bytes) */
 #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
 
-/* Max number of timestamps that can fit on a page */
-#define RB_TIMESTAMPS_PER_PAGE (BUF_PAGE_SIZE / RB_LEN_TIME_EXTEND)
-
 int ring_buffer_print_page_header(struct trace_seq *s)
 {
        struct buffer_data_page field;
@@ -1546,6 +1580,25 @@ static void rb_inc_iter(struct ring_buffer_iter *iter)
        iter->head = 0;
 }
 
+/* Slow path, do not inline */
+static noinline struct ring_buffer_event *
+rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
+{
+       event->type_len = RINGBUF_TYPE_TIME_EXTEND;
+
+       /* Not the first event on the page? */
+       if (rb_event_index(event)) {
+               event->time_delta = delta & TS_MASK;
+               event->array[0] = delta >> TS_SHIFT;
+       } else {
+               /* nope, just zero it */
+               event->time_delta = 0;
+               event->array[0] = 0;
+       }
+
+       return skip_time_extend(event);
+}
+
 /**
  * ring_buffer_update_event - update event type and data
  * @event: the event to update
@@ -1558,28 +1611,31 @@ static void rb_inc_iter(struct ring_buffer_iter *iter)
  * data field.
  */
 static void
-rb_update_event(struct ring_buffer_event *event,
-                        unsigned type, unsigned length)
+rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
+               struct ring_buffer_event *event, unsigned length,
+               int add_timestamp, u64 delta)
 {
-       event->type_len = type;
-
-       switch (type) {
-
-       case RINGBUF_TYPE_PADDING:
-       case RINGBUF_TYPE_TIME_EXTEND:
-       case RINGBUF_TYPE_TIME_STAMP:
-               break;
+       /* Only a commit updates the timestamp */
+       if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
+               delta = 0;
 
-       case 0:
-               length -= RB_EVNT_HDR_SIZE;
-               if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
-                       event->array[0] = length;
-               else
-                       event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
-               break;
-       default:
-               BUG();
+       /*
+        * If we need to add a timestamp, then we
+        * add it to the start of the reserved space.
+        */
+       if (unlikely(add_timestamp)) {
+               event = rb_add_time_stamp(event, delta);
+               length -= RB_LEN_TIME_EXTEND;
+               delta = 0;
        }
+
+       event->time_delta = delta;
+       length -= RB_EVNT_HDR_SIZE;
+       if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
+               event->type_len = 0;
+               event->array[0] = length;
+       } else
+               event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
 }
 
 /*
@@ -1823,10 +1879,13 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
        local_sub(length, &tail_page->write);
 }
 
-static struct ring_buffer_event *
+/*
+ * This is the slow path, force gcc not to inline it.
+ */
+static noinline struct ring_buffer_event *
 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
             unsigned long length, unsigned long tail,
-            struct buffer_page *tail_page, u64 *ts)
+            struct buffer_page *tail_page, u64 ts)
 {
        struct buffer_page *commit_page = cpu_buffer->commit_page;
        struct ring_buffer *buffer = cpu_buffer->buffer;
@@ -1909,8 +1968,8 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
                 * Nested commits always have zero deltas, so
                 * just reread the time stamp
                 */
-               *ts = rb_time_stamp(buffer);
-               next_page->page->time_stamp = *ts;
+               ts = rb_time_stamp(buffer);
+               next_page->page->time_stamp = ts;
        }
 
  out_again:
@@ -1929,12 +1988,21 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
 
 static struct ring_buffer_event *
 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
-                 unsigned type, unsigned long length, u64 *ts)
+                 unsigned long length, u64 ts,
+                 u64 delta, int add_timestamp)
 {
        struct buffer_page *tail_page;
        struct ring_buffer_event *event;
        unsigned long tail, write;
 
+       /*
+        * If the time delta since the last event is too big to
+        * hold in the time field of the event, then we append a
+        * TIME EXTEND event ahead of the data event.
+        */
+       if (unlikely(add_timestamp))
+               length += RB_LEN_TIME_EXTEND;
+
        tail_page = cpu_buffer->tail_page;
        write = local_add_return(length, &tail_page->write);
 
@@ -1943,7 +2011,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
        tail = write - length;
 
        /* See if we shot pass the end of this buffer page */
-       if (write > BUF_PAGE_SIZE)
+       if (unlikely(write > BUF_PAGE_SIZE))
                return rb_move_tail(cpu_buffer, length, tail,
                                    tail_page, ts);
 
@@ -1951,18 +2019,16 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 
        event = __rb_page_index(tail_page, tail);
        kmemcheck_annotate_bitfield(event, bitfield);
-       rb_update_event(event, type, length);
+       rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
 
-       /* The passed in type is zero for DATA */
-       if (likely(!type))
-               local_inc(&tail_page->entries);
+       local_inc(&tail_page->entries);
 
        /*
         * If this is the first commit on the page, then update
         * its timestamp.
         */
        if (!tail)
-               tail_page->page->time_stamp = *ts;
+               tail_page->page->time_stamp = ts;
 
        return event;
 }
@@ -1977,7 +2043,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
        unsigned long addr;
 
        new_index = rb_event_index(event);
-       old_index = new_index + rb_event_length(event);
+       old_index = new_index + rb_event_ts_length(event);
        addr = (unsigned long)event;
        addr &= PAGE_MASK;
 
@@ -2003,76 +2069,13 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
        return 0;
 }
 
-static int
-rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
-                 u64 *ts, u64 *delta)
-{
-       struct ring_buffer_event *event;
-       int ret;
-
-       WARN_ONCE(*delta > (1ULL << 59),
-                 KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n",
-                 (unsigned long long)*delta,
-                 (unsigned long long)*ts,
-                 (unsigned long long)cpu_buffer->write_stamp);
-
-       /*
-        * The delta is too big, we to add a
-        * new timestamp.
-        */
-       event = __rb_reserve_next(cpu_buffer,
-                                 RINGBUF_TYPE_TIME_EXTEND,
-                                 RB_LEN_TIME_EXTEND,
-                                 ts);
-       if (!event)
-               return -EBUSY;
-
-       if (PTR_ERR(event) == -EAGAIN)
-               return -EAGAIN;
-
-       /* Only a commited time event can update the write stamp */
-       if (rb_event_is_commit(cpu_buffer, event)) {
-               /*
-                * If this is the first on the page, then it was
-                * updated with the page itself. Try to discard it
-                * and if we can't just make it zero.
-                */
-               if (rb_event_index(event)) {
-                       event->time_delta = *delta & TS_MASK;
-                       event->array[0] = *delta >> TS_SHIFT;
-               } else {
-                       /* try to discard, since we do not need this */
-                       if (!rb_try_to_discard(cpu_buffer, event)) {
-                               /* nope, just zero it */
-                               event->time_delta = 0;
-                               event->array[0] = 0;
-                       }
-               }
-               cpu_buffer->write_stamp = *ts;
-               /* let the caller know this was the commit */
-               ret = 1;
-       } else {
-               /* Try to discard the event */
-               if (!rb_try_to_discard(cpu_buffer, event)) {
-                       /* Darn, this is just wasted space */
-                       event->time_delta = 0;
-                       event->array[0] = 0;
-               }
-               ret = 0;
-       }
-
-       *delta = 0;
-
-       return ret;
-}
-
 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
 {
        local_inc(&cpu_buffer->committing);
        local_inc(&cpu_buffer->commits);
 }
 
-static void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
+static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
 {
        unsigned long commits;
 
@@ -2110,9 +2113,10 @@ rb_reserve_next_event(struct ring_buffer *buffer,
                      unsigned long length)
 {
        struct ring_buffer_event *event;
-       u64 ts, delta = 0;
-       int commit = 0;
+       u64 ts, delta;
        int nr_loops = 0;
+       int add_timestamp;
+       u64 diff;
 
        rb_start_commit(cpu_buffer);
 
@@ -2133,6 +2137,9 @@ rb_reserve_next_event(struct ring_buffer *buffer,
 
        length = rb_calculate_event_length(length);
  again:
+       add_timestamp = 0;
+       delta = 0;
+
        /*
         * We allow for interrupts to reenter here and do a trace.
         * If one does, it will cause this original code to loop
@@ -2146,56 +2153,32 @@ rb_reserve_next_event(struct ring_buffer *buffer,
                goto out_fail;
 
        ts = rb_time_stamp(cpu_buffer->buffer);
+       diff = ts - cpu_buffer->write_stamp;
 
-       /*
-        * Only the first commit can update the timestamp.
-        * Yes there is a race here. If an interrupt comes in
-        * just after the conditional and it traces too, then it
-        * will also check the deltas. More than one timestamp may
-        * also be made. But only the entry that did the actual
-        * commit will be something other than zero.
-        */
-       if (likely(cpu_buffer->tail_page == cpu_buffer->commit_page &&
-                  rb_page_write(cpu_buffer->tail_page) ==
-                  rb_commit_index(cpu_buffer))) {
-               u64 diff;
-
-               diff = ts - cpu_buffer->write_stamp;
-
-               /* make sure this diff is calculated here */
-               barrier();
-
-               /* Did the write stamp get updated already? */
-               if (unlikely(ts < cpu_buffer->write_stamp))
-                       goto get_event;
+       /* make sure this diff is calculated here */
+       barrier();
 
+       /* Did the write stamp get updated already? */
+       if (likely(ts >= cpu_buffer->write_stamp)) {
                delta = diff;
                if (unlikely(test_time_stamp(delta))) {
-
-                       commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
-                       if (commit == -EBUSY)
-                               goto out_fail;
-
-                       if (commit == -EAGAIN)
-                               goto again;
-
-                       RB_WARN_ON(cpu_buffer, commit < 0);
+                       WARN_ONCE(delta > (1ULL << 59),
+                                 KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n",
+                                 (unsigned long long)delta,
+                                 (unsigned long long)ts,
+                                 (unsigned long long)cpu_buffer->write_stamp);
+                       add_timestamp = 1;
                }
        }
 
- get_event:
-       event = __rb_reserve_next(cpu_buffer, 0, length, &ts);
+       event = __rb_reserve_next(cpu_buffer, length, ts,
+                                 delta, add_timestamp);
        if (unlikely(PTR_ERR(event) == -EAGAIN))
                goto again;
 
        if (!event)
                goto out_fail;
 
-       if (!rb_event_is_commit(cpu_buffer, event))
-               delta = 0;
-
-       event->time_delta = delta;
-
        return event;
 
  out_fail:
@@ -2207,13 +2190,9 @@ rb_reserve_next_event(struct ring_buffer *buffer,
 
 #define TRACE_RECURSIVE_DEPTH 16
 
-static int trace_recursive_lock(void)
+/* Keep this code out of the fast path cache */
+static noinline void trace_recursive_fail(void)
 {
-       current->trace_recursion++;
-
-       if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
-               return 0;
-
        /* Disable all tracing before we do anything else */
        tracing_off_permanent();
 
@@ -2225,10 +2204,21 @@ static int trace_recursive_lock(void)
                    in_nmi());
 
        WARN_ON_ONCE(1);
+}
+
+static inline int trace_recursive_lock(void)
+{
+       current->trace_recursion++;
+
+       if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
+               return 0;
+
+       trace_recursive_fail();
+
        return -1;
 }
 
-static void trace_recursive_unlock(void)
+static inline void trace_recursive_unlock(void)
 {
        WARN_ON_ONCE(!current->trace_recursion);
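[Editor's note: the refactor above moves the rarely-taken failure path into a separate noinline function so that the inlined fast path stays small. A hypothetical user-space sketch of the same pattern, with the per-task counter replaced by a thread-local:]

    #include <stdio.h>

    #define RECURSIVE_DEPTH 16

    static __thread int recursion;          /* stands in for trace_recursion */

    /* cold path: kept out of line so the fast path stays small */
    static void __attribute__((noinline)) recursive_fail(void)
    {
            fprintf(stderr, "recursion limit (%d) exceeded\n", RECURSIVE_DEPTH);
    }

    static inline int recursive_lock(void)
    {
            if (++recursion < RECURSIVE_DEPTH)
                    return 0;               /* fast path: a bump and a test */
            recursive_fail();
            return -1;
    }

    static inline void recursive_unlock(void)
    {
            recursion--;
    }

    int main(void)
    {
            if (!recursive_lock()) {
                    /* ... the traced work would happen here ... */
                    recursive_unlock();
            }
            return 0;
    }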
 
@@ -2308,12 +2298,28 @@ static void
 rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
                      struct ring_buffer_event *event)
 {
+       u64 delta;
+
        /*
         * The event first in the commit queue updates the
         * time stamp.
         */
-       if (rb_event_is_commit(cpu_buffer, event))
-               cpu_buffer->write_stamp += event->time_delta;
+       if (rb_event_is_commit(cpu_buffer, event)) {
+               /*
+                * A commit event that is first on a page
+                * updates the write timestamp with the page stamp
+                */
+               if (!rb_event_index(event))
+                       cpu_buffer->write_stamp =
+                               cpu_buffer->commit_page->page->time_stamp;
+               else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
+                       delta = event->array[0];
+                       delta <<= TS_SHIFT;
+                       delta += event->time_delta;
+                       cpu_buffer->write_stamp += delta;
+               } else
+                       cpu_buffer->write_stamp += event->time_delta;
+       }
 }
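[Editor's note: the TIME_EXTEND branch above reassembles a large timestamp delta from two fields of the event. A minimal standalone sketch of that split and reassembly in plain user-space C; the 27-bit TS_SHIFT is an assumption carried over from ring_buffer.c's own definition:]

    #include <stdint.h>
    #include <stdio.h>

    #define TS_SHIFT 27
    #define TS_MASK  ((1ULL << TS_SHIFT) - 1)

    int main(void)
    {
            uint64_t delta = (1ULL << 40) + 12345;   /* too big for 27 bits */
            uint32_t time_delta = delta & TS_MASK;   /* low bits in the event */
            uint32_t array0     = delta >> TS_SHIFT; /* high bits in array[0] */

            /* reassembly, mirroring the write_stamp update above */
            uint64_t rebuilt = ((uint64_t)array0 << TS_SHIFT) + time_delta;

            printf("delta=%llu rebuilt=%llu\n",
                   (unsigned long long)delta, (unsigned long long)rebuilt);
            return rebuilt == delta ? 0 : 1;
    }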
 
 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
@@ -2353,6 +2359,9 @@ EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
 
 static inline void rb_event_discard(struct ring_buffer_event *event)
 {
+       if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
+               event = skip_time_extend(event);
+
        /* array[0] holds the actual length for the discarded event */
        event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
        event->type_len = RINGBUF_TYPE_PADDING;
@@ -3049,12 +3058,12 @@ rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
 
  again:
        /*
-        * We repeat when a timestamp is encountered. It is possible
-        * to get multiple timestamps from an interrupt entering just
-        * as one timestamp is about to be written, or from discarded
-        * commits. The most that we can have is the number on a single page.
+        * We repeat when a time extend is encountered.
+        * Since the time extend is always attached to a data event,
+        * we should never loop more than once.
+        * (We never hit the following condition more than twice).
         */
-       if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
+       if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
                return NULL;
 
        reader = rb_get_reader_page(cpu_buffer);
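[Editor's note: the nr_loops bound above works because a time extend is always glued to the data event that follows it, so the peek path retries at most once; the check at 2 is only a sanity net. A toy user-space rendering of that control flow, with a hypothetical event stream:]

    #include <stdio.h>

    enum { EV_DATA, EV_TIME_EXTEND };

    static int events[] = { EV_TIME_EXTEND, EV_DATA };

    static int peek(void)
    {
            int i = 0, nr_loops = 0;

     again:
            if (++nr_loops > 2) {           /* mirrors RB_WARN_ON above */
                    fprintf(stderr, "bug: looped too often\n");
                    return -1;
            }
            if (events[i] == EV_TIME_EXTEND) {
                    i++;                    /* consume the extend, retry */
                    goto again;
            }
            return i;                       /* index of the data event */
    }

    int main(void)
    {
            printf("data event at %d\n", peek());
            return 0;
    }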
@@ -3130,14 +3139,12 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
                return NULL;
 
        /*
-        * We repeat when a timestamp is encountered.
-        * We can get multiple timestamps by nested interrupts or also
-        * if filtering is on (discarding commits). Since discarding
-        * commits can be frequent we can get a lot of timestamps.
-        * But we limit them by not adding timestamps if they begin
-        * at the start of a page.
+        * We repeat when a time extend is encountered.
+        * Since the time extend is always attached to a data event,
+        * we should never loop more than once.
+        * (We never hit the following condition more than twice).
         */
-       if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
+       if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
                return NULL;
 
        if (rb_per_cpu_empty(cpu_buffer))
@@ -3835,7 +3842,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
                if (len > (commit - read))
                        len = (commit - read);
 
-               size = rb_event_length(event);
+               /* Always keep the time extend and data together */
+               size = rb_event_ts_length(event);
 
                if (len < size)
                        goto out_unlock;
@@ -3857,7 +3865,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
                                break;
 
                        event = rb_reader_event(cpu_buffer);
-                       size = rb_event_length(event);
+                       /* Always keep the time extend and data together */
+                       size = rb_event_ts_length(event);
                } while (len > size);
 
                /* update bpage */
index 001bcd2ccf4afb5170cb06d500bcff90844bf90e..82d9b8106cd078970ea0c7343e12ad2b6a6eed36 100644 (file)
@@ -3996,13 +3996,9 @@ static void tracing_init_debugfs_percpu(long cpu)
 {
        struct dentry *d_percpu = tracing_dentry_percpu();
        struct dentry *d_cpu;
-       /* strlen(cpu) + MAX(log10(cpu)) + '\0' */
-       char cpu_dir[7];
+       char cpu_dir[30]; /* 30 characters should be more than enough */
 
-       if (cpu > 999 || cpu < 0)
-               return;
-
-       sprintf(cpu_dir, "cpu%ld", cpu);
+       snprintf(cpu_dir, 30, "cpu%ld", cpu);
        d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
        if (!d_cpu) {
                pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
index b8d2852baa4abe7368e0aa50c294cc6a6137855a..2dec9bcde8b495bd43d189204e2bc9099d5ec32f 100644 (file)
@@ -31,7 +31,6 @@
 #include <linux/perf_event.h>
 #include <linux/stringify.h>
 #include <linux/limits.h>
-#include <linux/uaccess.h>
 #include <asm/bitsperlong.h>
 
 #include "trace.h"
index 0a67e041edf82ee3f614959c2643713063e5739b..24dc60d9fa1f2c09de3dcb12ac33e51a7fd22562 100644 (file)
@@ -63,12 +63,10 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk)
        stats->ac_ppid   = pid_alive(tsk) ?
                                rcu_dereference(tsk->real_parent)->tgid : 0;
        rcu_read_unlock();
-       stats->ac_utime  = cputime_to_msecs(tsk->utime) * USEC_PER_MSEC;
-       stats->ac_stime  = cputime_to_msecs(tsk->stime) * USEC_PER_MSEC;
-       stats->ac_utimescaled =
-               cputime_to_msecs(tsk->utimescaled) * USEC_PER_MSEC;
-       stats->ac_stimescaled =
-               cputime_to_msecs(tsk->stimescaled) * USEC_PER_MSEC;
+       stats->ac_utime = cputime_to_usecs(tsk->utime);
+       stats->ac_stime = cputime_to_usecs(tsk->stime);
+       stats->ac_utimescaled = cputime_to_usecs(tsk->utimescaled);
+       stats->ac_stimescaled = cputime_to_usecs(tsk->stimescaled);
        stats->ac_minflt = tsk->min_flt;
        stats->ac_majflt = tsk->maj_flt;
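[Editor's note: the change above converts cputime directly to microseconds instead of going through milliseconds and multiplying back up, which silently rounded away sub-millisecond resolution. A tiny demonstration with a made-up value:]

    #include <stdio.h>

    #define USEC_PER_MSEC 1000ULL

    int main(void)
    {
            unsigned long long usecs = 1500;   /* 1.5 ms of cputime */
            unsigned long long via_msecs = (usecs / 1000) * USEC_PER_MSEC;

            printf("via msecs: %llu us, direct: %llu us\n", via_msecs, usecs);
            /* prints 1000 vs 1500: the round trip loses 500 us */
            return 0;
    }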
 
index 995840664a5f388b1248bb5b5b363daa1e89948e..28b42b9274d0b5fe47522d9df8158498be12319a 100644 (file)
@@ -1217,6 +1217,19 @@ config ATOMIC64_SELFTEST
 
          If unsure, say N.
 
+config ASYNC_RAID6_TEST
+       tristate "Self test for hardware accelerated raid6 recovery"
+       depends on ASYNC_RAID6_RECOV
+       select ASYNC_MEMCPY
+       ---help---
+         This is a one-shot self test that permutes through the
+         recovery of all the possible two-disk failure scenarios for an
+         N-disk array.  Recovery is performed with the asynchronous
+         raid6 recovery routines, and will optionally use an offload
+         engine if one is available.
+
+         If unsure, say N.
+
 source "samples/Kconfig"
 
 source "lib/Kconfig.kgdb"
index 781e754a75ac973b15921a7c38c9e62f7a8bad66..693394daa2ed79439d4210c7f5c0bd345ff3692b 100644 (file)
 #include <linux/kgdb.h>
 #include <asm/tlbflush.h>
 
+
+#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
+DEFINE_PER_CPU(int, __kmap_atomic_idx);
+#endif
+
 /*
  * Virtual_count is not a pure "count".
  *  0 means that it is not mapped, and has not been mapped
@@ -43,7 +48,6 @@ unsigned long totalhigh_pages __read_mostly;
 EXPORT_SYMBOL(totalhigh_pages);
 
 
-DEFINE_PER_CPU(int, __kmap_atomic_idx);
 EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);
 
 unsigned int nr_free_highpages (void)
index 4e348dbaecd75a297e59101aa9cb5b9ffc872968..e2b6f5634e0d38bf4b4a8df34816e86af72a07e9 100644 (file)
@@ -1,9 +1,9 @@
 /*
  * Access kernel memory without faulting.
  */
-#include <linux/uaccess.h>
 #include <linux/module.h>
 #include <linux/mm.h>
+#include <linux/uaccess.h>
 
 /**
  * probe_kernel_read(): safely attempt to read from a location
index 9be3cf8a5da462d4b1b4103eef61f8d5a9a6e06c..9a99cfaf0a19025f6020c106d56e1ce28d6499b4 100644 (file)
@@ -89,7 +89,10 @@ enum mem_cgroup_stat_index {
        MEM_CGROUP_STAT_PGPGIN_COUNT,   /* # of pages paged in */
        MEM_CGROUP_STAT_PGPGOUT_COUNT,  /* # of pages paged out */
        MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
-       MEM_CGROUP_EVENTS,      /* incremented at every  pagein/pageout */
+       MEM_CGROUP_STAT_DATA, /* end of data requires synchronization */
+       /* incremented at every pagein/pageout */
+       MEM_CGROUP_EVENTS = MEM_CGROUP_STAT_DATA,
+       MEM_CGROUP_ON_MOVE,     /* someone is moving account between groups */
 
        MEM_CGROUP_STAT_NSTATS,
 };
@@ -254,6 +257,12 @@ struct mem_cgroup {
         * percpu counter.
         */
        struct mem_cgroup_stat_cpu *stat;
+       /*
+        * used when a cpu is offlined or for other synchronization.
+        * See mem_cgroup_read_stat().
+        */
+       struct mem_cgroup_stat_cpu nocpu_base;
+       spinlock_t pcp_counter_lock;
 };
 
 /* Stuffs for move charges at task migration. */
@@ -530,14 +539,40 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
        return mz;
 }
 
+/*
+ * Implementation Note: reading percpu statistics for memcg.
+ *
+ * Both vmstat[] and percpu_counter have thresholds and do periodic
+ * synchronization to implement a "quick" read. There is a trade-off
+ * between the cost of reading and the precision of the value, so we may
+ * later implement a periodic synchronization of the counters in memcg.
+ *
+ * But this _read() function is used for the user interface now. The user
+ * accounts memory usage by memory cgroup and _always_ requires an exact
+ * value because memory is being accounted. Even if we provided a
+ * quick-and-fuzzy read, we would still have to visit all online cpus and
+ * compute the sum. So, for now, unnecessary synchronization is not
+ * implemented (it is only implemented for cpu hotplug).
+ *
+ * If there are kernel-internal actions which can make use of a not-exact
+ * value, and reading all cpu values becomes a performance bottleneck in
+ * some common workload, thresholds and synchronization as in vmstat[]
+ * should be implemented.
+ */
 static s64 mem_cgroup_read_stat(struct mem_cgroup *mem,
                enum mem_cgroup_stat_index idx)
 {
        int cpu;
        s64 val = 0;
 
-       for_each_possible_cpu(cpu)
+       get_online_cpus();
+       for_each_online_cpu(cpu)
                val += per_cpu(mem->stat->count[idx], cpu);
+#ifdef CONFIG_HOTPLUG_CPU
+       spin_lock(&mem->pcp_counter_lock);
+       val += mem->nocpu_base.count[idx];
+       spin_unlock(&mem->pcp_counter_lock);
+#endif
+       put_online_cpus();
        return val;
 }
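[Editor's note: the read path above sums the per-cpu counters of online cpus and then folds in nocpu_base, where the values of offlined cpus are parked. A self-contained user-space model of that bookkeeping; array sizes and values are invented:]

    #include <stdint.h>
    #include <stdio.h>

    #define NR_CPUS 4

    static int64_t percpu_count[NR_CPUS] = { 10, 20, 7, 5 };
    static int online[NR_CPUS]           = {  1,  1, 1, 1 };
    static int64_t nocpu_base;              /* values drained from dead cpus */

    static int64_t read_stat(void)
    {
            int64_t val = 0;
            int cpu;

            for (cpu = 0; cpu < NR_CPUS; cpu++)
                    if (online[cpu])
                            val += percpu_count[cpu];
            return val + nocpu_base;
    }

    int main(void)
    {
            /* "offline" cpu 2: drain its value into the base, as the hunk does */
            online[2] = 0;
            nocpu_base += percpu_count[2];
            percpu_count[2] = 0;

            printf("total = %lld\n", (long long)read_stat());   /* still 42 */
            return 0;
    }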
 
@@ -659,40 +694,83 @@ static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
        return mem;
 }
 
-/*
- * Call callback function against all cgroup under hierarchy tree.
- */
-static int mem_cgroup_walk_tree(struct mem_cgroup *root, void *data,
-                         int (*func)(struct mem_cgroup *, void *))
+/* The caller has to guarantee "mem" exists before calling this */
+static struct mem_cgroup *mem_cgroup_start_loop(struct mem_cgroup *mem)
 {
-       int found, ret, nextid;
        struct cgroup_subsys_state *css;
-       struct mem_cgroup *mem;
-
-       if (!root->use_hierarchy)
-               return (*func)(root, data);
+       int found;
 
-       nextid = 1;
-       do {
-               ret = 0;
+       if (!mem) /* ROOT cgroup has the smallest ID */
+               return root_mem_cgroup; /* css_put/get against root is ignored */
+       if (!mem->use_hierarchy) {
+               if (css_tryget(&mem->css))
+                       return mem;
+               return NULL;
+       }
+       rcu_read_lock();
+       /*
+        * search for the memory cgroup with the smallest ID under the
+        * given ROOT cgroup (ID >= 1).
+        */
+       css = css_get_next(&mem_cgroup_subsys, 1, &mem->css, &found);
+       if (css && css_tryget(css))
+               mem = container_of(css, struct mem_cgroup, css);
+       else
                mem = NULL;
+       rcu_read_unlock();
+       return mem;
+}
+
+static struct mem_cgroup *mem_cgroup_get_next(struct mem_cgroup *iter,
+                                       struct mem_cgroup *root,
+                                       bool cond)
+{
+       int nextid = css_id(&iter->css) + 1;
+       int found;
+       int hierarchy_used;
+       struct cgroup_subsys_state *css;
+
+       hierarchy_used = iter->use_hierarchy;
 
+       css_put(&iter->css);
+       /* If no ROOT, walk all, ignore hierarchy */
+       if (!cond || (root && !hierarchy_used))
+               return NULL;
+
+       if (!root)
+               root = root_mem_cgroup;
+
+       do {
+               iter = NULL;
                rcu_read_lock();
-               css = css_get_next(&mem_cgroup_subsys, nextid, &root->css,
-                                  &found);
+
+               css = css_get_next(&mem_cgroup_subsys, nextid,
+                               &root->css, &found);
                if (css && css_tryget(css))
-                       mem = container_of(css, struct mem_cgroup, css);
+                       iter = container_of(css, struct mem_cgroup, css);
                rcu_read_unlock();
-
-               if (mem) {
-                       ret = (*func)(mem, data);
-                       css_put(&mem->css);
-               }
+               /* If css is NULL, no more cgroups will be found */
                nextid = found + 1;
-       } while (!ret && css);
+       } while (css && !iter);
 
-       return ret;
+       return iter;
 }
+/*
+ * for_each_mem_cgroup_tree() visits all cgroups under the tree. Be careful:
+ * breaking out of the loop is not allowed because we hold a reference
+ * count. Instead, set "cond" to false and use "continue" to exit the loop.
+ */
+#define for_each_mem_cgroup_tree_cond(iter, root, cond)        \
+       for (iter = mem_cgroup_start_loop(root);\
+            iter != NULL;\
+            iter = mem_cgroup_get_next(iter, root, cond))
+
+#define for_each_mem_cgroup_tree(iter, root) \
+       for_each_mem_cgroup_tree_cond(iter, root, true)
+
+#define for_each_mem_cgroup_all(iter) \
+       for_each_mem_cgroup_tree_cond(iter, NULL, true)
+
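[Editor's note: these macros replace the old callback-based mem_cgroup_walk_tree() with an open-coded for_each-style loop built from start/next helpers. A toy version of the same construction over a plain array; the css-based walk itself needs the cgroup core, so this shows only the macro shape:]

    #include <stdio.h>

    static int nodes[] = { 1, 2, 3, 4 };
    #define NNODES (int)(sizeof(nodes) / sizeof(nodes[0]))

    static int *walk_start(void) { return &nodes[0]; }

    static int *walk_next(int *iter, int cond)
    {
            if (!cond || ++iter >= nodes + NNODES)
                    return NULL;
            return iter;
    }

    #define for_each_node_cond(iter, cond)          \
            for (iter = walk_start();               \
                 iter != NULL;                      \
                 iter = walk_next(iter, cond))

    int main(void)
    {
            int *iter;
            int num = 0;

            for_each_node_cond(iter, 1)
                    num++;          /* counts members, like the later hunk */

            printf("num = %d\n", num);
            return 0;
    }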
 
 static inline bool mem_cgroup_is_root(struct mem_cgroup *mem)
 {
@@ -1051,7 +1129,52 @@ static unsigned int get_swappiness(struct mem_cgroup *memcg)
        return swappiness;
 }
 
-/* A routine for testing mem is not under move_account */
+static void mem_cgroup_start_move(struct mem_cgroup *mem)
+{
+       int cpu;
+
+       get_online_cpus();
+       spin_lock(&mem->pcp_counter_lock);
+       for_each_online_cpu(cpu)
+               per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1;
+       mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] += 1;
+       spin_unlock(&mem->pcp_counter_lock);
+       put_online_cpus();
+
+       synchronize_rcu();
+}
+
+static void mem_cgroup_end_move(struct mem_cgroup *mem)
+{
+       int cpu;
+
+       if (!mem)
+               return;
+       get_online_cpus();
+       spin_lock(&mem->pcp_counter_lock);
+       for_each_online_cpu(cpu)
+               per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) -= 1;
+       mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] -= 1;
+       spin_unlock(&mem->pcp_counter_lock);
+       put_online_cpus();
+}
+/*
+ * Two routines for checking whether "mem" is under move_account() or not.
+ *
+ * mem_cgroup_stealed() - checks whether a cgroup is mc.from or not. This
+ *                       is used to avoid races in accounting. If true,
+ *                       pc->mem_cgroup may be overwritten.
+ *
+ * mem_cgroup_under_move() - checks whether a cgroup is mc.from, mc.to, or
+ *                       under the hierarchy of moving cgroups. This is for
+ *                       waiting at high memory pressure caused by "move".
+ */
+
+static bool mem_cgroup_stealed(struct mem_cgroup *mem)
+{
+       VM_BUG_ON(!rcu_read_lock_held());
+       return this_cpu_read(mem->stat->count[MEM_CGROUP_ON_MOVE]) > 0;
+}
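[Editor's note: mem_cgroup_start_move()/end_move() raise and lower a per-cpu ON_MOVE count so updaters can detect that an account move may be in flight and fall back to the page_cgroup lock. A sketch with a plain C11 atomic standing in for the per-cpu counter and RCU, for illustration only:]

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int on_move;          /* stands in for MEM_CGROUP_ON_MOVE */

    static void start_move(void) { atomic_fetch_add(&on_move, 1); }
    static void end_move(void)   { atomic_fetch_sub(&on_move, 1); }

    static int stealed(void)    /* "may my cgroup pointer be overwritten?" */
    {
            return atomic_load(&on_move) > 0;
    }

    int main(void)
    {
            printf("before move: stealed=%d\n", stealed());  /* 0: fast path */
            start_move();
            printf("during move: stealed=%d\n", stealed());  /* 1: take lock */
            end_move();
            return 0;
    }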
 
 static bool mem_cgroup_under_move(struct mem_cgroup *mem)
 {
@@ -1092,13 +1215,6 @@ static bool mem_cgroup_wait_acct_move(struct mem_cgroup *mem)
        return false;
 }
 
-static int mem_cgroup_count_children_cb(struct mem_cgroup *mem, void *data)
-{
-       int *val = data;
-       (*val)++;
-       return 0;
-}
-
 /**
  * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
  * @memcg: The memory cgroup that went over limit
@@ -1173,7 +1289,10 @@ done:
 static int mem_cgroup_count_children(struct mem_cgroup *mem)
 {
        int num = 0;
-       mem_cgroup_walk_tree(mem, &num, mem_cgroup_count_children_cb);
+       struct mem_cgroup *iter;
+
+       for_each_mem_cgroup_tree(iter, mem)
+               num++;
        return num;
 }
 
@@ -1322,49 +1441,39 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
        return total;
 }
 
-static int mem_cgroup_oom_lock_cb(struct mem_cgroup *mem, void *data)
-{
-       int *val = (int *)data;
-       int x;
-       /*
-        * Logically, we can stop scanning immediately when we find
-        * a memcg is already locked. But condidering unlock ops and
-        * creation/removal of memcg, scan-all is simple operation.
-        */
-       x = atomic_inc_return(&mem->oom_lock);
-       *val = max(x, *val);
-       return 0;
-}
 /*
  * Check OOM-Killer is already running under our hierarchy.
  * If someone is running, return false.
  */
 static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
 {
-       int lock_count = 0;
+       int x, lock_count = 0;
+       struct mem_cgroup *iter;
 
-       mem_cgroup_walk_tree(mem, &lock_count, mem_cgroup_oom_lock_cb);
+       for_each_mem_cgroup_tree(iter, mem) {
+               x = atomic_inc_return(&iter->oom_lock);
+               lock_count = max(x, lock_count);
+       }
 
        if (lock_count == 1)
                return true;
        return false;
 }
 
-static int mem_cgroup_oom_unlock_cb(struct mem_cgroup *mem, void *data)
+static int mem_cgroup_oom_unlock(struct mem_cgroup *mem)
 {
+       struct mem_cgroup *iter;
+
        /*
         * When a new child is created while the hierarchy is under oom,
         * mem_cgroup_oom_lock() may not be called. We have to use
         * atomic_add_unless() here.
         */
-       atomic_add_unless(&mem->oom_lock, -1, 0);
+       for_each_mem_cgroup_tree(iter, mem)
+               atomic_add_unless(&iter->oom_lock, -1, 0);
        return 0;
 }
 
-static void mem_cgroup_oom_unlock(struct mem_cgroup *mem)
-{
-       mem_cgroup_walk_tree(mem, NULL, mem_cgroup_oom_unlock_cb);
-}
 
 static DEFINE_MUTEX(memcg_oom_mutex);
 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
@@ -1462,34 +1571,73 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
 /*
  * Currently used to update mapped file statistics, but the routine can be
  * generalized to update other statistics as well.
+ *
+ * Notes: Race condition
+ *
+ * We usually use lock_page_cgroup() for accessing page_cgroup members, but
+ * it tends to be costly. Under certain conditions, however, we don't need
+ * to take it _always_.
+ *
+ * Considering "charge", lock_page_cgroup() is not required because all
+ * file-stat operations happen after a page is attached to the radix-tree.
+ * There is no race with "charge".
+ *
+ * Considering "uncharge", memcg intentionally doesn't clear pc->mem_cgroup
+ * at "uncharge", so we always see a valid pc->mem_cgroup even if there is
+ * a race with "uncharge". The statistics themselves are properly handled
+ * by flags.
+ *
+ * Considering "move", this is the only case where we see a race. To make
+ * the race window small, we check the MEM_CGROUP_ON_MOVE percpu value and
+ * detect the possibility of a race. If there is one, we take the lock.
  */
-void mem_cgroup_update_file_mapped(struct page *page, int val)
+
+static void mem_cgroup_update_file_stat(struct page *page, int idx, int val)
 {
        struct mem_cgroup *mem;
-       struct page_cgroup *pc;
+       struct page_cgroup *pc = lookup_page_cgroup(page);
+       bool need_unlock = false;
 
-       pc = lookup_page_cgroup(page);
        if (unlikely(!pc))
                return;
 
-       lock_page_cgroup(pc);
+       rcu_read_lock();
        mem = pc->mem_cgroup;
-       if (!mem || !PageCgroupUsed(pc))
-               goto done;
+       if (unlikely(!mem || !PageCgroupUsed(pc)))
+               goto out;
+       /* is pc->mem_cgroup unstable? */
+       if (unlikely(mem_cgroup_stealed(mem))) {
+               /* take the lock to access pc->mem_cgroup safely */
+               lock_page_cgroup(pc);
+               need_unlock = true;
+               mem = pc->mem_cgroup;
+               if (!mem || !PageCgroupUsed(pc))
+                       goto out;
+       }
 
-       /*
-        * Preemption is already disabled. We can use __this_cpu_xxx
-        */
-       if (val > 0) {
-               __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
-               SetPageCgroupFileMapped(pc);
-       } else {
-               __this_cpu_dec(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
-               ClearPageCgroupFileMapped(pc);
+       this_cpu_add(mem->stat->count[idx], val);
+
+       switch (idx) {
+       case MEM_CGROUP_STAT_FILE_MAPPED:
+               if (val > 0)
+                       SetPageCgroupFileMapped(pc);
+               else if (!page_mapped(page))
+                       ClearPageCgroupFileMapped(pc);
+               break;
+       default:
+               BUG();
        }
 
-done:
-       unlock_page_cgroup(pc);
+out:
+       if (unlikely(need_unlock))
+               unlock_page_cgroup(pc);
+       rcu_read_unlock();
+       return;
+}
+
+void mem_cgroup_update_file_mapped(struct page *page, int val)
+{
+       mem_cgroup_update_file_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, val);
 }
 
 /*
@@ -1605,15 +1753,55 @@ static void drain_all_stock_sync(void)
        atomic_dec(&memcg_drain_count);
 }
 
-static int __cpuinit memcg_stock_cpu_callback(struct notifier_block *nb,
+/*
+ * This function drains the percpu counter values of a dead cpu into the
+ * memcg's nocpu base. Note that this function can be preempted.
+ */
+static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *mem, int cpu)
+{
+       int i;
+
+       spin_lock(&mem->pcp_counter_lock);
+       for (i = 0; i < MEM_CGROUP_STAT_DATA; i++) {
+               s64 x = per_cpu(mem->stat->count[i], cpu);
+
+               per_cpu(mem->stat->count[i], cpu) = 0;
+               mem->nocpu_base.count[i] += x;
+       }
+       /* the ON_MOVE value needs to be cleared; it works as a kind of lock */
+       per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) = 0;
+       spin_unlock(&mem->pcp_counter_lock);
+}
+
+static void synchronize_mem_cgroup_on_move(struct mem_cgroup *mem, int cpu)
+{
+       int idx = MEM_CGROUP_ON_MOVE;
+
+       spin_lock(&mem->pcp_counter_lock);
+       per_cpu(mem->stat->count[idx], cpu) = mem->nocpu_base.count[idx];
+       spin_unlock(&mem->pcp_counter_lock);
+}
+
+static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
                                        unsigned long action,
                                        void *hcpu)
 {
        int cpu = (unsigned long)hcpu;
        struct memcg_stock_pcp *stock;
+       struct mem_cgroup *iter;
+
+       if (action == CPU_ONLINE) {
+               for_each_mem_cgroup_all(iter)
+                       synchronize_mem_cgroup_on_move(iter, cpu);
+               return NOTIFY_OK;
+       }
 
-       if (action != CPU_DEAD)
+       if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
                return NOTIFY_OK;
+
+       for_each_mem_cgroup_all(iter)
+               mem_cgroup_drain_pcp_counter(iter, cpu);
+
        stock = &per_cpu(memcg_stock, cpu);
        drain_stock(stock);
        return NOTIFY_OK;
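[Editor's note: the condition above is corrected from "||" to "&&"; with "||" the early return would fire for every action, including CPU_DEAD itself, so the drain would never run. A user-space truth-table check; the enum values are illustrative, not the kernel's:]

    #include <stdio.h>

    enum { CPU_ONLINE = 1, CPU_DEAD = 2, CPU_DEAD_FROZEN = 3, CPU_UP_PREPARE = 4 };

    static int handles_dead(int action)
    {
            if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
                    return 0;       /* NOTIFY_OK, nothing to drain */
            return 1;               /* drain the percpu counters */
    }

    int main(void)
    {
            int a;

            for (a = CPU_ONLINE; a <= CPU_UP_PREPARE; a++)
                    printf("action %d -> drain=%d\n", a, handles_dead(a));
            return 0;
    }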
@@ -3038,6 +3226,7 @@ move_account:
                lru_add_drain_all();
                drain_all_stock_sync();
                ret = 0;
+               mem_cgroup_start_move(mem);
                for_each_node_state(node, N_HIGH_MEMORY) {
                        for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
                                enum lru_list l;
@@ -3051,6 +3240,7 @@ move_account:
                        if (ret)
                                break;
                }
+               mem_cgroup_end_move(mem);
                memcg_oom_recover(mem);
                /* it seems parent cgroup doesn't have enough mem */
                if (ret == -ENOMEM)
@@ -3137,33 +3327,25 @@ static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
        return retval;
 }
 
-struct mem_cgroup_idx_data {
-       s64 val;
-       enum mem_cgroup_stat_index idx;
-};
 
-static int
-mem_cgroup_get_idx_stat(struct mem_cgroup *mem, void *data)
+static u64 mem_cgroup_get_recursive_idx_stat(struct mem_cgroup *mem,
+                               enum mem_cgroup_stat_index idx)
 {
-       struct mem_cgroup_idx_data *d = data;
-       d->val += mem_cgroup_read_stat(mem, d->idx);
-       return 0;
-}
+       struct mem_cgroup *iter;
+       s64 val = 0;
 
-static void
-mem_cgroup_get_recursive_idx_stat(struct mem_cgroup *mem,
-                               enum mem_cgroup_stat_index idx, s64 *val)
-{
-       struct mem_cgroup_idx_data d;
-       d.idx = idx;
-       d.val = 0;
-       mem_cgroup_walk_tree(mem, &d, mem_cgroup_get_idx_stat);
-       *val = d.val;
+       /* each per-cpu value can be negative, so use s64 */
+       for_each_mem_cgroup_tree(iter, mem)
+               val += mem_cgroup_read_stat(iter, idx);
+
+       if (val < 0) /* race ? */
+               val = 0;
+       return val;
 }
 
 static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap)
 {
-       u64 idx_val, val;
+       u64 val;
 
        if (!mem_cgroup_is_root(mem)) {
                if (!swap)
@@ -3172,16 +3354,12 @@ static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap)
                        return res_counter_read_u64(&mem->memsw, RES_USAGE);
        }
 
-       mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_CACHE, &idx_val);
-       val = idx_val;
-       mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_RSS, &idx_val);
-       val += idx_val;
+       val = mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_CACHE);
+       val += mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_RSS);
 
-       if (swap) {
-               mem_cgroup_get_recursive_idx_stat(mem,
-                               MEM_CGROUP_STAT_SWAPOUT, &idx_val);
-               val += idx_val;
-       }
+       if (swap)
+               val += mem_cgroup_get_recursive_idx_stat(mem,
+                               MEM_CGROUP_STAT_SWAPOUT);
 
        return val << PAGE_SHIFT;
 }
@@ -3389,9 +3567,9 @@ struct {
 };
 
 
-static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data)
+static void
+mem_cgroup_get_local_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
 {
-       struct mcs_total_stat *s = data;
        s64 val;
 
        /* per cpu stat */
@@ -3421,13 +3599,15 @@ static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data)
        s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
        val = mem_cgroup_get_local_zonestat(mem, LRU_UNEVICTABLE);
        s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
-       return 0;
 }
 
 static void
 mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
 {
-       mem_cgroup_walk_tree(mem, s, mem_cgroup_get_local_stat);
+       struct mem_cgroup *iter;
+
+       for_each_mem_cgroup_tree(iter, mem)
+               mem_cgroup_get_local_stat(iter, s);
 }
 
 static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
@@ -3604,7 +3784,7 @@ static int compare_thresholds(const void *a, const void *b)
        return _a->threshold - _b->threshold;
 }
 
-static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem, void *data)
+static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem)
 {
        struct mem_cgroup_eventfd_list *ev;
 
@@ -3615,7 +3795,10 @@ static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem, void *data)
 
 static void mem_cgroup_oom_notify(struct mem_cgroup *mem)
 {
-       mem_cgroup_walk_tree(mem, NULL, mem_cgroup_oom_notify_cb);
+       struct mem_cgroup *iter;
+
+       for_each_mem_cgroup_tree(iter, mem)
+               mem_cgroup_oom_notify_cb(iter);
 }
 
 static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
@@ -4032,6 +4215,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
                        vfree(mem);
                mem = NULL;
        }
+       spin_lock_init(&mem->pcp_counter_lock);
        return mem;
 }
 
@@ -4158,7 +4342,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
                                                &per_cpu(memcg_stock, cpu);
                        INIT_WORK(&stock->work, drain_local_stock);
                }
-               hotcpu_notifier(memcg_stock_cpu_callback, 0);
+               hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
        } else {
                parent = mem_cgroup_from_cont(cont->parent);
                mem->use_hierarchy = parent->use_hierarchy;
@@ -4513,6 +4697,7 @@ static void mem_cgroup_clear_mc(void)
        mc.to = NULL;
        mc.moving_task = NULL;
        spin_unlock(&mc.lock);
+       mem_cgroup_end_move(from);
        memcg_oom_recover(from);
        memcg_oom_recover(to);
        wake_up_all(&mc.waitq);
@@ -4543,6 +4728,7 @@ static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
                        VM_BUG_ON(mc.moved_charge);
                        VM_BUG_ON(mc.moved_swap);
                        VM_BUG_ON(mc.moving_task);
+                       mem_cgroup_start_move(from);
                        spin_lock(&mc.lock);
                        mc.from = from;
                        mc.to = mem;
index 3ce7bc373a52b7463a156e8ee580851e5afc6a88..3f4854205b16ba0c39a8b815e00fe1b48f3472f1 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -378,6 +378,7 @@ void release_pages(struct page **pages, int nr, int cold)
 
        pagevec_free(&pages_to_free);
 }
+EXPORT_SYMBOL(release_pages);
 
 /*
  * The pages which we're about to release may be in the deferred lru-addition
index 941f2a324d3aed7ef68bb2173eb3f562d49a104a..c1df2dad8c6b58470a51d459bd2263989d1d96f4 100644 (file)
@@ -346,8 +346,8 @@ int garp_request_join(const struct net_device *dev,
                      const struct garp_application *appl,
                      const void *data, u8 len, u8 type)
 {
-       struct garp_port *port = dev->garp_port;
-       struct garp_applicant *app = port->applicants[appl->type];
+       struct garp_port *port = rtnl_dereference(dev->garp_port);
+       struct garp_applicant *app = rtnl_dereference(port->applicants[appl->type]);
        struct garp_attr *attr;
 
        spin_lock_bh(&app->lock);
@@ -366,8 +366,8 @@ void garp_request_leave(const struct net_device *dev,
                        const struct garp_application *appl,
                        const void *data, u8 len, u8 type)
 {
-       struct garp_port *port = dev->garp_port;
-       struct garp_applicant *app = port->applicants[appl->type];
+       struct garp_port *port = rtnl_dereference(dev->garp_port);
+       struct garp_applicant *app = rtnl_dereference(port->applicants[appl->type]);
        struct garp_attr *attr;
 
        spin_lock_bh(&app->lock);
@@ -546,11 +546,11 @@ static int garp_init_port(struct net_device *dev)
 
 static void garp_release_port(struct net_device *dev)
 {
-       struct garp_port *port = dev->garp_port;
+       struct garp_port *port = rtnl_dereference(dev->garp_port);
        unsigned int i;
 
        for (i = 0; i <= GARP_APPLICATION_MAX; i++) {
-               if (port->applicants[i])
+               if (rtnl_dereference(port->applicants[i]))
                        return;
        }
        rcu_assign_pointer(dev->garp_port, NULL);
@@ -565,7 +565,7 @@ int garp_init_applicant(struct net_device *dev, struct garp_application *appl)
 
        ASSERT_RTNL();
 
-       if (!dev->garp_port) {
+       if (!rtnl_dereference(dev->garp_port)) {
                err = garp_init_port(dev);
                if (err < 0)
                        goto err1;
@@ -601,8 +601,8 @@ EXPORT_SYMBOL_GPL(garp_init_applicant);
 
 void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl)
 {
-       struct garp_port *port = dev->garp_port;
-       struct garp_applicant *app = port->applicants[appl->type];
+       struct garp_port *port = rtnl_dereference(dev->garp_port);
+       struct garp_applicant *app = rtnl_dereference(port->applicants[appl->type]);
 
        ASSERT_RTNL();
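[Editor's note: the garp changes are about sparse's __rcu annotation discipline: an RCU-protected pointer must be read through an accessor such as rtnl_dereference(), which in the kernel also asserts that RTNL is held. A compile-only sketch with stubbed-out macros shows the shape of the rule; the stubs are illustrative, the real macros do the checking:]

    #include <stddef.h>
    #include <stdio.h>

    /* stand-ins for the kernel macros, for illustration only */
    #define rcu_assign_pointer(p, v)  ((p) = (v))
    #define rtnl_dereference(p)       (p)

    struct garp_port { int dummy; };

    static struct garp_port *the_port;  /* would be marked __rcu in the kernel */

    int main(void)
    {
            struct garp_port port = { 0 };

            rcu_assign_pointer(the_port, &port);    /* publish */
            if (rtnl_dereference(the_port))         /* read under RTNL */
                    printf("port present\n");
            rcu_assign_pointer(the_port, NULL);     /* unpublish */
            return 0;
    }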
 
index 53c8f77f0ccd1025a7061568b551a1d9631c0dd1..978c30b1b36b5a138eeeb7b9780009686f356d6e 100644 (file)
@@ -21,8 +21,8 @@
 #define GARP_ADDR_MAX  0x2F
 #define GARP_ADDR_RANGE        (GARP_ADDR_MAX - GARP_ADDR_MIN)
 
-static const struct stp_proto *garp_protos[GARP_ADDR_RANGE + 1] __read_mostly;
-static const struct stp_proto *stp_proto __read_mostly;
+static const struct stp_proto __rcu *garp_protos[GARP_ADDR_RANGE + 1] __read_mostly;
+static const struct stp_proto __rcu *stp_proto __read_mostly;
 
 static struct llc_sap *sap __read_mostly;
 static unsigned int sap_registered;
index 05b867e43757dadceaf89d1287b5209a60ca5a65..52077ca22072d6eeab3e12db0030fb5f3c2db68c 100644 (file)
@@ -112,7 +112,7 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 
        ASSERT_RTNL();
 
-       grp = real_dev->vlgrp;
+       grp = rtnl_dereference(real_dev->vlgrp);
        BUG_ON(!grp);
 
        /* Take it out of our own structures, but be sure to interlock with
@@ -177,7 +177,7 @@ int register_vlan_dev(struct net_device *dev)
        struct vlan_group *grp, *ngrp = NULL;
        int err;
 
-       grp = real_dev->vlgrp;
+       grp = rtnl_dereference(real_dev->vlgrp);
        if (!grp) {
                ngrp = grp = vlan_group_alloc(real_dev);
                if (!grp)
@@ -385,7 +385,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
                dev->netdev_ops->ndo_vlan_rx_add_vid(dev, 0);
        }
 
-       grp = dev->vlgrp;
+       grp = rtnl_dereference(dev->vlgrp);
        if (!grp)
                goto out;
 
index 78b5a89b0f40a455e4229fb04b291e87877b491b..35dfb83184833302e616dca6b0985faeb533897d 100644 (file)
@@ -1685,10 +1685,10 @@ EXPORT_SYMBOL(netif_device_attach);
 
 static bool can_checksum_protocol(unsigned long features, __be16 protocol)
 {
-       return ((features & NETIF_F_GEN_CSUM) ||
-               ((features & NETIF_F_IP_CSUM) &&
+       return ((features & NETIF_F_NO_CSUM) ||
+               ((features & NETIF_F_V4_CSUM) &&
                 protocol == htons(ETH_P_IP)) ||
-               ((features & NETIF_F_IPV6_CSUM) &&
+               ((features & NETIF_F_V6_CSUM) &&
                 protocol == htons(ETH_P_IPV6)) ||
                ((features & NETIF_F_FCOE_CRC) &&
                 protocol == htons(ETH_P_FCOE)));
@@ -1696,22 +1696,18 @@ static bool can_checksum_protocol(unsigned long features, __be16 protocol)
 
 static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
 {
+       __be16 protocol = skb->protocol;
        int features = dev->features;
 
-       if (vlan_tx_tag_present(skb))
+       if (vlan_tx_tag_present(skb)) {
                features &= dev->vlan_features;
-
-       if (can_checksum_protocol(features, skb->protocol))
-               return true;
-
-       if (skb->protocol == htons(ETH_P_8021Q)) {
+       } else if (protocol == htons(ETH_P_8021Q)) {
                struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
-               if (can_checksum_protocol(dev->features & dev->vlan_features,
-                                         veh->h_vlan_encapsulated_proto))
-                       return true;
+               protocol = veh->h_vlan_encapsulated_proto;
+               features &= dev->vlan_features;
        }
 
-       return false;
+       return can_checksum_protocol(features, protocol);
 }
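[Editor's note: the rewritten dev_can_checksum() first settles on the effective protocol and feature mask (VLAN-tagged, 802.1Q-encapsulated, or plain) and then asks the checksum question once. A condensed user-space rendering of that flow; the F_* feature bits are stand-ins, while the EtherType values are the usual ones:]

    #include <stdint.h>
    #include <stdio.h>

    #define F_NO_CSUM  0x1
    #define F_V4_CSUM  0x2
    #define P_IP       0x0800
    #define P_8021Q    0x8100

    static int can_checksum(int features, uint16_t proto)
    {
            return (features & F_NO_CSUM) ||
                   ((features & F_V4_CSUM) && proto == P_IP);
    }

    int main(void)
    {
            int dev_features  = F_V4_CSUM;
            int vlan_features = F_V4_CSUM;
            uint16_t proto    = P_8021Q;    /* VLAN-encapsulated frame */
            uint16_t inner    = P_IP;       /* h_vlan_encapsulated_proto */
            int features      = dev_features;

            if (proto == P_8021Q) {         /* unwrap once, mask features */
                    proto = inner;
                    features &= vlan_features;
            }
            printf("can checksum: %d\n", can_checksum(features, proto));
            return 0;
    }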
 
 /**
@@ -2213,7 +2209,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 }
 
 static DEFINE_PER_CPU(int, xmit_recursion);
-#define RECURSION_LIMIT 3
+#define RECURSION_LIMIT 10
 
 /**
  *     dev_queue_xmit - transmit a buffer
@@ -2413,7 +2409,7 @@ EXPORT_SYMBOL(__skb_get_rxhash);
 #ifdef CONFIG_RPS
 
 /* One global table that all flow-based protocols share. */
-struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
+struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
 EXPORT_SYMBOL(rps_sock_flow_table);
 
 /*
@@ -2425,7 +2421,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
                       struct rps_dev_flow **rflowp)
 {
        struct netdev_rx_queue *rxqueue;
-       struct rps_map *map = NULL;
+       struct rps_map *map;
        struct rps_dev_flow_table *flow_table;
        struct rps_sock_flow_table *sock_flow_table;
        int cpu = -1;
@@ -2444,15 +2440,15 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
        } else
                rxqueue = dev->_rx;
 
-       if (rxqueue->rps_map) {
-               map = rcu_dereference(rxqueue->rps_map);
-               if (map && map->len == 1) {
+       map = rcu_dereference(rxqueue->rps_map);
+       if (map) {
+               if (map->len == 1) {
                        tcpu = map->cpus[0];
                        if (cpu_online(tcpu))
                                cpu = tcpu;
                        goto done;
                }
-       } else if (!rxqueue->rps_flow_table) {
+       } else if (!rcu_dereference_raw(rxqueue->rps_flow_table)) {
                goto done;
        }
 
@@ -5416,7 +5412,7 @@ void netdev_run_todo(void)
                /* paranoia */
                BUG_ON(netdev_refcnt_read(dev));
                WARN_ON(rcu_dereference_raw(dev->ip_ptr));
-               WARN_ON(dev->ip6_ptr);
+               WARN_ON(rcu_dereference_raw(dev->ip6_ptr));
                WARN_ON(dev->dn_ptr);
 
                if (dev->destructor)
index 1bc3f253ba6c76efe7e8b97c78c2e6a72d771b23..82a4369ae15091520d99effdcbb36dec7b3bab42 100644 (file)
@@ -351,12 +351,12 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 
                list_for_each_entry(r, &ops->rules_list, list) {
                        if (r->pref == rule->target) {
-                               rule->ctarget = r;
+                               RCU_INIT_POINTER(rule->ctarget, r);
                                break;
                        }
                }
 
-               if (rule->ctarget == NULL)
+               if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
                        unresolved = 1;
        } else if (rule->action == FR_ACT_GOTO)
                goto errout_free;
@@ -373,6 +373,11 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 
        fib_rule_get(rule);
 
+       if (last)
+               list_add_rcu(&rule->list, &last->list);
+       else
+               list_add_rcu(&rule->list, &ops->rules_list);
+
        if (ops->unresolved_rules) {
                /*
                 * There are unresolved goto rules in the list, check if
@@ -381,7 +386,7 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
                list_for_each_entry(r, &ops->rules_list, list) {
                        if (r->action == FR_ACT_GOTO &&
                            r->target == rule->pref) {
-                               BUG_ON(r->ctarget != NULL);
+                               BUG_ON(rtnl_dereference(r->ctarget) != NULL);
                                rcu_assign_pointer(r->ctarget, rule);
                                if (--ops->unresolved_rules == 0)
                                        break;
@@ -395,11 +400,6 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
        if (unresolved)
                ops->unresolved_rules++;
 
-       if (last)
-               list_add_rcu(&rule->list, &last->list);
-       else
-               list_add_rcu(&rule->list, &ops->rules_list);
-
        notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).pid);
        flush_route_cache(ops);
        rules_ops_put(ops);
@@ -487,7 +487,7 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
                 */
                if (ops->nr_goto_rules > 0) {
                        list_for_each_entry(tmp, &ops->rules_list, list) {
-                               if (tmp->ctarget == rule) {
+                               if (rtnl_dereference(tmp->ctarget) == rule) {
                                        rcu_assign_pointer(tmp->ctarget, NULL);
                                        ops->unresolved_rules++;
                                }
@@ -545,7 +545,8 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
        frh->action = rule->action;
        frh->flags = rule->flags;
 
-       if (rule->action == FR_ACT_GOTO && rule->ctarget == NULL)
+       if (rule->action == FR_ACT_GOTO &&
+           rcu_dereference_raw(rule->ctarget) == NULL)
                frh->flags |= FIB_RULE_UNRESOLVED;
 
        if (rule->iifname[0]) {
index 7adf50352918713af197b55283faf6b105087f4e..7beaec36b541274bcd1d62c9fe8376b368ab3386 100644 (file)
@@ -89,8 +89,8 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
        rcu_read_lock_bh();
        filter = rcu_dereference_bh(sk->sk_filter);
        if (filter) {
-               unsigned int pkt_len = sk_run_filter(skb, filter->insns,
-                               filter->len);
+               unsigned int pkt_len = sk_run_filter(skb, filter->insns, filter->len);
+
                err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
        }
        rcu_read_unlock_bh();
index b143173e3eb2bd9259e1c0122800848146b4c136..a5ff5a89f376bb1299dec78fa34abe9480ae01f1 100644 (file)
@@ -598,7 +598,8 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
        }
 
        spin_lock(&rps_map_lock);
-       old_map = queue->rps_map;
+       old_map = rcu_dereference_protected(queue->rps_map,
+                                           lockdep_is_held(&rps_map_lock));
        rcu_assign_pointer(queue->rps_map, map);
        spin_unlock(&rps_map_lock);
 
@@ -677,7 +678,8 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
                table = NULL;
 
        spin_lock(&rps_dev_flow_lock);
-       old_table = queue->rps_flow_table;
+       old_table = rcu_dereference_protected(queue->rps_flow_table,
+                                             lockdep_is_held(&rps_dev_flow_lock));
        rcu_assign_pointer(queue->rps_flow_table, table);
        spin_unlock(&rps_dev_flow_lock);
 
@@ -705,13 +707,17 @@ static void rx_queue_release(struct kobject *kobj)
 {
        struct netdev_rx_queue *queue = to_rx_queue(kobj);
        struct netdev_rx_queue *first = queue->first;
+       struct rps_map *map;
+       struct rps_dev_flow_table *flow_table;
 
-       if (queue->rps_map)
-               call_rcu(&queue->rps_map->rcu, rps_map_release);
 
-       if (queue->rps_flow_table)
-               call_rcu(&queue->rps_flow_table->rcu,
-                   rps_dev_flow_table_release);
+       map = rcu_dereference_raw(queue->rps_map);
+       if (map)
+               call_rcu(&map->rcu, rps_map_release);
+
+       flow_table = rcu_dereference_raw(queue->rps_flow_table);
+       if (flow_table)
+               call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
 
        if (atomic_dec_and_test(&first->count))
                kfree(first);
index c988e685433acad2bb471097d79ec024509cee9d..3f860261c5eea2be402ed0556a3a8b3f6fd7fc9b 100644 (file)
@@ -42,7 +42,9 @@ static int net_assign_generic(struct net *net, int id, void *data)
        BUG_ON(!mutex_is_locked(&net_mutex));
        BUG_ON(id == 0);
 
-       ng = old_ng = net->gen;
+       old_ng = rcu_dereference_protected(net->gen,
+                                          lockdep_is_held(&net_mutex));
+       ng = old_ng;
        if (old_ng->len >= id)
                goto assign;
 
index 2c0df0f95b3d488c8e6f8818857f19c02726e200..679b797d06b1028888bdc9590a1f741918eece6c 100644 (file)
@@ -771,10 +771,10 @@ done:
 static unsigned long num_arg(const char __user * user_buffer,
                             unsigned long maxlen, unsigned long *num)
 {
-       int i = 0;
+       int i;
        *num = 0;
 
-       for (; i < maxlen; i++) {
+       for (i = 0; i < maxlen; i++) {
                char c;
                if (get_user(c, &user_buffer[i]))
                        return -EFAULT;
@@ -789,9 +789,9 @@ static unsigned long num_arg(const char __user * user_buffer,
 
 static int strn_len(const char __user * user_buffer, unsigned int maxlen)
 {
-       int i = 0;
+       int i;
 
-       for (; i < maxlen; i++) {
+       for (i = 0; i < maxlen; i++) {
                char c;
                if (get_user(c, &user_buffer[i]))
                        return -EFAULT;
@@ -846,7 +846,7 @@ static ssize_t pktgen_if_write(struct file *file,
 {
        struct seq_file *seq = file->private_data;
        struct pktgen_dev *pkt_dev = seq->private;
-       int i = 0, max, len;
+       int i, max, len;
        char name[16], valstr[32];
        unsigned long value = 0;
        char *pg_result = NULL;
@@ -860,13 +860,13 @@ static ssize_t pktgen_if_write(struct file *file,
                return -EINVAL;
        }
 
-       max = count - i;
-       tmp = count_trail_chars(&user_buffer[i], max);
+       max = count;
+       tmp = count_trail_chars(user_buffer, max);
        if (tmp < 0) {
                pr_warning("illegal format\n");
                return tmp;
        }
-       i += tmp;
+       i = tmp;
 
        /* Read variable name */
 
@@ -1764,7 +1764,7 @@ static ssize_t pktgen_thread_write(struct file *file,
 {
        struct seq_file *seq = file->private_data;
        struct pktgen_thread *t = seq->private;
-       int i = 0, max, len, ret;
+       int i, max, len, ret;
        char name[40];
        char *pg_result;
 
@@ -1773,12 +1773,12 @@ static ssize_t pktgen_thread_write(struct file *file,
                return -EINVAL;
        }
 
-       max = count - i;
-       len = count_trail_chars(&user_buffer[i], max);
+       max = count;
+       len = count_trail_chars(user_buffer, max);
        if (len < 0)
                return len;
 
-       i += len;
+       i = len;
 
        /* Read variable name */
 
@@ -1975,7 +1975,7 @@ static struct net_device *pktgen_dev_get_by_name(struct pktgen_dev *pkt_dev,
                                                 const char *ifname)
 {
        char b[IFNAMSIZ+5];
-       int i = 0;
+       int i;
 
        for (i = 0; ifname[i] != '@'; i++) {
                if (i == IFNAMSIZ)
@@ -2519,8 +2519,8 @@ static void free_SAs(struct pktgen_dev *pkt_dev)
 {
        if (pkt_dev->cflows) {
                /* let go of the SAs if we have them */
-               int i = 0;
-               for ( i < pkt_dev->cflows; i++) {
+               int i;
+               for (i = 0; i < pkt_dev->cflows; i++) {
                        struct xfrm_state *x = pkt_dev->flows[i].x;
                        if (x) {
                                xfrm_state_put(x);
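[Editor's note: the pktgen hunks above move every loop counter's initialization into the for-header; the old free_SAs() variant even left the initializer out of the statement entirely. A trivial demonstration of why the self-contained form is more robust:]

    #include <stdio.h>

    int main(void)
    {
            int i;
            int sum = 0;

            for (i = 0; i < 5; i++)   /* init, test, and step in one place */
                    sum += i;

            /* reusing i later is safe because each loop re-initializes it */
            for (i = 0; i < 3; i++)
                    sum += i;

            printf("sum = %d\n", sum);   /* 10 + 3 = 13 */
            return 0;
    }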
index 11db43632df8712576b4ff8f6c8eed7a8fdfd21d..3eed5424e659a1ab130324d16bfe3c0af4ab2feb 100644 (file)
@@ -1225,7 +1225,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
                sock_reset_flag(newsk, SOCK_DONE);
                skb_queue_head_init(&newsk->sk_error_queue);
 
-               filter = newsk->sk_filter;
+               filter = rcu_dereference_protected(newsk->sk_filter, 1);
                if (filter != NULL)
                        sk_filter_charge(newsk, filter);
 
index 01eee5d984be4b6d838357a56e0211508d0e31ba..385b6095fdc4b1da8540c879d3a5c450954cb7f7 100644 (file)
@@ -34,7 +34,8 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
 
        mutex_lock(&sock_flow_mutex);
 
-       orig_sock_table = rps_sock_flow_table;
+       orig_sock_table = rcu_dereference_protected(rps_sock_flow_table,
+                                       lockdep_is_held(&sock_flow_mutex));
        size = orig_size = orig_sock_table ? orig_sock_table->mask + 1 : 0;
 
        ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
index 43e1c594ce8f38aeae59dbf32dd6dfac747d2c46..b232375a0b75f4f3108284c9bcb4b993b415ebdc 100644 (file)
@@ -120,11 +120,12 @@ static inline void fn_rebuild_zone(struct fn_zone *fz,
                struct fib_node *f;
 
                hlist_for_each_entry_safe(f, node, n, &old_ht[i], fn_hash) {
-                       struct hlist_head __rcu *new_head;
+                       struct hlist_head *new_head;
 
                        hlist_del_rcu(&f->fn_hash);
 
-                       new_head = &fz->fz_hash[fn_hash(f->fn_key, fz)];
+                       new_head = rcu_dereference_protected(fz->fz_hash, 1) +
+                                  fn_hash(f->fn_key, fz);
                        hlist_add_head_rcu(&f->fn_hash, new_head);
                }
        }
@@ -179,8 +180,8 @@ static void fn_rehash_zone(struct fn_zone *fz)
                memcpy(&nfz, fz, sizeof(nfz));
 
                write_seqlock_bh(&fz->fz_lock);
-               old_ht = fz->fz_hash;
-               nfz.fz_hash = ht;
+               old_ht = rcu_dereference_protected(fz->fz_hash, 1);
+               RCU_INIT_POINTER(nfz.fz_hash, ht);
                nfz.fz_hashmask = new_hashmask;
                nfz.fz_divisor = new_divisor;
                fn_rebuild_zone(&nfz, old_ht, old_divisor);
@@ -236,7 +237,7 @@ fn_new_zone(struct fn_hash *table, int z)
        seqlock_init(&fz->fz_lock);
        fz->fz_divisor = z ? EMBEDDED_HASH_SIZE : 1;
        fz->fz_hashmask = fz->fz_divisor - 1;
-       fz->fz_hash = fz->fz_embedded_hash;
+       RCU_INIT_POINTER(fz->fz_hash, fz->fz_embedded_hash);
        fz->fz_order = z;
        fz->fz_revorder = 32 - z;
        fz->fz_mask = inet_make_mask(z);
@@ -272,7 +273,7 @@ int fib_table_lookup(struct fib_table *tb,
        for (fz = rcu_dereference(t->fn_zone_list);
             fz != NULL;
             fz = rcu_dereference(fz->fz_next)) {
-               struct hlist_head __rcu *head;
+               struct hlist_head *head;
                struct hlist_node *node;
                struct fib_node *f;
                __be32 k;
@@ -282,7 +283,7 @@ int fib_table_lookup(struct fib_table *tb,
                        seq = read_seqbegin(&fz->fz_lock);
                        k = fz_key(flp->fl4_dst, fz);
 
-                       head = &fz->fz_hash[fn_hash(k, fz)];
+                       head = rcu_dereference(fz->fz_hash) + fn_hash(k, fz);
                        hlist_for_each_entry_rcu(f, node, head, fn_hash) {
                                if (f->fn_key != k)
                                        continue;
@@ -311,6 +312,7 @@ void fib_table_select_default(struct fib_table *tb,
        struct fib_info *last_resort;
        struct fn_hash *t = (struct fn_hash *)tb->tb_data;
        struct fn_zone *fz = t->fn_zones[0];
+       struct hlist_head *head;
 
        if (fz == NULL)
                return;
@@ -320,7 +322,8 @@ void fib_table_select_default(struct fib_table *tb,
        order = -1;
 
        rcu_read_lock();
-       hlist_for_each_entry_rcu(f, node, &fz->fz_hash[0], fn_hash) {
+       head = rcu_dereference(fz->fz_hash);
+       hlist_for_each_entry_rcu(f, node, head, fn_hash) {
                struct fib_alias *fa;
 
                list_for_each_entry_rcu(fa, &f->fn_alias, fa_list) {
@@ -374,7 +377,7 @@ out:
 /* Insert node F to FZ. */
 static inline void fib_insert_node(struct fn_zone *fz, struct fib_node *f)
 {
-       struct hlist_head *head = &fz->fz_hash[fn_hash(f->fn_key, fz)];
+       struct hlist_head *head = rtnl_dereference(fz->fz_hash) + fn_hash(f->fn_key, fz);
 
        hlist_add_head_rcu(&f->fn_hash, head);
 }
@@ -382,7 +385,7 @@ static inline void fib_insert_node(struct fn_zone *fz, struct fib_node *f)
 /* Return the node in FZ matching KEY. */
 static struct fib_node *fib_find_node(struct fn_zone *fz, __be32 key)
 {
-       struct hlist_head *head = &fz->fz_hash[fn_hash(key, fz)];
+       struct hlist_head *head = rtnl_dereference(fz->fz_hash) + fn_hash(key, fz);
        struct hlist_node *node;
        struct fib_node *f;
 
@@ -662,7 +665,7 @@ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
 
 static int fn_flush_list(struct fn_zone *fz, int idx)
 {
-       struct hlist_head *head = &fz->fz_hash[idx];
+       struct hlist_head *head = rtnl_dereference(fz->fz_hash) + idx;
        struct hlist_node *node, *n;
        struct fib_node *f;
        int found = 0;
@@ -761,14 +764,15 @@ fn_hash_dump_zone(struct sk_buff *skb, struct netlink_callback *cb,
                   struct fn_zone *fz)
 {
        int h, s_h;
+       struct hlist_head *head = rcu_dereference(fz->fz_hash);
 
-       if (fz->fz_hash == NULL)
+       if (head == NULL)
                return skb->len;
        s_h = cb->args[3];
        for (h = s_h; h < fz->fz_divisor; h++) {
-               if (hlist_empty(&fz->fz_hash[h]))
+               if (hlist_empty(head + h))
                        continue;
-               if (fn_hash_dump_bucket(skb, cb, tb, fz, &fz->fz_hash[h]) < 0) {
+               if (fn_hash_dump_bucket(skb, cb, tb, fz, head + h) < 0) {
                        cb->args[3] = h;
                        return -1;
                }
@@ -872,7 +876,7 @@ static struct fib_alias *fib_get_first(struct seq_file *seq)
                if (!iter->zone->fz_nent)
                        continue;
 
-               iter->hash_head = iter->zone->fz_hash;
+               iter->hash_head = rcu_dereference(iter->zone->fz_hash);
                maxslot = iter->zone->fz_divisor;
 
                for (iter->bucket = 0; iter->bucket < maxslot;
@@ -957,7 +961,7 @@ static struct fib_alias *fib_get_next(struct seq_file *seq)
                        goto out;
 
                iter->bucket = 0;
-               iter->hash_head = iter->zone->fz_hash;
+               iter->hash_head = rcu_dereference(iter->zone->fz_hash);
 
                hlist_for_each_entry(fn, node, iter->hash_head, fn_hash) {
                        list_for_each_entry(fa, &fn->fn_alias, fa_list) {
index caea6885fdbd0810ea3e8d9eeefd5f74282a0930..c6933f2ea3105be192c7eaa1ac06045539cd07cc 100644 (file)
@@ -22,7 +22,7 @@
 #include <net/gre.h>
 
 
-static const struct gre_protocol *gre_proto[GREPROTO_MAX] __read_mostly;
+static const struct gre_protocol __rcu *gre_proto[GREPROTO_MAX] __read_mostly;
 static DEFINE_SPINLOCK(gre_proto_lock);
 
 int gre_add_protocol(const struct gre_protocol *proto, u8 version)
@@ -51,7 +51,8 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version)
                goto err_out;
 
        spin_lock(&gre_proto_lock);
-       if (gre_proto[version] != proto)
+       if (rcu_dereference_protected(gre_proto[version],
+                       lockdep_is_held(&gre_proto_lock)) != proto)
                goto err_out_unlock;
        rcu_assign_pointer(gre_proto[version], NULL);
        spin_unlock(&gre_proto_lock);
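
The gre_del_protocol() hunk shows the update-side counterpart: under gre_proto_lock the old value is read with rcu_dereference_protected(..., lockdep_is_held(...)), which documents to sparse and lockdep that the spinlock, not RCU, makes the access safe. A hedged sketch of that unregister shape (demo_* names are illustrative, not from this commit):

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct demo_ops;				/* opaque handler type */

static DEFINE_SPINLOCK(demo_lock);
static const struct demo_ops __rcu *demo_slot;

static int demo_unregister(const struct demo_ops *ops)
{
	int err = -EINVAL;

	spin_lock(&demo_lock);
	/* writer-side read: the spinlock, not rcu_read_lock(), protects it */
	if (rcu_dereference_protected(demo_slot,
				      lockdep_is_held(&demo_lock)) == ops) {
		rcu_assign_pointer(demo_slot, NULL);
		err = 0;
	}
	spin_unlock(&demo_lock);
	synchronize_rcu();	/* let readers drain before the caller frees ops */
	return err;
}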
index 9ffa24b9a804c143f611f37b092ccbfaa5df036f..9e94d7cf4f8a977e640efd6720acbbebf32a43f2 100644 (file)
@@ -72,18 +72,19 @@ static struct kmem_cache *peer_cachep __read_mostly;
 #define node_height(x) x->avl_height
 
 #define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
+#define peer_avl_empty_rcu ((struct inet_peer __rcu __force *)&peer_fake_node)
 static const struct inet_peer peer_fake_node = {
-       .avl_left       = peer_avl_empty,
-       .avl_right      = peer_avl_empty,
+       .avl_left       = peer_avl_empty_rcu,
+       .avl_right      = peer_avl_empty_rcu,
        .avl_height     = 0
 };
 
 static struct {
-       struct inet_peer *root;
+       struct inet_peer __rcu *root;
        spinlock_t      lock;
        int             total;
 } peers = {
-       .root           = peer_avl_empty,
+       .root           = peer_avl_empty_rcu,
        .lock           = __SPIN_LOCK_UNLOCKED(peers.lock),
        .total          = 0,
 };
@@ -156,11 +157,14 @@ static void unlink_from_unused(struct inet_peer *p)
  */
 #define lookup(_daddr, _stack)                                         \
 ({                                                             \
-       struct inet_peer *u, **v;                               \
+       struct inet_peer *u;                                    \
+       struct inet_peer __rcu **v;                             \
                                                                \
        stackptr = _stack;                                      \
        *stackptr++ = &peers.root;                              \
-       for (u = peers.root; u != peer_avl_empty; ) {           \
+       for (u = rcu_dereference_protected(peers.root,          \
+                       lockdep_is_held(&peers.lock));          \
+            u != peer_avl_empty; ) {                           \
                if (_daddr == u->v4daddr)                       \
                        break;                                  \
                if ((__force __u32)_daddr < (__force __u32)u->v4daddr)  \
@@ -168,7 +172,8 @@ static void unlink_from_unused(struct inet_peer *p)
                else                                            \
                        v = &u->avl_right;                      \
                *stackptr++ = v;                                \
-               u = *v;                                         \
+               u = rcu_dereference_protected(*v,               \
+                       lockdep_is_held(&peers.lock));          \
        }                                                       \
        u;                                                      \
 })
@@ -209,13 +214,17 @@ static struct inet_peer *lookup_rcu_bh(__be32 daddr)
 /* Called with local BH disabled and the pool lock held. */
 #define lookup_rightempty(start)                               \
 ({                                                             \
-       struct inet_peer *u, **v;                               \
+       struct inet_peer *u;                                    \
+       struct inet_peer __rcu **v;                             \
        *stackptr++ = &start->avl_left;                         \
        v = &start->avl_left;                                   \
-       for (u = *v; u->avl_right != peer_avl_empty; ) {        \
+       for (u = rcu_dereference_protected(*v,                  \
+                       lockdep_is_held(&peers.lock));          \
+            u->avl_right != peer_avl_empty_rcu; ) {            \
                v = &u->avl_right;                              \
                *stackptr++ = v;                                \
-               u = *v;                                         \
+               u = rcu_dereference_protected(*v,               \
+                       lockdep_is_held(&peers.lock));          \
        }                                                       \
        u;                                                      \
 })
@@ -224,74 +233,86 @@ static struct inet_peer *lookup_rcu_bh(__be32 daddr)
  * Variable names are the proof of operation correctness.
  * Look into mm/map_avl.c for more detail description of the ideas.
  */
-static void peer_avl_rebalance(struct inet_peer **stack[],
-               struct inet_peer ***stackend)
+static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
+               struct inet_peer __rcu ***stackend)
 {
-       struct inet_peer **nodep, *node, *l, *r;
+       struct inet_peer __rcu **nodep;
+       struct inet_peer *node, *l, *r;
        int lh, rh;
 
        while (stackend > stack) {
                nodep = *--stackend;
-               node = *nodep;
-               l = node->avl_left;
-               r = node->avl_right;
+               node = rcu_dereference_protected(*nodep,
+                               lockdep_is_held(&peers.lock));
+               l = rcu_dereference_protected(node->avl_left,
+                               lockdep_is_held(&peers.lock));
+               r = rcu_dereference_protected(node->avl_right,
+                               lockdep_is_held(&peers.lock));
                lh = node_height(l);
                rh = node_height(r);
                if (lh > rh + 1) { /* l: RH+2 */
                        struct inet_peer *ll, *lr, *lrl, *lrr;
                        int lrh;
-                       ll = l->avl_left;
-                       lr = l->avl_right;
+                       ll = rcu_dereference_protected(l->avl_left,
+                               lockdep_is_held(&peers.lock));
+                       lr = rcu_dereference_protected(l->avl_right,
+                               lockdep_is_held(&peers.lock));
                        lrh = node_height(lr);
                        if (lrh <= node_height(ll)) {   /* ll: RH+1 */
-                               node->avl_left = lr;    /* lr: RH or RH+1 */
-                               node->avl_right = r;    /* r: RH */
+                               RCU_INIT_POINTER(node->avl_left, lr);   /* lr: RH or RH+1 */
+                               RCU_INIT_POINTER(node->avl_right, r);   /* r: RH */
                                node->avl_height = lrh + 1; /* RH+1 or RH+2 */
-                               l->avl_left = ll;       /* ll: RH+1 */
-                               l->avl_right = node;    /* node: RH+1 or RH+2 */
+                               RCU_INIT_POINTER(l->avl_left, ll);       /* ll: RH+1 */
+                               RCU_INIT_POINTER(l->avl_right, node);   /* node: RH+1 or RH+2 */
                                l->avl_height = node->avl_height + 1;
-                               *nodep = l;
+                               RCU_INIT_POINTER(*nodep, l);
                        } else { /* ll: RH, lr: RH+1 */
-                               lrl = lr->avl_left;     /* lrl: RH or RH-1 */
-                               lrr = lr->avl_right;    /* lrr: RH or RH-1 */
-                               node->avl_left = lrr;   /* lrr: RH or RH-1 */
-                               node->avl_right = r;    /* r: RH */
+                               lrl = rcu_dereference_protected(lr->avl_left,
+                                       lockdep_is_held(&peers.lock));  /* lrl: RH or RH-1 */
+                               lrr = rcu_dereference_protected(lr->avl_right,
+                                       lockdep_is_held(&peers.lock));  /* lrr: RH or RH-1 */
+                               RCU_INIT_POINTER(node->avl_left, lrr);  /* lrr: RH or RH-1 */
+                               RCU_INIT_POINTER(node->avl_right, r);   /* r: RH */
                                node->avl_height = rh + 1; /* node: RH+1 */
-                               l->avl_left = ll;       /* ll: RH */
-                               l->avl_right = lrl;     /* lrl: RH or RH-1 */
+                               RCU_INIT_POINTER(l->avl_left, ll);      /* ll: RH */
+                               RCU_INIT_POINTER(l->avl_right, lrl);    /* lrl: RH or RH-1 */
                                l->avl_height = rh + 1; /* l: RH+1 */
-                               lr->avl_left = l;       /* l: RH+1 */
-                               lr->avl_right = node;   /* node: RH+1 */
+                               RCU_INIT_POINTER(lr->avl_left, l);      /* l: RH+1 */
+                               RCU_INIT_POINTER(lr->avl_right, node);  /* node: RH+1 */
                                lr->avl_height = rh + 2;
-                               *nodep = lr;
+                               RCU_INIT_POINTER(*nodep, lr);
                        }
                } else if (rh > lh + 1) { /* r: LH+2 */
                        struct inet_peer *rr, *rl, *rlr, *rll;
                        int rlh;
-                       rr = r->avl_right;
-                       rl = r->avl_left;
+                       rr = rcu_dereference_protected(r->avl_right,
+                               lockdep_is_held(&peers.lock));
+                       rl = rcu_dereference_protected(r->avl_left,
+                               lockdep_is_held(&peers.lock));
                        rlh = node_height(rl);
                        if (rlh <= node_height(rr)) {   /* rr: LH+1 */
-                               node->avl_right = rl;   /* rl: LH or LH+1 */
-                               node->avl_left = l;     /* l: LH */
+                               RCU_INIT_POINTER(node->avl_right, rl);  /* rl: LH or LH+1 */
+                               RCU_INIT_POINTER(node->avl_left, l);    /* l: LH */
                                node->avl_height = rlh + 1; /* LH+1 or LH+2 */
-                               r->avl_right = rr;      /* rr: LH+1 */
-                               r->avl_left = node;     /* node: LH+1 or LH+2 */
+                               RCU_INIT_POINTER(r->avl_right, rr);     /* rr: LH+1 */
+                               RCU_INIT_POINTER(r->avl_left, node);    /* node: LH+1 or LH+2 */
                                r->avl_height = node->avl_height + 1;
-                               *nodep = r;
+                               RCU_INIT_POINTER(*nodep, r);
                        } else { /* rr: RH, rl: RH+1 */
-                               rlr = rl->avl_right;    /* rlr: LH or LH-1 */
-                               rll = rl->avl_left;     /* rll: LH or LH-1 */
-                               node->avl_right = rll;  /* rll: LH or LH-1 */
-                               node->avl_left = l;     /* l: LH */
+                               rlr = rcu_dereference_protected(rl->avl_right,
+                                       lockdep_is_held(&peers.lock));  /* rlr: LH or LH-1 */
+                               rll = rcu_dereference_protected(rl->avl_left,
+                                       lockdep_is_held(&peers.lock));  /* rll: LH or LH-1 */
+                               RCU_INIT_POINTER(node->avl_right, rll); /* rll: LH or LH-1 */
+                               RCU_INIT_POINTER(node->avl_left, l);    /* l: LH */
                                node->avl_height = lh + 1; /* node: LH+1 */
-                               r->avl_right = rr;      /* rr: LH */
-                               r->avl_left = rlr;      /* rlr: LH or LH-1 */
+                               RCU_INIT_POINTER(r->avl_right, rr);     /* rr: LH */
+                               RCU_INIT_POINTER(r->avl_left, rlr);     /* rlr: LH or LH-1 */
                                r->avl_height = lh + 1; /* r: LH+1 */
-                               rl->avl_right = r;      /* r: LH+1 */
-                               rl->avl_left = node;    /* node: LH+1 */
+                               RCU_INIT_POINTER(rl->avl_right, r);     /* r: LH+1 */
+                               RCU_INIT_POINTER(rl->avl_left, node);   /* node: LH+1 */
                                rl->avl_height = lh + 2;
-                               *nodep = rl;
+                               RCU_INIT_POINTER(*nodep, rl);
                        }
                } else {
                        node->avl_height = (lh > rh ? lh : rh) + 1;
@@ -303,10 +324,10 @@ static void peer_avl_rebalance(struct inet_peer **stack[],
 #define link_to_pool(n)                                                \
 do {                                                           \
        n->avl_height = 1;                                      \
-       n->avl_left = peer_avl_empty;                           \
-       n->avl_right = peer_avl_empty;                          \
-       smp_wmb(); /* lockless readers can catch us now */      \
-       **--stackptr = n;                                       \
+       n->avl_left = peer_avl_empty_rcu;                       \
+       n->avl_right = peer_avl_empty_rcu;                      \
+       /* lockless readers can catch us now */                 \
+       rcu_assign_pointer(**--stackptr, n);                    \
        peer_avl_rebalance(stack, stackptr);                    \
 } while (0)
 
@@ -330,24 +351,25 @@ static void unlink_from_pool(struct inet_peer *p)
         * We use refcnt=-1 to alert lockless readers this entry is deleted.
         */
        if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
-               struct inet_peer **stack[PEER_MAXDEPTH];
-               struct inet_peer ***stackptr, ***delp;
+               struct inet_peer __rcu **stack[PEER_MAXDEPTH];
+               struct inet_peer __rcu ***stackptr, ***delp;
                if (lookup(p->v4daddr, stack) != p)
                        BUG();
                delp = stackptr - 1; /* *delp[0] == p */
-               if (p->avl_left == peer_avl_empty) {
+               if (p->avl_left == peer_avl_empty_rcu) {
                        *delp[0] = p->avl_right;
                        --stackptr;
                } else {
                        /* look for a node to insert instead of p */
                        struct inet_peer *t;
                        t = lookup_rightempty(p);
-                       BUG_ON(*stackptr[-1] != t);
+                       BUG_ON(rcu_dereference_protected(*stackptr[-1],
+                                       lockdep_is_held(&peers.lock)) != t);
                        **--stackptr = t->avl_left;
                        /* t is removed, t->v4daddr > x->v4daddr for any
                         * x in p->avl_left subtree.
                         * Put t in the old place of p. */
-                       *delp[0] = t;
+                       RCU_INIT_POINTER(*delp[0], t);
                        t->avl_left = p->avl_left;
                        t->avl_right = p->avl_right;
                        t->avl_height = p->avl_height;
@@ -414,7 +436,7 @@ static int cleanup_once(unsigned long ttl)
 struct inet_peer *inet_getpeer(__be32 daddr, int create)
 {
        struct inet_peer *p;
-       struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
+       struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
 
        /* Look up for the address quickly, lockless.
         * Because of a concurrent writer, we might not find an existing entry.
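
One detail worth noting in the inetpeer rework above: link_to_pool() publishes a freshly initialized node with rcu_assign_pointer(), whose write barrier is what lockless readers rely on, while the AVL rotations use RCU_INIT_POINTER(). The rotations only re-link nodes whose contents readers may already observe, so the barrier can legitimately be skipped there. A toy illustration of the distinction (hypothetical demo_* names):

#include <linux/rcupdate.h>

struct demo_node {
	int			val;
	struct demo_node __rcu	*next;
};

static struct demo_node __rcu *demo_head;

/* caller holds the writer lock */
static void demo_publish(struct demo_node *n)
{
	n->val = 42;				/* initialize first... */
	RCU_INIT_POINTER(n->next, NULL);	/* n not reachable yet: no barrier */
	rcu_assign_pointer(demo_head, n);	/* ...barrier orders init vs. link */
}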
index d0ffcbe369b76b4a000c5f5e23b6e61dc64fb3dd..01087e035b7d6632f9543d36ebfbf025212fc8a9 100644 (file)
@@ -1072,6 +1072,7 @@ ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
                                        break;
                                }
                                ipgre_tunnel_unlink(ign, t);
+                               synchronize_net();
                                t->parms.iph.saddr = p.iph.saddr;
                                t->parms.iph.daddr = p.iph.daddr;
                                t->parms.i_key = p.i_key;
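
The one-line synchronize_net() added here (and in the matching ipip, ip6_tunnel and sit hunks below) implements the standard unlink/update/relink discipline: once the tunnel is unlinked from the RCU-protected lookup hash, waiting out a grace period guarantees no reader still holds a pointer to it before its parameters are rewritten in place. Schematically, with assumed demo_* helpers standing in for the real hash operations:

#include <linux/types.h>
#include <linux/netdevice.h>	/* synchronize_net() */

struct demo_tunnel { __be32 saddr, daddr; };

static void demo_unlink(struct demo_tunnel *t) { /* drop from RCU hash (elided) */ }
static void demo_link(struct demo_tunnel *t)   { /* reinsert (elided) */ }

static void demo_update(struct demo_tunnel *t, __be32 saddr, __be32 daddr)
{
	demo_unlink(t);		/* new lookups can no longer find t */
	synchronize_net();	/* wait out readers that already did */
	t->saddr = saddr;	/* now safe to mutate in place */
	t->daddr = daddr;
	demo_link(t);		/* republish the updated tunnel */
}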
index 64b70ad162e370dbeb5b60adda5dd811182fc0f5..3948c86e59ca697645a3316145fca66db5096880 100644 (file)
@@ -238,7 +238,7 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
    but receiver should be enough clever f.e. to forward mtrace requests,
    sent to multicast group to reach destination designated router.
  */
-struct ip_ra_chain *ip_ra_chain;
+struct ip_ra_chain __rcu *ip_ra_chain;
 static DEFINE_SPINLOCK(ip_ra_lock);
 
 
@@ -253,7 +253,8 @@ static void ip_ra_destroy_rcu(struct rcu_head *head)
 int ip_ra_control(struct sock *sk, unsigned char on,
                  void (*destructor)(struct sock *))
 {
-       struct ip_ra_chain *ra, *new_ra, **rap;
+       struct ip_ra_chain *ra, *new_ra;
+       struct ip_ra_chain __rcu **rap;
 
        if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num == IPPROTO_RAW)
                return -EINVAL;
@@ -261,7 +262,10 @@ int ip_ra_control(struct sock *sk, unsigned char on,
        new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
 
        spin_lock_bh(&ip_ra_lock);
-       for (rap = &ip_ra_chain; (ra = *rap) != NULL; rap = &ra->next) {
+       for (rap = &ip_ra_chain;
+            (ra = rcu_dereference_protected(*rap,
+                       lockdep_is_held(&ip_ra_lock))) != NULL;
+            rap = &ra->next) {
                if (ra->sk == sk) {
                        if (on) {
                                spin_unlock_bh(&ip_ra_lock);
index e9b816e6cd73a681ea02e9540d6849eb924f4a8a..cd300aaee78f542630f40ab9a34f9b10af8eec9e 100644 (file)
@@ -676,6 +676,7 @@ ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
                                }
                                t = netdev_priv(dev);
                                ipip_tunnel_unlink(ipn, t);
+                               synchronize_net();
                                t->parms.iph.saddr = p.iph.saddr;
                                t->parms.iph.daddr = p.iph.daddr;
                                memcpy(dev->dev_addr, &p.iph.saddr, 4);
index 65699c24411cfb5cb3ee55428e7e0302feb64716..9ae5c01cd0b2b8da2e243c08d2597dd012e96995 100644 (file)
@@ -28,7 +28,7 @@
 #include <linux/spinlock.h>
 #include <net/protocol.h>
 
-const struct net_protocol *inet_protos[MAX_INET_PROTOS] __read_mostly;
+const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS] __read_mostly;
 
 /*
  *     Add a protocol handler to the hash tables
@@ -38,7 +38,8 @@ int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol)
 {
        int hash = protocol & (MAX_INET_PROTOS - 1);
 
-       return !cmpxchg(&inet_protos[hash], NULL, prot) ? 0 : -1;
+       return !cmpxchg((const struct net_protocol **)&inet_protos[hash],
+                       NULL, prot) ? 0 : -1;
 }
 EXPORT_SYMBOL(inet_add_protocol);
 
@@ -50,7 +51,8 @@ int inet_del_protocol(const struct net_protocol *prot, unsigned char protocol)
 {
        int ret, hash = protocol & (MAX_INET_PROTOS - 1);
 
-       ret = (cmpxchg(&inet_protos[hash], prot, NULL) == prot) ? 0 : -1;
+       ret = (cmpxchg((const struct net_protocol **)&inet_protos[hash],
+                      prot, NULL) == prot) ? 0 : -1;
 
        synchronize_net();
 
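
The casts in the protocol-array hunks above look odd but are deliberate: cmpxchg() cannot type-check against a __rcu-qualified lvalue under sparse, so the qualifier is cast away, and cmpxchg() itself supplies the ordering that rcu_assign_pointer() would otherwise provide. A NULL-vs-non-NULL compare-and-swap then makes registration atomic with no lock at all. The idiom in isolation (demo_* names hypothetical; kernel-module context assumed, cmpxchg() comes in via the arch headers):

#include <linux/rcupdate.h>

struct demo_proto;	/* opaque per-protocol handler */

#define DEMO_MAX 16
static const struct demo_proto __rcu *demo_table[DEMO_MAX];

static int demo_register(const struct demo_proto *p, unsigned char num)
{
	/* the cast strips __rcu for cmpxchg(); success publishes p atomically */
	return !cmpxchg((const struct demo_proto **)&demo_table[num & (DEMO_MAX - 1)],
			NULL, p) ? 0 : -1;
}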
index d6cb2bfcd8e1baf7495e55ce83b534b2b0955b2f..987bf9adb31833c19a0db04ce76060306d8e6994 100644 (file)
@@ -198,7 +198,7 @@ const __u8 ip_tos2prio[16] = {
  */
 
 struct rt_hash_bucket {
-       struct rtable   *chain;
+       struct rtable __rcu     *chain;
 };
 
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
@@ -280,7 +280,7 @@ static struct rtable *rt_cache_get_first(struct seq_file *seq)
        struct rtable *r = NULL;
 
        for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
-               if (!rt_hash_table[st->bucket].chain)
+               if (!rcu_dereference_raw(rt_hash_table[st->bucket].chain))
                        continue;
                rcu_read_lock_bh();
                r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
@@ -300,17 +300,17 @@ static struct rtable *__rt_cache_get_next(struct seq_file *seq,
 {
        struct rt_cache_iter_state *st = seq->private;
 
-       r = r->dst.rt_next;
+       r = rcu_dereference_bh(r->dst.rt_next);
        while (!r) {
                rcu_read_unlock_bh();
                do {
                        if (--st->bucket < 0)
                                return NULL;
-               } while (!rt_hash_table[st->bucket].chain);
+               } while (!rcu_dereference_raw(rt_hash_table[st->bucket].chain));
                rcu_read_lock_bh();
-               r = rt_hash_table[st->bucket].chain;
+               r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
        }
-       return rcu_dereference_bh(r);
+       return r;
 }
 
 static struct rtable *rt_cache_get_next(struct seq_file *seq,
@@ -721,19 +721,23 @@ static void rt_do_flush(int process_context)
        for (i = 0; i <= rt_hash_mask; i++) {
                if (process_context && need_resched())
                        cond_resched();
-               rth = rt_hash_table[i].chain;
+               rth = rcu_dereference_raw(rt_hash_table[i].chain);
                if (!rth)
                        continue;
 
                spin_lock_bh(rt_hash_lock_addr(i));
 #ifdef CONFIG_NET_NS
                {
-               struct rtable ** prev, * p;
+               struct rtable __rcu **prev;
+               struct rtable *p;
 
-               rth = rt_hash_table[i].chain;
+               rth = rcu_dereference_protected(rt_hash_table[i].chain,
+                       lockdep_is_held(rt_hash_lock_addr(i)));
 
                /* defer releasing the head of the list after spin_unlock */
-               for (tail = rth; tail; tail = tail->dst.rt_next)
+               for (tail = rth; tail;
+                    tail = rcu_dereference_protected(tail->dst.rt_next,
+                               lockdep_is_held(rt_hash_lock_addr(i))))
                        if (!rt_is_expired(tail))
                                break;
                if (rth != tail)
@@ -741,8 +745,12 @@ static void rt_do_flush(int process_context)
 
                /* call rt_free on entries after the tail requiring flush */
                prev = &rt_hash_table[i].chain;
-               for (p = *prev; p; p = next) {
-                       next = p->dst.rt_next;
+               for (p = rcu_dereference_protected(*prev,
+                               lockdep_is_held(rt_hash_lock_addr(i)));
+                    p != NULL;
+                    p = next) {
+                       next = rcu_dereference_protected(p->dst.rt_next,
+                               lockdep_is_held(rt_hash_lock_addr(i)));
                        if (!rt_is_expired(p)) {
                                prev = &p->dst.rt_next;
                        } else {
@@ -752,14 +760,15 @@ static void rt_do_flush(int process_context)
                }
                }
 #else
-               rth = rt_hash_table[i].chain;
-               rt_hash_table[i].chain = NULL;
+               rth = rcu_dereference_protected(rt_hash_table[i].chain,
+                       lockdep_is_held(rt_hash_lock_addr(i)));
+               rcu_assign_pointer(rt_hash_table[i].chain, NULL);
                tail = NULL;
 #endif
                spin_unlock_bh(rt_hash_lock_addr(i));
 
                for (; rth != tail; rth = next) {
-                       next = rth->dst.rt_next;
+                       next = rcu_dereference_protected(rth->dst.rt_next, 1);
                        rt_free(rth);
                }
        }
@@ -790,7 +799,7 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth)
        while (aux != rth) {
                if (compare_hash_inputs(&aux->fl, &rth->fl))
                        return 0;
-               aux = aux->dst.rt_next;
+               aux = rcu_dereference_protected(aux->dst.rt_next, 1);
        }
        return ONE;
 }
@@ -799,7 +808,8 @@ static void rt_check_expire(void)
 {
        static unsigned int rover;
        unsigned int i = rover, goal;
-       struct rtable *rth, **rthp;
+       struct rtable *rth;
+       struct rtable __rcu **rthp;
        unsigned long samples = 0;
        unsigned long sum = 0, sum2 = 0;
        unsigned long delta;
@@ -825,11 +835,12 @@ static void rt_check_expire(void)
 
                samples++;
 
-               if (*rthp == NULL)
+               if (rcu_dereference_raw(*rthp) == NULL)
                        continue;
                length = 0;
                spin_lock_bh(rt_hash_lock_addr(i));
-               while ((rth = *rthp) != NULL) {
+               while ((rth = rcu_dereference_protected(*rthp,
+                                       lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
                        prefetch(rth->dst.rt_next);
                        if (rt_is_expired(rth)) {
                                *rthp = rth->dst.rt_next;
@@ -941,7 +952,8 @@ static int rt_garbage_collect(struct dst_ops *ops)
        static unsigned long last_gc;
        static int rover;
        static int equilibrium;
-       struct rtable *rth, **rthp;
+       struct rtable *rth;
+       struct rtable __rcu **rthp;
        unsigned long now = jiffies;
        int goal;
        int entries = dst_entries_get_fast(&ipv4_dst_ops);
@@ -995,7 +1007,8 @@ static int rt_garbage_collect(struct dst_ops *ops)
                        k = (k + 1) & rt_hash_mask;
                        rthp = &rt_hash_table[k].chain;
                        spin_lock_bh(rt_hash_lock_addr(k));
-                       while ((rth = *rthp) != NULL) {
+                       while ((rth = rcu_dereference_protected(*rthp,
+                                       lockdep_is_held(rt_hash_lock_addr(k)))) != NULL) {
                                if (!rt_is_expired(rth) &&
                                        !rt_may_expire(rth, tmo, expire)) {
                                        tmo >>= 1;
@@ -1071,7 +1084,7 @@ static int slow_chain_length(const struct rtable *head)
 
        while (rth) {
                length += has_noalias(head, rth);
-               rth = rth->dst.rt_next;
+               rth = rcu_dereference_protected(rth->dst.rt_next, 1);
        }
        return length >> FRACT_BITS;
 }
@@ -1079,9 +1092,9 @@ static int slow_chain_length(const struct rtable *head)
 static int rt_intern_hash(unsigned hash, struct rtable *rt,
                          struct rtable **rp, struct sk_buff *skb, int ifindex)
 {
-       struct rtable   *rth, **rthp;
+       struct rtable   *rth, *cand;
+       struct rtable __rcu **rthp, **candp;
        unsigned long   now;
-       struct rtable *cand, **candp;
        u32             min_score;
        int             chain_length;
        int attempts = !in_softirq();
@@ -1128,7 +1141,8 @@ restart:
        rthp = &rt_hash_table[hash].chain;
 
        spin_lock_bh(rt_hash_lock_addr(hash));
-       while ((rth = *rthp) != NULL) {
+       while ((rth = rcu_dereference_protected(*rthp,
+                       lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
                if (rt_is_expired(rth)) {
                        *rthp = rth->dst.rt_next;
                        rt_free(rth);
@@ -1324,12 +1338,14 @@ EXPORT_SYMBOL(__ip_select_ident);
 
 static void rt_del(unsigned hash, struct rtable *rt)
 {
-       struct rtable **rthp, *aux;
+       struct rtable __rcu **rthp;
+       struct rtable *aux;
 
        rthp = &rt_hash_table[hash].chain;
        spin_lock_bh(rt_hash_lock_addr(hash));
        ip_rt_put(rt);
-       while ((aux = *rthp) != NULL) {
+       while ((aux = rcu_dereference_protected(*rthp,
+                       lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
                if (aux == rt || rt_is_expired(aux)) {
                        *rthp = aux->dst.rt_next;
                        rt_free(aux);
@@ -1346,7 +1362,8 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 {
        int i, k;
        struct in_device *in_dev = __in_dev_get_rcu(dev);
-       struct rtable *rth, **rthp;
+       struct rtable *rth;
+       struct rtable __rcu **rthp;
        __be32  skeys[2] = { saddr, 0 };
        int  ikeys[2] = { dev->ifindex, 0 };
        struct netevent_redirect netevent;
@@ -1379,7 +1396,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
                        unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
                                                rt_genid(net));
 
-                       rthp=&rt_hash_table[hash].chain;
+                       rthp = &rt_hash_table[hash].chain;
 
                        while ((rth = rcu_dereference(*rthp)) != NULL) {
                                struct rtable *rt;
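
The route.c conversion above exercises the full menu of RCU accessor flavors: rcu_dereference_bh() inside rcu_read_lock_bh() sections, rcu_dereference_raw() where the value is only compared against NULL and never dereferenced (so no lockdep condition applies), and rcu_dereference_protected(ptr, 1) where the caller can assert no concurrent writer exists, for instance on entries already unlinked from the table. Condensed into a toy chain (demo_* names hypothetical):

#include <linux/types.h>
#include <linux/rcupdate.h>

struct demo_rt {
	struct demo_rt __rcu *next;
};

static struct demo_rt __rcu *demo_chain;

static bool demo_empty(void)
{
	/* value only tested, never dereferenced: _raw is acceptable here */
	return rcu_dereference_raw(demo_chain) == NULL;
}

static int demo_len(void)
{
	struct demo_rt *r;
	int n = 0;

	rcu_read_lock_bh();
	for (r = rcu_dereference_bh(demo_chain); r;
	     r = rcu_dereference_bh(r->next))
		n++;			/* BH-flavored read-side walk */
	rcu_read_unlock_bh();
	return n;
}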
index 9a17bd2a0a37fdae55aca5aff11324dfa31ac1ca..ac3b3ee4b07c85a32a6f4c35484741c0c3cc7976 100644 (file)
 #include <net/protocol.h>
 #include <net/xfrm.h>
 
-static struct xfrm_tunnel *tunnel4_handlers __read_mostly;
-static struct xfrm_tunnel *tunnel64_handlers __read_mostly;
+static struct xfrm_tunnel __rcu *tunnel4_handlers __read_mostly;
+static struct xfrm_tunnel __rcu *tunnel64_handlers __read_mostly;
 static DEFINE_MUTEX(tunnel4_mutex);
 
-static inline struct xfrm_tunnel **fam_handlers(unsigned short family)
+static inline struct xfrm_tunnel __rcu **fam_handlers(unsigned short family)
 {
        return (family == AF_INET) ? &tunnel4_handlers : &tunnel64_handlers;
 }
 
 int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family)
 {
-       struct xfrm_tunnel **pprev;
+       struct xfrm_tunnel __rcu **pprev;
+       struct xfrm_tunnel *t;
+
        int ret = -EEXIST;
        int priority = handler->priority;
 
        mutex_lock(&tunnel4_mutex);
 
-       for (pprev = fam_handlers(family); *pprev; pprev = &(*pprev)->next) {
-               if ((*pprev)->priority > priority)
+       for (pprev = fam_handlers(family);
+            (t = rcu_dereference_protected(*pprev,
+                       lockdep_is_held(&tunnel4_mutex))) != NULL;
+            pprev = &t->next) {
+               if (t->priority > priority)
                        break;
-               if ((*pprev)->priority == priority)
+               if (t->priority == priority)
                        goto err;
        }
 
@@ -52,13 +57,17 @@ EXPORT_SYMBOL(xfrm4_tunnel_register);
 
 int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family)
 {
-       struct xfrm_tunnel **pprev;
+       struct xfrm_tunnel __rcu **pprev;
+       struct xfrm_tunnel *t;
        int ret = -ENOENT;
 
        mutex_lock(&tunnel4_mutex);
 
-       for (pprev = fam_handlers(family); *pprev; pprev = &(*pprev)->next) {
-               if (*pprev == handler) {
+       for (pprev = fam_handlers(family);
+            (t = rcu_dereference_protected(*pprev,
+                       lockdep_is_held(&tunnel4_mutex))) != NULL;
+            pprev = &t->next) {
+               if (t == handler) {
                        *pprev = handler->next;
                        ret = 0;
                        break;
index b3f7e8cf18ac4227d298c7d4d659a7d19c50a244..28cb2d733a3cf6a9dd8e65a8dd2de0b44caae782 100644 (file)
@@ -1413,7 +1413,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
                }
        }
 
-       if (sk->sk_filter) {
+       if (rcu_dereference_raw(sk->sk_filter)) {
                if (udp_lib_checksum_complete(skb))
                        goto drop;
        }
index ec7a91d9e86553475555b9df19d22ced2dc5eb31..e048ec62d109f8f2024c113c4aa3c52d984d084c 100644 (file)
@@ -836,7 +836,7 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *i
 {
        struct inet6_dev *idev = ifp->idev;
        struct in6_addr addr, *tmpaddr;
-       unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_cstamp, tmp_tstamp;
+       unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_cstamp, tmp_tstamp, age;
        unsigned long regen_advance;
        int tmp_plen;
        int ret = 0;
@@ -886,12 +886,13 @@ retry:
                goto out;
        }
        memcpy(&addr.s6_addr[8], idev->rndid, 8);
+       age = (jiffies - ifp->tstamp) / HZ;
        tmp_valid_lft = min_t(__u32,
                              ifp->valid_lft,
-                             idev->cnf.temp_valid_lft);
+                             idev->cnf.temp_valid_lft + age);
        tmp_prefered_lft = min_t(__u32,
                                 ifp->prefered_lft,
-                                idev->cnf.temp_prefered_lft -
+                                idev->cnf.temp_prefered_lft + age -
                                 idev->cnf.max_desync_factor);
        tmp_plen = ifp->prefix_len;
        max_addresses = idev->cnf.max_addresses;
@@ -1426,8 +1427,10 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp)
 {
        struct inet6_dev *idev = ifp->idev;
 
-       if (addrconf_dad_end(ifp))
+       if (addrconf_dad_end(ifp)) {
+               in6_ifa_put(ifp);
                return;
+       }
 
        if (net_ratelimit())
                printk(KERN_INFO "%s: IPv6 duplicate address %pI6c detected!\n",
@@ -2021,10 +2024,11 @@ ok:
                                        ipv6_ifa_notify(0, ift);
                        }
 
-                       if (create && in6_dev->cnf.use_tempaddr > 0) {
+                       if ((create || list_empty(&in6_dev->tempaddr_list)) && in6_dev->cnf.use_tempaddr > 0) {
                                /*
                                 * When a new public address is created as described in [ADDRCONF],
-                                * also create a new temporary address.
+                                * also create a new temporary address. Also create a temporary
+                                * address if it's enabled but no temporary address currently exists.
                                 */
                                read_unlock_bh(&in6_dev->lock);
                                ipv6_create_tempaddr(ifp, NULL);
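
As I read the addrconf hunk above, the new age term fixes a time-base mismatch: the configured temp_valid_lft/temp_prefered_lft count from "now", but the temporary address inherits the public address's timestamp, against which all lifetimes are later evaluated. Adding the public address's age expresses the configured lifetimes on that older time base before clamping against the public lifetimes. A small sketch of just the arithmetic (hypothetical demo_* helper, not the kernel function):

#include <linux/types.h>
#include <linux/kernel.h>	/* min_t() */
#include <linux/jiffies.h>

static u32 demo_tmp_valid_lft(u32 public_valid_lft, u32 cfg_temp_valid_lft,
			      unsigned long public_tstamp)
{
	/* seconds elapsed since the public address's timestamp */
	unsigned long age = (jiffies - public_tstamp) / HZ;

	/* cfg lifetime counts from now; shift it onto the tstamp base */
	return min_t(u32, public_valid_lft, cfg_temp_valid_lft + age);
}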
index c2c0f89397b1164bacefdb449cd2b97dbe41d66c..2a59610c2a582ed885ae756729b58da00e6f324f 100644 (file)
@@ -1284,6 +1284,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                                t = netdev_priv(dev);
 
                        ip6_tnl_unlink(ip6n, t);
+                       synchronize_net();
                        err = ip6_tnl_change(t, &p);
                        ip6_tnl_link(ip6n, t);
                        netdev_state_change(dev);
@@ -1371,6 +1372,7 @@ static void ip6_tnl_dev_setup(struct net_device *dev)
        dev->flags |= IFF_NOARP;
        dev->addr_len = sizeof(struct in6_addr);
        dev->features |= NETIF_F_NETNS_LOCAL;
+       dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
 }
 
 
index 0553867a317f4466b31df3bd6d2695e180537be9..d1770e061c081de2bed4ad23932ff07291e11a90 100644 (file)
@@ -343,6 +343,10 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
                break;
 
        case IPV6_TRANSPARENT:
+               if (!capable(CAP_NET_ADMIN)) {
+                       retv = -EPERM;
+                       break;
+               }
                if (optlen < sizeof(int))
                        goto e_inval;
                /* we don't have a separate transparent bit for IPV6 we use the one in the IPv4 socket */
index 44d2eeac089b4fedc3a65c312263b667231838dc..448464844a253474fed5624f75c68c9bb33cd98e 100644 (file)
@@ -5,10 +5,15 @@
 menu "IPv6: Netfilter Configuration"
        depends on INET && IPV6 && NETFILTER
 
+config NF_DEFRAG_IPV6
+       tristate
+       default n
+
 config NF_CONNTRACK_IPV6
        tristate "IPv6 connection tracking support"
        depends on INET && IPV6 && NF_CONNTRACK
        default m if NETFILTER_ADVANCED=n
+       select NF_DEFRAG_IPV6
        ---help---
          Connection tracking keeps a record of what packets have passed
          through your machine, in order to figure out how they are related
index 3f8e4a3d83ce107bdf2280c35fac189f8a2a2838..0a432c9b079556e13d5eb58e70446cd09c72ec22 100644 (file)
@@ -12,11 +12,14 @@ obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o
 
 # objects for l3 independent conntrack
 nf_conntrack_ipv6-objs  :=  nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o
-nf_defrag_ipv6-objs := nf_defrag_ipv6_hooks.o nf_conntrack_reasm.o
 
 # l3 independent conntrack
 obj-$(CONFIG_NF_CONNTRACK_IPV6) += nf_conntrack_ipv6.o nf_defrag_ipv6.o
 
+# defrag
+nf_defrag_ipv6-objs := nf_defrag_ipv6_hooks.o nf_conntrack_reasm.o
+obj-$(CONFIG_NF_DEFRAG_IPV6) += nf_defrag_ipv6.o
+
 # matches
 obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o
 obj-$(CONFIG_IP6_NF_MATCH_EUI64) += ip6t_eui64.o
index 489d71b844ac9ba7c85d7f5612922e4ceeb712cc..3a3f129a44cb3561dd3e135164626f8352c565b7 100644 (file)
@@ -625,21 +625,24 @@ int nf_ct_frag6_init(void)
        inet_frags_init_net(&nf_init_frags);
        inet_frags_init(&nf_frags);
 
+#ifdef CONFIG_SYSCTL
        nf_ct_frag6_sysctl_header = register_sysctl_paths(nf_net_netfilter_sysctl_path,
                                                          nf_ct_frag6_sysctl_table);
        if (!nf_ct_frag6_sysctl_header) {
                inet_frags_fini(&nf_frags);
                return -ENOMEM;
        }
+#endif
 
        return 0;
 }
 
 void nf_ct_frag6_cleanup(void)
 {
+#ifdef CONFIG_SYSCTL
        unregister_sysctl_table(nf_ct_frag6_sysctl_header);
        nf_ct_frag6_sysctl_header = NULL;
-
+#endif
        inet_frags_fini(&nf_frags);
 
        nf_init_frags.low_thresh = 0;
index 9bb936ae24524362fd5748b42360b2955dc474a9..9a7978fdc02a5de453feb7c4ca1ea9c1e7652292 100644 (file)
 #include <linux/spinlock.h>
 #include <net/protocol.h>
 
-const struct inet6_protocol *inet6_protos[MAX_INET_PROTOS] __read_mostly;
+const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS] __read_mostly;
 
 int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol)
 {
        int hash = protocol & (MAX_INET_PROTOS - 1);
 
-       return !cmpxchg(&inet6_protos[hash], NULL, prot) ? 0 : -1;
+       return !cmpxchg((const struct inet6_protocol **)&inet6_protos[hash],
+                       NULL, prot) ? 0 : -1;
 }
 EXPORT_SYMBOL(inet6_add_protocol);
 
@@ -43,7 +44,8 @@ int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char protocol
 {
        int ret, hash = protocol & (MAX_INET_PROTOS - 1);
 
-       ret = (cmpxchg(&inet6_protos[hash], prot, NULL) == prot) ? 0 : -1;
+       ret = (cmpxchg((const struct inet6_protocol **)&inet6_protos[hash],
+                      prot, NULL) == prot) ? 0 : -1;
 
        synchronize_net();
 
index 45e6efb7f17120554bb963d983d74028a2001342..86c39526ba5ecb581e197d245855c9edabb53e99 100644 (file)
@@ -373,7 +373,7 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
 
 static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
 {
-       if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
+       if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
            skb_checksum_complete(skb)) {
                atomic_inc(&sk->sk_drops);
                kfree_skb(skb);
index 367a6cc584ccc40bb1c5a7fc03022bd89f304d10..d6bfaec3bbbf1a91701fd31616757af1a565bb41 100644 (file)
@@ -963,6 +963,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
                                }
                                t = netdev_priv(dev);
                                ipip6_tunnel_unlink(sitn, t);
+                               synchronize_net();
                                t->parms.iph.saddr = p.iph.saddr;
                                t->parms.iph.daddr = p.iph.daddr;
                                memcpy(dev->dev_addr, &p.iph.saddr, 4);
index d9864725d0c6a259ba8a4d35cbe08762de5e53f4..4f3cec12aa8575a860f18124f0f4ee7f83cf540d 100644 (file)
 #include <net/protocol.h>
 #include <net/xfrm.h>
 
-static struct xfrm6_tunnel *tunnel6_handlers __read_mostly;
-static struct xfrm6_tunnel *tunnel46_handlers __read_mostly;
+static struct xfrm6_tunnel __rcu *tunnel6_handlers __read_mostly;
+static struct xfrm6_tunnel __rcu *tunnel46_handlers __read_mostly;
 static DEFINE_MUTEX(tunnel6_mutex);
 
 int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family)
 {
-       struct xfrm6_tunnel **pprev;
+       struct xfrm6_tunnel __rcu **pprev;
+       struct xfrm6_tunnel *t;
        int ret = -EEXIST;
        int priority = handler->priority;
 
        mutex_lock(&tunnel6_mutex);
 
        for (pprev = (family == AF_INET6) ? &tunnel6_handlers : &tunnel46_handlers;
-            *pprev; pprev = &(*pprev)->next) {
-               if ((*pprev)->priority > priority)
+            (t = rcu_dereference_protected(*pprev,
+                       lockdep_is_held(&tunnel6_mutex))) != NULL;
+            pprev = &t->next) {
+               if (t->priority > priority)
                        break;
-               if ((*pprev)->priority == priority)
+               if (t->priority == priority)
                        goto err;
        }
 
@@ -65,14 +68,17 @@ EXPORT_SYMBOL(xfrm6_tunnel_register);
 
 int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family)
 {
-       struct xfrm6_tunnel **pprev;
+       struct xfrm6_tunnel __rcu **pprev;
+       struct xfrm6_tunnel *t;
        int ret = -ENOENT;
 
        mutex_lock(&tunnel6_mutex);
 
        for (pprev = (family == AF_INET6) ? &tunnel6_handlers : &tunnel46_handlers;
-            *pprev; pprev = &(*pprev)->next) {
-               if (*pprev == handler) {
+            (t = rcu_dereference_protected(*pprev,
+                       lockdep_is_held(&tunnel6_mutex))) != NULL;
+            pprev = &t->next) {
+               if (t == handler) {
                        *pprev = handler->next;
                        ret = 0;
                        break;
index c84dad432114ef0d885b29244bf9df0854f750e2..91def93bec85060e7571218c20439cabd4ec824f 100644 (file)
@@ -527,7 +527,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
                }
        }
 
-       if (sk->sk_filter) {
+       if (rcu_dereference_raw(sk->sk_filter)) {
                if (udp_lib_checksum_complete(skb))
                        goto drop;
        }
index 1712af1c7b3f7868146d9d5ace84937aaa87f745..c64ce0a0bb03b11c3b28d3445d2abb361fee9070 100644 (file)
@@ -111,6 +111,10 @@ struct l2tp_net {
        spinlock_t l2tp_session_hlist_lock;
 };
 
+static void l2tp_session_set_header_len(struct l2tp_session *session, int version);
+static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
+static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
+
 static inline struct l2tp_net *l2tp_pernet(struct net *net)
 {
        BUG_ON(!net);
@@ -118,6 +122,34 @@ static inline struct l2tp_net *l2tp_pernet(struct net *net)
        return net_generic(net, l2tp_net_id);
 }
 
+
+/* Tunnel reference counts. Incremented per session that is added to
+ * the tunnel.
+ */
+static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel)
+{
+       atomic_inc(&tunnel->ref_count);
+}
+
+static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel)
+{
+       if (atomic_dec_and_test(&tunnel->ref_count))
+               l2tp_tunnel_free(tunnel);
+}
+#ifdef L2TP_REFCNT_DEBUG
+#define l2tp_tunnel_inc_refcount(_t) do { \
+               printk(KERN_DEBUG "l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
+               l2tp_tunnel_inc_refcount_1(_t);                         \
+       } while (0)
+#define l2tp_tunnel_dec_refcount(_t) do { \
+               printk(KERN_DEBUG "l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
+               l2tp_tunnel_dec_refcount_1(_t);                         \
+       } while (0)
+#else
+#define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t)
+#define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t)
+#endif
+
 /* Session hash global list for L2TPv3.
  * The session_id SHOULD be random according to RFC3931, but several
  * L2TP implementations use incrementing session_ids.  So we do a real
@@ -699,8 +731,8 @@ EXPORT_SYMBOL(l2tp_recv_common);
  * Returns 1 if the packet was not a good data packet and could not be
  * forwarded.  All such packets are passed up to userspace to deal with.
  */
-int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
-                      int (*payload_hook)(struct sk_buff *skb))
+static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
+                             int (*payload_hook)(struct sk_buff *skb))
 {
        struct l2tp_session *session = NULL;
        unsigned char *ptr, *optr;
@@ -812,7 +844,6 @@ error:
 
        return 1;
 }
-EXPORT_SYMBOL_GPL(l2tp_udp_recv_core);
 
 /* UDP encapsulation receive handler. See net/ipv4/udp.c.
  * Return codes:
@@ -922,7 +953,8 @@ static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
        return bufp - optr;
 }
 
-int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, size_t data_len)
+static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
+                         size_t data_len)
 {
        struct l2tp_tunnel *tunnel = session->tunnel;
        unsigned int len = skb->len;
@@ -970,7 +1002,6 @@ int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, size_t dat
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(l2tp_xmit_core);
 
 /* Automatically called when the skb is freed.
  */
@@ -1089,7 +1120,7 @@ EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
  * The tunnel context is deleted only when all session sockets have been
  * closed.
  */
-void l2tp_tunnel_destruct(struct sock *sk)
+static void l2tp_tunnel_destruct(struct sock *sk)
 {
        struct l2tp_tunnel *tunnel;
 
@@ -1128,11 +1159,10 @@ void l2tp_tunnel_destruct(struct sock *sk)
 end:
        return;
 }
-EXPORT_SYMBOL(l2tp_tunnel_destruct);
 
 /* When the tunnel is closed, all the attached sessions need to go too.
  */
-void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
+static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
 {
        int hash;
        struct hlist_node *walk;
@@ -1193,12 +1223,11 @@ again:
        }
        write_unlock_bh(&tunnel->hlist_lock);
 }
-EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall);
 
 /* Really kill the tunnel.
  * Come here only when all sessions have been cleared from the tunnel.
  */
-void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
+static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
 {
        struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
 
@@ -1217,7 +1246,6 @@ void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
        atomic_dec(&l2tp_tunnel_count);
        kfree(tunnel);
 }
-EXPORT_SYMBOL_GPL(l2tp_tunnel_free);
 
 /* Create a socket for the tunnel, if one isn't set up by
  * userspace. This is used for static tunnels where there is no
@@ -1512,7 +1540,7 @@ EXPORT_SYMBOL_GPL(l2tp_session_delete);
 /* We come here whenever a session's send_seq, cookie_len or
  * l2specific_len parameters are set.
  */
-void l2tp_session_set_header_len(struct l2tp_session *session, int version)
+static void l2tp_session_set_header_len(struct l2tp_session *session, int version)
 {
        if (version == L2TP_HDR_VER_2) {
                session->hdr_len = 6;
@@ -1525,7 +1553,6 @@ void l2tp_session_set_header_len(struct l2tp_session *session, int version)
        }
 
 }
-EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
 
 struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
 {
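
Most of the l2tp_core.c churn above is de-exporting: helpers with no users outside the file become static, and the tunnel refcount helpers move out of the shared header. The refcount pattern itself is the classic free-on-final-put idiom; a hedged standalone sketch of it (demo_* names, kernel-module context assumed):

#include <asm/atomic.h>

struct demo_tunnel {
	atomic_t ref_count;
	/* ... */
};

static void demo_tunnel_free(struct demo_tunnel *t)
{
	/* really reclaim the tunnel; elided */
}

static inline void demo_tunnel_hold(struct demo_tunnel *t)
{
	atomic_inc(&t->ref_count);
}

static inline void demo_tunnel_put(struct demo_tunnel *t)
{
	/* true only for the caller that drops the last reference */
	if (atomic_dec_and_test(&t->ref_count))
		demo_tunnel_free(t);
}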
index f0f318edd3f19280f863e2db279663fadb02d968..a16a48e79fab09c84a6825baed089e1a35216b37 100644 (file)
@@ -231,48 +231,15 @@ extern int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_i
 extern int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
 extern struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg);
 extern int l2tp_session_delete(struct l2tp_session *session);
-extern void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
 extern void l2tp_session_free(struct l2tp_session *session);
 extern void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length, int (*payload_hook)(struct sk_buff *skb));
-extern int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, int (*payload_hook)(struct sk_buff *skb));
 extern int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb);
 
-extern int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, size_t data_len);
 extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len);
-extern void l2tp_tunnel_destruct(struct sock *sk);
-extern void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
-extern void l2tp_session_set_header_len(struct l2tp_session *session, int version);
 
 extern int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops *ops);
 extern void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
 
-/* Tunnel reference counts. Incremented per session that is added to
- * the tunnel.
- */
-static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel)
-{
-       atomic_inc(&tunnel->ref_count);
-}
-
-static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel)
-{
-       if (atomic_dec_and_test(&tunnel->ref_count))
-               l2tp_tunnel_free(tunnel);
-}
-#ifdef L2TP_REFCNT_DEBUG
-#define l2tp_tunnel_inc_refcount(_t) do { \
-               printk(KERN_DEBUG "l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
-               l2tp_tunnel_inc_refcount_1(_t);                         \
-       } while (0)
-#define l2tp_tunnel_dec_refcount(_t) do { \
-               printk(KERN_DEBUG "l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
-               l2tp_tunnel_dec_refcount_1(_t);                         \
-       } while (0)
-#else
-#define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t)
-#define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t)
-#endif
-
 /* Session reference counts. Incremented when code obtains a reference
  * to a session.
  */
index 1c770c0644d1a3111eddbac4463ecd8bd4821850..0bf6a59545ab9439f3ab7126539cd9aef419b752 100644 (file)
@@ -576,7 +576,7 @@ out:
        return copied;
 }
 
-struct proto l2tp_ip_prot = {
+static struct proto l2tp_ip_prot = {
        .name              = "L2TP/IP",
        .owner             = THIS_MODULE,
        .init              = l2tp_ip_open,
index ff60c022f51de8b4d2f7dd2e910aa6912a4e780c..239c4836a946601f27c69f965ae1a0935e71b434 100644 (file)
@@ -456,6 +456,7 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
        if (!sta)
                return NULL;
 
+       sta->last_rx = jiffies;
        set_sta_flags(sta, WLAN_STA_AUTHORIZED);
 
        /* make sure mandatory rates are always added */
index 22bc42b18991c2c61c8562f8a5d841e53ed73e37..6b322fa681f55e3a675af149cbd6cfcae7189382 100644 (file)
@@ -748,7 +748,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
                hw->queues = IEEE80211_MAX_QUEUES;
 
        local->workqueue =
-               create_singlethread_workqueue(wiphy_name(local->hw.wiphy));
+               alloc_ordered_workqueue(wiphy_name(local->hw.wiphy), 0);
        if (!local->workqueue) {
                result = -ENOMEM;
                goto fail_workqueue;
@@ -962,12 +962,6 @@ static void __exit ieee80211_exit(void)
        rc80211_minstrel_ht_exit();
        rc80211_minstrel_exit();
 
-       /*
-        * For key todo, it'll be empty by now but the work
-        * might still be scheduled.
-        */
-       flush_scheduled_work();
-
        if (mesh_allocated)
                ieee80211s_stop();
 
index 809cf230d251bccec01c32b6e60608f19ac32cf3..33f76993da081fcd73c10cad2bf1328c46d83299 100644 (file)
@@ -329,6 +329,9 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
                 * if needed.
                 */
                for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+                       /* Skip invalid rates */
+                       if (info->control.rates[i].idx < 0)
+                               break;
                        /* Rate masking supports only legacy rates for now */
                        if (info->control.rates[i].flags & IEEE80211_TX_RC_MCS)
                                continue;
index 43288259f4a145d31374a2c0d629ba3b0d16db0e..1534f2b44cafbf8d465d55229f3ee807cf5f6fb2 100644 (file)
@@ -525,6 +525,7 @@ config NETFILTER_XT_TARGET_TPROXY
        depends on NETFILTER_XTABLES
        depends on NETFILTER_ADVANCED
        select NF_DEFRAG_IPV4
+       select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
        help
          This option adds a `TPROXY' target, which is somewhat similar to
          REDIRECT.  It can only be used in the mangle table and is useful
@@ -927,6 +928,7 @@ config NETFILTER_XT_MATCH_SOCKET
        depends on NETFILTER_ADVANCED
        depends on !NF_CONNTRACK || NF_CONNTRACK
        select NF_DEFRAG_IPV4
+       select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
        help
          This option adds a `socket' match, which can be used to match
          packets for which a TCP or UDP socket lookup finds a valid socket.
index 19c482caf30b7f1587f41cd7b3c26a3d598bc084..640678f47a2ad5420a869e4fbcd63bd677297c2c 100644 (file)
@@ -21,7 +21,9 @@
 #include <linux/netfilter_ipv4/ip_tables.h>
 
 #include <net/netfilter/ipv4/nf_defrag_ipv4.h>
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#define XT_TPROXY_HAVE_IPV6 1
 #include <net/if_inet6.h>
 #include <net/addrconf.h>
 #include <linux/netfilter_ipv6/ip6_tables.h>
@@ -172,7 +174,7 @@ tproxy_tg4_v1(struct sk_buff *skb, const struct xt_action_param *par)
        return tproxy_tg4(skb, tgi->laddr.ip, tgi->lport, tgi->mark_mask, tgi->mark_value);
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#ifdef XT_TPROXY_HAVE_IPV6
 
 static inline const struct in6_addr *
 tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr,
@@ -372,7 +374,7 @@ static struct xt_target tproxy_tg_reg[] __read_mostly = {
                .hooks          = 1 << NF_INET_PRE_ROUTING,
                .me             = THIS_MODULE,
        },
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#ifdef XT_TPROXY_HAVE_IPV6
        {
                .name           = "TPROXY",
                .family         = NFPROTO_IPV6,
@@ -391,7 +393,7 @@ static struct xt_target tproxy_tg_reg[] __read_mostly = {
 static int __init tproxy_tg_init(void)
 {
        nf_defrag_ipv4_enable();
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#ifdef XT_TPROXY_HAVE_IPV6
        nf_defrag_ipv6_enable();
 #endif
 
index 2dbd4c857735abdddde389dd5154bbba0e5f2aef..d94a858dc52a1960f33fd7c1243c04a3428bb110 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/skbuff.h>
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv6/ip6_tables.h>
 #include <net/tcp.h>
 #include <net/udp.h>
 #include <net/icmp.h>
 #include <net/inet_sock.h>
 #include <net/netfilter/nf_tproxy_core.h>
 #include <net/netfilter/ipv4/nf_defrag_ipv4.h>
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#define XT_SOCKET_HAVE_IPV6 1
+#include <linux/netfilter_ipv6/ip6_tables.h>
 #include <net/netfilter/ipv6/nf_defrag_ipv6.h>
+#endif
 
 #include <linux/netfilter/xt_socket.h>
 
@@ -186,7 +190,7 @@ socket_mt4_v1(const struct sk_buff *skb, struct xt_action_param *par)
        return socket_match(skb, par, par->matchinfo);
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#ifdef XT_SOCKET_HAVE_IPV6
 
 static int
 extract_icmp6_fields(const struct sk_buff *skb,
@@ -331,7 +335,7 @@ static struct xt_match socket_mt_reg[] __read_mostly = {
                                  (1 << NF_INET_LOCAL_IN),
                .me             = THIS_MODULE,
        },
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#ifdef XT_SOCKET_HAVE_IPV6
        {
                .name           = "socket",
                .revision       = 1,
@@ -348,7 +352,7 @@ static struct xt_match socket_mt_reg[] __read_mostly = {
 static int __init socket_mt_init(void)
 {
        nf_defrag_ipv4_enable();
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#ifdef XT_SOCKET_HAVE_IPV6
        nf_defrag_ipv6_enable();
 #endif
 
index cd96ed3ccee4602a9fee20464e4a54d3fb0783b2..478181d53c555dc0d7cf9a18714aa0a08b03f3e1 100644 (file)
@@ -83,9 +83,9 @@ struct netlink_sock {
        struct module           *module;
 };
 
-struct listeners_rcu_head {
-       struct rcu_head rcu_head;
-       void *ptr;
+struct listeners {
+       struct rcu_head         rcu;
+       unsigned long           masks[0];
 };
 
 #define NETLINK_KERNEL_SOCKET  0x1
@@ -119,7 +119,7 @@ struct nl_pid_hash {
 struct netlink_table {
        struct nl_pid_hash hash;
        struct hlist_head mc_list;
-       unsigned long *listeners;
+       struct listeners __rcu *listeners;
        unsigned int nl_nonroot;
        unsigned int groups;
        struct mutex *cb_mutex;
@@ -338,7 +338,7 @@ netlink_update_listeners(struct sock *sk)
                        if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
                                mask |= nlk_sk(sk)->groups[i];
                }
-               tbl->listeners[i] = mask;
+               tbl->listeners->masks[i] = mask;
        }
        /* this function is only called with the netlink table "grabbed", which
         * makes sure updates are visible before bind or setsockopt return. */
@@ -936,7 +936,7 @@ EXPORT_SYMBOL(netlink_unicast);
 int netlink_has_listeners(struct sock *sk, unsigned int group)
 {
        int res = 0;
-       unsigned long *listeners;
+       struct listeners *listeners;
 
        BUG_ON(!netlink_is_kernel(sk));
 
@@ -944,7 +944,7 @@ int netlink_has_listeners(struct sock *sk, unsigned int group)
        listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
 
        if (group - 1 < nl_table[sk->sk_protocol].groups)
-               res = test_bit(group - 1, listeners);
+               res = test_bit(group - 1, listeners->masks);
 
        rcu_read_unlock();
 
@@ -1498,7 +1498,7 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups,
        struct socket *sock;
        struct sock *sk;
        struct netlink_sock *nlk;
-       unsigned long *listeners = NULL;
+       struct listeners *listeners = NULL;
 
        BUG_ON(!nl_table);
 
@@ -1523,8 +1523,7 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups,
        if (groups < 32)
                groups = 32;
 
-       listeners = kzalloc(NLGRPSZ(groups) + sizeof(struct listeners_rcu_head),
-                           GFP_KERNEL);
+       listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
        if (!listeners)
                goto out_sock_release;
 
@@ -1541,7 +1540,7 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups,
        netlink_table_grab();
        if (!nl_table[unit].registered) {
                nl_table[unit].groups = groups;
-               nl_table[unit].listeners = listeners;
+               rcu_assign_pointer(nl_table[unit].listeners, listeners);
                nl_table[unit].cb_mutex = cb_mutex;
                nl_table[unit].module = module;
                nl_table[unit].registered = 1;
@@ -1572,43 +1571,28 @@ netlink_kernel_release(struct sock *sk)
 EXPORT_SYMBOL(netlink_kernel_release);
 
 
-static void netlink_free_old_listeners(struct rcu_head *rcu_head)
+static void listeners_free_rcu(struct rcu_head *head)
 {
-       struct listeners_rcu_head *lrh;
-
-       lrh = container_of(rcu_head, struct listeners_rcu_head, rcu_head);
-       kfree(lrh->ptr);
+       kfree(container_of(head, struct listeners, rcu));
 }
 
 int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
 {
-       unsigned long *listeners, *old = NULL;
-       struct listeners_rcu_head *old_rcu_head;
+       struct listeners *new, *old;
        struct netlink_table *tbl = &nl_table[sk->sk_protocol];
 
        if (groups < 32)
                groups = 32;
 
        if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
-               listeners = kzalloc(NLGRPSZ(groups) +
-                                   sizeof(struct listeners_rcu_head),
-                                   GFP_ATOMIC);
-               if (!listeners)
+               new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
+               if (!new)
                        return -ENOMEM;
-               old = tbl->listeners;
-               memcpy(listeners, old, NLGRPSZ(tbl->groups));
-               rcu_assign_pointer(tbl->listeners, listeners);
-               /*
-                * Free the old memory after an RCU grace period so we
-                * don't leak it. We use call_rcu() here in order to be
-                * able to call this function from atomic contexts. The
-                * allocation of this memory will have reserved enough
-                * space for struct listeners_rcu_head at the end.
-                */
-               old_rcu_head = (void *)(tbl->listeners +
-                                       NLGRPLONGS(tbl->groups));
-               old_rcu_head->ptr = old;
-               call_rcu(&old_rcu_head->rcu_head, netlink_free_old_listeners);
+               old = rcu_dereference_raw(tbl->listeners);
+               memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
+               rcu_assign_pointer(tbl->listeners, new);
+
+               call_rcu(&old->rcu, listeners_free_rcu);
        }
        tbl->groups = groups;
 
@@ -2104,18 +2088,17 @@ static void __net_exit netlink_net_exit(struct net *net)
 
 static void __init netlink_add_usersock_entry(void)
 {
-       unsigned long *listeners;
+       struct listeners *listeners;
        int groups = 32;
 
-       listeners = kzalloc(NLGRPSZ(groups) + sizeof(struct listeners_rcu_head),
-                           GFP_KERNEL);
+       listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
        if (!listeners)
-               panic("netlink_add_usersock_entry: Cannot allocate listneres\n");
+               panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
 
        netlink_table_grab();
 
        nl_table[NETLINK_USERSOCK].groups = groups;
-       nl_table[NETLINK_USERSOCK].listeners = listeners;
+       rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
        nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
        nl_table[NETLINK_USERSOCK].registered = 1;
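
The af_netlink.c conversion above is the standard RCU pattern for a resizable bitmap: embed the rcu_head in the same allocation as a flexible array of data, publish the resized copy with rcu_assign_pointer(), and free the old copy with call_rcu() after a grace period. This removes the old trick of reserving space for a separate listeners_rcu_head behind the bitmap. A condensed kernel-style sketch of the idiom, not a complete module; bitmap_obj, table and grow are hypothetical names:

    struct bitmap_obj {
            struct rcu_head rcu;            /* freed via call_rcu() */
            unsigned long   masks[0];       /* flexible array, sized at kzalloc */
    };

    static struct bitmap_obj __rcu *table;

    static void bitmap_free_rcu(struct rcu_head *head)
    {
            kfree(container_of(head, struct bitmap_obj, rcu));
    }

    /* caller serializes updates, e.g. under netlink_table_grab() */
    static int grow(size_t old_bytes, size_t new_bytes)
    {
            struct bitmap_obj *new, *old;

            new = kzalloc(sizeof(*new) + new_bytes, GFP_ATOMIC);
            if (!new)
                    return -ENOMEM;
            old = rcu_dereference_raw(table);       /* update side */
            memcpy(new->masks, old->masks, old_bytes);
            rcu_assign_pointer(table, new);         /* publish first */
            call_rcu(&old->rcu, bitmap_free_rcu);   /* free after grace period */
            return 0;
    }

Readers do rcu_read_lock(); obj = rcu_dereference(table); test_bit(..., obj->masks); rcu_read_unlock(); and can never observe a freed bitmap, which is exactly how netlink_has_listeners() above is written.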
 
index d14bbf960c18223a5305b4b1cd33cd68ce49be76..4b9f8912526c7c379e00b0ad2e50de5095a8dd3d 100644 (file)
@@ -1167,7 +1167,7 @@ static int ignore_request(struct wiphy *wiphy,
                                return 0;
                        return -EALREADY;
                }
-               return REG_INTERSECT;
+               return 0;
        case NL80211_REGDOM_SET_BY_DRIVER:
                if (last_request->initiator == NL80211_REGDOM_SET_BY_CORE) {
                        if (regdom_changes(pending_request->alpha2))
index 51a3d381a59e3f47e89f56803538e89c5354116a..9890cf2066ffef53e0773961cd0837d42ee4b740 100644 (file)
@@ -1721,7 +1721,6 @@ printk (KERN_INFO "FKS: es_rec_set_recmask mask = %x\n", mask);
                                left  = value & 0x000000ff;
                                right = (value & 0x0000ff00) >> 8;
                        } else {                                /* Turn it off (3)  */
-                               left  = 0;
                                left  = 0;
                                right = 0;
                        }
index 82ebeb9544fe54296ab3043d9a25e34cee04f415..93fa59cc60ef9ba0f5b2315c6461d3966608994c 100644 (file)
@@ -5326,6 +5326,82 @@ again:
        return 0;
 }
 
+static int stac92hd83xxx_set_system_btl_amp(struct hda_codec *codec)
+{
+       if (codec->vendor_id != 0x111d7605 &&
+           codec->vendor_id != 0x111d76d1)
+               return 0;
+
+       switch (codec->subsystem_id) {
+       case 0x103c1618:
+       case 0x103c1619:
+       case 0x103c161a:
+       case 0x103c161b:
+       case 0x103c161c:
+       case 0x103c161d:
+       case 0x103c161e:
+       case 0x103c161f:
+       case 0x103c1620:
+       case 0x103c1621:
+       case 0x103c1622:
+       case 0x103c1623:
+
+       case 0x103c162a:
+       case 0x103c162b:
+
+       case 0x103c1630:
+       case 0x103c1631:
+
+       case 0x103c1633:
+
+       case 0x103c1635:
+
+       case 0x103c164f:
+
+       case 0x103c1676:
+       case 0x103c1677:
+       case 0x103c1678:
+       case 0x103c1679:
+       case 0x103c167a:
+       case 0x103c167b:
+       case 0x103c167c:
+       case 0x103c167d:
+       case 0x103c167e:
+       case 0x103c167f:
+       case 0x103c1680:
+       case 0x103c1681:
+       case 0x103c1682:
+       case 0x103c1683:
+       case 0x103c1684:
+       case 0x103c1685:
+       case 0x103c1686:
+       case 0x103c1687:
+       case 0x103c1688:
+       case 0x103c1689:
+       case 0x103c168a:
+       case 0x103c168b:
+       case 0x103c168c:
+       case 0x103c168d:
+       case 0x103c168e:
+       case 0x103c168f:
+       case 0x103c1690:
+       case 0x103c1691:
+       case 0x103c1692:
+
+       case 0x103c3587:
+       case 0x103c3588:
+       case 0x103c3589:
+       case 0x103c358a:
+
+       case 0x103c3667:
+       case 0x103c3668:
+               /* set BTL amp level to 13.43dB for louder speaker output */
+               return snd_hda_codec_write_cache(codec, codec->afg, 0,
+                                                0x7F4, 0x14);
+       }
+       return 0;
+}
+
 static int patch_stac92hd83xxx(struct hda_codec *codec)
 {
        struct sigmatel_spec *spec;
@@ -5452,6 +5528,8 @@ again:
                        AC_VERB_SET_CONNECT_SEL, num_dacs);
        }
 
+       stac92hd83xxx_set_system_btl_amp(codec);
+
        codec->proc_widget_hook = stac92hd_proc_hook;
 
        return 0;
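
All of the subsystem IDs in the new switch above share the HP vendor prefix 0x103c. The switch is trivially greppable and easy to extend; a sorted table with a binary search is an equally valid shape for this kind of ID list. Purely as an illustration (standalone C, abbreviated ID table, not the patch's approach):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* abbreviated ID table; bsearch() requires it to stay sorted */
    static const uint32_t btl_amp_ids[] = {
            0x103c1618, 0x103c1619, 0x103c161a, 0x103c3667, 0x103c3668,
    };

    static int cmp_u32(const void *key, const void *elt)
    {
            uint32_t a = *(const uint32_t *)key, b = *(const uint32_t *)elt;
            return (a > b) - (a < b);
    }

    static bool needs_btl_amp(uint32_t subsystem_id)
    {
            return bsearch(&subsystem_id, btl_amp_ids,
                           sizeof(btl_amp_ids) / sizeof(btl_amp_ids[0]),
                           sizeof(btl_amp_ids[0]), cmp_u32) != NULL;
    }

    int main(void)
    {
            printf("%d\n", needs_btl_amp(0x103c1619));      /* prints 1 */
            return 0;
    }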
index c53955fe17b65875e8354063e897c6501edc7b50..de799cd1ba727e7b1fa45cad05f5e7aa9d0d80d3 100644 (file)
@@ -47,7 +47,7 @@ static int ad73311_probe(struct platform_device *pdev)
                        &soc_codec_dev_ad73311, &ad73311_dai, 1);
 }
 
-static int ad73311_remove(struct platform_device *pdev)
+static int __devexit ad73311_remove(struct platform_device *pdev)
 {
        snd_soc_unregister_codec(&pdev->dev);
        return 0;
index e7a40d16df905748c74fe385618c9b41c6d5ad44..bc22ee93a75daf3c852ce65f9767df519445251c 100644 (file)
@@ -2051,7 +2051,7 @@ static int max98088_i2c_probe(struct i2c_client *i2c,
        return ret;
 }
 
-static int max98088_i2c_remove(struct i2c_client *client)
+static int __devexit max98088_i2c_remove(struct i2c_client *client)
 {
        snd_soc_unregister_codec(&client->dev);
        kfree(i2c_get_clientdata(client));
index 7a1825418ee49a47d0d4bcbbb8ce8b28d298b143..99c046ba46bb6ed637d356579a2f4af9d1841bc3 100644 (file)
@@ -665,7 +665,7 @@ static int wm9090_i2c_probe(struct i2c_client *i2c,
        return ret;
 }
 
-static int wm9090_i2c_remove(struct i2c_client *i2c)
+static int __devexit wm9090_i2c_remove(struct i2c_client *i2c)
 {
        struct wm9090_priv *wm9090 = i2c_get_clientdata(i2c);
 
index fe15bb26e48474e6f9aacb15961d62b59cea704d..25f27ec1dd6e4bba2590b869745d575c1d5b3545 100644 (file)
@@ -24,7 +24,6 @@
 #include <sound/pcm_params.h>
 #include <sound/initval.h>
 #include <sound/soc.h>
-#include <sound/soc-of-simple.h>
 
 #include "mpc5200_dma.h"
 #include "mpc5200_psc_ac97.h"
@@ -49,7 +48,7 @@ static struct snd_soc_dai_link pcm030_fabric_dai[] = {
        .codec_dai_name = "wm9712-aux",
        .cpu_dai_name = "mpc5200-psc-ac97.1",
        .platform_name = "mpc5200-pcm-audio",
-       ..codec_name = "wm9712-codec",
+       .codec_name = "wm9712-codec",
 },
 };
 
index 1febf2f23754afc63eb5469450ac8da6a97f23b3..ae4251d5abf7bc9a1b9c15dc9d5d8e947627337a 100644 (file)
@@ -62,12 +62,14 @@ struct snd_usb_substream {
        unsigned int syncinterval;  /* P for adaptive mode, 0 otherwise */
        unsigned int freqn;      /* nominal sampling rate in fs/fps in Q16.16 format */
        unsigned int freqm;      /* momentary sampling rate in fs/fps in Q16.16 format */
+       int          freqshift;  /* how much to shift the feedback value to get Q16.16 */
        unsigned int freqmax;    /* maximum sampling rate, used for buffer management */
        unsigned int phase;      /* phase accumulator */
        unsigned int maxpacksize;       /* max packet size in bytes */
        unsigned int maxframesize;      /* max packet size in frames */
        unsigned int curpacksize;       /* current packet size in bytes (for capture) */
        unsigned int curframesize;      /* current packet size in frames (for capture) */
+       unsigned int syncmaxsize;       /* sync endpoint packet size */
        unsigned int fill_max: 1;       /* fill max packet size always */
        unsigned int txfr_quirk:1;      /* allow sub-frame alignment */
        unsigned int fmt_type;          /* USB audio format type (1-3) */
index f49756c1b83709716d8a60a8341dccb7c5f10f4f..cff3a3c465d736f17d73a4cf6c01c54c936469c3 100644 (file)
@@ -237,6 +237,7 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt)
        subs->datainterval = fmt->datainterval;
        subs->syncpipe = subs->syncinterval = 0;
        subs->maxpacksize = fmt->maxpacksize;
+       subs->syncmaxsize = 0;
        subs->fill_max = 0;
 
        /* we need a sync pipe in async OUT or adaptive IN mode */
@@ -283,6 +284,7 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt)
                        subs->syncinterval = get_endpoint(alts, 1)->bInterval - 1;
                else
                        subs->syncinterval = 3;
+               subs->syncmaxsize = le16_to_cpu(get_endpoint(alts, 1)->wMaxPacketSize);
        }
 
        /* always fill max packet size */
index 3c650ab3c91de8e2edb87e3e0b03ce716b17d894..961c9a2506865b9a84cb337606e80b21705f2b04 100644 (file)
@@ -132,6 +132,11 @@ static void proc_dump_substream_status(struct snd_usb_substream *subs, struct sn
                            ? get_full_speed_hz(subs->freqm)
                            : get_high_speed_hz(subs->freqm),
                            subs->freqm >> 16, subs->freqm & 0xffff);
+               if (subs->freqshift != INT_MIN)
+                       snd_iprintf(buffer, "    Feedback Format = %d.%d\n",
+                                   (subs->syncmaxsize > 3 ? 32 : 24)
+                                               - (16 - subs->freqshift),
+                                   16 - subs->freqshift);
        } else {
                snd_iprintf(buffer, "  Status: Stop\n");
        }
index 8deeaad10f10caa4827288cc41230ce1cc6eb77e..e184349aee83f1e5a790deb43238186dab2b3355 100644 (file)
@@ -225,6 +225,7 @@ int snd_usb_init_substream_urbs(struct snd_usb_substream *subs,
        else
                subs->freqn = get_usb_high_speed_rate(rate);
        subs->freqm = subs->freqn;
+       subs->freqshift = INT_MIN;
        /* calculate max. frequency */
        if (subs->maxpacksize) {
                /* whatever fits into a max. size packet */
@@ -513,11 +514,10 @@ static int retire_paused_capture_urb(struct snd_usb_substream *subs,
 
 
 /*
- * prepare urb for full speed playback sync pipe
+ * prepare urb for playback sync pipe
  *
  * set up the offset and length to receive the current frequency.
  */
-
 static int prepare_playback_sync_urb(struct snd_usb_substream *subs,
                                     struct snd_pcm_runtime *runtime,
                                     struct urb *urb)
@@ -525,103 +525,78 @@ static int prepare_playback_sync_urb(struct snd_usb_substream *subs,
        struct snd_urb_ctx *ctx = urb->context;
 
        urb->dev = ctx->subs->dev; /* we need to set this at each time */
-       urb->iso_frame_desc[0].length = 3;
+       urb->iso_frame_desc[0].length = min(4u, ctx->subs->syncmaxsize);
        urb->iso_frame_desc[0].offset = 0;
        return 0;
 }
 
 /*
- * prepare urb for high speed playback sync pipe
+ * process after playback sync complete
  *
- * set up the offset and length to receive the current frequency.
- */
-
-static int prepare_playback_sync_urb_hs(struct snd_usb_substream *subs,
-                                       struct snd_pcm_runtime *runtime,
-                                       struct urb *urb)
-{
-       struct snd_urb_ctx *ctx = urb->context;
-
-       urb->dev = ctx->subs->dev; /* we need to set this at each time */
-       urb->iso_frame_desc[0].length = 4;
-       urb->iso_frame_desc[0].offset = 0;
-       return 0;
-}
-
-/*
- * process after full speed playback sync complete
- *
- * retrieve the current 10.14 frequency from pipe, and set it.
- * the value is referred in prepare_playback_urb().
+ * Full speed devices report feedback values in 10.14 format as samples per
+ * frame, high speed devices in 16.16 format as samples per microframe.
+ * Because the Audio Class 1 spec was written before USB 2.0, many high speed
+ * devices use the wrong interpretation, and some use an entirely different
+ * format.  Therefore, we cannot predict what format any particular device
+ * uses and must detect it automatically.
  */
 static int retire_playback_sync_urb(struct snd_usb_substream *subs,
                                    struct snd_pcm_runtime *runtime,
                                    struct urb *urb)
 {
        unsigned int f;
+       int shift;
        unsigned long flags;
 
-       if (urb->iso_frame_desc[0].status == 0 &&
-           urb->iso_frame_desc[0].actual_length == 3) {
-               f = combine_triple((u8*)urb->transfer_buffer) << 2;
-               if (f >= subs->freqn - subs->freqn / 8 && f <= subs->freqmax) {
-                       spin_lock_irqsave(&subs->lock, flags);
-                       subs->freqm = f;
-                       spin_unlock_irqrestore(&subs->lock, flags);
-               }
-       }
-
-       return 0;
-}
+       if (urb->iso_frame_desc[0].status != 0 ||
+           urb->iso_frame_desc[0].actual_length < 3)
+               return 0;
 
-/*
- * process after high speed playback sync complete
- *
- * retrieve the current 12.13 frequency from pipe, and set it.
- * the value is referred in prepare_playback_urb().
- */
-static int retire_playback_sync_urb_hs(struct snd_usb_substream *subs,
-                                      struct snd_pcm_runtime *runtime,
-                                      struct urb *urb)
-{
-       unsigned int f;
-       unsigned long flags;
+       f = le32_to_cpup(urb->transfer_buffer);
+       if (urb->iso_frame_desc[0].actual_length == 3)
+               f &= 0x00ffffff;
+       else
+               f &= 0x0fffffff;
+       if (f == 0)
+               return 0;
 
-       if (urb->iso_frame_desc[0].status == 0 &&
-           urb->iso_frame_desc[0].actual_length == 4) {
-               f = combine_quad((u8*)urb->transfer_buffer) & 0x0fffffff;
-               if (f >= subs->freqn - subs->freqn / 8 && f <= subs->freqmax) {
-                       spin_lock_irqsave(&subs->lock, flags);
-                       subs->freqm = f;
-                       spin_unlock_irqrestore(&subs->lock, flags);
+       if (unlikely(subs->freqshift == INT_MIN)) {
+               /*
+                * The first time we see a feedback value, determine its format
+                * by shifting it left or right until it matches the nominal
+                * frequency value.  This assumes that the feedback does not
+                * differ from the nominal value more than +50% or -25%.
+                */
+               shift = 0;
+               while (f < subs->freqn - subs->freqn / 4) {
+                       f <<= 1;
+                       shift++;
+               }
+               while (f > subs->freqn + subs->freqn / 2) {
+                       f >>= 1;
+                       shift--;
                }
+               subs->freqshift = shift;
        }
+       else if (subs->freqshift >= 0)
+               f <<= subs->freqshift;
+       else
+               f >>= -subs->freqshift;
 
-       return 0;
-}
-
-/*
- * process after E-Mu 0202/0404/Tracker Pre high speed playback sync complete
- *
- * These devices return the number of samples per packet instead of the number
- * of samples per microframe.
- */
-static int retire_playback_sync_urb_hs_emu(struct snd_usb_substream *subs,
-                                          struct snd_pcm_runtime *runtime,
-                                          struct urb *urb)
-{
-       unsigned int f;
-       unsigned long flags;
-
-       if (urb->iso_frame_desc[0].status == 0 &&
-           urb->iso_frame_desc[0].actual_length == 4) {
-               f = combine_quad((u8*)urb->transfer_buffer) & 0x0fffffff;
-               f >>= subs->datainterval;
-               if (f >= subs->freqn - subs->freqn / 8 && f <= subs->freqmax) {
-                       spin_lock_irqsave(&subs->lock, flags);
-                       subs->freqm = f;
-                       spin_unlock_irqrestore(&subs->lock, flags);
-               }
+       if (likely(f >= subs->freqn - subs->freqn / 8 && f <= subs->freqmax)) {
+               /*
+                * If the frequency looks valid, set it.
+                * This value is referred to in prepare_playback_urb().
+                */
+               spin_lock_irqsave(&subs->lock, flags);
+               subs->freqm = f;
+               spin_unlock_irqrestore(&subs->lock, flags);
+       } else {
+               /*
+                * Out of range; maybe the shift value is wrong.
+                * Reset it so that we autodetect again the next time.
+                */
+               subs->freqshift = INT_MIN;
        }
 
        return 0;
@@ -878,21 +853,6 @@ static struct snd_urb_ops audio_urb_ops[2] = {
        },
 };
 
-static struct snd_urb_ops audio_urb_ops_high_speed[2] = {
-       {
-               .prepare =      prepare_nodata_playback_urb,
-               .retire =       retire_playback_urb,
-               .prepare_sync = prepare_playback_sync_urb_hs,
-               .retire_sync =  retire_playback_sync_urb_hs,
-       },
-       {
-               .prepare =      prepare_capture_urb,
-               .retire =       retire_capture_urb,
-               .prepare_sync = prepare_capture_sync_urb_hs,
-               .retire_sync =  retire_capture_sync_urb,
-       },
-};
-
 /*
  * initialize the substream instance.
  */
@@ -909,23 +869,9 @@ void snd_usb_init_substream(struct snd_usb_stream *as,
        subs->direction = stream;
        subs->dev = as->chip->dev;
        subs->txfr_quirk = as->chip->txfr_quirk;
-       if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL) {
-               subs->ops = audio_urb_ops[stream];
-       } else {
-               subs->ops = audio_urb_ops_high_speed[stream];
-               switch (as->chip->usb_id) {
-               case USB_ID(0x041e, 0x3f02): /* E-Mu 0202 USB */
-               case USB_ID(0x041e, 0x3f04): /* E-Mu 0404 USB */
-               case USB_ID(0x041e, 0x3f0a): /* E-Mu Tracker Pre */
-                       subs->ops.retire_sync = retire_playback_sync_urb_hs_emu;
-                       break;
-               case USB_ID(0x0763, 0x2080): /* M-Audio Fast Track Ultra 8  */
-               case USB_ID(0x0763, 0x2081): /* M-Audio Fast Track Ultra 8R */
-                       subs->ops.prepare_sync = prepare_playback_sync_urb;
-                       subs->ops.retire_sync = retire_playback_sync_urb;
-                       break;
-               }
-       }
+       subs->ops = audio_urb_ops[stream];
+       if (snd_usb_get_speed(subs->dev) >= USB_SPEED_HIGH)
+               subs->ops.prepare_sync = prepare_capture_sync_urb_hs;
 
        snd_usb_set_pcm_ops(as->pcm, stream);
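
A worked example of the freqshift autodetection above: a full-speed device running at 48 samples per frame reports 10.14 feedback, so the raw value is 48 << 14 = 786432, while freqn holds the Q16.16 nominal 48 << 16 = 3145728. Two left shifts bring the feedback into the [freqn - freqn/4, freqn + freqn/2] window, so freqshift becomes 2, every later feedback value is shifted left by 2 before the range check, and the proc output above reports "Feedback Format = 10.14". The same loop, lifted into a standalone sketch:

    #include <stdio.h>

    /* mirrors the detection loop in retire_playback_sync_urb() above */
    static int detect_shift(unsigned int f, unsigned int freqn)
    {
            int shift = 0;

            /* assumes feedback within -25%..+50% of nominal, as the patch does */
            while (f < freqn - freqn / 4) {
                    f <<= 1;
                    shift++;
            }
            while (f > freqn + freqn / 2) {
                    f >>= 1;
                    shift--;
            }
            return shift;
    }

    int main(void)
    {
            unsigned int freqn = 48 << 16;          /* Q16.16 nominal rate */
            unsigned int fb = 48 << 14;             /* same rate in 10.14 */

            printf("freqshift = %d\n", detect_shift(fb, freqn));  /* prints 2 */
            return 0;
    }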
 
index 43e3dd284b90f73bc76d28e6fa738ff295482f41..399751befeed923eec1ed9e3d937d755f776a559 100644 (file)
@@ -15,6 +15,23 @@ DESCRIPTION
 This command displays the symbolic event types which can be selected in the
 various perf commands with the -e option.
 
+EVENT MODIFIERS
+---------------
+
+Events can optionally have a modifier, specified by appending a colon
+and one or more modifier characters.  Modifiers restrict when events are
+counted: 'u' for user-space, 'k' for kernel, 'h' for hypervisor.  For
+example, 'cycles:u' counts cycles in user-space only.
+
+The 'p' modifier specifies how precise the sampled instruction address
+should be.  It is currently only implemented for Intel PEBS and can be
+specified multiple times:
+  0 - SAMPLE_IP can have arbitrary skid
+  1 - SAMPLE_IP must have constant skid
+  2 - SAMPLE_IP requested to have 0 skid
+  3 - SAMPLE_IP must have 0 skid
+
+The current PEBS implementation supports precision up to level 2 ('pp').
+
 RAW HARDWARE EVENT DESCRIPTOR
 -----------------------------
 Even when an event is not available in a symbolic form within perf right now,
index 27d52dae5a43aa842234ba6d6596ca7ce89e9e1e..62de1b7f4e760367337042c52760e7a49ded50c7 100644 (file)
@@ -16,7 +16,9 @@ or
 or
 'perf probe' --list
 or
-'perf probe' --line='FUNC[:RLN[+NUM|:RLN2]]|SRC:ALN[+NUM|:ALN2]'
+'perf probe' [options] --line='FUNC[:RLN[+NUM|:RLN2]]|SRC:ALN[+NUM|:ALN2]'
+or
+'perf probe' [options] --vars='PROBEPOINT'
 
 DESCRIPTION
 -----------
@@ -31,6 +33,11 @@ OPTIONS
 --vmlinux=PATH::
        Specify vmlinux path which has debuginfo (Dwarf binary).
 
+-m::
+--module=MODNAME::
+       Specify module name in which perf-probe searches probe points
+       Specify the module name in which perf-probe searches for probe
+       points or lines.
 -s::
 --source=PATH::
        Specify path to kernel source.
@@ -57,6 +64,15 @@ OPTIONS
        Show source code lines which can be probed. This needs an argument
        which specifies a range of the source code. (see LINE SYNTAX for detail)
 
+-V::
+--vars=::
+       Show available local variables at the given probe point. The
+       argument syntax is the same as PROBE SYNTAX, but without ARGs.
+
+--externs::
+       (Only for --vars) Show externally defined variables in addition to local
+       variables.
+
 -f::
 --force::
        Forcibly add events with existing name.
index 3ee27dccfde97d6c58f562bd890a6df9c6026d25..a91f9f9e6e5c27f96623fd10061f12a3041926b6 100644 (file)
@@ -83,6 +83,10 @@ OPTIONS
 --call-graph::
        Do call-graph (stack chain/backtrace) recording.
 
+-q::
+--quiet::
+       Don't print any messages; useful for scripting.
+
 -v::
 --verbose::
        Be more verbose (show counter open errors, etc).
index 199d5e19554f62fcfc3d6c960c115edb6f7c0535..2e000c068cc5a377d87923bb302a383abafd3a33 100644 (file)
@@ -50,14 +50,17 @@ static struct {
        bool list_events;
        bool force_add;
        bool show_lines;
+       bool show_vars;
+       bool show_ext_vars;
+       bool mod_events;
        int nevents;
        struct perf_probe_event events[MAX_PROBES];
        struct strlist *dellist;
        struct line_range line_range;
+       const char *target_module;
        int max_probe_points;
 } params;
 
-
 /* Parse an event definition. Note that any error must die. */
 static int parse_probe_event(const char *str)
 {
@@ -92,6 +95,7 @@ static int parse_probe_event_argv(int argc, const char **argv)
        len = 0;
        for (i = 0; i < argc; i++)
                len += sprintf(&buf[len], "%s ", argv[i]);
+       params.mod_events = true;
        ret = parse_probe_event(buf);
        free(buf);
        return ret;
@@ -100,9 +104,10 @@ static int parse_probe_event_argv(int argc, const char **argv)
 static int opt_add_probe_event(const struct option *opt __used,
                              const char *str, int unset __used)
 {
-       if (str)
+       if (str) {
+               params.mod_events = true;
                return parse_probe_event(str);
-       else
+       } else
                return 0;
 }
 
@@ -110,6 +115,7 @@ static int opt_del_probe_event(const struct option *opt __used,
                               const char *str, int unset __used)
 {
        if (str) {
+               params.mod_events = true;
                if (!params.dellist)
                        params.dellist = strlist__new(true, NULL);
                strlist__add(params.dellist, str);
@@ -130,6 +136,25 @@ static int opt_show_lines(const struct option *opt __used,
 
        return ret;
 }
+
+static int opt_show_vars(const struct option *opt __used,
+                        const char *str, int unset __used)
+{
+       struct perf_probe_event *pev = &params.events[params.nevents];
+       int ret;
+
+       if (!str)
+               return 0;
+
+       ret = parse_probe_event(str);
+       if (!ret && pev->nargs != 0) {
+               pr_err("  Error: '--vars' doesn't accept arguments.\n");
+               return -EINVAL;
+       }
+       params.show_vars = true;
+
+       return ret;
+}
 #endif
 
 static const char * const probe_usage[] = {
@@ -138,7 +163,8 @@ static const char * const probe_usage[] = {
        "perf probe [<options>] --del '[GROUP:]EVENT' ...",
        "perf probe --list",
 #ifdef DWARF_SUPPORT
-       "perf probe --line 'LINEDESC'",
+       "perf probe [<options>] --line 'LINEDESC'",
+       "perf probe [<options>] --vars 'PROBEPOINT'",
 #endif
        NULL
 };
@@ -180,10 +206,17 @@ static const struct option options[] = {
        OPT_CALLBACK('L', "line", NULL,
                     "FUNC[:RLN[+NUM|-RLN2]]|SRC:ALN[+NUM|-ALN2]",
                     "Show source code lines.", opt_show_lines),
+       OPT_CALLBACK('V', "vars", NULL,
+                    "FUNC[@SRC][+OFF|%return|:RL|;PT]|SRC:AL|SRC;PT",
+                    "Show accessible variables on PROBEDEF", opt_show_vars),
+       OPT_BOOLEAN('\0', "externs", &params.show_ext_vars,
+                   "Show external variables too (with --vars only)"),
        OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
                   "file", "vmlinux pathname"),
        OPT_STRING('s', "source", &symbol_conf.source_prefix,
                   "directory", "path to kernel source"),
+       OPT_STRING('m', "module", &params.target_module,
+                  "modname", "target module name"),
 #endif
        OPT__DRY_RUN(&probe_event_dry_run),
        OPT_INTEGER('\0', "max-probes", &params.max_probe_points,
@@ -217,7 +250,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
                usage_with_options(probe_usage, options);
 
        if (params.list_events) {
-               if (params.nevents != 0 || params.dellist) {
+               if (params.mod_events) {
                        pr_err("  Error: Don't use --list with --add/--del.\n");
                        usage_with_options(probe_usage, options);
                }
@@ -225,6 +258,10 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
                        pr_err("  Error: Don't use --list with --line.\n");
                        usage_with_options(probe_usage, options);
                }
+               if (params.show_vars) {
+                       pr_err("  Error: Don't use --list with --vars.\n");
+                       usage_with_options(probe_usage, options);
+               }
                ret = show_perf_probe_events();
                if (ret < 0)
                        pr_err("  Error: Failed to show event list. (%d)\n",
@@ -234,17 +271,35 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
 
 #ifdef DWARF_SUPPORT
        if (params.show_lines) {
-               if (params.nevents != 0 || params.dellist) {
-                       pr_warning("  Error: Don't use --line with"
-                                  " --add/--del.\n");
+               if (params.mod_events) {
+                       pr_err("  Error: Don't use --line with"
+                              " --add/--del.\n");
+                       usage_with_options(probe_usage, options);
+               }
+               if (params.show_vars) {
+                       pr_err("  Error: Don't use --line with --vars.\n");
                        usage_with_options(probe_usage, options);
                }
 
-               ret = show_line_range(&params.line_range);
+               ret = show_line_range(&params.line_range, params.target_module);
                if (ret < 0)
                        pr_err("  Error: Failed to show lines. (%d)\n", ret);
                return ret;
        }
+       if (params.show_vars) {
+               if (params.mod_events) {
+                       pr_err("  Error: Don't use --vars with"
+                              " --add/--del.\n");
+                       usage_with_options(probe_usage, options);
+               }
+               ret = show_available_vars(params.events, params.nevents,
+                                         params.max_probe_points,
+                                         params.target_module,
+                                         params.show_ext_vars);
+               if (ret < 0)
+                       pr_err("  Error: Failed to show vars. (%d)\n", ret);
+               return ret;
+       }
 #endif
 
        if (params.dellist) {
@@ -258,8 +313,9 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
 
        if (params.nevents) {
                ret = add_perf_probe_events(params.events, params.nevents,
-                                           params.force_add,
-                                           params.max_probe_points);
+                                           params.max_probe_points,
+                                           params.target_module,
+                                           params.force_add);
                if (ret < 0) {
                        pr_err("  Error: Failed to add events. (%d)\n", ret);
                        return ret;
index ff77b805de71ac1b0d058a1489efd34797b9a004..4e75583ddd6d9605b96256264c4e5ca900670d3e 100644 (file)
@@ -353,7 +353,7 @@ try_again:
                }
 
                if (read(fd[nr_cpu][counter][thread_index], &read_data, sizeof(read_data)) == -1) {
-                       perror("Unable to read perf file descriptor\n");
+                       perror("Unable to read perf file descriptor");
                        exit(-1);
                }
 
@@ -626,7 +626,7 @@ static int __cmd_record(int argc, const char **argv)
 
        nr_cpus = read_cpu_map(cpu_list);
        if (nr_cpus < 1) {
-               perror("failed to collect number of CPUs\n");
+               perror("failed to collect number of CPUs");
                return -1;
        }
 
@@ -761,6 +761,9 @@ static int __cmd_record(int argc, const char **argv)
                }
        }
 
+       if (quiet)
+               return 0;
+
        fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
 
        /*
@@ -820,6 +823,7 @@ static const struct option options[] = {
                    "do call-graph (stack chain/backtrace) recording"),
        OPT_INCR('v', "verbose", &verbose,
                    "be more verbose (show counter open errors, etc)"),
+       OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
        OPT_BOOLEAN('s', "stat", &inherit_stat,
                    "per thread counts"),
        OPT_BOOLEAN('d', "data", &sample_address,
index 40a6a2992d15b286a883ebbbb0f5dd4c10d0e6c5..2f8df45c4dcbfe306080354dbfcfb073ac3ed0a0 100644 (file)
@@ -46,9 +46,6 @@ static struct scripting_ops   *scripting_ops;
 
 static void setup_scripting(void)
 {
-       /* make sure PERF_EXEC_PATH is set for scripts */
-       perf_set_argv_exec_path(perf_exec_path());
-
        setup_perl_scripting();
        setup_python_scripting();
 
@@ -285,7 +282,7 @@ static int parse_scriptname(const struct option *opt __used,
                script++;
        } else {
                script = str;
-               ext = strchr(script, '.');
+               ext = strrchr(script, '.');
                if (!ext) {
                        fprintf(stderr, "invalid script extension");
                        return -1;
@@ -593,6 +590,9 @@ int cmd_trace(int argc, const char **argv, const char *prefix __used)
                suffix = REPORT_SUFFIX;
        }
 
+       /* make sure PERF_EXEC_PATH is set for scripts */
+       perf_set_argv_exec_path(perf_exec_path());
+
        if (!suffix && argc >= 2 && strncmp(argv[1], "-", strlen("-")) != 0) {
                char *record_script_path, *report_script_path;
                int live_pipe[2];
@@ -625,12 +625,13 @@ int cmd_trace(int argc, const char **argv, const char *prefix __used)
                        dup2(live_pipe[1], 1);
                        close(live_pipe[0]);
 
-                       __argv = malloc(5 * sizeof(const char *));
+                       __argv = malloc(6 * sizeof(const char *));
                        __argv[0] = "/bin/sh";
                        __argv[1] = record_script_path;
-                       __argv[2] = "-o";
-                       __argv[3] = "-";
-                       __argv[4] = NULL;
+                       __argv[2] = "-q";
+                       __argv[3] = "-o";
+                       __argv[4] = "-";
+                       __argv[5] = NULL;
 
                        execvp("/bin/sh", (char **)__argv);
                        exit(-1);
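
The live-pipe branch above is the classic pipe/fork/dup2/exec pattern: the record script's stdout (now silenced by -q except for the trace data itself) feeds directly into the report script's stdin. A minimal standalone sketch of the plumbing, with a hypothetical echo command standing in for the record script:

    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int main(void)
    {
            int live_pipe[2];
            char buf[64];

            if (pipe(live_pipe) < 0)
                    return 1;

            if (fork() == 0) {
                    /* writer: stdout becomes the pipe's write end */
                    dup2(live_pipe[1], 1);
                    close(live_pipe[0]);
                    execlp("/bin/sh", "sh", "-c", "echo from-writer",
                           (char *)NULL);
                    exit(1);        /* reached only if exec fails */
            }

            /* reader: stdin becomes the pipe's read end */
            dup2(live_pipe[0], 0);
            close(live_pipe[1]);

            if (fgets(buf, sizeof(buf), stdin))
                    printf("reader got: %s", buf);
            return 0;
    }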
index e3a5e55d54ff6706f620e9517d1a33f4b991364c..4028d92dc4ae6602927d3c82c02f3973506d995e 100644 (file)
@@ -7,4 +7,4 @@ if [ $# -gt 0 ] ; then
        shift
     fi
 fi
-perf trace $@ -s ~/libexec/perf-core/scripts/perl/failed-syscalls.pl $comm
+perf trace $@ -s "$PERF_EXEC_PATH"/scripts/perl/failed-syscalls.pl $comm
index d83070b7eeb536d2afa455601d6e8533bbd5694a..ba25f4d41fb02a1d40303ebcdb8ba8f17ff8470d 100644 (file)
@@ -7,7 +7,7 @@ if [ $# -lt 1 ] ; then
 fi
 comm=$1
 shift
-perf trace $@ -s ~/libexec/perf-core/scripts/perl/rw-by-file.pl $comm
+perf trace $@ -s "$PERF_EXEC_PATH"/scripts/perl/rw-by-file.pl $comm
 
 
 
index 7ef46983f62f471145f924a6901b5c1e75a68572..641a3f5d085c6148e9437e10704f6953d7d1eca5 100644 (file)
@@ -1,6 +1,6 @@
 #!/bin/bash
 # description: system-wide r/w activity
-perf trace $@ -s ~/libexec/perf-core/scripts/perl/rw-by-pid.pl
+perf trace $@ -s "$PERF_EXEC_PATH"/scripts/perl/rw-by-pid.pl
 
 
 
index 93e698cd3f382e79b93ffc90db7aac8162dbc529..4918dba77021e676fdfeaf6c7d3e90105979ccc8 100644 (file)
@@ -17,7 +17,7 @@ if [ "$n_args" -gt 0 ] ; then
     interval=$1
     shift
 fi
-perf trace $@ -s ~/libexec/perf-core/scripts/perl/rwtop.pl $interval
+perf trace $@ -s "$PERF_EXEC_PATH"/scripts/perl/rwtop.pl $interval
 
 
 
index a0d898f9ca1d2c87c0a46e475483dc58101f4305..49052ebcb6326d8aa13ea309994d965ca045f58b 100644 (file)
@@ -1,6 +1,6 @@
 #!/bin/bash
 # description: system-wide min/max/avg wakeup latency
-perf trace $@ -s ~/libexec/perf-core/scripts/perl/wakeup-latency.pl
+perf trace $@ -s "$PERF_EXEC_PATH"/scripts/perl/wakeup-latency.pl
 
 
 
index 35081132ef97551f7bc352180a75037ccb927dab..df0c65f4ca93de35b07bbe9e24f5a966d8b75ea8 100644 (file)
@@ -1,6 +1,6 @@
 #!/bin/bash
 # description: workqueue stats (ins/exe/create/destroy)
-perf trace $@ -s ~/libexec/perf-core/scripts/perl/workqueue-stats.pl
+perf trace $@ -s "$PERF_EXEC_PATH"/scripts/perl/workqueue-stats.pl
 
 
 
index 9689bc0acd9f2d934050c71231c3d4635a2af7b2..13cc02b5893a7ee0a248b040eddc32cdc675a258 100644 (file)
@@ -6,6 +6,14 @@
 # Public License ("GPL") version 2 as published by the Free Software
 # Foundation.
 
+import errno, os
+
+FUTEX_WAIT = 0
+FUTEX_WAKE = 1
+FUTEX_PRIVATE_FLAG = 128
+FUTEX_CLOCK_REALTIME = 256
+FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
+
 NSECS_PER_SEC    = 1000000000
 
 def avg(total, n):
@@ -24,5 +32,55 @@ def nsecs_str(nsecs):
     str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
     return str
 
+def add_stats(dict, key, value):
+       if not dict.has_key(key):
+               dict[key] = (value, value, value, 1)
+       else:
+               min, max, avg, count = dict[key]
+               if value < min:
+                       min = value
+               if value > max:
+                       max = value
+               avg = (avg + value) / 2
+               dict[key] = (min, max, avg, count + 1)
+
 def clear_term():
     print("\x1b[H\x1b[2J")
+
+audit_package_warned = False
+
+try:
+       import audit
+       machine_to_id = {
+               'x86_64': audit.MACH_86_64,
+               'alpha' : audit.MACH_ALPHA,
+               'ia64'  : audit.MACH_IA64,
+               'ppc'   : audit.MACH_PPC,
+               'ppc64' : audit.MACH_PPC64,
+               's390'  : audit.MACH_S390,
+               's390x' : audit.MACH_S390X,
+               'i386'  : audit.MACH_X86,
+               'i586'  : audit.MACH_X86,
+               'i686'  : audit.MACH_X86,
+       }
+       try:
+               machine_to_id['armeb'] = audit.MACH_ARMEB
+       except:
+               pass
+       machine_id = machine_to_id[os.uname()[4]]
+except:
+       if not audit_package_warned:
+               audit_package_warned = True
+               print "Install the audit-libs-python package to get syscall names"
+
+def syscall_name(id):
+       try:
+               return audit.audit_syscall_to_name(id, machine_id)
+       except:
+               return str(id)
+
+def strerror(nr):
+       try:
+               return errno.errorcode[abs(nr)]
+       except:
+               return "Unknown %d errno" % nr
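
One caveat in the add_stats() helper above: avg = (avg + value) / 2 is an exponentially weighted estimate that favours the most recent samples, not the arithmetic mean, even though count is tracked alongside it. If a true running mean were wanted, the incremental update would look like this (standalone C sketch, not part of the patch):

    #include <stdio.h>

    struct stats { double min, max, avg; unsigned long count; };

    static void add_sample(struct stats *s, double value)
    {
            if (s->count == 0) {
                    s->min = s->max = s->avg = value;
            } else {
                    if (value < s->min) s->min = value;
                    if (value > s->max) s->max = value;
                    /* new_avg = old_avg + (value - old_avg) / n */
                    s->avg += (value - s->avg) / (s->count + 1);
            }
            s->count++;
    }

    int main(void)
    {
            struct stats s = { 0 };
            double samples[] = { 10, 20, 60 };
            int i;

            for (i = 0; i < 3; i++)
                    add_sample(&s, samples[i]);
            /* prints: min 10 max 60 avg 30 n 3 */
            printf("min %g max %g avg %g n %lu\n",
                   s.min, s.max, s.avg, s.count);
            return 0;
    }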
index 30293545fcc2864f0f577669ed88a40353c1f4a9..03587021463d4ef6c7d25b4d0a852178fded5a86 100644 (file)
@@ -7,4 +7,4 @@ if [ $# -gt 0 ] ; then
        shift
     fi
 fi
-perf trace $@ -s ~/libexec/perf-core/scripts/python/failed-syscalls-by-pid.py $comm
+perf trace $@ -s "$PERF_EXEC_PATH"/scripts/python/failed-syscalls-by-pid.py $comm
diff --git a/tools/perf/scripts/python/bin/futex-contention-record b/tools/perf/scripts/python/bin/futex-contention-record
new file mode 100644 (file)
index 0000000..5ecbb43
--- /dev/null
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -a -e syscalls:sys_enter_futex -e syscalls:sys_exit_futex $@
diff --git a/tools/perf/scripts/python/bin/futex-contention-report b/tools/perf/scripts/python/bin/futex-contention-report
new file mode 100644 (file)
index 0000000..c826813
--- /dev/null
@@ -0,0 +1,4 @@
+#!/bin/bash
+# description: futex contention measurement
+
+perf trace $@ -s "$PERF_EXEC_PATH"/scripts/python/futex-contention.py
index c3d0a638123d3a308212f2a09de1aeb8781634a8..4ad361b31249c03f10d424b842b92b6d9911acee 100644 (file)
@@ -2,4 +2,4 @@
 # description: display a process of packet and processing time
 # args: [tx] [rx] [dev=] [debug]
 
-perf trace -s ~/libexec/perf-core/scripts/python/netdev-times.py $@
+perf trace -s "$PERF_EXEC_PATH"/scripts/python/netdev-times.py $@
index 61d05f72e4431518127b156505ec549a481aed0f..df1791f07c24233c638e445d48ff3ab52955889c 100644 (file)
@@ -1,3 +1,3 @@
 #!/bin/bash
 # description: sched migration overview
-perf trace $@ -s ~/libexec/perf-core/scripts/python/sched-migration.py
+perf trace $@ -s "$PERF_EXEC_PATH"/scripts/python/sched-migration.py
index b01c842ae7b40aef8a77c5863991969d3c09c939..36b409c05e50ac5e6f80f7b82189aaa0d2ba096a 100644 (file)
@@ -21,4 +21,4 @@ elif [ "$n_args" -gt 0 ] ; then
     interval=$1
     shift
 fi
-perf trace $@ -s ~/libexec/perf-core/scripts/python/sctop.py $comm $interval
+perf trace $@ -s "$PERF_EXEC_PATH"/scripts/python/sctop.py $comm $interval
index 9e9d8ddd72ced9d6b652c5c130070b86ecdc1cfe..4eb88c9fc83ce7e99e14e8b004f71930b422d89a 100644 (file)
@@ -7,4 +7,4 @@ if [ $# -gt 0 ] ; then
        shift
     fi
 fi
-perf trace $@ -s ~/libexec/perf-core/scripts/python/syscall-counts-by-pid.py $comm
+perf trace $@ -s "$PERF_EXEC_PATH"/scripts/python/syscall-counts-by-pid.py $comm
index dc076b618796fa999b3024540c9c747a2da4553a..cb2f9c5cf17e825972870c5c934500672e8bd15e 100644 (file)
@@ -7,4 +7,4 @@ if [ $# -gt 0 ] ; then
        shift
     fi
 fi
-perf trace $@ -s ~/libexec/perf-core/scripts/python/syscall-counts.py $comm
+perf trace $@ -s "$PERF_EXEC_PATH"/scripts/python/syscall-counts.py $comm
index 0ca02278fe69ab7af792c4c708864ea9a1ce8138..acd7848717b35ea7c0c46ae61dc01241673f936d 100644 (file)
@@ -13,21 +13,26 @@ sys.path.append(os.environ['PERF_EXEC_PATH'] + \
 
 from perf_trace_context import *
 from Core import *
+from Util import *
 
-usage = "perf trace -s syscall-counts-by-pid.py [comm]\n";
+usage = "perf trace -s syscall-counts-by-pid.py [comm|pid]\n";
 
 for_comm = None
+for_pid = None
 
 if len(sys.argv) > 2:
        sys.exit(usage)
 
 if len(sys.argv) > 1:
-       for_comm = sys.argv[1]
+       try:
+               for_pid = int(sys.argv[1])
+       except:
+               for_comm = sys.argv[1]
 
 syscalls = autodict()
 
 def trace_begin():
-       pass
+       print "Press control+C to stop and show the summary"
 
 def trace_end():
        print_error_totals()
@@ -35,9 +40,9 @@ def trace_end():
 def raw_syscalls__sys_exit(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, ret):
-       if for_comm is not None:
-               if common_comm != for_comm:
-                       return
+       if (for_comm and common_comm != for_comm) or \
+          (for_pid  and common_pid  != for_pid ):
+               return
 
        if ret < 0:
                try:
@@ -62,7 +67,7 @@ def print_error_totals():
                    print "\n%s [%d]\n" % (comm, pid),
                    id_keys = syscalls[comm][pid].keys()
                    for id in id_keys:
-                           print "  syscall: %-16d\n" % (id),
+                           print "  syscall: %-16s\n" % syscall_name(id),
                            ret_keys = syscalls[comm][pid][id].keys()
                            for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k),  reverse = True):
-                                   print "    err = %-20d  %10d\n" % (ret, val),
+                                   print "    err = %-20s  %10d\n" % (strerror(ret), val),
diff --git a/tools/perf/scripts/python/futex-contention.py b/tools/perf/scripts/python/futex-contention.py
new file mode 100644 (file)
index 0000000..11e70a3
--- /dev/null
@@ -0,0 +1,50 @@
+# futex contention
+# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
+# Licensed under the terms of the GNU GPL License version 2
+#
+# Translation of:
+#
+# http://sourceware.org/systemtap/wiki/WSFutexContention
+#
+# to perf python scripting.
+#
+# Measures futex contention
+
+import os, sys
+sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+from Util import *
+
+process_names = {}
+thread_thislock = {}
+thread_blocktime = {}
+
+lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
+process_names = {} # long-lived pid-to-execname mapping
+
+def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
+                             nr, uaddr, op, val, utime, uaddr2, val3):
+       cmd = op & FUTEX_CMD_MASK
+       if cmd != FUTEX_WAIT:
+               return # we don't care about originators of WAKE events
+
+       process_names[tid] = comm
+       thread_thislock[tid] = uaddr
+       thread_blocktime[tid] = nsecs(s, ns)
+
+def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
+                            nr, ret):
+       if thread_blocktime.has_key(tid):
+               elapsed = nsecs(s, ns) - thread_blocktime[tid]
+               add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
+               del thread_blocktime[tid]
+               del thread_thislock[tid]
+
+def trace_begin():
+       print "Press control+C to stop and show the summary"
+
+def trace_end():
+       for (tid, lock) in lock_waits:
+               min, max, avg, count = lock_waits[tid, lock]
+               print "%s[%d] lock %x contended %d times, %d avg ns" % \
+                     (process_names[tid], tid, lock, count, avg)
+
index 6cafad40c2962930bcb027564d7967e5c91aeb62..7a6ec2c7d8abe7bf01b660210811829f37eb572d 100644 (file)
@@ -8,10 +8,7 @@
 # will be refreshed every [interval] seconds.  The default interval is
 # 3 seconds.
 
-import thread
-import time
-import os
-import sys
+import os, sys, thread, time
 
 sys.path.append(os.environ['PERF_EXEC_PATH'] + \
        '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
@@ -20,7 +17,7 @@ from perf_trace_context import *
 from Core import *
 from Util import *
 
-usage = "perf trace -s syscall-counts.py [comm] [interval]\n";
+usage = "perf trace -s sctop.py [comm] [interval]\n";
 
 for_comm = None
 default_interval = 3
@@ -71,7 +68,7 @@ def print_syscall_totals(interval):
                for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
                                              reverse = True):
                        try:
-                               print "%-40d  %10d\n" % (id, val),
+                               print "%-40s  %10d\n" % (syscall_name(id), val),
                        except TypeError:
                                pass
                syscalls.clear()
index af722d6a4b3f1c8fa1cf585a0c9608d0524a185f..d1ee3ec10cf2b911776df81df7a5d4d66a5f5fc1 100644 (file)
@@ -5,29 +5,33 @@
 # Displays system-wide system call totals, broken down by syscall.
 # If a [comm] arg is specified, only syscalls called by [comm] are displayed.
 
-import os
-import sys
+import os, sys
 
 sys.path.append(os.environ['PERF_EXEC_PATH'] + \
        '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
 
 from perf_trace_context import *
 from Core import *
+from Util import syscall_name
 
 usage = "perf trace -s syscall-counts-by-pid.py [comm]\n";
 
 for_comm = None
+for_pid = None
 
 if len(sys.argv) > 2:
        sys.exit(usage)
 
 if len(sys.argv) > 1:
-       for_comm = sys.argv[1]
+       try:
+               for_pid = int(sys.argv[1])
+       except:
+               for_comm = sys.argv[1]
 
 syscalls = autodict()
 
 def trace_begin():
-       pass
+       print "Press control+C to stop and show the summary"
 
 def trace_end():
        print_syscall_totals()
@@ -35,9 +39,10 @@ def trace_end():
 def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, args):
-       if for_comm is not None:
-               if common_comm != for_comm:
-                       return
+
+       if (for_comm and common_comm != for_comm) or \
+          (for_pid  and common_pid  != for_pid ):
+               return
        try:
                syscalls[common_comm][common_pid][id] += 1
        except TypeError:
@@ -61,4 +66,4 @@ def print_syscall_totals():
                    id_keys = syscalls[comm][pid].keys()
                    for id, val in sorted(syscalls[comm][pid].iteritems(), \
                                  key = lambda(k, v): (v, k),  reverse = True):
-                           print "  %-38d  %10d\n" % (id, val),
+                           print "  %-38s  %10d\n" % (syscall_name(id), val),
index f977e85ff04950cef3cc5550dcdca79a5d688568..ea183dc82d29e54a005f28648201a2219feac224 100644 (file)
@@ -13,6 +13,7 @@ sys.path.append(os.environ['PERF_EXEC_PATH'] + \
 
 from perf_trace_context import *
 from Core import *
+from Util import syscall_name
 
 usage = "perf trace -s syscall-counts.py [comm]\n";
 
@@ -27,7 +28,7 @@ if len(sys.argv) > 1:
 syscalls = autodict()
 
 def trace_begin():
-       pass
+       print "Press control+C to stop and show the summary"
 
 def trace_end():
        print_syscall_totals()
@@ -55,4 +56,4 @@ def print_syscall_totals():
 
     for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
                                  reverse = True):
-           print "%-40d  %10d\n" % (id, val),
+           print "%-40s  %10d\n" % (syscall_name(id), val),
index f9c7e3ad1aa715db6d562a45029a747800c20d6d..c8d81b00089d6d9d26ca431aea91453a86ef2351 100644 (file)
@@ -12,8 +12,8 @@
 #include "debug.h"
 #include "util.h"
 
-int verbose = 0;
-bool dump_trace = false;
+int verbose;
+bool dump_trace = false, quiet = false;
 
 int eprintf(int level, const char *fmt, ...)
 {
index 7a17ee061bcbd4c80ed42084e12bc61b834fa8b3..7b514082bbaff4992c31c590cd6d5b3153149f85 100644 (file)
@@ -6,7 +6,7 @@
 #include "event.h"
 
 extern int verbose;
-extern bool dump_trace;
+extern bool quiet, dump_trace;
 
 int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
 void trace_event(event_t *event);
index 78575796d5f315bff5fa933c906821e610addfff..b397c038372813506092e09646c8d008e4dcf9b9 100644 (file)
@@ -215,6 +215,16 @@ struct symbol *map_groups__find_function_by_name(struct map_groups *self,
        return map_groups__find_symbol_by_name(self, MAP__FUNCTION, name, mapp, filter);
 }
 
+static inline
+struct symbol *machine__find_kernel_function_by_name(struct machine *self,
+                                                    const char *name,
+                                                    struct map **mapp,
+                                                    symbol_filter_t filter)
+{
+       return map_groups__find_function_by_name(&self->kmaps, name, mapp,
+                                                filter);
+}
+
 int map_groups__fixup_overlappings(struct map_groups *self, struct map *map,
                                   int verbose, FILE *fp);
 
index fcc16e4349df9f3353ac03f975d9c1be9938863a..3b6a5297bf16cd5a318273bc0a9bf198734a0cfe 100644 (file)
@@ -74,10 +74,9 @@ static int e_snprintf(char *str, size_t size, const char *format, ...)
 static char *synthesize_perf_probe_point(struct perf_probe_point *pp);
 static struct machine machine;
 
-/* Initialize symbol maps and path of vmlinux */
+/* Initialize symbol maps and path of vmlinux/modules */
 static int init_vmlinux(void)
 {
-       struct dso *kernel;
        int ret;
 
        symbol_conf.sort_by_name = true;
@@ -91,33 +90,61 @@ static int init_vmlinux(void)
                goto out;
        }
 
-       ret = machine__init(&machine, "/", 0);
+       ret = machine__init(&machine, "", HOST_KERNEL_ID);
        if (ret < 0)
                goto out;
 
-       kernel = dso__new_kernel(symbol_conf.vmlinux_name);
-       if (kernel == NULL)
-               die("Failed to create kernel dso.");
-
-       ret = __machine__create_kernel_maps(&machine, kernel);
-       if (ret < 0)
-               pr_debug("Failed to create kernel maps.\n");
-
+       if (machine__create_kernel_maps(&machine) < 0) {
+               pr_debug("machine__create_kernel_maps failed.\n");
+               goto out;
+       }
 out:
        if (ret < 0)
                pr_warning("Failed to init vmlinux path.\n");
        return ret;
 }
 
+static struct symbol *__find_kernel_function_by_name(const char *name,
+                                                    struct map **mapp)
+{
+       return machine__find_kernel_function_by_name(&machine, name, mapp,
+                                                    NULL);
+}
+
+const char *kernel_get_module_path(const char *module)
+{
+       struct dso *dso;
+
+       if (module) {
+               list_for_each_entry(dso, &machine.kernel_dsos, node) {
+                       if (strncmp(dso->short_name + 1, module,
+                                   dso->short_name_len - 2) == 0)
+                               goto found;
+               }
+               pr_debug("Failed to find module %s.\n", module);
+               return NULL;
+       } else {
+               dso = machine.vmlinux_maps[MAP__FUNCTION]->dso;
+               if (dso__load_vmlinux_path(dso,
+                        machine.vmlinux_maps[MAP__FUNCTION], NULL) < 0) {
+                       pr_debug("Failed to load kernel map.\n");
+                       return NULL;
+               }
+       }
+found:
+       return dso->long_name;
+}
+
 #ifdef DWARF_SUPPORT
-static int open_vmlinux(void)
+static int open_vmlinux(const char *module)
 {
-       if (map__load(machine.vmlinux_maps[MAP__FUNCTION], NULL) < 0) {
-               pr_debug("Failed to load kernel map.\n");
-               return -EINVAL;
+       const char *path = kernel_get_module_path(module);
+       if (!path) {
+               pr_err("Failed to find path of %s module\n", module ?: "kernel");
+               return -ENOENT;
        }
-       pr_debug("Try to open %s\n", machine.vmlinux_maps[MAP__FUNCTION]->dso->long_name);
-       return open(machine.vmlinux_maps[MAP__FUNCTION]->dso->long_name, O_RDONLY);
+       pr_debug("Try to open %s\n", path);
+       return open(path, O_RDONLY);
 }
 
 /*
@@ -125,20 +152,19 @@ static int open_vmlinux(void)
  * Currently only handles kprobes.
  */
 static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp,
-                                      struct perf_probe_point *pp)
+                                       struct perf_probe_point *pp)
 {
        struct symbol *sym;
-       int fd, ret = -ENOENT;
+       struct map *map;
+       u64 addr;
+       int ret = -ENOENT;
 
-       sym = map__find_symbol_by_name(machine.vmlinux_maps[MAP__FUNCTION],
-                                      tp->symbol, NULL);
+       sym = __find_kernel_function_by_name(tp->symbol, &map);
        if (sym) {
-               fd = open_vmlinux();
-               if (fd >= 0) {
-                       ret = find_perf_probe_point(fd,
-                                                sym->start + tp->offset, pp);
-                       close(fd);
-               }
+               addr = map->unmap_ip(map, sym->start + tp->offset);
+               pr_debug("try to find %s+%lu@%llx\n", tp->symbol,
+                        tp->offset, addr);
+               ret = find_perf_probe_point((unsigned long)addr, pp);
        }
        if (ret <= 0) {
                pr_debug("Failed to find corresponding probes from "
@@ -156,12 +182,12 @@ static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp,
 /* Try to find perf_probe_event with debuginfo */
 static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
                                           struct probe_trace_event **tevs,
-                                          int max_tevs)
+                                          int max_tevs, const char *module)
 {
        bool need_dwarf = perf_probe_event_need_dwarf(pev);
        int fd, ntevs;
 
-       fd = open_vmlinux();
+       fd = open_vmlinux(module);
        if (fd < 0) {
                if (need_dwarf) {
                        pr_warning("Failed to open debuginfo file.\n");
@@ -300,7 +326,7 @@ error:
  * Show line-range always requires debuginfo to find source file and
  * line number.
  */
-int show_line_range(struct line_range *lr)
+int show_line_range(struct line_range *lr, const char *module)
 {
        int l = 1;
        struct line_node *ln;
@@ -313,7 +339,7 @@ int show_line_range(struct line_range *lr)
        if (ret < 0)
                return ret;
 
-       fd = open_vmlinux();
+       fd = open_vmlinux(module);
        if (fd < 0) {
                pr_warning("Failed to open debuginfo file.\n");
                return fd;
@@ -378,11 +404,84 @@ end:
        return ret;
 }
 
+static int show_available_vars_at(int fd, struct perf_probe_event *pev,
+                                 int max_vls, bool externs)
+{
+       char *buf;
+       int ret, i;
+       struct str_node *node;
+       struct variable_list *vls = NULL, *vl;
+
+       buf = synthesize_perf_probe_point(&pev->point);
+       if (!buf)
+               return -EINVAL;
+       pr_debug("Searching variables at %s\n", buf);
+
+       ret = find_available_vars_at(fd, pev, &vls, max_vls, externs);
+       if (ret > 0) {
+               /* Some variables were found */
+               fprintf(stdout, "Available variables at %s\n", buf);
+               for (i = 0; i < ret; i++) {
+                       vl = &vls[i];
+                       /*
+                        * A probe point might be converted to
+                        * several trace points.
+                        */
+                       fprintf(stdout, "\t@<%s+%lu>\n", vl->point.symbol,
+                               vl->point.offset);
+                       free(vl->point.symbol);
+                       if (vl->vars) {
+                               strlist__for_each(node, vl->vars)
+                                       fprintf(stdout, "\t\t%s\n", node->s);
+                               strlist__delete(vl->vars);
+                       } else
+                               fprintf(stdout, "\t\t(No variables)\n");
+               }
+               free(vls);
+       } else
+               pr_err("Failed to find variables at %s (%d)\n", buf, ret);
+
+       free(buf);
+       return ret;
+}
+
+/* Show available variables on given probe point */
+int show_available_vars(struct perf_probe_event *pevs, int npevs,
+                       int max_vls, const char *module, bool externs)
+{
+       int i, fd, ret = 0;
+
+       ret = init_vmlinux();
+       if (ret < 0)
+               return ret;
+
+       fd = open_vmlinux(module);
+       if (fd < 0) {
+               pr_warning("Failed to open debuginfo file.\n");
+               return fd;
+       }
+
+       setup_pager();
+
+       for (i = 0; i < npevs && ret >= 0; i++)
+               ret = show_available_vars_at(fd, &pevs[i], max_vls, externs);
+
+       close(fd);
+       return ret;
+}
+
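For reference, the listing produced by show_available_vars_at() has this
shape, one "@<symbol+offset>" block per trace point with one "type<TAB>name"
entry per variable; presumably this is reached through a new option in the
accompanying builtin-probe change. The function and variables below are
hypothetical:

    Available variables at schedule_timeout
            @<schedule_timeout+0>
                    long    timeout
                    unsigned long   expire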
 #else  /* !DWARF_SUPPORT */
 
 static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp,
-                                      struct perf_probe_point *pp)
+                                       struct perf_probe_point *pp)
 {
+       struct symbol *sym;
+
+       sym = __find_kernel_function_by_name(tp->symbol, NULL);
+       if (!sym) {
+               pr_err("Failed to find symbol %s in kernel.\n", tp->symbol);
+               return -ENOENT;
+       }
        pp->function = strdup(tp->symbol);
        if (pp->function == NULL)
                return -ENOMEM;
@@ -394,7 +493,7 @@ static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp,
 
 static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
                                struct probe_trace_event **tevs __unused,
-                               int max_tevs __unused)
+                               int max_tevs __unused, const char *mod __unused)
 {
        if (perf_probe_event_need_dwarf(pev)) {
                pr_warning("Debuginfo-analysis is not supported.\n");
@@ -403,12 +502,19 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
        return 0;
 }
 
-int show_line_range(struct line_range *lr __unused)
+int show_line_range(struct line_range *lr __unused, const char *module __unused)
 {
        pr_warning("Debuginfo-analysis is not supported.\n");
        return -ENOSYS;
 }
 
+int show_available_vars(struct perf_probe_event *pevs __unused,
+                       int npevs __unused, int max_vls __unused,
+                       const char *module __unused, bool externs __unused)
+{
+       pr_warning("Debuginfo-analysis is not supported.\n");
+       return -ENOSYS;
+}
 #endif
 
 int parse_line_range_desc(const char *arg, struct line_range *lr)
@@ -1087,7 +1193,7 @@ error:
 }
 
 static int convert_to_perf_probe_event(struct probe_trace_event *tev,
-                               struct perf_probe_event *pev)
+                                      struct perf_probe_event *pev)
 {
        char buf[64] = "";
        int i, ret;
@@ -1516,14 +1622,14 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
 
 static int convert_to_probe_trace_events(struct perf_probe_event *pev,
                                          struct probe_trace_event **tevs,
-                                         int max_tevs)
+                                         int max_tevs, const char *module)
 {
        struct symbol *sym;
        int ret = 0, i;
        struct probe_trace_event *tev;
 
        /* Convert perf_probe_event with debuginfo */
-       ret = try_to_find_probe_trace_events(pev, tevs, max_tevs);
+       ret = try_to_find_probe_trace_events(pev, tevs, max_tevs, module);
        if (ret != 0)
                return ret;
 
@@ -1572,8 +1678,7 @@ static int convert_to_probe_trace_events(struct perf_probe_event *pev,
        }
 
        /* Currently just checking function name from symbol map */
-       sym = map__find_symbol_by_name(machine.vmlinux_maps[MAP__FUNCTION],
-                                      tev->point.symbol, NULL);
+       sym = __find_kernel_function_by_name(tev->point.symbol, NULL);
        if (!sym) {
                pr_warning("Kernel symbol \'%s\' not found.\n",
                           tev->point.symbol);
@@ -1596,7 +1701,7 @@ struct __event_package {
 };
 
 int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
-                         bool force_add, int max_tevs)
+                         int max_tevs, const char *module, bool force_add)
 {
        int i, j, ret;
        struct __event_package *pkgs;
@@ -1617,7 +1722,9 @@ int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
                pkgs[i].pev = &pevs[i];
                /* Convert with or without debuginfo */
                ret  = convert_to_probe_trace_events(pkgs[i].pev,
-                                                     &pkgs[i].tevs, max_tevs);
+                                                    &pkgs[i].tevs,
+                                                    max_tevs,
+                                                    module);
                if (ret < 0)
                        goto end;
                pkgs[i].ntevs = ret;
index 5af39243a25bd00975d6880f4524180edebb236a..5accbedfea372b6761727c0fc1c824e237f12c72 100644 (file)
@@ -90,6 +90,12 @@ struct line_range {
        struct list_head        line_list;      /* Visible lines */
 };
 
+/* List of variables */
+struct variable_list {
+       struct probe_trace_point        point;  /* Actual probepoint */
+       struct strlist                  *vars;  /* Available variables */
+};
+
 /* Command string to events */
 extern int parse_perf_probe_command(const char *cmd,
                                    struct perf_probe_event *pev);
@@ -109,12 +115,18 @@ extern void clear_perf_probe_event(struct perf_probe_event *pev);
 /* Command string to line-range */
 extern int parse_line_range_desc(const char *cmd, struct line_range *lr);
 
+/* Internal use: Return kernel/module path */
+extern const char *kernel_get_module_path(const char *module);
 
 extern int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
-                                bool force_add, int max_probe_points);
+                                int max_probe_points, const char *module,
+                                bool force_add);
 extern int del_perf_probe_events(struct strlist *dellist);
 extern int show_perf_probe_events(void);
-extern int show_line_range(struct line_range *lr);
+extern int show_line_range(struct line_range *lr, const char *module);
+extern int show_available_vars(struct perf_probe_event *pevs, int npevs,
+                              int max_probe_points, const char *module,
+                              bool externs);
 
 
 /* Maximum index number of event-name postfix */
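A hypothetical call site, only to make the reordered parameter list above
concrete (the values are illustrative):

    ret = add_perf_probe_events(pevs, npevs,
                                128,     /* max_probe_points */
                                "ext4",  /* module, or NULL for vmlinux */
                                false);  /* force_add */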
index 32b81f707ff5eb5d950a2d233d6c00c6b90fedb2..3991d73d1cff9164cb23fb4aae3169af97abfe77 100644 (file)
@@ -116,6 +116,101 @@ static void line_list__free(struct list_head *head)
        }
 }
 
+/* Dwarf FL wrappers */
+
+static int __linux_kernel_find_elf(Dwfl_Module *mod,
+                                  void **userdata,
+                                  const char *module_name,
+                                  Dwarf_Addr base,
+                                  char **file_name, Elf **elfp)
+{
+       int fd;
+       const char *path = kernel_get_module_path(module_name);
+
+       if (path) {
+               fd = open(path, O_RDONLY);
+               if (fd >= 0) {
+                       *file_name = strdup(path);
+                       return fd;
+               }
+       }
+       /* If that failed, fall back to the standard method */
+       return dwfl_linux_kernel_find_elf(mod, userdata, module_name, base,
+                                         file_name, elfp);
+}
+
+static char *debuginfo_path;   /* Currently dummy */
+
+static const Dwfl_Callbacks offline_callbacks = {
+       .find_debuginfo = dwfl_standard_find_debuginfo,
+       .debuginfo_path = &debuginfo_path,
+
+       .section_address = dwfl_offline_section_address,
+
+       /* We use this table for core files too.  */
+       .find_elf = dwfl_build_id_find_elf,
+};
+
+static const Dwfl_Callbacks kernel_callbacks = {
+       .find_debuginfo = dwfl_standard_find_debuginfo,
+       .debuginfo_path = &debuginfo_path,
+
+       .find_elf = __linux_kernel_find_elf,
+       .section_address = dwfl_linux_kernel_module_section_address,
+};
+
+/* Get a Dwarf from offline image */
+static Dwarf *dwfl_init_offline_dwarf(int fd, Dwfl **dwflp, Dwarf_Addr *bias)
+{
+       Dwfl_Module *mod;
+       Dwarf *dbg = NULL;
+
+       if (!dwflp)
+               return NULL;
+
+       *dwflp = dwfl_begin(&offline_callbacks);
+       if (!*dwflp)
+               return NULL;
+
+       mod = dwfl_report_offline(*dwflp, "", "", fd);
+       if (!mod)
+               goto error;
+
+       dbg = dwfl_module_getdwarf(mod, bias);
+       if (!dbg) {
+error:
+               dwfl_end(*dwflp);
+               *dwflp = NULL;
+       }
+       return dbg;
+}
+
+/* Get a Dwarf from live kernel image */
+static Dwarf *dwfl_init_live_kernel_dwarf(Dwarf_Addr addr, Dwfl **dwflp,
+                                         Dwarf_Addr *bias)
+{
+       Dwarf *dbg;
+
+       if (!dwflp)
+               return NULL;
+
+       *dwflp = dwfl_begin(&kernel_callbacks);
+       if (!*dwflp)
+               return NULL;
+
+       /* Load the kernel DWARF info; we don't care about failures here */
+       dwfl_linux_kernel_report_kernel(*dwflp);
+       dwfl_linux_kernel_report_modules(*dwflp);
+
+       dbg = dwfl_addrdwarf(*dwflp, addr, bias);
+       /* Check whether we actually got a usable Dwarf handle */
+       if (!dbg) {
+               dwfl_end(*dwflp);
+               *dwflp = NULL;
+       }
+       return dbg;
+}
+
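A minimal usage sketch of the offline helper above (error handling elided);
note that dwfl_end() tears down the reported module together with its Dwarf
handle, so dbg must not be used afterwards:

    Dwfl *dwfl;
    Dwarf_Addr bias;
    Dwarf *dbg = dwfl_init_offline_dwarf(fd, &dwfl, &bias);

    if (dbg) {
            /* ... walk compilation units with dwarf_nextcu(dbg, ...) ... */
            dwfl_end(dwfl);         /* also releases dbg */
    }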
 /* Dwarf wrappers */
 
 /* Find the realpath of the target file. */
@@ -160,26 +255,44 @@ static bool die_compare_name(Dwarf_Die *dw_die, const char *tname)
        return name ? (strcmp(tname, name) == 0) : false;
 }
 
-/* Get type die, but skip qualifiers and typedef */
-static Dwarf_Die *die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
+/* Get type die */
+static Dwarf_Die *die_get_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
 {
        Dwarf_Attribute attr;
+
+       if (dwarf_attr_integrate(vr_die, DW_AT_type, &attr) &&
+           dwarf_formref_die(&attr, die_mem))
+               return die_mem;
+       else
+               return NULL;
+}
+
+/* Get a type die, but skip qualifiers */
+static Dwarf_Die *__die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
+{
        int tag;
 
        do {
-               if (dwarf_attr(vr_die, DW_AT_type, &attr) == NULL ||
-                   dwarf_formref_die(&attr, die_mem) == NULL)
-                       return NULL;
-
-               tag = dwarf_tag(die_mem);
-               vr_die = die_mem;
+               vr_die = die_get_type(vr_die, die_mem);
+               if (!vr_die)
+                       break;
+               tag = dwarf_tag(vr_die);
        } while (tag == DW_TAG_const_type ||
                 tag == DW_TAG_restrict_type ||
                 tag == DW_TAG_volatile_type ||
-                tag == DW_TAG_shared_type ||
-                tag == DW_TAG_typedef);
+                tag == DW_TAG_shared_type);
+
+       return vr_die;
+}
 
-       return die_mem;
+/* Get a type die, but skip qualifiers and typedef */
+static Dwarf_Die *die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
+{
+       do {
+               vr_die = __die_get_real_type(vr_die, die_mem);
+       } while (vr_die && dwarf_tag(vr_die) == DW_TAG_typedef);
+
+       return vr_die;
 }
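+
The three helpers peel progressively more wrapping; for a hypothetical
declaration the results would be:

    /*
     *   typedef volatile long vlong_t;
     *   const vlong_t v;
     *
     * die_get_type(v)          -> the DW_TAG_const_type DIE
     * __die_get_real_type(v)   -> the DW_TAG_typedef DIE (qualifiers peeled)
     * die_get_real_type(v)     -> the base type "long" (typedefs peeled too)
     */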
 
 static bool die_is_signed_type(Dwarf_Die *tp_die)
@@ -320,25 +433,35 @@ static Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
        return die_find_child(sp_die, __die_find_inline_cb, &addr, die_mem);
 }
 
+struct __find_variable_param {
+       const char *name;
+       Dwarf_Addr addr;
+};
+
 static int __die_find_variable_cb(Dwarf_Die *die_mem, void *data)
 {
-       const char *name = data;
+       struct __find_variable_param *fvp = data;
        int tag;
 
        tag = dwarf_tag(die_mem);
        if ((tag == DW_TAG_formal_parameter ||
             tag == DW_TAG_variable) &&
-           die_compare_name(die_mem, name))
+           die_compare_name(die_mem, fvp->name))
                return DIE_FIND_CB_FOUND;
 
-       return DIE_FIND_CB_CONTINUE;
+       if (dwarf_haspc(die_mem, fvp->addr))
+               return DIE_FIND_CB_CONTINUE;
+       else
+               return DIE_FIND_CB_SIBLING;
 }
 
-/* Find a variable called 'name' */
-static Dwarf_Die *die_find_variable(Dwarf_Die *sp_die, const char *name,
-                                   Dwarf_Die *die_mem)
+/* Find a variable called 'name' at given address */
+static Dwarf_Die *die_find_variable_at(Dwarf_Die *sp_die, const char *name,
+                                      Dwarf_Addr addr, Dwarf_Die *die_mem)
 {
-       return die_find_child(sp_die, __die_find_variable_cb, (void *)name,
+       struct __find_variable_param fvp = { .name = name, .addr = addr};
+
+       return die_find_child(sp_die, __die_find_variable_cb, (void *)&fvp,
                              die_mem);
 }
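The callback's return values drive die_find_child()'s walk, pruning scopes
that cannot contain the probe address; assuming dwarf_haspc() reports whether
a DIE's PC range covers fvp->addr, the intended traversal is:

    /*
     * matching variable/parameter DIE       -> DIE_FIND_CB_FOUND: stop
     * scope DIE covering fvp->addr          -> DIE_FIND_CB_CONTINUE: descend
     * scope DIE elsewhere in the function   -> DIE_FIND_CB_SIBLING: skip it
     */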
 
@@ -361,6 +484,60 @@ static Dwarf_Die *die_find_member(Dwarf_Die *st_die, const char *name,
                              die_mem);
 }
 
+/* Get the type name of given variable DIE */
+static int die_get_typename(Dwarf_Die *vr_die, char *buf, int len)
+{
+       Dwarf_Die type;
+       int tag, ret, ret2;
+       const char *tmp = "";
+
+       if (__die_get_real_type(vr_die, &type) == NULL)
+               return -ENOENT;
+
+       tag = dwarf_tag(&type);
+       if (tag == DW_TAG_array_type || tag == DW_TAG_pointer_type)
+               tmp = "*";
+       else if (tag == DW_TAG_subroutine_type) {
+               /* Function pointer */
+               ret = snprintf(buf, len, "(function_type)");
+               return (ret >= len) ? -E2BIG : ret;
+       } else {
+               if (!dwarf_diename(&type))
+                       return -ENOENT;
+               if (tag == DW_TAG_union_type)
+                       tmp = "union ";
+               else if (tag == DW_TAG_structure_type)
+                       tmp = "struct ";
+               /* Write a base name */
+               ret = snprintf(buf, len, "%s%s", tmp, dwarf_diename(&type));
+               return (ret >= len) ? -E2BIG : ret;
+       }
+       ret = die_get_typename(&type, buf, len);
+       if (ret > 0) {
+               ret2 = snprintf(buf + ret, len - ret, "%s", tmp);
+               ret = (ret2 >= len - ret) ? -E2BIG : ret2 + ret;
+       }
+       return ret;
+}
+
+/* Get the name and type of given variable DIE, stored as "type\tname" */
+static int die_get_varname(Dwarf_Die *vr_die, char *buf, int len)
+{
+       int ret, ret2;
+
+       ret = die_get_typename(vr_die, buf, len);
+       if (ret < 0) {
+               pr_debug("Failed to get the type, marking it as unknown.\n");
+               ret = snprintf(buf, len, "(unknown_type)");
+       }
+       if (ret > 0) {
+               ret2 = snprintf(buf + ret, len - ret, "\t%s",
+                               dwarf_diename(vr_die));
+               ret = (ret2 >= len - ret) ? -E2BIG : ret2 + ret;
+       }
+       return ret;
+}
+
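Strings produced by die_get_varname() take the form "type\tname"; for some
hypothetical variable DIEs:

    /*
     *   int x;                  -> "int\tx"
     *   struct task_struct *t;  -> "struct task_struct*\tt"
     *   void (*fn)(void);       -> "(function_type)*\tfn"
     *   (type lookup failed)    -> "(unknown_type)\tname"
     */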
 /*
  * Probe finder related functions
  */
@@ -374,8 +551,13 @@ static struct probe_trace_arg_ref *alloc_trace_arg_ref(long offs)
        return ref;
 }
 
-/* Show a location */
-static int convert_variable_location(Dwarf_Die *vr_die, struct probe_finder *pf)
+/*
+ * Convert a variable location into a trace_arg.
+ * If tvar == NULL, this only checks whether the variable can be converted.
+ */
+static int convert_variable_location(Dwarf_Die *vr_die, Dwarf_Addr addr,
+                                    Dwarf_Op *fb_ops,
+                                    struct probe_trace_arg *tvar)
 {
        Dwarf_Attribute attr;
        Dwarf_Op *op;
@@ -384,20 +566,23 @@ static int convert_variable_location(Dwarf_Die *vr_die, struct probe_finder *pf)
        Dwarf_Word offs = 0;
        bool ref = false;
        const char *regs;
-       struct probe_trace_arg *tvar = pf->tvar;
        int ret;
 
+       if (dwarf_attr(vr_die, DW_AT_external, &attr) != NULL)
+               goto static_var;
+
        /* TODO: handle more than 1 exprs */
        if (dwarf_attr(vr_die, DW_AT_location, &attr) == NULL ||
-           dwarf_getlocation_addr(&attr, pf->addr, &op, &nops, 1) <= 0 ||
+           dwarf_getlocation_addr(&attr, addr, &op, &nops, 1) <= 0 ||
            nops == 0) {
                /* TODO: Support const_value */
-               pr_err("Failed to find the location of %s at this address.\n"
-                      " Perhaps, it has been optimized out.\n", pf->pvar->var);
                return -ENOENT;
        }
 
        if (op->atom == DW_OP_addr) {
+static_var:
+               if (!tvar)
+                       return 0;
                /* Static variables on memory (not stack), make @varname */
                ret = strlen(dwarf_diename(vr_die));
                tvar->value = zalloc(ret + 2);
@@ -412,14 +597,11 @@ static int convert_variable_location(Dwarf_Die *vr_die, struct probe_finder *pf)
 
        /* If this is based on frame buffer, set the offset */
        if (op->atom == DW_OP_fbreg) {
-               if (pf->fb_ops == NULL) {
-                       pr_warning("The attribute of frame base is not "
-                                  "supported.\n");
+               if (fb_ops == NULL)
                        return -ENOTSUP;
-               }
                ref = true;
                offs = op->number;
-               op = &pf->fb_ops[0];
+               op = &fb_ops[0];
        }
 
        if (op->atom >= DW_OP_breg0 && op->atom <= DW_OP_breg31) {
@@ -435,13 +617,18 @@ static int convert_variable_location(Dwarf_Die *vr_die, struct probe_finder *pf)
        } else if (op->atom == DW_OP_regx) {
                regn = op->number;
        } else {
-               pr_warning("DW_OP %x is not supported.\n", op->atom);
+               pr_debug("DW_OP %x is not supported.\n", op->atom);
                return -ENOTSUP;
        }
 
+       if (!tvar)
+               return 0;
+
        regs = get_arch_regstr(regn);
        if (!regs) {
-               pr_warning("Mapping for DWARF register number %u missing on this architecture.", regn);
+               /* This is likely a bug in the DWARF info or in this tool */
+               pr_warning("Mapping for DWARF register number %u "
+                          "missing on this architecture.\n", regn);
                return -ERANGE;
        }
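Passing tvar == NULL turns convert_variable_location() into a cheap
convertibility check, which collect_variables_cb() below relies on. An
illustrative two-phase use (not code from this patch):

    /* check-only pass: validates the DWARF location expression */
    if (convert_variable_location(vr_die, addr, fb_ops, NULL) == 0) {
            /* supported; now actually produce the trace argument */
            ret = convert_variable_location(vr_die, addr, fb_ops, tvar);
    }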
 
@@ -666,8 +853,14 @@ static int convert_variable(Dwarf_Die *vr_die, struct probe_finder *pf)
        pr_debug("Converting variable %s into trace event.\n",
                 dwarf_diename(vr_die));
 
-       ret = convert_variable_location(vr_die, pf);
-       if (ret == 0 && pf->pvar->field) {
+       ret = convert_variable_location(vr_die, pf->addr, pf->fb_ops,
+                                       pf->tvar);
+       if (ret == -ENOENT)
+               pr_err("Failed to find the location of %s at this address.\n"
+                      " Perhaps it has been optimized out.\n", pf->pvar->var);
+       else if (ret == -ENOTSUP)
+               pr_err("Sorry, we don't support this variable location yet.\n");
+       else if (pf->pvar->field) {
                ret = convert_variable_fields(vr_die, pf->pvar->var,
                                              pf->pvar->field, &pf->tvar->ref,
                                              &die_mem);
@@ -722,56 +915,39 @@ static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf)
        pr_debug("Searching '%s' variable in context.\n",
                 pf->pvar->var);
        /* Search child die for local variables and parameters. */
-       if (die_find_variable(sp_die, pf->pvar->var, &vr_die))
+       if (die_find_variable_at(sp_die, pf->pvar->var, pf->addr, &vr_die))
                ret = convert_variable(&vr_die, pf);
        else {
                /* Search upper class */
                nscopes = dwarf_getscopes_die(sp_die, &scopes);
-               if (nscopes > 0) {
-                       ret = dwarf_getscopevar(scopes, nscopes, pf->pvar->var,
-                                               0, NULL, 0, 0, &vr_die);
-                       if (ret >= 0)
+               while (nscopes-- > 1) {
+                       pr_debug("Searching variables in %s\n",
+                                dwarf_diename(&scopes[nscopes]));
+                       /* We should check this scope, so pass a dummy address */
+                       if (die_find_variable_at(&scopes[nscopes],
+                                                pf->pvar->var, 0,
+                                                &vr_die)) {
                                ret = convert_variable(&vr_die, pf);
-                       else
-                               ret = -ENOENT;
+                               goto found;
+                       }
+               }
+               if (scopes)
                        free(scopes);
-               } else
-                       ret = -ENOENT;
+               ret = -ENOENT;
        }
+found:
        if (ret < 0)
                pr_warning("Failed to find '%s' in this function.\n",
                           pf->pvar->var);
        return ret;
 }
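dwarf_getscopes_die() returns the scope chain innermost first, so the loop
above deliberately skips index 0:

    /*
     * scopes[0]      = sp_die itself (already searched above)
     * scopes[1..n-1] = enclosing scopes, outermost (the CU, where
     *                  globals live) at the end
     * hence: while (nscopes-- > 1) walks n-1 .. 1 and never revisits 0
     */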
 
-/* Show a probe point to output buffer */
-static int convert_probe_point(Dwarf_Die *sp_die, struct probe_finder *pf)
+/* Convert subprogram DIE to trace point */
+static int convert_to_trace_point(Dwarf_Die *sp_die, Dwarf_Addr paddr,
+                                 bool retprobe, struct probe_trace_point *tp)
 {
-       struct probe_trace_event *tev;
        Dwarf_Addr eaddr;
-       Dwarf_Die die_mem;
        const char *name;
-       int ret, i;
-       Dwarf_Attribute fb_attr;
-       size_t nops;
-
-       if (pf->ntevs == pf->max_tevs) {
-               pr_warning("Too many( > %d) probe point found.\n",
-                          pf->max_tevs);
-               return -ERANGE;
-       }
-       tev = &pf->tevs[pf->ntevs++];
-
-       /* If no real subprogram, find a real one */
-       if (!sp_die || dwarf_tag(sp_die) != DW_TAG_subprogram) {
-               sp_die = die_find_real_subprogram(&pf->cu_die,
-                                                pf->addr, &die_mem);
-               if (!sp_die) {
-                       pr_warning("Failed to find probe point in any "
-                                  "functions.\n");
-                       return -ENOENT;
-               }
-       }
 
        /* Copy the name of probe point */
        name = dwarf_diename(sp_die);
@@ -781,26 +957,45 @@ static int convert_probe_point(Dwarf_Die *sp_die, struct probe_finder *pf)
                                   dwarf_diename(sp_die));
                        return -ENOENT;
                }
-               tev->point.symbol = strdup(name);
-               if (tev->point.symbol == NULL)
+               tp->symbol = strdup(name);
+               if (tp->symbol == NULL)
                        return -ENOMEM;
-               tev->point.offset = (unsigned long)(pf->addr - eaddr);
+               tp->offset = (unsigned long)(paddr - eaddr);
        } else
                /* This function has no name. */
-               tev->point.offset = (unsigned long)pf->addr;
+               tp->offset = (unsigned long)paddr;
 
        /* Return probe must be on the head of a subprogram */
-       if (pf->pev->point.retprobe) {
-               if (tev->point.offset != 0) {
+       if (retprobe) {
+               if (eaddr != paddr) {
                        pr_warning("Return probe must be on the head of"
                                   " a real function\n");
                        return -EINVAL;
                }
-               tev->point.retprobe = true;
+               tp->retprobe = true;
        }
 
-       pr_debug("Probe point found: %s+%lu\n", tev->point.symbol,
-                tev->point.offset);
+       return 0;
+}
+
+/* Call probe_finder callback with real subprogram DIE */
+static int call_probe_finder(Dwarf_Die *sp_die, struct probe_finder *pf)
+{
+       Dwarf_Die die_mem;
+       Dwarf_Attribute fb_attr;
+       size_t nops;
+       int ret;
+
+       /* If no real subprogram, find a real one */
+       if (!sp_die || dwarf_tag(sp_die) != DW_TAG_subprogram) {
+               sp_die = die_find_real_subprogram(&pf->cu_die,
+                                                 pf->addr, &die_mem);
+               if (!sp_die) {
+                       pr_warning("Failed to find probe point in any "
+                                  "functions.\n");
+                       return -ENOENT;
+               }
+       }
 
        /* Get the frame base attribute/ops */
        dwarf_attr(sp_die, DW_AT_frame_base, &fb_attr);
@@ -820,22 +1015,13 @@ static int convert_probe_point(Dwarf_Die *sp_die, struct probe_finder *pf)
 #endif
        }
 
-       /* Find each argument */
-       tev->nargs = pf->pev->nargs;
-       tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs);
-       if (tev->args == NULL)
-               return -ENOMEM;
-       for (i = 0; i < pf->pev->nargs; i++) {
-               pf->pvar = &pf->pev->args[i];
-               pf->tvar = &tev->args[i];
-               ret = find_variable(sp_die, pf);
-               if (ret != 0)
-                       return ret;
-       }
+       /* Call finder's callback handler */
+       ret = pf->callback(sp_die, pf);
 
        /* *pf->fb_ops will be cached in libdw. Don't free it. */
        pf->fb_ops = NULL;
-       return 0;
+
+       return ret;
 }
 
 /* Find probe point from its line number */
@@ -871,7 +1057,7 @@ static int find_probe_point_by_line(struct probe_finder *pf)
                         (int)i, lineno, (uintmax_t)addr);
                pf->addr = addr;
 
-               ret = convert_probe_point(NULL, pf);
+               ret = call_probe_finder(NULL, pf);
                /* Continuing, because target line might be inlined. */
        }
        return ret;
@@ -984,7 +1170,7 @@ static int find_probe_point_lazy(Dwarf_Die *sp_die, struct probe_finder *pf)
                         (int)i, lineno, (unsigned long long)addr);
                pf->addr = addr;
 
-               ret = convert_probe_point(sp_die, pf);
+               ret = call_probe_finder(sp_die, pf);
                /* Continuing, because target line might be inlined. */
        }
        /* TODO: deallocate lines, but how? */
@@ -1019,7 +1205,7 @@ static int probe_point_inline_cb(Dwarf_Die *in_die, void *data)
                pr_debug("found inline addr: 0x%jx\n",
                         (uintmax_t)pf->addr);
 
-               param->retval = convert_probe_point(in_die, pf);
+               param->retval = call_probe_finder(in_die, pf);
                if (param->retval < 0)
                        return DWARF_CB_ABORT;
        }
@@ -1057,7 +1243,7 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
                        }
                        pf->addr += pp->offset;
                        /* TODO: Check the address in this function */
-                       param->retval = convert_probe_point(sp_die, pf);
+                       param->retval = call_probe_finder(sp_die, pf);
                }
        } else {
                struct dwarf_callback_param _param = {.data = (void *)pf,
@@ -1079,90 +1265,276 @@ static int find_probe_point_by_func(struct probe_finder *pf)
        return _param.retval;
 }
 
-/* Find probe_trace_events specified by perf_probe_event from debuginfo */
-int find_probe_trace_events(int fd, struct perf_probe_event *pev,
-                            struct probe_trace_event **tevs, int max_tevs)
+/* Find probe points from debuginfo */
+static int find_probes(int fd, struct probe_finder *pf)
 {
-       struct probe_finder pf = {.pev = pev, .max_tevs = max_tevs};
-       struct perf_probe_point *pp = &pev->point;
+       struct perf_probe_point *pp = &pf->pev->point;
        Dwarf_Off off, noff;
        size_t cuhl;
        Dwarf_Die *diep;
-       Dwarf *dbg;
+       Dwarf *dbg = NULL;
+       Dwfl *dwfl;
+       Dwarf_Addr bias;        /* Currently ignored */
        int ret = 0;
 
-       pf.tevs = zalloc(sizeof(struct probe_trace_event) * max_tevs);
-       if (pf.tevs == NULL)
-               return -ENOMEM;
-       *tevs = pf.tevs;
-       pf.ntevs = 0;
-
-       dbg = dwarf_begin(fd, DWARF_C_READ);
+       dbg = dwfl_init_offline_dwarf(fd, &dwfl, &bias);
        if (!dbg) {
                pr_warning("No dwarf info found in the vmlinux - "
                        "please rebuild with CONFIG_DEBUG_INFO=y.\n");
-               free(pf.tevs);
-               *tevs = NULL;
                return -EBADF;
        }
 
 #if _ELFUTILS_PREREQ(0, 142)
        /* Get the call frame information from this dwarf */
-       pf.cfi = dwarf_getcfi(dbg);
+       pf->cfi = dwarf_getcfi(dbg);
 #endif
 
        off = 0;
-       line_list__init(&pf.lcache);
+       line_list__init(&pf->lcache);
        /* Loop on CUs (Compilation Unit) */
        while (!dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL) &&
               ret >= 0) {
                /* Get the DIE(Debugging Information Entry) of this CU */
-               diep = dwarf_offdie(dbg, off + cuhl, &pf.cu_die);
+               diep = dwarf_offdie(dbg, off + cuhl, &pf->cu_die);
                if (!diep)
                        continue;
 
                /* Check if target file is included. */
                if (pp->file)
-                       pf.fname = cu_find_realpath(&pf.cu_die, pp->file);
+                       pf->fname = cu_find_realpath(&pf->cu_die, pp->file);
                else
-                       pf.fname = NULL;
+                       pf->fname = NULL;
 
-               if (!pp->file || pf.fname) {
+               if (!pp->file || pf->fname) {
                        if (pp->function)
-                               ret = find_probe_point_by_func(&pf);
+                               ret = find_probe_point_by_func(pf);
                        else if (pp->lazy_line)
-                               ret = find_probe_point_lazy(NULL, &pf);
+                               ret = find_probe_point_lazy(NULL, pf);
                        else {
-                               pf.lno = pp->line;
-                               ret = find_probe_point_by_line(&pf);
+                               pf->lno = pp->line;
+                               ret = find_probe_point_by_line(pf);
                        }
                }
                off = noff;
        }
-       line_list__free(&pf.lcache);
-       dwarf_end(dbg);
+       line_list__free(&pf->lcache);
+       if (dwfl)
+               dwfl_end(dwfl);
 
-       return (ret < 0) ? ret : pf.ntevs;
+       return ret;
+}
+
+/* Add a found probe point into trace event list */
+static int add_probe_trace_event(Dwarf_Die *sp_die, struct probe_finder *pf)
+{
+       struct trace_event_finder *tf =
+                       container_of(pf, struct trace_event_finder, pf);
+       struct probe_trace_event *tev;
+       int ret, i;
+
+       /* Check number of tevs */
+       if (tf->ntevs == tf->max_tevs) {
+               pr_warning("Too many (> %d) probe points found.\n",
+                          tf->max_tevs);
+               return -ERANGE;
+       }
+       tev = &tf->tevs[tf->ntevs++];
+
+       ret = convert_to_trace_point(sp_die, pf->addr, pf->pev->point.retprobe,
+                                    &tev->point);
+       if (ret < 0)
+               return ret;
+
+       pr_debug("Probe point found: %s+%lu\n", tev->point.symbol,
+                tev->point.offset);
+
+       /* Find each argument */
+       tev->nargs = pf->pev->nargs;
+       tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs);
+       if (tev->args == NULL)
+               return -ENOMEM;
+       for (i = 0; i < pf->pev->nargs; i++) {
+               pf->pvar = &pf->pev->args[i];
+               pf->tvar = &tev->args[i];
+               ret = find_variable(sp_die, pf);
+               if (ret != 0)
+                       return ret;
+       }
+
+       return 0;
+}
+
+/* Find probe_trace_events specified by perf_probe_event from debuginfo */
+int find_probe_trace_events(int fd, struct perf_probe_event *pev,
+                           struct probe_trace_event **tevs, int max_tevs)
+{
+       struct trace_event_finder tf = {
+                       .pf = {.pev = pev, .callback = add_probe_trace_event},
+                       .max_tevs = max_tevs};
+       int ret;
+
+       /* Allocate result tevs array */
+       *tevs = zalloc(sizeof(struct probe_trace_event) * max_tevs);
+       if (*tevs == NULL)
+               return -ENOMEM;
+
+       tf.tevs = *tevs;
+       tf.ntevs = 0;
+
+       ret = find_probes(fd, &tf.pf);
+       if (ret < 0) {
+               free(*tevs);
+               *tevs = NULL;
+               return ret;
+       }
+
+       return tf.ntevs;
+}
+
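A hypothetical caller of the reworked entry point; note the error contract
established above, where *tevs is freed and NULLed before returning:

    struct probe_trace_event *tevs;
    int ntevs = find_probe_trace_events(fd, pev, &tevs, max_tevs);

    if (ntevs < 0)
            return ntevs;   /* tevs already freed and set to NULL */
    /* ... use tevs[0..ntevs-1] ... */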
+#define MAX_VAR_LEN 64
+
+/* Collect available variables in this scope */
+static int collect_variables_cb(Dwarf_Die *die_mem, void *data)
+{
+       struct available_var_finder *af = data;
+       struct variable_list *vl;
+       char buf[MAX_VAR_LEN];
+       int tag, ret;
+
+       vl = &af->vls[af->nvls - 1];
+
+       tag = dwarf_tag(die_mem);
+       if (tag == DW_TAG_formal_parameter ||
+           tag == DW_TAG_variable) {
+               ret = convert_variable_location(die_mem, af->pf.addr,
+                                               af->pf.fb_ops, NULL);
+               if (ret == 0) {
+                       ret = die_get_varname(die_mem, buf, MAX_VAR_LEN);
+                       if (ret > 0) {
+                               pr_debug2("Add new var: %s\n", buf);
+                               strlist__add(vl->vars, buf);
+                       }
+               }
+       }
+
+       if (af->child && dwarf_haspc(die_mem, af->pf.addr))
+               return DIE_FIND_CB_CONTINUE;
+       else
+               return DIE_FIND_CB_SIBLING;
+}
+
+/* Add a found vars into available variables list */
+static int add_available_vars(Dwarf_Die *sp_die, struct probe_finder *pf)
+{
+       struct available_var_finder *af =
+                       container_of(pf, struct available_var_finder, pf);
+       struct variable_list *vl;
+       Dwarf_Die die_mem, *scopes = NULL;
+       int ret, nscopes;
+
+       /* Check number of vls */
+       if (af->nvls == af->max_vls) {
+               pr_warning("Too many (> %d) probe points found.\n", af->max_vls);
+               return -ERANGE;
+       }
+       vl = &af->vls[af->nvls++];
+
+       ret = convert_to_trace_point(sp_die, pf->addr, pf->pev->point.retprobe,
+                                    &vl->point);
+       if (ret < 0)
+               return ret;
+
+       pr_debug("Probe point found: %s+%lu\n", vl->point.symbol,
+                vl->point.offset);
+
+       /* Find local variables */
+       vl->vars = strlist__new(true, NULL);
+       if (vl->vars == NULL)
+               return -ENOMEM;
+       af->child = true;
+       die_find_child(sp_die, collect_variables_cb, (void *)af, &die_mem);
+
+       /* Find external variables */
+       if (!af->externs)
+               goto out;
+       /* Don't need to search child DIE for externs. */
+       af->child = false;
+       nscopes = dwarf_getscopes_die(sp_die, &scopes);
+       while (nscopes-- > 1)
+               die_find_child(&scopes[nscopes], collect_variables_cb,
+                              (void *)af, &die_mem);
+       if (scopes)
+               free(scopes);
+
+out:
+       if (strlist__empty(vl->vars)) {
+               strlist__delete(vl->vars);
+               vl->vars = NULL;
+       }
+
+       return ret;
+}
+
+/* Find available variables at given probe point */
+int find_available_vars_at(int fd, struct perf_probe_event *pev,
+                          struct variable_list **vls, int max_vls,
+                          bool externs)
+{
+       struct available_var_finder af = {
+                       .pf = {.pev = pev, .callback = add_available_vars},
+                       .max_vls = max_vls, .externs = externs};
+       int ret;
+
+       /* Allocate result vls array */
+       *vls = zalloc(sizeof(struct variable_list) * max_vls);
+       if (*vls == NULL)
+               return -ENOMEM;
+
+       af.vls = *vls;
+       af.nvls = 0;
+
+       ret = find_probes(fd, &af.pf);
+       if (ret < 0) {
+               /* Free the variable lists on error */
+               while (af.nvls--) {
+                       if (af.vls[af.nvls].point.symbol)
+                               free(af.vls[af.nvls].point.symbol);
+                       if (af.vls[af.nvls].vars)
+                               strlist__delete(af.vls[af.nvls].vars);
+               }
+               free(af.vls);
+               *vls = NULL;
+               return ret;
+       }
+
+       return af.nvls;
 }
 
 /* Reverse search */
-int find_perf_probe_point(int fd, unsigned long addr,
-                         struct perf_probe_point *ppt)
+int find_perf_probe_point(unsigned long addr, struct perf_probe_point *ppt)
 {
        Dwarf_Die cudie, spdie, indie;
-       Dwarf *dbg;
+       Dwarf *dbg = NULL;
+       Dwfl *dwfl = NULL;
        Dwarf_Line *line;
-       Dwarf_Addr laddr, eaddr;
+       Dwarf_Addr laddr, eaddr, bias = 0;
        const char *tmp;
        int lineno, ret = 0;
        bool found = false;
 
-       dbg = dwarf_begin(fd, DWARF_C_READ);
-       if (!dbg)
-               return -EBADF;
+       /* Open the live Linux kernel image */
+       dbg = dwfl_init_live_kernel_dwarf(addr, &dwfl, &bias);
+       if (!dbg) {
+               pr_warning("No dwarf info found in the vmlinux - "
+                       "please rebuild with CONFIG_DEBUG_INFO=y.\n");
+               ret = -EINVAL;
+               goto end;
+       }
 
+       /* Adjust address with bias */
+       addr += bias;
        /* Find cu die */
-       if (!dwarf_addrdie(dbg, (Dwarf_Addr)addr, &cudie)) {
+       if (!dwarf_addrdie(dbg, (Dwarf_Addr)addr - bias, &cudie)) {
+               pr_warning("No CU DIE found at %lx\n", addr);
                ret = -EINVAL;
                goto end;
        }
@@ -1225,7 +1597,8 @@ found:
        }
 
 end:
-       dwarf_end(dbg);
+       if (dwfl)
+               dwfl_end(dwfl);
        if (ret >= 0)
                ret = found ? 1 : 0;
        return ret;
@@ -1358,6 +1731,9 @@ static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
        struct line_finder *lf = param->data;
        struct line_range *lr = lf->lr;
 
+       pr_debug("find (%llx) %s\n",
+                (unsigned long long)dwarf_dieoffset(sp_die),
+                dwarf_diename(sp_die));
        if (dwarf_tag(sp_die) == DW_TAG_subprogram &&
            die_compare_name(sp_die, lr->function)) {
                lf->fname = dwarf_decl_file(sp_die);
@@ -1401,10 +1777,12 @@ int find_line_range(int fd, struct line_range *lr)
        Dwarf_Off off = 0, noff;
        size_t cuhl;
        Dwarf_Die *diep;
-       Dwarf *dbg;
+       Dwarf *dbg = NULL;
+       Dwfl *dwfl;
+       Dwarf_Addr bias;        /* Currently ignored */
        const char *comp_dir;
 
-       dbg = dwarf_begin(fd, DWARF_C_READ);
+       dbg = dwfl_init_offline_dwarf(fd, &dwfl, &bias);
        if (!dbg) {
                pr_warning("No dwarf info found in the vmlinux - "
                        "please rebuild with CONFIG_DEBUG_INFO=y.\n");
@@ -1450,8 +1828,7 @@ int find_line_range(int fd, struct line_range *lr)
        }
 
        pr_debug("path: %s\n", lr->path);
-       dwarf_end(dbg);
-
+       dwfl_end(dwfl);
        return (ret < 0) ? ret : lf.found;
 }
 
index 4507d519f183b71be35c448dddb37f7610835cfc..bba69d4556999e5081b018857acd230de6740eea 100644 (file)
@@ -22,20 +22,27 @@ extern int find_probe_trace_events(int fd, struct perf_probe_event *pev,
                                    int max_tevs);
 
 /* Find a perf_probe_point from debuginfo */
-extern int find_perf_probe_point(int fd, unsigned long addr,
+extern int find_perf_probe_point(unsigned long addr,
                                 struct perf_probe_point *ppt);
 
+/* Find a line range */
 extern int find_line_range(int fd, struct line_range *lr);
 
+/* Find available variables */
+extern int find_available_vars_at(int fd, struct perf_probe_event *pev,
+                                 struct variable_list **vls, int max_points,
+                                 bool externs);
+
 #include <dwarf.h>
 #include <libdw.h>
+#include <libdwfl.h>
 #include <version.h>
 
 struct probe_finder {
        struct perf_probe_event *pev;           /* Target probe event */
-       struct probe_trace_event *tevs;         /* Result trace events */
-       int                     ntevs;          /* Number of trace events */
-       int                     max_tevs;       /* Max number of trace events */
+
+       /* Callback when a probe point is found */
+       int (*callback)(Dwarf_Die *sp_die, struct probe_finder *pf);
 
        /* For function searching */
        int                     lno;            /* Line number */
@@ -53,6 +60,22 @@ struct probe_finder {
        struct probe_trace_arg  *tvar;          /* Current result variable */
 };
 
+struct trace_event_finder {
+       struct probe_finder     pf;
+       struct probe_trace_event *tevs;         /* Found trace events */
+       int                     ntevs;          /* Number of trace events */
+       int                     max_tevs;       /* Max number of trace events */
+};
+
+struct available_var_finder {
+       struct probe_finder     pf;
+       struct variable_list    *vls;           /* Found variable lists */
+       int                     nvls;           /* Number of variable lists */
+       int                     max_vls;        /* Max no. of variable lists */
+       bool                    externs;        /* Find external vars too */
+       bool                    child;          /* Search child scopes */
+};
+
 struct line_finder {
        struct line_range       *lr;            /* Target line range */
 
index 6d0df809a2edab24f28af4bea093d1cda3c2614d..8bc010edca259f48014218a5fcc810d7fe8863b1 100644 (file)
@@ -1,4 +1,3 @@
-#include <slang.h>
 #include "libslang.h"
 #include <linux/compiler.h>
 #include <linux/list.h>