git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
Merge git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 25 Apr 2008 19:40:57 +0000 (12:40 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 25 Apr 2008 19:40:57 +0000 (12:40 -0700)
* git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6: (120 commits)
  usb: don't update devnum for wusb devices
  wusb: make ep0_reinit available for modules
  wusb: devices dont use a set address
  wusb: teach choose_address() about wireless devices
  wusb: add link wusb-usb device
  wusb: add authenticathed bit to usb_dev
  USB: remove unnecessary type casting of urb->context
  usb serial: more fixes and groundwork for tty changes
  USB: replace remaining __FUNCTION__ occurrences
  USB: usbfs: export the URB_NO_INTERRUPT flag to userspace
  USB: fix compile problems in ehci-hcd
  USB: ehci: qh_completions cleanup and bugfix
  USB: cdc-acm: signedness fix
  USB: add documentation about callbacks
  USB: don't explicitly reenable root-hub status interrupts
  USB: OHCI: turn off RD when remote wakeup is disabled
  USB: HCDs use the do_remote_wakeup flag
  USB: g_file_storage: ignore bulk-out data after invalid CBW
  USB: serial: remove endpoints setting checks from core and header
  USB: serial: remove unneeded number endpoints settings
  ...

327 files changed:
.gitignore
Documentation/ABI/stable/sysfs-class-ubi [new file with mode: 0644]
Documentation/DocBook/Makefile
Documentation/HOWTO
Documentation/arm/Samsung-S3C24XX/NAND.txt [new file with mode: 0644]
Documentation/arm/Samsung-S3C24XX/Overview.txt
Documentation/device-mapper/dm-crypt.txt [new file with mode: 0644]
Documentation/filesystems/nfs-rdma.txt
Documentation/filesystems/seq_file.txt
Documentation/kbuild/modules.txt
Documentation/networking/phy.txt
MAINTAINERS
Makefile
arch/sparc64/kernel/smp.c
arch/sparc64/kernel/sys_sparc.c
arch/x86/boot/.gitignore
arch/x86/kernel/acpi/realmode/.gitignore [new file with mode: 0644]
arch/x86/kernel/alternative.c
arch/x86/kernel/entry_32.S
arch/x86/kernel/paravirt.c
arch/x86/kernel/reboot.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/vmi_32.c
arch/x86/mach-voyager/voyager_smp.c
arch/x86/mm/Makefile
arch/x86/mm/init_32.c
arch/x86/mm/init_64.c
arch/x86/mm/ioremap.c
arch/x86/mm/pageattr.c
arch/x86/mm/pgtable.c [new file with mode: 0644]
arch/x86/mm/pgtable_32.c
arch/x86/xen/Kconfig
arch/x86/xen/Makefile
arch/x86/xen/enlighten.c
arch/x86/xen/grant-table.c [new file with mode: 0644]
arch/x86/xen/mmu.c
arch/x86/xen/setup.c
arch/x86/xen/smp.c
arch/x86/xen/xen-asm.S
arch/x86/xen/xen-ops.h
drivers/Kconfig
drivers/block/xen-blkfront.c
drivers/char/keyboard.c
drivers/hid/usbhid/hid-quirks.c
drivers/ieee1394/dv1394.c
drivers/ieee1394/iso.h
drivers/ieee1394/ohci1394.c
drivers/ieee1394/raw1394.c
drivers/ieee1394/video1394.c
drivers/input/Kconfig
drivers/input/Makefile
drivers/input/input-polldev.c
drivers/input/joystick/Kconfig
drivers/input/joystick/Makefile
drivers/input/joystick/xpad.c
drivers/input/joystick/zhenhua.c [new file with mode: 0644]
drivers/input/keyboard/aaed2000_kbd.c
drivers/input/keyboard/bf54x-keys.c
drivers/input/keyboard/corgikbd.c
drivers/input/keyboard/gpio_keys.c
drivers/input/keyboard/jornada680_kbd.c
drivers/input/keyboard/jornada720_kbd.c
drivers/input/keyboard/locomokbd.c
drivers/input/keyboard/omap-keypad.c
drivers/input/keyboard/pxa27x_keypad.c
drivers/input/keyboard/spitzkbd.c
drivers/input/keyboard/tosakbd.c
drivers/input/misc/cobalt_btns.c
drivers/input/mouse/gpio_mouse.c
drivers/input/serio/Kconfig
drivers/input/serio/Makefile
drivers/input/serio/at32psif.c [new file with mode: 0644]
drivers/input/serio/i8042-x86ia64io.h
drivers/input/serio/rpckbd.c
drivers/input/tablet/Kconfig
drivers/input/tablet/aiptek.c
drivers/input/tablet/wacom.h
drivers/input/tablet/wacom_sys.c
drivers/input/tablet/wacom_wac.c
drivers/input/touchscreen/Kconfig
drivers/input/touchscreen/Makefile
drivers/input/touchscreen/ads7846.c
drivers/input/touchscreen/corgi_ts.c
drivers/input/touchscreen/jornada720_ts.c
drivers/input/touchscreen/mainstone-wm97xx.c [new file with mode: 0644]
drivers/input/touchscreen/ucb1400_ts.c
drivers/input/touchscreen/usbtouchscreen.c
drivers/input/touchscreen/wm9705.c [new file with mode: 0644]
drivers/input/touchscreen/wm9712.c [new file with mode: 0644]
drivers/input/touchscreen/wm9713.c [new file with mode: 0644]
drivers/input/touchscreen/wm97xx-core.c [new file with mode: 0644]
drivers/input/xen-kbdfront.c [new file with mode: 0644]
drivers/macintosh/mac_hid.c
drivers/md/Makefile
drivers/md/dm-exception-store.c
drivers/md/dm-io.c
drivers/md/dm-kcopyd.c [moved from drivers/md/kcopyd.c with 71% similarity]
drivers/md/dm-log.c
drivers/md/dm-raid1.c
drivers/md/dm-snap.c
drivers/md/dm-snap.h
drivers/md/dm-table.c
drivers/md/dm.c
drivers/md/dm.h
drivers/md/kcopyd.h [deleted file]
drivers/mtd/Kconfig
drivers/mtd/Makefile
drivers/mtd/ar7part.c [new file with mode: 0644]
drivers/mtd/chips/cfi_cmdset_0001.c
drivers/mtd/chips/cfi_cmdset_0002.c
drivers/mtd/chips/cfi_cmdset_0020.c
drivers/mtd/chips/cfi_probe.c
drivers/mtd/chips/cfi_util.c
drivers/mtd/chips/jedec_probe.c
drivers/mtd/cmdlinepart.c
drivers/mtd/devices/Kconfig
drivers/mtd/devices/block2mtd.c
drivers/mtd/devices/lart.c
drivers/mtd/devices/m25p80.c
drivers/mtd/devices/mtdram.c
drivers/mtd/devices/phram.c
drivers/mtd/ftl.c
drivers/mtd/inftlmount.c
drivers/mtd/maps/Kconfig
drivers/mtd/maps/bast-flash.c
drivers/mtd/maps/ck804xrom.c
drivers/mtd/maps/integrator-flash.c
drivers/mtd/maps/ixp2000.c
drivers/mtd/maps/ixp4xx.c
drivers/mtd/maps/omap_nor.c
drivers/mtd/maps/pcmciamtd.c
drivers/mtd/maps/physmap.c
drivers/mtd/maps/plat-ram.c
drivers/mtd/maps/pmcmsp-flash.c
drivers/mtd/maps/sa1100-flash.c
drivers/mtd/maps/sharpsl-flash.c
drivers/mtd/maps/tqm8xxl.c
drivers/mtd/mtdoops.c
drivers/mtd/nand/Kconfig
drivers/mtd/nand/Makefile
drivers/mtd/nand/at91_nand.c
drivers/mtd/nand/bf5xx_nand.c
drivers/mtd/nand/cs553x_nand.c
drivers/mtd/nand/fsl_elbc_nand.c
drivers/mtd/nand/fsl_upm.c [new file with mode: 0644]
drivers/mtd/nand/nand_base.c
drivers/mtd/nand/ndfc.c
drivers/mtd/nand/orion_nand.c
drivers/mtd/nand/plat_nand.c
drivers/mtd/nand/pxa3xx_nand.c [new file with mode: 0644]
drivers/mtd/nand/rtc_from4.c
drivers/mtd/nand/s3c2410.c
drivers/mtd/nftlmount.c
drivers/mtd/ofpart.c
drivers/mtd/onenand/onenand_base.c
drivers/mtd/onenand/onenand_bbt.c
drivers/mtd/rfd_ftl.c
drivers/mtd/ubi/Kconfig
drivers/mtd/ubi/build.c
drivers/mtd/ubi/debug.h
drivers/mtd/ubi/gluebi.c
drivers/mtd/ubi/io.c
drivers/mtd/ubi/scan.c
drivers/mtd/ubi/scan.h
drivers/mtd/ubi/ubi-media.h [moved from include/mtd/ubi-header.h with 99% similarity]
drivers/mtd/ubi/ubi.h
drivers/net/arm/at91_ether.c
drivers/net/arm/ep93xx_eth.c
drivers/net/atlx/atl1.c
drivers/net/atlx/atlx.c
drivers/net/ax88796.c
drivers/net/bfin_mac.c
drivers/net/cpmac.c
drivers/net/dm9000.c
drivers/net/e1000e/82571.c
drivers/net/e1000e/defines.h
drivers/net/e1000e/e1000.h
drivers/net/e1000e/es2lan.c
drivers/net/e1000e/ethtool.c
drivers/net/e1000e/hw.h
drivers/net/e1000e/netdev.c
drivers/net/e1000e/phy.c
drivers/net/ehea/ehea_main.c
drivers/net/forcedeth.c
drivers/net/gianfar.c
drivers/net/ibm_newemac/core.c
drivers/net/ibm_newemac/core.h
drivers/net/ibm_newemac/mal.c
drivers/net/ibm_newemac/rgmii.c
drivers/net/ibm_newemac/tah.c
drivers/net/ibm_newemac/zmii.c
drivers/net/igb/igb_main.c
drivers/net/irda/ali-ircc.c
drivers/net/irda/pxaficp_ir.c
drivers/net/irda/sa1100_ir.c
drivers/net/ixgbe/ixgbe_main.c
drivers/net/jazzsonic.c
drivers/net/korina.c
drivers/net/macb.c
drivers/net/meth.c
drivers/net/mv643xx_eth.c
drivers/net/netx-eth.c
drivers/net/netxen/netxen_nic_hw.c
drivers/net/niu.c
drivers/net/niu.h
drivers/net/phy/mdio_bus.c
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/s2io.c
drivers/net/s2io.h
drivers/net/sgiseeq.c
drivers/net/smc911x.c
drivers/net/smc91x.c
drivers/net/sni_82596.c
drivers/net/tehuti.c
drivers/net/tg3.c
drivers/net/tsi108_eth.c
drivers/net/typhoon.c
drivers/net/ucc_geth.c
drivers/net/via-velocity.c
drivers/net/wan/c101.c
drivers/net/wan/hdlc_fr.c
drivers/net/xen-netfront.c
drivers/video/Kconfig
drivers/video/Makefile
drivers/video/xen-fbfront.c [new file with mode: 0644]
drivers/xen/Kconfig [new file with mode: 0644]
drivers/xen/Makefile
drivers/xen/balloon.c [new file with mode: 0644]
drivers/xen/events.c [moved from arch/x86/xen/events.c with 82% similarity]
drivers/xen/features.c [moved from arch/x86/xen/features.c with 100% similarity]
drivers/xen/grant-table.c
drivers/xen/xenbus/xenbus_client.c
drivers/xen/xenbus/xenbus_probe.c
drivers/xen/xencomm.c [new file with mode: 0644]
fs/jffs2/README.Locking
fs/jffs2/build.c
fs/jffs2/debug.c
fs/jffs2/debug.h
fs/jffs2/dir.c
fs/jffs2/erase.c
fs/jffs2/file.c
fs/jffs2/fs.c
fs/jffs2/gc.c
fs/jffs2/ioctl.c
fs/jffs2/jffs2_fs_i.h
fs/jffs2/jffs2_fs_sb.h
fs/jffs2/nodelist.h
fs/jffs2/nodemgmt.c
fs/jffs2/readinode.c
fs/jffs2/super.c
fs/jffs2/wbuf.c
fs/jffs2/write.c
fs/lockd/svclock.c
fs/lockd/svcsubs.c
fs/locks.c
fs/nfsd/nfs4state.c
fs/nfsd/nfs4xdr.c
fs/nfsd/nfsctl.c
include/asm-arm/arch-pxa/pxa3xx_nand.h [new file with mode: 0644]
include/asm-arm/plat-s3c/nand.h
include/asm-x86/fixmap.h
include/asm-x86/fixmap_32.h
include/asm-x86/fixmap_64.h
include/asm-x86/paravirt.h
include/asm-x86/pgalloc.h
include/asm-x86/pgalloc_32.h [deleted file]
include/asm-x86/pgalloc_64.h [deleted file]
include/asm-x86/pgtable.h
include/asm-x86/pgtable_32.h
include/asm-x86/pgtable_64.h
include/asm-x86/xen/events.h [new file with mode: 0644]
include/asm-x86/xen/grant_table.h [new file with mode: 0644]
include/asm-x86/xen/hypercall.h
include/asm-x86/xen/interface.h
include/asm-x86/xen/page.h [new file with mode: 0644]
include/linux/device-mapper.h
include/linux/dm-dirty-log.h [moved from drivers/md/dm-log.h with 53% similarity]
include/linux/dm-io.h [moved from drivers/md/dm-io.h with 84% similarity]
include/linux/dm-kcopyd.h [new file with mode: 0644]
include/linux/fs.h
include/linux/input.h
include/linux/keyboard.h
include/linux/lockd/lockd.h
include/linux/mtd/inftl.h
include/linux/mtd/nftl.h
include/linux/mtd/onenand.h
include/linux/mtd/plat-ram.h
include/linux/nfsd/nfsd.h
include/linux/phy.h
include/linux/serio.h
include/linux/spi/ads7846.h
include/linux/wm97xx.h [new file with mode: 0644]
include/linux/xfrm.h
include/mtd/Kbuild
include/xen/balloon.h [new file with mode: 0644]
include/xen/events.h
include/xen/grant_table.h
include/xen/interface/callback.h [new file with mode: 0644]
include/xen/interface/grant_table.h
include/xen/interface/io/fbif.h [new file with mode: 0644]
include/xen/interface/io/kbdif.h [new file with mode: 0644]
include/xen/interface/io/protocols.h [new file with mode: 0644]
include/xen/interface/memory.h
include/xen/interface/vcpu.h
include/xen/interface/xen.h
include/xen/interface/xencomm.h [new file with mode: 0644]
include/xen/page.h
include/xen/xen-ops.h [new file with mode: 0644]
include/xen/xenbus.h
include/xen/xencomm.h [new file with mode: 0644]
kernel/sched.c
lib/Kconfig.debug
net/can/raw.c
net/core/ethtool.c
net/dccp/probe.c
net/ipv4/tcp_probe.c
net/ipv6/Kconfig
net/ipv6/raw.c
net/key/af_key.c
net/tipc/socket.c
scripts/Makefile.build
scripts/Makefile.clean
scripts/Makefile.host
scripts/Makefile.modpost
scripts/kconfig/Makefile
scripts/mod/modpost.c

index fdcce40226d7d4273a08cc4ef84bb25755a710a4..3016ed30526d4296e219d4ea9dc3ef7e23fe9f23 100644 (file)
@@ -27,6 +27,7 @@ TAGS
 vmlinux*
 !vmlinux.lds.S
 System.map
+Module.markers
 Module.symvers
 !.gitignore
 
diff --git a/Documentation/ABI/stable/sysfs-class-ubi b/Documentation/ABI/stable/sysfs-class-ubi
new file mode 100644 (file)
index 0000000..18d471d
--- /dev/null
@@ -0,0 +1,212 @@
+What:          /sys/class/ubi/
+Date:          July 2006
+KernelVersion: 2.6.22
+Contact:       Artem Bityutskiy <dedekind@infradead.org>
+Description:
+               The ubi/ class sub-directory belongs to the UBI subsystem and
+               provides general UBI information, per-UBI device information
+               and per-UBI volume information.
+
+What:          /sys/class/ubi/version
+Date:          July 2006
+KernelVersion: 2.6.22
+Contact:       Artem Bityutskiy <dedekind@infradead.org>
+Description:
+               This file contains the version of the latest supported UBI on-media
+               format. Currently it is 1, and there is no plan to change this.
+               However, if in the future UBI needs on-flash format changes
+               which cannot be done in a compatible manner, a new format
+               version will be added. So this is a mechanism for possible
+               future backward-compatible (but forward-incompatible)
+               improvements.
+
+What:          /sys/class/ubi/ubiX/
+Date:          July 2006
+KernelVersion: 2.6.22
+Contact:       Artem Bityutskiy <dedekind@infradead.org>
+Description:
+               The /sys/class/ubi/ubi0, /sys/class/ubi/ubi1, etc. directories
+               describe UBI devices (UBI device 0, 1, etc.). They contain
+               general UBI device information and per-UBI-volume information
+               (each UBI device may have many UBI volumes).
+
+What:          /sys/class/ubi/ubiX/avail_eraseblocks
+Date:          July 2006
+KernelVersion: 2.6.22
+Contact:       Artem Bityutskiy <dedekind@infradead.org>
+Description:
+               Amount of available logical eraseblocks. For example, one may
+               create a new UBI volume which has this amount of logical
+               eraseblocks.
+
+What:          /sys/class/ubi/ubiX/bad_peb_count
+Date:          July 2006
+KernelVersion: 2.6.22
+Contact:       Artem Bityutskiy <dedekind@infradead.org>
+Description:
+               Count of bad physical eraseblocks on the underlying MTD device.
+
+What:          /sys/class/ubi/ubiX/bgt_enabled
+Date:          July 2006
+KernelVersion: 2.6.22
+Contact:       Artem Bityutskiy <dedekind@infradead.org>
+Description:
+               Contains ASCII "0\n" if the UBI background thread is disabled,
+               and ASCII "1\n" if it is enabled.
+
+What:          /sys/class/ubi/ubiX/dev
+Date:          July 2006
+KernelVersion: 2.6.22
+Contact:       Artem Bityutskiy <dedekind@infradead.org>
+Description:
+               Major and minor numbers of the character device corresponding
+               to this UBI device (in <major>:<minor> format).
+
+What:          /sys/class/ubi/ubiX/eraseblock_size
+Date:          July 2006
+KernelVersion: 2.6.22
+Contact:       Artem Bityutskiy <dedekind@infradead.org>
+Description:
+               Maximum logical eraseblock size this UBI device may provide. UBI
+               volumes may have smaller logical eraseblock size because of their
+               alignment.
+
+What:          /sys/class/ubi/ubiX/max_ec
+Date:          July 2006
+KernelVersion: 2.6.22
+Contact:       Artem Bityutskiy <dedekind@infradead.org>
+Description:
+               Maximum physical eraseblock erase counter value.
+
+What:          /sys/class/ubi/ubiX/max_vol_count
+Date:          July 2006
+KernelVersion: 2.6.22
+Contact:       Artem Bityutskiy <dedekind@infradead.org>
+Description:
+               Maximum number of volumes which this UBI device may have.
+
+What:          /sys/class/ubi/ubiX/min_io_size
+Date:          July 2006
+KernelVersion: 2.6.22
+Contact:       Artem Bityutskiy <dedekind@infradead.org>
+Description:
+               Minimum input/output unit size. All the I/O may only be done
+               in multiples of the contained number.
+
+What:          /sys/class/ubi/ubiX/mtd_num
+Date:          January 2008
+KernelVersion: 2.6.25
+Contact:       Artem Bityutskiy <dedekind@infradead.org>
+Description:
+               Number of the underlying MTD device.
+
+What:          /sys/class/ubi/ubiX/reserved_for_bad
+Date:          July 2006
+KernelVersion: 2.6.22
+Contact:       Artem Bityutskiy <dedekind@infradead.org>
+Description:
+               Number of physical eraseblocks reserved for bad block handling.
+
+What:          /sys/class/ubi/ubiX/total_eraseblocks
+Date:          July 2006
+KernelVersion: 2.6.22
+Contact:       Artem Bityutskiy <dedekind@infradead.org>
+Description:
+               Total number of good (not marked as bad) physical eraseblocks on
+               the underlying MTD device.
+
+What:          /sys/class/ubi/ubiX/volumes_count
+Date:          July 2006
+KernelVersion: 2.6.22
+Contact:       Artem Bityutskiy <dedekind@infradead.org>
+Description:
+               Count of volumes on this UBI device.
+
+What:          /sys/class/ubi/ubiX/ubiX_Y/
+Date:          July 2006
+KernelVersion: 2.6.22
+Contact:       Artem Bityutskiy <dedekind@infradead.org>
+Description:
+               The /sys/class/ubi/ubiX/ubiX_0/, /sys/class/ubi/ubiX/ubiX_1/,
+               etc directories describe UBI volumes on UBI device X (volumes
+               0, 1, etc).
+
+What:          /sys/class/ubi/ubiX/ubiX_Y/alignment
+Date:          July 2006
+KernelVersion: 2.6.22
+Contact:       Artem Bityutskiy <dedekind@infradead.org>
+Description:
+               Volume alignment - the value the logical eraseblock size of
+               this volume has to be aligned on. For example, 2048 means that
+               logical eraseblock size is a multiple of 2048. In other words,
+               volume logical eraseblock size is UBI device logical eraseblock
+               size aligned to the alignment value.
+
+What:          /sys/class/ubi/ubiX/ubiX_Y/corrupted
+Date:          July 2006
+KernelVersion: 2.6.22
+Contact:       Artem Bityutskiy <dedekind@infradead.org>
+Description:
+               Contains ASCII "0\n" if the UBI volume is OK, and ASCII "1\n"
+               if it is corrupted (e.g., due to an interrupted volume update).
+
+What:          /sys/class/ubi/ubiX/ubiX_Y/data_bytes
+Date:          July 2006
+KernelVersion: 2.6.22
+Contact:       Artem Bityutskiy <dedekind@infradead.org>
+Description:
+               The amount of data this volume contains. This value makes sense
+               only for static volumes, and for dynamic volumes it is
+               equivalent to the total volume size in bytes.
+
+What:          /sys/class/ubi/ubiX/ubiX_Y/dev
+Date:          July 2006
+KernelVersion: 2.6.22
+Contact:       Artem Bityutskiy <dedekind@infradead.org>
+Description:
+               Major and minor numbers of the character device corresponding
+               to this UBI volume (in <major>:<minor> format).
+
+What:          /sys/class/ubi/ubiX/ubiX_Y/name
+Date:          July 2006
+KernelVersion: 2.6.22
+Contact:       Artem Bityutskiy <dedekind@infradead.org>
+Description:
+               Volume name.
+
+What:          /sys/class/ubi/ubiX/ubiX_Y/reserved_ebs
+Date:          July 2006
+KernelVersion: 2.6.22
+Contact:       Artem Bityutskiy <dedekind@infradead.org>
+Description:
+               Count of physical eraseblocks reserved for this volume.
+               Equivalent to the volume size in logical eraseblocks.
+
+What:          /sys/class/ubi/ubiX/ubiX_Y/type
+Date:          July 2006
+KernelVersion: 2.6.22
+Contact:       Artem Bityutskiy <dedekind@infradead.org>
+Description:
+               Volume type. Contains ASCII "dynamic\n" for dynamic volumes and
+               "static\n" for static volumes.
+
+What:          /sys/class/ubi/ubiX/ubiX_Y/upd_marker
+Date:          July 2006
+KernelVersion: 2.6.22
+Contact:       Artem Bityutskiy <dedekind@infradead.org>
+Description:
+               Contains ASCII "0\n" if the update marker is not set for this
+               volume, and "1\n" if it is set. The update marker is set when
+               a volume update starts, and cleared when it ends. So the
+               presence of the update marker indicates either that the volume
+               is being updated at the moment or that an update was
+               interrupted. The latter may be checked using the "corrupted"
+               sysfs file.
+
+What:          /sys/class/ubi/ubiX/ubiX_Y/usable_eb_size
+Date:          July 2006
+KernelVersion: 2.6.22
+Contact:       Artem Bityutskiy <dedekind@infradead.org>
+Description:
+               Logical eraseblock size of this volume. Equivalent to logical
+               eraseblock size of the device aligned on the volume alignment
+               value.
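The attributes documented above are plain text files, so they can be consumed from
user space with ordinary reads. A minimal userspace sketch (not part of this patch;
the device name "ubi0" is only an example) that prints the on-media format version
and the volume count of the first UBI device:

        #include <stdio.h>

        /* Read a single decimal value from a sysfs attribute file. */
        static int read_sysfs_int(const char *path)
        {
                FILE *f = fopen(path, "r");
                int val = -1;

                if (f) {
                        if (fscanf(f, "%d", &val) != 1)
                                val = -1;
                        fclose(f);
                }
                return val;
        }

        int main(void)
        {
                printf("UBI on-media format version: %d\n",
                       read_sysfs_int("/sys/class/ubi/version"));
                printf("ubi0 volume count: %d\n",
                       read_sysfs_int("/sys/class/ubi/ubi0/volumes_count"));
                return 0;
        }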
index b2b6366bba5163628d7d4e4617921dda5d442d0f..83966e94cc32308579b7b6f041c4299fad94fbd5 100644 (file)
@@ -187,8 +187,11 @@ quiet_cmd_fig2png = FIG2PNG $@
 
 ###
 # Rule to convert a .c file to inline XML documentation
+       gen_xml = :
+ quiet_gen_xml = echo '  GEN     $@'
+silent_gen_xml = :
 %.xml: %.c
-       @echo '  GEN     $@'
+       @$($(quiet)gen_xml)
        @(                            \
           echo "<programlisting>";   \
           expand --tabs=8 < $< |     \
index 54835610b3d6564945ac5be5f32077bf07626923..0291ade44c172882087b49cd6069267c12a4df3c 100644 (file)
@@ -249,9 +249,11 @@ process is as follows:
     release a new -rc kernel every week.
   - Process continues until the kernel is considered "ready", the
     process should last around 6 weeks.
-  - A list of known regressions present in each -rc release is
-    tracked at the following URI:
-    http://kernelnewbies.org/known_regressions
+  - Known regressions in each release are periodically posted to the 
+    linux-kernel mailing list.  The goal is to reduce the length of 
+    that list to zero before declaring the kernel to be "ready," but, in
+    the real world, a small number of regressions often remain at 
+    release time.
 
 It is worth mentioning what Andrew Morton wrote on the linux-kernel
 mailing list about kernel releases:
@@ -261,7 +263,7 @@ mailing list about kernel releases:
 
 2.6.x.y -stable kernel tree
 ---------------------------
-Kernels with 4 digit versions are -stable kernels. They contain
+Kernels with 4-part versions are -stable kernels. They contain
 relatively small and critical fixes for security problems or significant
 regressions discovered in a given 2.6.x kernel.
 
@@ -273,7 +275,10 @@ If no 2.6.x.y kernel is available, then the highest numbered 2.6.x
 kernel is the current stable kernel.
 
 2.6.x.y are maintained by the "stable" team <stable@kernel.org>, and are
-released almost every other week.
+released as needs dictate.  The normal release period is approximately 
+two weeks, but it can be longer if there are no pressing problems.  A
+security-related problem, instead, can cause a release to happen almost
+instantly.
 
 The file Documentation/stable_kernel_rules.txt in the kernel tree
 documents what kinds of changes are acceptable for the -stable tree, and
@@ -298,7 +303,9 @@ a while Andrew or the subsystem maintainer pushes it on to Linus for
 inclusion in mainline.
 
 It is heavily encouraged that all new patches get tested in the -mm tree
-before they are sent to Linus for inclusion in the main kernel tree.
+before they are sent to Linus for inclusion in the main kernel tree.  Code
+which does not make an appearance in -mm before the opening of the merge
+window will prove hard to merge into the mainline.
 
 These kernels are not appropriate for use on systems that are supposed
 to be stable and they are more risky to run than any of the other
@@ -354,11 +361,12 @@ Here is a list of some of the different kernel trees available:
     - SCSI, James Bottomley <James.Bottomley@SteelEye.com>
        git.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6.git
 
+    - x86, Ingo Molnar <mingo@elte.hu>
+       git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86.git
+
   quilt trees:
-    - USB, PCI, Driver Core, and I2C, Greg Kroah-Hartman <gregkh@suse.de>
+    - USB, Driver Core, and I2C, Greg Kroah-Hartman <gregkh@suse.de>
        kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
-    - x86-64, partly i386, Andi Kleen <ak@suse.de>
-        ftp.firstfloor.org:/pub/ak/x86_64/quilt/
 
   Other kernel trees can be found listed at http://git.kernel.org/ and in
   the MAINTAINERS file.
@@ -392,8 +400,8 @@ If you want to be advised of the future bug reports, you can subscribe to the
 bugme-new mailing list (only new bug reports are mailed here) or to the
 bugme-janitor mailing list (every change in the bugzilla is mailed here)
 
-       http://lists.osdl.org/mailman/listinfo/bugme-new
-       http://lists.osdl.org/mailman/listinfo/bugme-janitors
+       http://lists.linux-foundation.org/mailman/listinfo/bugme-new
+       http://lists.linux-foundation.org/mailman/listinfo/bugme-janitors
 
 
 
diff --git a/Documentation/arm/Samsung-S3C24XX/NAND.txt b/Documentation/arm/Samsung-S3C24XX/NAND.txt
new file mode 100644 (file)
index 0000000..bc478a3
--- /dev/null
@@ -0,0 +1,30 @@
+                       S3C24XX NAND Support
+                       ====================
+
+Introduction
+------------
+
+Small Page NAND
+---------------
+
+The driver uses a 512 byte (1 page) ECC code for this setup. The
+ECC code is not directly compatible with the default kernel ECC
+code, so the driver enforces its own OOB layout and ECC parameters
+
+Large Page NAND
+---------------
+
+The driver is capable of handling NAND flash with a 2KiB page
+size, with support for hardware ECC generation and correction.
+
+Unlike the 512 byte page mode, the driver generates ECC data for
+each 256 byte block in a 2KiB page. This means that more than
+one error in a page can be rectified. It also means that the
+OOB layout remains the default kernel layout for these flashes.
+
+
+Document Author
+---------------
+
+Ben Dooks, Copyright 2007 Simtec Electronics
+
index c31b76fa66c462601a92054221baea08643057da..d04e1e30c47f8ff1a8613a369506e58cf5a41a21 100644 (file)
@@ -156,6 +156,8 @@ NAND
   controller. If there are any problems the latest linux-mtd
   code can be found from http://www.linux-mtd.infradead.org/
 
+  For more information see Documentation/arm/Samsung-S3C24XX/NAND.txt
+
 
 Serial
 ------
diff --git a/Documentation/device-mapper/dm-crypt.txt b/Documentation/device-mapper/dm-crypt.txt
new file mode 100644 (file)
index 0000000..6680cab
--- /dev/null
@@ -0,0 +1,52 @@
+dm-crypt
+=========
+
+Device-Mapper's "crypt" target provides transparent encryption of block devices
+using the kernel crypto API.
+
+Parameters: <cipher> <key> <iv_offset> <device path> <offset>
+
+<cipher>
+    Encryption cipher and an optional IV generation mode.
+    (In format cipher-chainmode-ivopts:ivmode).
+    Examples:
+       des
+       aes-cbc-essiv:sha256
+       twofish-ecb
+
+    /proc/crypto contains supported crypto modes
+
+<key>
+    Key used for encryption. It is encoded as a hexadecimal number.
+    You can only use key sizes that are valid for the selected cipher.
+
+<iv_offset>
+    The IV offset is a sector count that is added to the sector number
+    before creating the IV.
+
+<device path>
+    This is the device that is going to be used as backend and contains the
+    encrypted data.  You can specify it as a path like /dev/xxx or a device
+    number <major>:<minor>.
+
+<offset>
+    Starting sector within the device where the encrypted data begins.
+
+Example scripts
+===============
+LUKS (Linux Unified Key Setup) is now the preferred way to set up disk
+encryption with dm-crypt using the 'cryptsetup' utility, see
+http://luks.endorphin.org/
+
+[[
+#!/bin/sh
+# Create a crypt device using dmsetup
+dmsetup create crypt1 --table "0 `blockdev --getsize $1` crypt aes-cbc-essiv:sha256 babebabebabebabebabebabebabebabe 0 $1 0"
+]]
+
+[[
+#!/bin/sh
+# Create a crypt device using cryptsetup and LUKS header with default cipher
+cryptsetup luksFormat $1
+cryptsetup luksOpen $1 crypt1
+]]
index 1ae34879574b6c616f9de8f1c15851096772502e..d0ec45ae4e7dfa4585fa74c0757be4d80dc02bf5 100644 (file)
@@ -5,7 +5,7 @@
 ################################################################################
 
  Author: NetApp and Open Grid Computing
- Date: February 25, 2008
+ Date: April 15, 2008
 
 Table of Contents
 ~~~~~~~~~~~~~~~~~
@@ -197,12 +197,16 @@ NFS/RDMA Setup
   - On the server system, configure the /etc/exports file and
     start the NFS/RDMA server.
 
-    Exports entries with the following format have been tested:
+    Exports entries with the following formats have been tested:
 
-    /vol0   10.97.103.47(rw,async) 192.168.0.47(rw,async,insecure,no_root_squash)
+    /vol0   192.168.0.47(fsid=0,rw,async,insecure,no_root_squash)
+    /vol0   192.168.0.0/255.255.255.0(fsid=0,rw,async,insecure,no_root_squash)
 
-    Here the first IP address is the client's Ethernet address and the second
-    IP address is the clients IPoIB address.
+    The IP address(es) is(are) the client's IPoIB address for an InfiniBand HCA or the
+    client's iWARP address(es) for an RNIC.
+
+    NOTE: The "insecure" option must be used because the NFS/RDMA client does not
+    use a reserved port.
 
  Each time a machine boots:
 
index 7fb8e6dc62bfb596032725264c3b911a0cca2b2b..b843743aa0b5928630771a0ba6e22ec65c206654 100644 (file)
@@ -122,8 +122,7 @@ stop() is the place to free it.
        }
 
 Finally, the show() function should format the object currently pointed to
-by the iterator for output. It should return zero, or an error code if
-something goes wrong. The example module's show() function is:
+by the iterator for output.  The example module's show() function is:
 
        static int ct_seq_show(struct seq_file *s, void *v)
        {
@@ -132,6 +131,12 @@ something goes wrong. The example module's show() function is:
                return 0;
        }
 
+If all is well, the show() function should return zero.  A negative error
+code in the usual manner indicates that something went wrong; it will be
+passed back to user space.  This function can also return SEQ_SKIP, which
+causes the current item to be skipped; if the show() function has already
+generated output before returning SEQ_SKIP, that output will be dropped.
+
 We will look at seq_printf() in a moment. But first, the definition of the
 seq_file iterator is finished by creating a seq_operations structure with
 the four functions we have just defined:
@@ -182,12 +187,18 @@ The first two output a single character and a string, just like one would
 expect. seq_escape() is like seq_puts(), except that any character in s
 which is in the string esc will be represented in octal form in the output.
 
-There is also a function for printing filenames:
+There is also a pair of functions for printing filenames:
 
        int seq_path(struct seq_file *m, struct path *path, char *esc);
+       int seq_path_root(struct seq_file *m, struct path *path,
+                         struct path *root, char *esc)
 
 Here, path indicates the file of interest, and esc is a set of characters
-which should be escaped in the output.
+which should be escaped in the output.  A call to seq_path() will output
+the path relative to the current process's filesystem root.  If a different
+root is desired, it can be used with seq_path_root().  Note that, if it
+turns out that path cannot be reached from root, the value of root will be
+changed in seq_path_root() to a root which *does* work.
 
 
 Making it all work
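To illustrate the SEQ_SKIP return value introduced above, here is a minimal
sketch of a show() method in the style of the document's example module (the
filtering condition is purely illustrative, not part of this patch):

        static int ct_seq_show(struct seq_file *s, void *v)
        {
                loff_t *spos = v;

                /* Purely illustrative filter: suppress odd-numbered items.
                 * Any output already generated for this item is dropped. */
                if (*spos & 1)
                        return SEQ_SKIP;

                seq_printf(s, "%Ld\n", *spos);
                return 0;
        }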
index 1d247d59ad56fd57254c3bc5ec5e8c7b3291ed58..1821c077b435cd64eac847dc1ec69d5386dc6ad6 100644 (file)
@@ -486,7 +486,7 @@ Module.symvers contains a list of all exported symbols from a kernel build.
        Sometimes, an external module uses exported symbols from another
        external module. Kbuild needs to have full knowledge on all symbols
        to avoid spitting out warnings about undefined symbols.
-       Two solutions exist to let kbuild know all symbols of more than
+       Three solutions exist to let kbuild know all symbols of more than
        one external module.
        The method with a top-level kbuild file is recommended but may be
        impractical in certain situations.
@@ -523,6 +523,13 @@ Module.symvers contains a list of all exported symbols from a kernel build.
                containing the sum of all symbols defined and not part of the
                kernel.
 
+       Use make variable KBUILD_EXTRA_SYMBOLS in the Makefile
+               If it is impractical to copy Module.symvers from another
+               module, you can assign a space separated list of files to
+               KBUILD_EXTRA_SYMBOLS in your Makefile. These files will be
+               loaded by modpost during the initialisation of its symbol
+               tables.
+
 === 8. Tips & Tricks
 
 --- 8.1 Testing for CONFIG_FOO_BAR
index 0bc95eab151288d5ba11c2bca8fdc24c2d0c56c8..8df6a7b0e66cdfd3b1de96ed038a062b71be8008 100644 (file)
@@ -1,7 +1,7 @@
 
 -------
 PHY Abstraction Layer
-(Updated 2006-11-30)
+(Updated 2008-04-08)
 
 Purpose
 
@@ -291,3 +291,39 @@ Writing a PHY driver
  Feel free to look at the Marvell, Cicada, and Davicom drivers in
  drivers/net/phy/ for examples (the lxt and qsemi drivers have
  not been tested as of this writing)
+
+Board Fixups
+
+ Sometimes the specific interaction between the platform and the PHY requires
+ special handling.  For instance, to change where the PHY's clock input is,
+ or to add a delay to account for latency issues in the data path.  In order
+ to support such contingencies, the PHY Layer allows platform code to register
+ fixups to be run when the PHY is brought up (or subsequently reset).
+
+ When the PHY Layer brings up a PHY it checks to see if there are any fixups
+ registered for it, matching based on UID (contained in the PHY device's phy_id
+ field) and the bus identifier (contained in phydev->dev.bus_id).  Both must
+ match; however, two constants, PHY_ANY_ID and PHY_ANY_UID, are provided as
+ wildcards for the bus ID and UID, respectively.
+
+ When a match is found, the PHY layer will invoke the run function associated
+ with the fixup.  This function is passed a pointer to the phy_device of
+ interest.  It should therefore only operate on that PHY.
+
+ The platform code can either register the fixup using phy_register_fixup():
+
+       int phy_register_fixup(const char *phy_id,
+               u32 phy_uid, u32 phy_uid_mask,
+               int (*run)(struct phy_device *));
+
+ Or using one of the two stubs, phy_register_fixup_for_uid() and
+ phy_register_fixup_for_id():
+
+ int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask,
+               int (*run)(struct phy_device *));
+ int phy_register_fixup_for_id(const char *phy_id,
+               int (*run)(struct phy_device *));
+
+ The stubs set one of the two matching criteria, and set the other one to
+ match anything.
+
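As a concrete, hypothetical illustration of the interface above, a board file
might register a UID-based fixup as sketched below; the PHY UID, mask, and the
vendor register write are assumptions made for the example, not part of this
patch:

        #include <linux/init.h>
        #include <linux/phy.h>

        /* Hypothetical UID/mask for the PHY fitted on this board. */
        #define BOARD_PHY_UID           0x01410cc0
        #define BOARD_PHY_UID_MASK      0xfffffff0

        static int board_phy_fixup(struct phy_device *phydev)
        {
                /* Example only: write a vendor register to add clock delays. */
                return phy_write(phydev, 0x14, 0x0cd2);
        }

        static int __init board_phy_fixup_init(void)
        {
                /* Match on UID; the bus ID is wildcarded by this stub. */
                return phy_register_fixup_for_uid(BOARD_PHY_UID,
                                                  BOARD_PHY_UID_MASK,
                                                  board_phy_fixup);
        }
        arch_initcall(board_phy_fixup_init);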
index f50e927a118992e8d69953675c65a87998a63231..2edec00c12c0cc6fcf3a221f0810825a8eeb04ff 100644 (file)
@@ -4356,6 +4356,16 @@ L:       linux-wireless@vger.kernel.org
 W:     http://oops.ghostprotocols.net:81/blog
 S:     Maintained
 
+WM97XX TOUCHSCREEN DRIVERS
+P:     Mark Brown
+M:     broonie@opensource.wolfsonmicro.com
+P:     Liam Girdwood
+M:     liam.girdwood@wolfsonmicro.com
+L:     linux-input@vger.kernel.org
+T:     git git://opensource.wolfsonmicro.com/linux-2.6-touch
+W:     http://opensource.wolfsonmicro.com/node/7
+S:     Supported
+
 X.25 NETWORK LAYER
 P:     Henner Eisen
 M:     eis@baty.hanse.de
index 3dbc826bb8e6d73c175d772a497f17accd61c23e..fc3411e6f071fb34be17e7bd937e38634ab78cfd 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -507,6 +507,10 @@ else
 KBUILD_CFLAGS  += -O2
 endif
 
+ifneq ($(CONFIG_FRAME_WARN),0)
+KBUILD_CFLAGS += $(call cc-option,-Wframe-larger-than=${CONFIG_FRAME_WARN})
+endif
+
 # Force gcc to behave correct even for buggy distributions
 # Arch Makefiles may override this setting
 KBUILD_CFLAGS += $(call cc-option, -fno-stack-protector)
@@ -1396,7 +1400,7 @@ define xtags
            $(all-kconfigs) | xargs $1 -a \
                --langdef=kconfig \
                --language-force=kconfig \
-               --regex-kconfig='/^[[:blank:]]*config[[:blank:]]+([[:alnum:]_]+)/\1/'; \
+               --regex-kconfig='/^[[:blank:]]*(menu|)config[[:blank:]]+([[:alnum:]_]+)/\2/'; \
            $(all-defconfigs) | xargs -r $1 -a \
                --langdef=dotconfig \
                --language-force=dotconfig \
@@ -1404,7 +1408,7 @@ define xtags
        elif $1 --version 2>&1 | grep -iq emacs; then \
            $(all-sources) | xargs $1 -a; \
            $(all-kconfigs) | xargs $1 -a \
-               --regex='/^[ \t]*config[ \t]+\([a-zA-Z0-9_]+\)/\1/'; \
+               --regex='/^[ \t]*(menu|)config[ \t]+\([a-zA-Z0-9_]+\)/\2/'; \
            $(all-defconfigs) | xargs -r $1 -a \
                --regex='/^#?[ \t]?\(CONFIG_[a-zA-Z0-9_]+\)/\1/'; \
        else \
@@ -1539,7 +1543,6 @@ quiet_cmd_rmfiles = $(if $(wildcard $(rm-files)),CLEAN   $(wildcard $(rm-files))
       cmd_rmfiles = rm -f $(rm-files)
 
 # Run depmod only if we have System.map and depmod is executable
-# and we build for the host arch
 quiet_cmd_depmod = DEPMOD  $(KERNELRELEASE)
       cmd_depmod = \
        if [ -r System.map -a -x $(DEPMOD) ]; then                              \
index 524b88920947d268233825aefec54b549d0ba21f..409dd71f2738c88f59ae367bdc93aceb2e83455a 100644 (file)
@@ -866,14 +866,21 @@ void smp_call_function_client(int irq, struct pt_regs *regs)
        void *info = call_data->info;
 
        clear_softint(1 << irq);
+
+       irq_enter();
+
+       if (!call_data->wait) {
+               /* let initiator proceed after getting data */
+               atomic_inc(&call_data->finished);
+       }
+
+       func(info);
+
+       irq_exit();
+
        if (call_data->wait) {
                /* let initiator proceed only after completion */
-               func(info);
                atomic_inc(&call_data->finished);
-       } else {
-               /* let initiator proceed after getting data */
-               atomic_inc(&call_data->finished);
-               func(info);
        }
 }
 
@@ -1032,7 +1039,9 @@ void smp_receive_signal(int cpu)
 
 void smp_receive_signal_client(int irq, struct pt_regs *regs)
 {
+       irq_enter();
        clear_softint(1 << irq);
+       irq_exit();
 }
 
 void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
@@ -1040,6 +1049,8 @@ void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
        struct mm_struct *mm;
        unsigned long flags;
 
+       irq_enter();
+
        clear_softint(1 << irq);
 
        /* See if we need to allocate a new TLB context because
@@ -1059,6 +1070,8 @@ void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
        load_secondary_context(mm);
        __flush_tlb_mm(CTX_HWBITS(mm->context),
                       SECONDARY_CONTEXT);
+
+       irq_exit();
 }
 
 void smp_new_mmu_context_version(void)
@@ -1217,6 +1230,8 @@ void smp_penguin_jailcell(int irq, struct pt_regs *regs)
 {
        clear_softint(1 << irq);
 
+       irq_enter();
+
        preempt_disable();
 
        __asm__ __volatile__("flushw");
@@ -1229,6 +1244,8 @@ void smp_penguin_jailcell(int irq, struct pt_regs *regs)
        prom_world(0);
 
        preempt_enable();
+
+       irq_exit();
 }
 
 /* /proc/profile writes can call this, don't __init it please. */
index 73ed01ba40dc340951508ff819a296914efeaad7..8d4761f15fa932cdede6dc4dc4336fdaa2b1cc88 100644 (file)
@@ -454,8 +454,8 @@ asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second,
                        err = sys_semget(first, (int)second, (int)third);
                        goto out;
                case SEMCTL: {
-                       err = sys_semctl(first, third,
-                                        (int)second | IPC_64,
+                       err = sys_semctl(first, second,
+                                        (int)third | IPC_64,
                                         (union semun) ptr);
                        goto out;
                }
index b1bdc4c6f9f29c8de2ad322980f5d1f28c0c9a8e..172cf8a98bdd2541e0d930e2a92903f5f8f9d448 100644 (file)
@@ -1,7 +1,8 @@
 bootsect
 bzImage
+cpustr.h
+mkcpustr
+offsets.h
 setup
 setup.bin
 setup.elf
-cpustr.h
-mkcpustr
diff --git a/arch/x86/kernel/acpi/realmode/.gitignore b/arch/x86/kernel/acpi/realmode/.gitignore
new file mode 100644 (file)
index 0000000..58f1f48
--- /dev/null
@@ -0,0 +1,3 @@
+wakeup.bin
+wakeup.elf
+wakeup.lds
index df4099dc1c682eb4af7c4af22badf4e4f3cd770c..65c7857a90ddfc6ff084c6817baba045ced0ad71 100644 (file)
@@ -511,31 +511,30 @@ void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
        unsigned long flags;
        char *vaddr;
        int nr_pages = 2;
+       struct page *pages[2];
+       int i;
 
-       BUG_ON(len > sizeof(long));
-       BUG_ON((((long)addr + len - 1) & ~(sizeof(long) - 1))
-               - ((long)addr & ~(sizeof(long) - 1)));
-       if (kernel_text_address((unsigned long)addr)) {
-               struct page *pages[2] = { virt_to_page(addr),
-                       virt_to_page(addr + PAGE_SIZE) };
-               if (!pages[1])
-                       nr_pages = 1;
-               vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
-               BUG_ON(!vaddr);
-               local_irq_save(flags);
-               memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
-               local_irq_restore(flags);
-               vunmap(vaddr);
+       if (!core_kernel_text((unsigned long)addr)) {
+               pages[0] = vmalloc_to_page(addr);
+               pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
        } else {
-               /*
-                * modules are in vmalloc'ed memory, always writable.
-                */
-               local_irq_save(flags);
-               memcpy(addr, opcode, len);
-               local_irq_restore(flags);
+               pages[0] = virt_to_page(addr);
+               WARN_ON(!PageReserved(pages[0]));
+               pages[1] = virt_to_page(addr + PAGE_SIZE);
        }
+       BUG_ON(!pages[0]);
+       if (!pages[1])
+               nr_pages = 1;
+       vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
+       BUG_ON(!vaddr);
+       local_irq_save(flags);
+       memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
+       local_irq_restore(flags);
+       vunmap(vaddr);
        sync_core();
        /* Could also do a CLFLUSH here to speed up CPU recovery; but
           that causes hangs on some VIA CPUs. */
+       for (i = 0; i < len; i++)
+               BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
        return addr;
 }
index f0f8934fc30324fc29af50dc436db6f14b198a93..2a609dc3271c3dc2008ad740133f314e5ed177b6 100644 (file)
@@ -409,7 +409,7 @@ restore_nocheck_notrace:
 irq_return:
        INTERRUPT_RETURN
 .section .fixup,"ax"
-iret_exc:
+ENTRY(iret_exc)
        pushl $0                        # no error code
        pushl $do_iret_error
        jmp error_code
@@ -1017,6 +1017,13 @@ ENTRY(kernel_thread_helper)
 ENDPROC(kernel_thread_helper)
 
 #ifdef CONFIG_XEN
+/* Xen doesn't set %esp to be precisely what the normal sysenter
+   entrypoint expects, so fix it up before using the normal path. */
+ENTRY(xen_sysenter_target)
+       RING0_INT_FRAME
+       addl $5*4, %esp         /* remove xen-provided frame */
+       jmp sysenter_past_esp
+
 ENTRY(xen_hypervisor_callback)
        CFI_STARTPROC
        pushl $0
@@ -1035,8 +1042,9 @@ ENTRY(xen_hypervisor_callback)
        cmpl $xen_iret_end_crit,%eax
        jae  1f
 
-       call xen_iret_crit_fixup
+       jmp  xen_iret_crit_fixup
 
+ENTRY(xen_do_upcall)
 1:     mov %esp, %eax
        call xen_evtchn_do_upcall
        jmp  ret_from_intr
index 3733412d135773846c1b7259de4dc8dfbed99c46..74f0c5ea2a0388af848faa058804cd9df86f4391 100644 (file)
@@ -366,11 +366,13 @@ struct pv_mmu_ops pv_mmu_ops = {
        .flush_tlb_single = native_flush_tlb_single,
        .flush_tlb_others = native_flush_tlb_others,
 
-       .alloc_pt = paravirt_nop,
-       .alloc_pd = paravirt_nop,
-       .alloc_pd_clone = paravirt_nop,
-       .release_pt = paravirt_nop,
-       .release_pd = paravirt_nop,
+       .alloc_pte = paravirt_nop,
+       .alloc_pmd = paravirt_nop,
+       .alloc_pmd_clone = paravirt_nop,
+       .alloc_pud = paravirt_nop,
+       .release_pte = paravirt_nop,
+       .release_pmd = paravirt_nop,
+       .release_pud = paravirt_nop,
 
        .set_pte = native_set_pte,
        .set_pte_at = native_set_pte_at,
index 19c9386ac1187e4f9b25144437732dc45b11a98c..1791a751a772d0d6d1a6256591707ad5fca8c237 100644 (file)
@@ -8,6 +8,7 @@
 #include <asm/apic.h>
 #include <asm/desc.h>
 #include <asm/hpet.h>
+#include <asm/pgtable.h>
 #include <asm/reboot_fixups.h>
 #include <asm/reboot.h>
 
@@ -15,7 +16,6 @@
 # include <linux/dmi.h>
 # include <linux/ctype.h>
 # include <linux/mc146818rtc.h>
-# include <asm/pgtable.h>
 #else
 # include <asm/iommu.h>
 #endif
@@ -275,7 +275,7 @@ void machine_real_restart(unsigned char *code, int length)
        /* Remap the kernel at virtual address zero, as well as offset zero
           from the kernel segment.  This assumes the kernel segment starts at
           virtual address PAGE_OFFSET. */
-       memcpy(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
+       memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
                sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
 
        /*
index ade371f9663a9efa6ca0b37636b139cf3297e338..eef79e84145f5d7ba6c6bc0e0d5948711d755c0e 100644 (file)
@@ -1039,8 +1039,8 @@ int __cpuinit native_cpu_up(unsigned int cpu)
 
 #ifdef CONFIG_X86_32
        /* init low mem mapping */
-       clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
-                       min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS));
+       clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+                       min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
        flush_tlb_all();
 #endif
 
index 12affe1f9bce8d7da1a872880bf41e581e3281c7..956f38927aa7c5533cc41fd3c2ba0ea807ccb6dd 100644 (file)
@@ -320,7 +320,7 @@ static void check_zeroed_page(u32 pfn, int type, struct page *page)
         * pdes need to be zeroed.
         */
        if (type & VMI_PAGE_CLONE)
-               limit = USER_PTRS_PER_PGD;
+               limit = KERNEL_PGD_BOUNDARY;
        for (i = 0; i < limit; i++)
                BUG_ON(ptr[i]);
 }
@@ -392,13 +392,13 @@ static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type)
 }
 #endif
 
-static void vmi_allocate_pt(struct mm_struct *mm, u32 pfn)
+static void vmi_allocate_pte(struct mm_struct *mm, u32 pfn)
 {
        vmi_set_page_type(pfn, VMI_PAGE_L1);
        vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
 }
 
-static void vmi_allocate_pd(struct mm_struct *mm, u32 pfn)
+static void vmi_allocate_pmd(struct mm_struct *mm, u32 pfn)
 {
        /*
         * This call comes in very early, before mem_map is setup.
@@ -409,20 +409,20 @@ static void vmi_allocate_pd(struct mm_struct *mm, u32 pfn)
        vmi_ops.allocate_page(pfn, VMI_PAGE_L2, 0, 0, 0);
 }
 
-static void vmi_allocate_pd_clone(u32 pfn, u32 clonepfn, u32 start, u32 count)
+static void vmi_allocate_pmd_clone(u32 pfn, u32 clonepfn, u32 start, u32 count)
 {
        vmi_set_page_type(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE);
        vmi_check_page_type(clonepfn, VMI_PAGE_L2);
        vmi_ops.allocate_page(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE, clonepfn, start, count);
 }
 
-static void vmi_release_pt(u32 pfn)
+static void vmi_release_pte(u32 pfn)
 {
        vmi_ops.release_page(pfn, VMI_PAGE_L1);
        vmi_set_page_type(pfn, VMI_PAGE_NORMAL);
 }
 
-static void vmi_release_pd(u32 pfn)
+static void vmi_release_pmd(u32 pfn)
 {
        vmi_ops.release_page(pfn, VMI_PAGE_L2);
        vmi_set_page_type(pfn, VMI_PAGE_NORMAL);
@@ -871,15 +871,15 @@ static inline int __init activate_vmi(void)
 
        vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage);
        if (vmi_ops.allocate_page) {
-               pv_mmu_ops.alloc_pt = vmi_allocate_pt;
-               pv_mmu_ops.alloc_pd = vmi_allocate_pd;
-               pv_mmu_ops.alloc_pd_clone = vmi_allocate_pd_clone;
+               pv_mmu_ops.alloc_pte = vmi_allocate_pte;
+               pv_mmu_ops.alloc_pmd = vmi_allocate_pmd;
+               pv_mmu_ops.alloc_pmd_clone = vmi_allocate_pmd_clone;
        }
 
        vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage);
        if (vmi_ops.release_page) {
-               pv_mmu_ops.release_pt = vmi_release_pt;
-               pv_mmu_ops.release_pd = vmi_release_pd;
+               pv_mmu_ops.release_pte = vmi_release_pte;
+               pv_mmu_ops.release_pmd = vmi_release_pmd;
        }
 
        /* Set linear is needed in all cases */
index d05722121d2412b76bb6aea832935c5f16aee773..6e2c4efce0ef12afe8878eccc06cf4ec3144e670 100644 (file)
@@ -543,8 +543,8 @@ static void __init do_boot_cpu(__u8 cpu)
                hijack_source.idt.Offset, stack_start.sp));
 
        /* init lowmem identity mapping */
-       clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
-                       min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS));
+       clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+                       min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
        flush_tlb_all();
 
        if (quad_boot) {
index 20941d2954e234d057f7bb326212d6533aeb184b..b7b3e4c7cfc9c4cd35e953ba75d64ad7cde484f5 100644 (file)
@@ -1,5 +1,5 @@
 obj-y  :=  init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
-           pat.o
+           pat.o pgtable.o
 
 obj-$(CONFIG_X86_32)           += pgtable_32.o
 
index 9ec62da85fd79a75fc450b58c4ea54e4c77f857b..08aa1878fad4c22440ffd5956f16be742b438705 100644 (file)
@@ -71,7 +71,7 @@ static pmd_t * __init one_md_table_init(pgd_t *pgd)
        if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
                pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
 
-               paravirt_alloc_pd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
+               paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
                set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
                pud = pud_offset(pgd, 0);
                BUG_ON(pmd_table != pmd_offset(pud, 0));
@@ -100,7 +100,7 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
                                (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
                }
 
-               paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT);
+               paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
                BUG_ON(page_table != pte_offset_kernel(pmd, 0));
        }
@@ -365,7 +365,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
 
                pte_clear(NULL, va, pte);
        }
-       paravirt_alloc_pd(&init_mm, __pa(base) >> PAGE_SHIFT);
+       paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
 }
 
 void __init native_pagetable_setup_done(pgd_t *base)
@@ -457,7 +457,7 @@ void zap_low_mappings(void)
         * Note that "pgd_clear()" doesn't do it for
         * us, because pgd_clear() is a no-op on i386.
         */
-       for (i = 0; i < USER_PTRS_PER_PGD; i++) {
+       for (i = 0; i < KERNEL_PGD_BOUNDARY; i++) {
 #ifdef CONFIG_X86_PAE
                set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
 #else
index 1ff7906a9a4dbc7afa6dfdd24f274e077cd26773..b798e7b92b1709819a5985f51d40782363da6c51 100644 (file)
@@ -135,7 +135,7 @@ static __init void *spp_getpage(void)
        return ptr;
 }
 
-static __init void
+static void
 set_pte_phys(unsigned long vaddr, unsigned long phys, pgprot_t prot)
 {
        pgd_t *pgd;
@@ -173,7 +173,7 @@ set_pte_phys(unsigned long vaddr, unsigned long phys, pgprot_t prot)
        new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);
 
        pte = pte_offset_kernel(pmd, vaddr);
-       if (!pte_none(*pte) &&
+       if (!pte_none(*pte) && pte_val(new_pte) &&
            pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
                pte_ERROR(*pte);
        set_pte(pte, new_pte);
@@ -214,8 +214,7 @@ void __init cleanup_highmap(void)
 }
 
 /* NOTE: this is meant to be run only at boot */
-void __init
-__set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
+void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
 {
        unsigned long address = __fix_to_virt(idx);
 
index 3a4baf95e24d5a5cee6626a5b8789064e9dce564..36a3f7ded6262c7063ee291e1aa55082a917771f 100644 (file)
@@ -407,7 +407,7 @@ void __init early_ioremap_clear(void)
 
        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        pmd_clear(pmd);
-       paravirt_release_pt(__pa(bm_pte) >> PAGE_SHIFT);
+       paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
        __flush_tlb_all();
 }
 
index c29ebd037254fb3252b75944e9649cdac1b3059a..bd5e05c654dccfb33d368718e4b7acdbdd16d5d0 100644 (file)
@@ -483,9 +483,7 @@ static int split_large_page(pte_t *kpte, unsigned long address)
                goto out_unlock;
 
        pbase = (pte_t *)page_address(base);
-#ifdef CONFIG_X86_32
-       paravirt_alloc_pt(&init_mm, page_to_pfn(base));
-#endif
+       paravirt_alloc_pte(&init_mm, page_to_pfn(base));
        ref_prot = pte_pgprot(pte_clrhuge(*kpte));
 
 #ifdef CONFIG_X86_64
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
new file mode 100644 (file)
index 0000000..5015976
--- /dev/null
@@ -0,0 +1,276 @@
+#include <linux/mm.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/tlb.h>
+
+pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+{
+       return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+}
+
+pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+       struct page *pte;
+
+#ifdef CONFIG_HIGHPTE
+       pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
+#else
+       pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+#endif
+       if (pte)
+               pgtable_page_ctor(pte);
+       return pte;
+}
+
+void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
+{
+       pgtable_page_dtor(pte);
+       paravirt_release_pte(page_to_pfn(pte));
+       tlb_remove_page(tlb, pte);
+}
+
+#if PAGETABLE_LEVELS > 2
+void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
+{
+       paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
+       tlb_remove_page(tlb, virt_to_page(pmd));
+}
+
+#if PAGETABLE_LEVELS > 3
+void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
+{
+       paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
+       tlb_remove_page(tlb, virt_to_page(pud));
+}
+#endif /* PAGETABLE_LEVELS > 3 */
+#endif /* PAGETABLE_LEVELS > 2 */
+
+static inline void pgd_list_add(pgd_t *pgd)
+{
+       struct page *page = virt_to_page(pgd);
+
+       list_add(&page->lru, &pgd_list);
+}
+
+static inline void pgd_list_del(pgd_t *pgd)
+{
+       struct page *page = virt_to_page(pgd);
+
+       list_del(&page->lru);
+}
+
+#define UNSHARED_PTRS_PER_PGD                          \
+       (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
+
+static void pgd_ctor(void *p)
+{
+       pgd_t *pgd = p;
+       unsigned long flags;
+
+       /* Clear usermode parts of PGD */
+       memset(pgd, 0, KERNEL_PGD_BOUNDARY*sizeof(pgd_t));
+
+       spin_lock_irqsave(&pgd_lock, flags);
+
+       /* If the pgd points to a shared pagetable level (either the
+          ptes in non-PAE, or shared PMD in PAE), then just copy the
+          references from swapper_pg_dir. */
+       if (PAGETABLE_LEVELS == 2 ||
+           (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
+           PAGETABLE_LEVELS == 4) {
+               clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
+                               swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+                               KERNEL_PGD_PTRS);
+               paravirt_alloc_pmd_clone(__pa(pgd) >> PAGE_SHIFT,
+                                        __pa(swapper_pg_dir) >> PAGE_SHIFT,
+                                        KERNEL_PGD_BOUNDARY,
+                                        KERNEL_PGD_PTRS);
+       }
+
+       /* list required to sync kernel mapping updates */
+       if (!SHARED_KERNEL_PMD)
+               pgd_list_add(pgd);
+
+       spin_unlock_irqrestore(&pgd_lock, flags);
+}
+
+static void pgd_dtor(void *pgd)
+{
+       unsigned long flags; /* can be called from interrupt context */
+
+       if (SHARED_KERNEL_PMD)
+               return;
+
+       spin_lock_irqsave(&pgd_lock, flags);
+       pgd_list_del(pgd);
+       spin_unlock_irqrestore(&pgd_lock, flags);
+}
+
+/*
+ * List of all pgd's needed for non-PAE so it can invalidate entries
+ * in both cached and uncached pgd's; not needed for PAE since the
+ * kernel pmd is shared. If PAE were not to share the pmd a similar
+ * tactic would be needed. This is essentially codepath-based locking
+ * against pageattr.c; it is the unique case in which a valid change
+ * of kernel pagetables can't be lazily synchronized by vmalloc faults.
+ * vmalloc faults work because attached pagetables are never freed.
+ * -- wli
+ */
+
+#ifdef CONFIG_X86_PAE
+/*
+ * Mop up any pmd pages which may still be attached to the pgd.
+ * Normally they will be freed by munmap/exit_mmap, but any pmd we
+ * preallocate which never got a corresponding vma will need to be
+ * freed manually.
+ */
+static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
+{
+       int i;
+
+       for(i = 0; i < UNSHARED_PTRS_PER_PGD; i++) {
+               pgd_t pgd = pgdp[i];
+
+               if (pgd_val(pgd) != 0) {
+                       pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
+
+                       pgdp[i] = native_make_pgd(0);
+
+                       paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
+                       pmd_free(mm, pmd);
+               }
+       }
+}
+
+/*
+ * In PAE mode, we need to do a cr3 reload (=tlb flush) when
+ * updating the top-level pagetable entries to guarantee the
+ * processor notices the update.  Since this is expensive, and
+ * all 4 top-level entries are used almost immediately in a
+ * new process's life, we just pre-populate them here.
+ *
+ * Also, if we're in a paravirt environment where the kernel pmd is
+ * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
+ * and initialize the kernel pmds here.
+ */
+static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
+{
+       pud_t *pud;
+       unsigned long addr;
+       int i;
+
+       pud = pud_offset(pgd, 0);
+       for (addr = i = 0; i < UNSHARED_PTRS_PER_PGD;
+            i++, pud++, addr += PUD_SIZE) {
+               pmd_t *pmd = pmd_alloc_one(mm, addr);
+
+               if (!pmd) {
+                       pgd_mop_up_pmds(mm, pgd);
+                       return 0;
+               }
+
+               if (i >= KERNEL_PGD_BOUNDARY)
+                       memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
+                              sizeof(pmd_t) * PTRS_PER_PMD);
+
+               pud_populate(mm, pud, pmd);
+       }
+
+       return 1;
+}
+
+void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
+{
+       paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
+
+       /* Note: almost everything apart from _PAGE_PRESENT is
+          reserved at the pmd (PDPT) level. */
+       set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));
+
+       /*
+        * According to Intel App note "TLBs, Paging-Structure Caches,
+        * and Their Invalidation", April 2007, document 317080-001,
+        * section 8.1: in PAE mode we explicitly have to flush the
+        * TLB via cr3 if the top-level pgd is changed...
+        */
+       if (mm == current->active_mm)
+               write_cr3(read_cr3());
+}
+#else  /* !CONFIG_X86_PAE */
+/* No need to prepopulate any pagetable entries in non-PAE modes. */
+static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
+{
+       return 1;
+}
+
+static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgd)
+{
+}
+#endif /* CONFIG_X86_PAE */
+
+pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+       pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+
+       /* so that alloc_pmd can use it */
+       mm->pgd = pgd;
+       if (pgd)
+               pgd_ctor(pgd);
+
+       if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
+               pgd_dtor(pgd);
+               free_page((unsigned long)pgd);
+               pgd = NULL;
+       }
+
+       return pgd;
+}
+
+void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+       pgd_mop_up_pmds(mm, pgd);
+       pgd_dtor(pgd);
+       free_page((unsigned long)pgd);
+}
+
+int ptep_set_access_flags(struct vm_area_struct *vma,
+                         unsigned long address, pte_t *ptep,
+                         pte_t entry, int dirty)
+{
+       int changed = !pte_same(*ptep, entry);
+
+       if (changed && dirty) {
+               *ptep = entry;
+               pte_update_defer(vma->vm_mm, address, ptep);
+               flush_tlb_page(vma, address);
+       }
+
+       return changed;
+}
+
+int ptep_test_and_clear_young(struct vm_area_struct *vma,
+                             unsigned long addr, pte_t *ptep)
+{
+       int ret = 0;
+
+       if (pte_young(*ptep))
+               ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
+                                        &ptep->pte);
+
+       if (ret)
+               pte_update(vma->vm_mm, addr, ptep);
+
+       return ret;
+}
+
+int ptep_clear_flush_young(struct vm_area_struct *vma,
+                          unsigned long address, pte_t *ptep)
+{
+       int young;
+
+       young = ptep_test_and_clear_young(vma, address, ptep);
+       if (young)
+               flush_tlb_page(vma, address);
+
+       return young;
+}
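
For context, a minimal sketch of how the allocation hooks above are typically driven (a hypothetical caller, loosely modelled on the mm_alloc_pgd()/mm_free_pgd() helpers in kernel/fork.c, and not part of this patch): pgd_alloc() clones the kernel entries via pgd_ctor() and, on PAE, pre-populates the pmds as the comments describe; pgd_free() reverses both steps.

static int example_mm_alloc_pgd(struct mm_struct *mm)
{
	mm->pgd = pgd_alloc(mm);	/* runs pgd_ctor() and, on PAE, prepopulates pmds */
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static void example_mm_free_pgd(struct mm_struct *mm)
{
	pgd_free(mm, mm->pgd);		/* mops up preallocated pmds, then frees the page */
}
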
index 6fb9e7c6893fd44afad45232355b47c867c7cf75..9ee007be914299ab8dae9d3f52df22ae606816f1 100644 (file)
@@ -173,210 +173,6 @@ void reserve_top_address(unsigned long reserve)
        __VMALLOC_RESERVE += reserve;
 }
 
-pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
-{
-       return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
-}
-
-pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
-{
-       struct page *pte;
-
-#ifdef CONFIG_HIGHPTE
-       pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
-#else
-       pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
-#endif
-       if (pte)
-               pgtable_page_ctor(pte);
-       return pte;
-}
-
-/*
- * List of all pgd's needed for non-PAE so it can invalidate entries
- * in both cached and uncached pgd's; not needed for PAE since the
- * kernel pmd is shared. If PAE were not to share the pmd a similar
- * tactic would be needed. This is essentially codepath-based locking
- * against pageattr.c; it is the unique case in which a valid change
- * of kernel pagetables can't be lazily synchronized by vmalloc faults.
- * vmalloc faults work because attached pagetables are never freed.
- * -- wli
- */
-static inline void pgd_list_add(pgd_t *pgd)
-{
-       struct page *page = virt_to_page(pgd);
-
-       list_add(&page->lru, &pgd_list);
-}
-
-static inline void pgd_list_del(pgd_t *pgd)
-{
-       struct page *page = virt_to_page(pgd);
-
-       list_del(&page->lru);
-}
-
-#define UNSHARED_PTRS_PER_PGD                          \
-       (SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)
-
-static void pgd_ctor(void *p)
-{
-       pgd_t *pgd = p;
-       unsigned long flags;
-
-       /* Clear usermode parts of PGD */
-       memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
-
-       spin_lock_irqsave(&pgd_lock, flags);
-
-       /* If the pgd points to a shared pagetable level (either the
-          ptes in non-PAE, or shared PMD in PAE), then just copy the
-          references from swapper_pg_dir. */
-       if (PAGETABLE_LEVELS == 2 ||
-           (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD)) {
-               clone_pgd_range(pgd + USER_PTRS_PER_PGD,
-                               swapper_pg_dir + USER_PTRS_PER_PGD,
-                               KERNEL_PGD_PTRS);
-               paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
-                                       __pa(swapper_pg_dir) >> PAGE_SHIFT,
-                                       USER_PTRS_PER_PGD,
-                                       KERNEL_PGD_PTRS);
-       }
-
-       /* list required to sync kernel mapping updates */
-       if (!SHARED_KERNEL_PMD)
-               pgd_list_add(pgd);
-
-       spin_unlock_irqrestore(&pgd_lock, flags);
-}
-
-static void pgd_dtor(void *pgd)
-{
-       unsigned long flags; /* can be called from interrupt context */
-
-       if (SHARED_KERNEL_PMD)
-               return;
-
-       spin_lock_irqsave(&pgd_lock, flags);
-       pgd_list_del(pgd);
-       spin_unlock_irqrestore(&pgd_lock, flags);
-}
-
-#ifdef CONFIG_X86_PAE
-/*
- * Mop up any pmd pages which may still be attached to the pgd.
- * Normally they will be freed by munmap/exit_mmap, but any pmd we
- * preallocate which never got a corresponding vma will need to be
- * freed manually.
- */
-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
-{
-       int i;
-
-       for(i = 0; i < UNSHARED_PTRS_PER_PGD; i++) {
-               pgd_t pgd = pgdp[i];
-
-               if (pgd_val(pgd) != 0) {
-                       pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
-
-                       pgdp[i] = native_make_pgd(0);
-
-                       paravirt_release_pd(pgd_val(pgd) >> PAGE_SHIFT);
-                       pmd_free(mm, pmd);
-               }
-       }
-}
-
-/*
- * In PAE mode, we need to do a cr3 reload (=tlb flush) when
- * updating the top-level pagetable entries to guarantee the
- * processor notices the update.  Since this is expensive, and
- * all 4 top-level entries are used almost immediately in a
- * new process's life, we just pre-populate them here.
- *
- * Also, if we're in a paravirt environment where the kernel pmd is
- * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
- * and initialize the kernel pmds here.
- */
-static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
-{
-       pud_t *pud;
-       unsigned long addr;
-       int i;
-
-       pud = pud_offset(pgd, 0);
-       for (addr = i = 0; i < UNSHARED_PTRS_PER_PGD;
-            i++, pud++, addr += PUD_SIZE) {
-               pmd_t *pmd = pmd_alloc_one(mm, addr);
-
-               if (!pmd) {
-                       pgd_mop_up_pmds(mm, pgd);
-                       return 0;
-               }
-
-               if (i >= USER_PTRS_PER_PGD)
-                       memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
-                              sizeof(pmd_t) * PTRS_PER_PMD);
-
-               pud_populate(mm, pud, pmd);
-       }
-
-       return 1;
-}
-#else  /* !CONFIG_X86_PAE */
-/* No need to prepopulate any pagetable entries in non-PAE modes. */
-static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
-{
-       return 1;
-}
-
-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
-{
-}
-#endif /* CONFIG_X86_PAE */
-
-pgd_t *pgd_alloc(struct mm_struct *mm)
-{
-       pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-
-       /* so that alloc_pd can use it */
-       mm->pgd = pgd;
-       if (pgd)
-               pgd_ctor(pgd);
-
-       if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
-               pgd_dtor(pgd);
-               free_page((unsigned long)pgd);
-               pgd = NULL;
-       }
-
-       return pgd;
-}
-
-void pgd_free(struct mm_struct *mm, pgd_t *pgd)
-{
-       pgd_mop_up_pmds(mm, pgd);
-       pgd_dtor(pgd);
-       free_page((unsigned long)pgd);
-}
-
-void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
-{
-       pgtable_page_dtor(pte);
-       paravirt_release_pt(page_to_pfn(pte));
-       tlb_remove_page(tlb, pte);
-}
-
-#ifdef CONFIG_X86_PAE
-
-void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
-{
-       paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
-       tlb_remove_page(tlb, virt_to_page(pmd));
-}
-
-#endif
-
 int pmd_bad(pmd_t pmd)
 {
        WARN_ON_ONCE(pmd_bad_v1(pmd) != pmd_bad_v2(pmd));
index 4d5f2649bee4e9e5dc0dd7f9cc0dab20c31c7d0d..2e641be2737e162171f5a96df11a5ad4c30c7476 100644 (file)
@@ -6,7 +6,7 @@ config XEN
        bool "Xen guest support"
        select PARAVIRT
        depends on X86_32
-       depends on X86_CMPXCHG && X86_TSC && !NEED_MULTIPLE_NODES && !(X86_VISWS || X86_VOYAGER)
+       depends on X86_CMPXCHG && X86_TSC && !(X86_VISWS || X86_VOYAGER)
        help
          This is the Linux Xen port.  Enabling this will allow the
          kernel to boot in a paravirtualized environment under the
index 343df246bd3eedd31d0a35db9736a0cdc904b78a..3d8df981d5fd8e84cb58993a65d4c2d46a0b3407 100644 (file)
@@ -1,4 +1,4 @@
-obj-y          := enlighten.o setup.o features.o multicalls.o mmu.o \
-                       events.o time.o manage.o xen-asm.o
+obj-y          := enlighten.o setup.o multicalls.o mmu.o \
+                       time.o manage.o xen-asm.o grant-table.o
 
 obj-$(CONFIG_SMP)      += smp.o
index c0388220cf9728a8e5b47c6032b0baeef1ed12f9..c8a56e457d61a35c21040bbe91486f9ceb9bf6bf 100644 (file)
@@ -155,7 +155,8 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx,
        if (*ax == 1)
                maskedx = ~((1 << X86_FEATURE_APIC) |  /* disable APIC */
                            (1 << X86_FEATURE_ACPI) |  /* disable ACPI */
-                           (1 << X86_FEATURE_SEP)  |  /* disable SEP */
+                           (1 << X86_FEATURE_MCE)  |  /* disable MCE */
+                           (1 << X86_FEATURE_MCA)  |  /* disable MCA */
                            (1 << X86_FEATURE_ACC));   /* thermal monitoring */
 
        asm(XEN_EMULATE_PREFIX "cpuid"
@@ -531,26 +532,37 @@ static void xen_apic_write(unsigned long reg, u32 val)
 static void xen_flush_tlb(void)
 {
        struct mmuext_op *op;
-       struct multicall_space mcs = xen_mc_entry(sizeof(*op));
+       struct multicall_space mcs;
+
+       preempt_disable();
+
+       mcs = xen_mc_entry(sizeof(*op));
 
        op = mcs.args;
        op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
        MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
 
        xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+       preempt_enable();
 }
 
 static void xen_flush_tlb_single(unsigned long addr)
 {
        struct mmuext_op *op;
-       struct multicall_space mcs = xen_mc_entry(sizeof(*op));
+       struct multicall_space mcs;
+
+       preempt_disable();
 
+       mcs = xen_mc_entry(sizeof(*op));
        op = mcs.args;
        op->cmd = MMUEXT_INVLPG_LOCAL;
        op->arg1.linear_addr = addr & PAGE_MASK;
        MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
 
        xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+       preempt_enable();
 }
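
The preempt_disable()/preempt_enable() pairs added above bracket the whole multicall sequence; the likely rationale (an assumption, not stated in this hunk) is that xen_mc_entry() hands out space in a per-CPU batch buffer, so the task must not be preempted and migrated between reserving the slot and issuing it. Condensed into a standalone sketch using only the identifiers already visible above:

static void example_local_mmuext_op(unsigned int cmd)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	preempt_disable();		/* keep the per-CPU multicall buffer stable */

	mcs = xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = cmd;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}
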
 
 static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm,
@@ -655,15 +667,17 @@ static void xen_write_cr3(unsigned long cr3)
 
 /* Early in boot, while setting up the initial pagetable, assume
    everything is pinned. */
-static __init void xen_alloc_pt_init(struct mm_struct *mm, u32 pfn)
+static __init void xen_alloc_pte_init(struct mm_struct *mm, u32 pfn)
 {
+#ifdef CONFIG_FLATMEM
        BUG_ON(mem_map);        /* should only be used early */
+#endif
        make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
 }
 
-/* Early release_pt assumes that all pts are pinned, since there's
+/* Early release_pte assumes that all pts are pinned, since there's
    only init_mm and anything attached to that is pinned. */
-static void xen_release_pt_init(u32 pfn)
+static void xen_release_pte_init(u32 pfn)
 {
        make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
 }
@@ -697,12 +711,12 @@ static void xen_alloc_ptpage(struct mm_struct *mm, u32 pfn, unsigned level)
        }
 }
 
-static void xen_alloc_pt(struct mm_struct *mm, u32 pfn)
+static void xen_alloc_pte(struct mm_struct *mm, u32 pfn)
 {
        xen_alloc_ptpage(mm, pfn, PT_PTE);
 }
 
-static void xen_alloc_pd(struct mm_struct *mm, u32 pfn)
+static void xen_alloc_pmd(struct mm_struct *mm, u32 pfn)
 {
        xen_alloc_ptpage(mm, pfn, PT_PMD);
 }
@@ -722,12 +736,12 @@ static void xen_release_ptpage(u32 pfn, unsigned level)
        }
 }
 
-static void xen_release_pt(u32 pfn)
+static void xen_release_pte(u32 pfn)
 {
        xen_release_ptpage(pfn, PT_PTE);
 }
 
-static void xen_release_pd(u32 pfn)
+static void xen_release_pmd(u32 pfn)
 {
        xen_release_ptpage(pfn, PT_PMD);
 }
@@ -849,10 +863,10 @@ static __init void xen_pagetable_setup_done(pgd_t *base)
 {
        /* This will work as long as patching hasn't happened yet
           (which it hasn't) */
-       pv_mmu_ops.alloc_pt = xen_alloc_pt;
-       pv_mmu_ops.alloc_pd = xen_alloc_pd;
-       pv_mmu_ops.release_pt = xen_release_pt;
-       pv_mmu_ops.release_pd = xen_release_pd;
+       pv_mmu_ops.alloc_pte = xen_alloc_pte;
+       pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
+       pv_mmu_ops.release_pte = xen_release_pte;
+       pv_mmu_ops.release_pmd = xen_release_pmd;
        pv_mmu_ops.set_pte = xen_set_pte;
 
        setup_shared_info();
@@ -994,7 +1008,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = {
        .read_pmc = native_read_pmc,
 
        .iret = xen_iret,
-       .irq_enable_syscall_ret = NULL,  /* never called */
+       .irq_enable_syscall_ret = xen_sysexit,
 
        .load_tr_desc = paravirt_nop,
        .set_ldt = xen_set_ldt,
@@ -1059,11 +1073,11 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
        .pte_update = paravirt_nop,
        .pte_update_defer = paravirt_nop,
 
-       .alloc_pt = xen_alloc_pt_init,
-       .release_pt = xen_release_pt_init,
-       .alloc_pd = xen_alloc_pt_init,
-       .alloc_pd_clone = paravirt_nop,
-       .release_pd = xen_release_pt_init,
+       .alloc_pte = xen_alloc_pte_init,
+       .release_pte = xen_release_pte_init,
+       .alloc_pmd = xen_alloc_pte_init,
+       .alloc_pmd_clone = paravirt_nop,
+       .release_pmd = xen_release_pte_init,
 
 #ifdef CONFIG_HIGHPTE
        .kmap_atomic_pte = xen_kmap_atomic_pte,
diff --git a/arch/x86/xen/grant-table.c b/arch/x86/xen/grant-table.c
new file mode 100644 (file)
index 0000000..49ba9b5
--- /dev/null
@@ -0,0 +1,91 @@
+/******************************************************************************
+ * grant_table.c
+ * x86 specific part
+ *
+ * Granting foreign access to our memory reservation.
+ *
+ * Copyright (c) 2005-2006, Christopher Clark
+ * Copyright (c) 2004-2005, K A Fraser
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan. Split out x86 specific part.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+
+#include <xen/interface/xen.h>
+#include <xen/page.h>
+#include <xen/grant_table.h>
+
+#include <asm/pgtable.h>
+
+static int map_pte_fn(pte_t *pte, struct page *pmd_page,
+                     unsigned long addr, void *data)
+{
+       unsigned long **frames = (unsigned long **)data;
+
+       set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL));
+       (*frames)++;
+       return 0;
+}
+
+static int unmap_pte_fn(pte_t *pte, struct page *pmd_page,
+                       unsigned long addr, void *data)
+{
+
+       set_pte_at(&init_mm, addr, pte, __pte(0));
+       return 0;
+}
+
+int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
+                          unsigned long max_nr_gframes,
+                          struct grant_entry **__shared)
+{
+       int rc;
+       struct grant_entry *shared = *__shared;
+
+       if (shared == NULL) {
+               struct vm_struct *area =
+                       xen_alloc_vm_area(PAGE_SIZE * max_nr_gframes);
+               BUG_ON(area == NULL);
+               shared = area->addr;
+               *__shared = shared;
+       }
+
+       rc = apply_to_page_range(&init_mm, (unsigned long)shared,
+                                PAGE_SIZE * nr_gframes,
+                                map_pte_fn, &frames);
+       return rc;
+}
+
+void arch_gnttab_unmap_shared(struct grant_entry *shared,
+                             unsigned long nr_gframes)
+{
+       apply_to_page_range(&init_mm, (unsigned long)shared,
+                           PAGE_SIZE * nr_gframes, unmap_pte_fn, NULL);
+}
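
A hedged sketch of how this arch hook might be driven (a hypothetical caller; the real call site lives in the generic grant-table code, which is outside this hunk): the hypervisor supplies a list of machine frame numbers, and arch_gnttab_map_shared() maps them into a contiguous kernel virtual area, extending the mapping as nr_gframes grows.

static struct grant_entry *example_shared;

static int example_map_grant_frames(unsigned long *frames,
				    unsigned long nr_gframes,
				    unsigned long max_nr_gframes)
{
	/* First call allocates the vm area sized for max_nr_gframes;
	 * later calls only map the additional frames. */
	return arch_gnttab_map_shared(frames, nr_gframes,
				      max_nr_gframes, &example_shared);
}
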
index 2a054ef2a3dab3ac6fc3cebd380a21b8b84dd54a..6cbcf65609addd6f04d6d6b8d05607624442337b 100644 (file)
@@ -156,6 +156,10 @@ void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
 void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
                    pte_t *ptep, pte_t pteval)
 {
+       /* updates to init_mm may be done without lock */
+       if (mm == &init_mm)
+               preempt_disable();
+
        if (mm == current->mm || mm == &init_mm) {
                if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
                        struct multicall_space mcs;
@@ -163,14 +167,61 @@ void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
 
                        MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
                        xen_mc_issue(PARAVIRT_LAZY_MMU);
-                       return;
+                       goto out;
                } else
                        if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
-                               return;
+                               goto out;
        }
        xen_set_pte(ptep, pteval);
+
+out:
+       if (mm == &init_mm)
+               preempt_enable();
+}
+
+pteval_t xen_pte_val(pte_t pte)
+{
+       pteval_t ret = pte.pte;
+
+       if (ret & _PAGE_PRESENT)
+               ret = machine_to_phys(XMADDR(ret)).paddr | _PAGE_PRESENT;
+
+       return ret;
+}
+
+pgdval_t xen_pgd_val(pgd_t pgd)
+{
+       pgdval_t ret = pgd.pgd;
+       if (ret & _PAGE_PRESENT)
+               ret = machine_to_phys(XMADDR(ret)).paddr | _PAGE_PRESENT;
+       return ret;
+}
+
+pte_t xen_make_pte(pteval_t pte)
+{
+       if (pte & _PAGE_PRESENT) {
+               pte = phys_to_machine(XPADDR(pte)).maddr;
+               pte &= ~(_PAGE_PCD | _PAGE_PWT);
+       }
+
+       return (pte_t){ .pte = pte };
 }
 
+pgd_t xen_make_pgd(pgdval_t pgd)
+{
+       if (pgd & _PAGE_PRESENT)
+               pgd = phys_to_machine(XPADDR(pgd)).maddr;
+
+       return (pgd_t){ pgd };
+}
+
+pmdval_t xen_pmd_val(pmd_t pmd)
+{
+       pmdval_t ret = native_pmd_val(pmd);
+       if (ret & _PAGE_PRESENT)
+               ret = machine_to_phys(XMADDR(ret)).paddr | _PAGE_PRESENT;
+       return ret;
+}
 #ifdef CONFIG_X86_PAE
 void xen_set_pud(pud_t *ptr, pud_t val)
 {
@@ -214,100 +265,18 @@ void xen_pmd_clear(pmd_t *pmdp)
        xen_set_pmd(pmdp, __pmd(0));
 }
 
-unsigned long long xen_pte_val(pte_t pte)
+pmd_t xen_make_pmd(pmdval_t pmd)
 {
-       unsigned long long ret = 0;
-
-       if (pte.pte_low) {
-               ret = ((unsigned long long)pte.pte_high << 32) | pte.pte_low;
-               ret = machine_to_phys(XMADDR(ret)).paddr | 1;
-       }
-
-       return ret;
-}
-
-unsigned long long xen_pmd_val(pmd_t pmd)
-{
-       unsigned long long ret = pmd.pmd;
-       if (ret)
-               ret = machine_to_phys(XMADDR(ret)).paddr | 1;
-       return ret;
-}
-
-unsigned long long xen_pgd_val(pgd_t pgd)
-{
-       unsigned long long ret = pgd.pgd;
-       if (ret)
-               ret = machine_to_phys(XMADDR(ret)).paddr | 1;
-       return ret;
-}
-
-pte_t xen_make_pte(unsigned long long pte)
-{
-       if (pte & _PAGE_PRESENT) {
-               pte = phys_to_machine(XPADDR(pte)).maddr;
-               pte &= ~(_PAGE_PCD | _PAGE_PWT);
-       }
-
-       return (pte_t){ .pte = pte };
-}
-
-pmd_t xen_make_pmd(unsigned long long pmd)
-{
-       if (pmd & 1)
+       if (pmd & _PAGE_PRESENT)
                pmd = phys_to_machine(XPADDR(pmd)).maddr;
 
-       return (pmd_t){ pmd };
-}
-
-pgd_t xen_make_pgd(unsigned long long pgd)
-{
-       if (pgd & _PAGE_PRESENT)
-               pgd = phys_to_machine(XPADDR(pgd)).maddr;
-
-       return (pgd_t){ pgd };
+       return native_make_pmd(pmd);
 }
 #else  /* !PAE */
 void xen_set_pte(pte_t *ptep, pte_t pte)
 {
        *ptep = pte;
 }
-
-unsigned long xen_pte_val(pte_t pte)
-{
-       unsigned long ret = pte.pte_low;
-
-       if (ret & _PAGE_PRESENT)
-               ret = machine_to_phys(XMADDR(ret)).paddr;
-
-       return ret;
-}
-
-unsigned long xen_pgd_val(pgd_t pgd)
-{
-       unsigned long ret = pgd.pgd;
-       if (ret)
-               ret = machine_to_phys(XMADDR(ret)).paddr | 1;
-       return ret;
-}
-
-pte_t xen_make_pte(unsigned long pte)
-{
-       if (pte & _PAGE_PRESENT) {
-               pte = phys_to_machine(XPADDR(pte)).maddr;
-               pte &= ~(_PAGE_PCD | _PAGE_PWT);
-       }
-
-       return (pte_t){ pte };
-}
-
-pgd_t xen_make_pgd(unsigned long pgd)
-{
-       if (pgd & _PAGE_PRESENT)
-               pgd = phys_to_machine(XPADDR(pgd)).maddr;
-
-       return (pgd_t){ pgd };
-}
 #endif /* CONFIG_X86_PAE */
 
 /*
index 2341492bf7a056743097e06bfabd0b8c01f7436f..82517e4a752a19049c0177768851f48a1190a639 100644 (file)
@@ -16,6 +16,7 @@
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
 
+#include <xen/interface/callback.h>
 #include <xen/interface/physdev.h>
 #include <xen/features.h>
 
@@ -68,6 +69,24 @@ static void __init fiddle_vdso(void)
        *mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
 }
 
+void xen_enable_sysenter(void)
+{
+       int cpu = smp_processor_id();
+       extern void xen_sysenter_target(void);
+       /* Mask events on entry, even though they get enabled immediately */
+       static struct callback_register sysenter = {
+               .type = CALLBACKTYPE_sysenter,
+               .address = { __KERNEL_CS, (unsigned long)xen_sysenter_target },
+               .flags = CALLBACKF_mask_events,
+       };
+
+       if (!boot_cpu_has(X86_FEATURE_SEP) ||
+           HYPERVISOR_callback_op(CALLBACKOP_register, &sysenter) != 0) {
+               clear_cpu_cap(&cpu_data(cpu), X86_FEATURE_SEP);
+               clear_cpu_cap(&boot_cpu_data, X86_FEATURE_SEP);
+       }
+}
+
 void __init xen_arch_setup(void)
 {
        struct physdev_set_iopl set_iopl;
@@ -82,6 +101,8 @@ void __init xen_arch_setup(void)
        HYPERVISOR_set_callbacks(__KERNEL_CS, (unsigned long)xen_hypervisor_callback,
                                 __KERNEL_CS, (unsigned long)xen_failsafe_callback);
 
+       xen_enable_sysenter();
+
        set_iopl.iopl = 1;
        rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
        if (rc != 0)
index e340ff92f6b6e4299396ddf53f79b5908fe90636..92dd3dbf3ffbfae1d06be69a77fe74a2aaf5a9b2 100644 (file)
@@ -36,8 +36,9 @@
 #include "mmu.h"
 
 static cpumask_t xen_cpu_initialized_map;
-static DEFINE_PER_CPU(int, resched_irq);
-static DEFINE_PER_CPU(int, callfunc_irq);
+static DEFINE_PER_CPU(int, resched_irq) = -1;
+static DEFINE_PER_CPU(int, callfunc_irq) = -1;
+static DEFINE_PER_CPU(int, debug_irq) = -1;
 
 /*
  * Structure and data for smp_call_function(). This is designed to minimise
@@ -72,6 +73,7 @@ static __cpuinit void cpu_bringup_and_idle(void)
        int cpu = smp_processor_id();
 
        cpu_init();
+       xen_enable_sysenter();
 
        preempt_disable();
        per_cpu(cpu_state, cpu) = CPU_ONLINE;
@@ -88,9 +90,7 @@ static __cpuinit void cpu_bringup_and_idle(void)
 static int xen_smp_intr_init(unsigned int cpu)
 {
        int rc;
-       const char *resched_name, *callfunc_name;
-
-       per_cpu(resched_irq, cpu) = per_cpu(callfunc_irq, cpu) = -1;
+       const char *resched_name, *callfunc_name, *debug_name;
 
        resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
@@ -114,6 +114,14 @@ static int xen_smp_intr_init(unsigned int cpu)
                goto fail;
        per_cpu(callfunc_irq, cpu) = rc;
 
+       debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
+       rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
+                                    IRQF_DISABLED | IRQF_PERCPU | IRQF_NOBALANCING,
+                                    debug_name, NULL);
+       if (rc < 0)
+               goto fail;
+       per_cpu(debug_irq, cpu) = rc;
+
        return 0;
 
  fail:
@@ -121,6 +129,8 @@ static int xen_smp_intr_init(unsigned int cpu)
                unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
        if (per_cpu(callfunc_irq, cpu) >= 0)
                unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
+       if (per_cpu(debug_irq, cpu) >= 0)
+               unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
        return rc;
 }
 
index fe161ed4b01e49de531b6e5b26462aad7d0c222d..2497a30f41de0331b93c5f36df5804013a68ee9b 100644 (file)
@@ -107,6 +107,20 @@ ENDPATCH(xen_restore_fl_direct)
        ENDPROC(xen_restore_fl_direct)
        RELOC(xen_restore_fl_direct, 2b+1)
 
+/*
+       We can't use sysexit directly, because we're not running in ring0.
+       But we can easily fake it up using iret.  Assuming xen_sysexit
+       is jumped to with a standard stack frame, we can just strip it
+       back to a standard iret frame and use iret.
+ */
+ENTRY(xen_sysexit)
+       movl PT_EAX(%esp), %eax                 /* Shouldn't be necessary? */
+       orl $X86_EFLAGS_IF, PT_EFLAGS(%esp)
+       lea PT_EIP(%esp), %esp
+
+       jmp xen_iret
+ENDPROC(xen_sysexit)
+
 /*
        This is run where a normal iret would be run, with the same stack setup:
              8: eflags
@@ -184,8 +198,12 @@ iret_restore_end:
           region is OK. */
        je xen_hypervisor_callback
 
-       iret
+1:     iret
 xen_iret_end_crit:
+.section __ex_table,"a"
+       .align 4
+       .long 1b,iret_exc
+.previous
 
 hyper_iret:
        /* put this out of line since its very rarely used */
@@ -219,9 +237,7 @@ hyper_iret:
         ds             }  SAVE_ALL state
         eax            }
          :             :
-        ebx            }
-       ----------------
-        return addr     <- esp
+        ebx            }<- esp
        ----------------
 
    In order to deliver the nested exception properly, we need to shift
@@ -236,10 +252,8 @@ hyper_iret:
    it's usermode state which we eventually need to restore.
  */
 ENTRY(xen_iret_crit_fixup)
-       /* offsets +4 for return address */
-
        /*
-          Paranoia: Make sure we're really coming from userspace.
+          Paranoia: Make sure we're really coming from kernel space.
           One could imagine a case where userspace jumps into the
           critical range address, but just before the CPU delivers a GP,
           it decides to deliver an interrupt instead.  Unlikely?
@@ -248,32 +262,32 @@ ENTRY(xen_iret_crit_fixup)
           jump instruction itself, not the destination, but some virtual
           environments get this wrong.
         */
-       movl PT_CS+4(%esp), %ecx
+       movl PT_CS(%esp), %ecx
        andl $SEGMENT_RPL_MASK, %ecx
        cmpl $USER_RPL, %ecx
        je 2f
 
-       lea PT_ORIG_EAX+4(%esp), %esi
-       lea PT_EFLAGS+4(%esp), %edi
+       lea PT_ORIG_EAX(%esp), %esi
+       lea PT_EFLAGS(%esp), %edi
 
        /* If eip is before iret_restore_end then stack
           hasn't been restored yet. */
        cmp $iret_restore_end, %eax
        jae 1f
 
-       movl 0+4(%edi),%eax             /* copy EAX */
-       movl %eax, PT_EAX+4(%esp)
+       movl 0+4(%edi),%eax             /* copy EAX (just above top of frame) */
+       movl %eax, PT_EAX(%esp)
 
        lea ESP_OFFSET(%edi),%edi       /* move dest up over saved regs */
 
        /* set up the copy */
 1:     std
-       mov $(PT_EIP+4) / 4, %ecx       /* copy ret+saved regs up to orig_eax */
+       mov $PT_EIP / 4, %ecx           /* saved regs up to orig_eax */
        rep movsl
        cld
 
        lea 4(%edi),%esp                /* point esp to new frame */
-2:     ret
+2:     jmp xen_do_upcall
 
 
 /*
index 956a491ea998b1e5dddc0a90dde1b88ad5b25268..f1063ae0803795014d29af4a02ca9eb4211536b1 100644 (file)
@@ -2,6 +2,8 @@
 #define XEN_OPS_H
 
 #include <linux/init.h>
+#include <linux/irqreturn.h>
+#include <xen/xen-ops.h>
 
 /* These are code, but not functions.  Defined in entry.S */
 extern const char xen_hypervisor_callback[];
@@ -9,7 +11,6 @@ extern const char xen_failsafe_callback[];
 
 void xen_copy_trap_info(struct trap_info *traps);
 
-DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);
 DECLARE_PER_CPU(unsigned long, xen_cr3);
 DECLARE_PER_CPU(unsigned long, xen_current_cr3);
 
@@ -19,6 +20,7 @@ extern struct shared_info *HYPERVISOR_shared_info;
 char * __init xen_memory_setup(void);
 void __init xen_arch_setup(void);
 void __init xen_init_IRQ(void);
+void xen_enable_sysenter(void);
 
 void xen_setup_timer(int cpu);
 void xen_setup_cpu_clockevents(void);
@@ -28,6 +30,8 @@ unsigned long xen_get_wallclock(void);
 int xen_set_wallclock(unsigned long time);
 unsigned long long xen_sched_clock(void);
 
+irqreturn_t xen_debug_interrupt(int irq, void *dev_id);
+
 bool xen_vcpu_stolen(int vcpu);
 
 void xen_mark_init_mm_pinned(void);
@@ -64,4 +68,6 @@ DECL_ASM(unsigned long, xen_save_fl_direct, void);
 DECL_ASM(void, xen_restore_fl_direct, unsigned long);
 
 void xen_iret(void);
+void xen_sysexit(void);
+
 #endif /* XEN_OPS_H */
index 3a0e3549739f5980c6458d53c135f33878346ead..80f0ec91e2cf3fd04f6076661c6e223ec931a5db 100644 (file)
@@ -97,4 +97,6 @@ source "drivers/dca/Kconfig"
 source "drivers/auxdisplay/Kconfig"
 
 source "drivers/uio/Kconfig"
+
+source "drivers/xen/Kconfig"
 endmenu
index 9c6f3f99208d073ad9cbfd3ff61e69a0e3dc0a02..d771da816d95d708cff253e41e3a6ab05cd9d930 100644 (file)
@@ -47,6 +47,7 @@
 
 #include <xen/interface/grant_table.h>
 #include <xen/interface/io/blkif.h>
+#include <xen/interface/io/protocols.h>
 
 #include <asm/xen/hypervisor.h>
 
@@ -74,7 +75,6 @@ static struct block_device_operations xlvbd_block_fops;
 struct blkfront_info
 {
        struct xenbus_device *xbdev;
-       dev_t dev;
        struct gendisk *gd;
        int vdevice;
        blkif_vdev_t handle;
@@ -88,6 +88,7 @@ struct blkfront_info
        struct blk_shadow shadow[BLK_RING_SIZE];
        unsigned long shadow_free;
        int feature_barrier;
+       int is_ready;
 
        /**
         * The number of people holding this device open.  We won't allow a
@@ -614,6 +615,12 @@ again:
                message = "writing event-channel";
                goto abort_transaction;
        }
+       err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
+                           XEN_IO_PROTO_ABI_NATIVE);
+       if (err) {
+               message = "writing protocol";
+               goto abort_transaction;
+       }
 
        err = xenbus_transaction_end(xbt, 0);
        if (err) {
@@ -833,6 +840,8 @@ static void blkfront_connect(struct blkfront_info *info)
        spin_unlock_irq(&blkif_io_lock);
 
        add_disk(info->gd);
+
+       info->is_ready = 1;
 }
 
 /**
@@ -896,7 +905,7 @@ static void backend_changed(struct xenbus_device *dev,
                break;
 
        case XenbusStateClosing:
-               bd = bdget(info->dev);
+               bd = bdget_disk(info->gd, 0);
                if (bd == NULL)
                        xenbus_dev_fatal(dev, -ENODEV, "bdget failed");
 
@@ -925,6 +934,13 @@ static int blkfront_remove(struct xenbus_device *dev)
        return 0;
 }
 
+static int blkfront_is_ready(struct xenbus_device *dev)
+{
+       struct blkfront_info *info = dev->dev.driver_data;
+
+       return info->is_ready;
+}
+
 static int blkif_open(struct inode *inode, struct file *filep)
 {
        struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
@@ -971,6 +987,7 @@ static struct xenbus_driver blkfront = {
        .remove = blkfront_remove,
        .resume = blkfront_resume,
        .otherend_changed = backend_changed,
+       .is_ready = blkfront_is_ready,
 };
 
 static int __init xlblk_init(void)
@@ -998,3 +1015,5 @@ module_exit(xlblk_exit);
 MODULE_DESCRIPTION("Xen virtual block device frontend");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR);
+MODULE_ALIAS("xen:vbd");
+MODULE_ALIAS("xenblk");
index 9769bf8279a6adff8d39a13fd31611bce42f0a49..60b934adea65139c5c802b995e6be28950cc6e2e 100644 (file)
@@ -42,6 +42,7 @@
 #include <linux/input.h>
 #include <linux/reboot.h>
 #include <linux/notifier.h>
+#include <linux/jiffies.h>
 
 extern void ctrl_alt_del(void);
 
@@ -928,7 +929,8 @@ static void k_brl(struct vc_data *vc, unsigned char value, char up_flag)
        if (up_flag) {
                if (brl_timeout) {
                        if (!committing ||
-                           jiffies - releasestart > (brl_timeout * HZ) / 1000) {
+                           time_after(jiffies,
+                                      releasestart + msecs_to_jiffies(brl_timeout))) {
                                committing = pressed;
                                releasestart = jiffies;
                        }
@@ -1238,6 +1240,7 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw)
        }
 
        param.shift = shift_final = (shift_state | kbd->slockstate) ^ kbd->lockstate;
+       param.ledstate = kbd->ledflagstate;
        key_map = key_maps[shift_final];
 
        if (atomic_notifier_call_chain(&keyboard_notifier_list, KBD_KEYCODE, &param) == NOTIFY_STOP || !key_map) {
@@ -1286,6 +1289,7 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw)
 
        (*k_handler[type])(vc, keysym & 0xff, !down);
 
+       param.ledstate = kbd->ledflagstate;
        atomic_notifier_call_chain(&keyboard_notifier_list, KBD_POST_KEYSYM, &param);
 
        if (type != KT_SLOCK)
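
The rewrite of the braille timeout test above swaps an open-coded jiffies subtraction for time_after() plus msecs_to_jiffies(); time_after() performs a wraparound-safe signed comparison, so the check stays correct when jiffies overflows. The same pattern in isolation (a hypothetical helper using the same kernel APIs):

#include <linux/jiffies.h>

static int example_timeout_expired(unsigned long start, unsigned int timeout_ms)
{
	/* True once more than timeout_ms has passed since 'start';
	 * remains correct across a jiffies wraparound. */
	return time_after(jiffies, start + msecs_to_jiffies(timeout_ms));
}
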
index 28ddc3fdd3d1ffc717bd8735b813fc55a4c64a55..d3f8d9194f3026879319354a5254ae6a80bcf3c9 100644 (file)
 #define USB_VENDOR_ID_YEALINK          0x6993
 #define USB_DEVICE_ID_YEALINK_P1K_P4K_B2K      0xb001
 
+#define USB_VENDOR_ID_KYE              0x0458
+#define USB_DEVICE_ID_KYE_GPEN_560     0x5003
+
 /*
  * Alphabetically sorted blacklist by quirk type.
  */
@@ -698,6 +701,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_63, HID_QUIRK_IGNORE },
        { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_64, HID_QUIRK_IGNORE },
        { USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR, USB_DEVICE_ID_N_S_HARMONY, HID_QUIRK_IGNORE },
+       { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_GPEN_560, HID_QUIRK_IGNORE },
 
        { 0, 0 }
 };
index 6228fadacd388d7ffd014548796dec4263cfe454..9d19aec5820a303145d8b286e2d1ce2e72b4a586 100644 (file)
@@ -2167,6 +2167,7 @@ static const struct file_operations dv1394_fops=
 /*
  * Export information about protocols/devices supported by this driver.
  */
+#ifdef MODULE
 static struct ieee1394_device_id dv1394_id_table[] = {
        {
                .match_flags    = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
@@ -2177,6 +2178,7 @@ static struct ieee1394_device_id dv1394_id_table[] = {
 };
 
 MODULE_DEVICE_TABLE(ieee1394, dv1394_id_table);
+#endif /* MODULE */
 
 static struct hpsb_protocol_driver dv1394_driver = {
        .name = "dv1394",
index b94e55e6eaa59034c9c5825fc656f2de3a7fe54e..b5de5f21ef78a57e45032500867fdb24538de00e 100644 (file)
@@ -123,6 +123,8 @@ struct hpsb_iso {
 
        /* how many times the buffer has overflowed or underflowed */
        atomic_t overflows;
+       /* how many cycles were skipped for a given context */
+       atomic_t skips;
 
        /* Current number of bytes lost in discarded packets */
        int bytes_discarded;
index 0690469fcecf9acfdb53ce4140c5ba4e5c99dd9a..e509e13cb7a7cdb4c2d154835dea19857082f418 100644 (file)
@@ -1723,6 +1723,8 @@ struct ohci_iso_xmit {
        struct dma_prog_region prog;
        struct ohci1394_iso_tasklet task;
        int task_active;
+       int last_cycle;
+       atomic_t skips;
 
        u32 ContextControlSet;
        u32 ContextControlClear;
@@ -1759,6 +1761,8 @@ static int ohci_iso_xmit_init(struct hpsb_iso *iso)
        iso->hostdata = xmit;
        xmit->ohci = iso->host->hostdata;
        xmit->task_active = 0;
+       xmit->last_cycle = -1;
+       atomic_set(&iso->skips, 0);
 
        dma_prog_region_init(&xmit->prog);
 
@@ -1856,6 +1860,26 @@ static void ohci_iso_xmit_task(unsigned long data)
                /* parse cycle */
                cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;
 
+               if (xmit->last_cycle > -1) {
+                       int cycle_diff = cycle - xmit->last_cycle;
+                       int skip;
+
+                       /* unwrap */
+                       if (cycle_diff < 0) {
+                               cycle_diff += 8000;
+                               if (cycle_diff < 0)
+                                       PRINT(KERN_ERR, "bogus cycle diff %d\n",
+                                             cycle_diff);
+                       }
+
+                       skip = cycle_diff - 1;
+                       if (skip > 0) {
+                               DBGMSG("skipped %d cycles without packet loss", skip);
+                               atomic_add(skip, &iso->skips);
+                       }
+               }
+               xmit->last_cycle = cycle;
+
                /* tell the subsystem the packet has gone out */
                hpsb_iso_packet_sent(iso, cycle, event != 0x11);
 
@@ -1943,6 +1967,16 @@ static int ohci_iso_xmit_queue(struct hpsb_iso *iso, struct hpsb_iso_packet_info
        prev->output_last.branchAddress = cpu_to_le32(
                dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3);
 
+       /*
+        * Link the skip address to this descriptor itself. This causes a
+        * context to skip a cycle whenever lost cycles or FIFO overruns occur,
+        * without dropping the data at that point; the application should then
+        * decide whether this is an error condition or not. Some protocols
+        * can deal with this by dropping some rate-matching padding packets.
+        */
+       next->output_more_immediate.branchAddress =
+                       prev->output_last.branchAddress;
+
        /* disable interrupt, unless required by the IRQ interval */
        if (prev_i % iso->irq_interval) {
                prev->output_last.control &= cpu_to_le32(~(3 << 20)); /* no interrupt */
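
The skip accounting added above relies on the isochronous cycle counter running from 0 to 7999 within each second, so a negative difference between consecutive completions means the counter wrapped. The unwrap-and-count step, pulled out as a standalone sketch of the same arithmetic:

static int example_cycles_skipped(int last_cycle, int cycle)
{
	int diff = cycle - last_cycle;

	if (diff < 0)
		diff += 8000;	/* counter wrapped at the 1-second boundary */

	return diff - 1;	/* 0 when the next cycle followed immediately */
}
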
index 04e96ba56e090af3cd4ff6ab44e5b1fb147d2fd1..ec2a0adbedb248f2a493b2ca716fc8d7971c98ed 100644 (file)
@@ -2356,13 +2356,16 @@ static void rawiso_activity_cb(struct hpsb_iso *iso)
 static void raw1394_iso_fill_status(struct hpsb_iso *iso,
                                    struct raw1394_iso_status *stat)
 {
+       int overflows = atomic_read(&iso->overflows);
+       int skips = atomic_read(&iso->skips);
+
        stat->config.data_buf_size = iso->buf_size;
        stat->config.buf_packets = iso->buf_packets;
        stat->config.channel = iso->channel;
        stat->config.speed = iso->speed;
        stat->config.irq_interval = iso->irq_interval;
        stat->n_packets = hpsb_iso_n_ready(iso);
-       stat->overflows = atomic_read(&iso->overflows);
+       stat->overflows = ((skips & 0xFFFF) << 16) | ((overflows & 0xFFFF));
        stat->xmit_cycle = iso->xmit_cycle;
 }
 
@@ -2437,6 +2440,8 @@ static int raw1394_iso_get_status(struct file_info *fi, void __user * uaddr)
 
        /* reset overflow counter */
        atomic_set(&iso->overflows, 0);
+       /* reset skip counter */
+       atomic_set(&iso->skips, 0);
 
        return 0;
 }
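
Because raw1394_iso_status has no separate field for the new skip counter, the hunk above packs it into the upper 16 bits of stat->overflows. Decoding it on the consumer side would look roughly like this (illustrative only; the helper name is an assumption):

static void example_decode_overflows(const struct raw1394_iso_status *stat,
				     unsigned int *skips, unsigned int *overflows)
{
	*skips     = (stat->overflows >> 16) & 0xffff;
	*overflows =  stat->overflows        & 0xffff;
}
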
@@ -2935,6 +2940,7 @@ static int raw1394_release(struct inode *inode, struct file *file)
 /*
  * Export information about protocols/devices supported by this driver.
  */
+#ifdef MODULE
 static struct ieee1394_device_id raw1394_id_table[] = {
        {
         .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
@@ -2956,6 +2962,7 @@ static struct ieee1394_device_id raw1394_id_table[] = {
 };
 
 MODULE_DEVICE_TABLE(ieee1394, raw1394_id_table);
+#endif /* MODULE */
 
 static struct hpsb_protocol_driver raw1394_driver = {
        .name = "raw1394",
index e03024eeeac135fac1da8b5422b7b84e3a4d16d3..e24772d336e1ae6c726d8e0a05ae5ea25a9207e0 100644 (file)
@@ -1293,6 +1293,7 @@ static const struct file_operations video1394_fops=
 /*
  * Export information about protocols/devices supported by this driver.
  */
+#ifdef MODULE
 static struct ieee1394_device_id video1394_id_table[] = {
        {
                .match_flags    = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
@@ -1313,6 +1314,7 @@ static struct ieee1394_device_id video1394_id_table[] = {
 };
 
 MODULE_DEVICE_TABLE(ieee1394, video1394_id_table);
+#endif /* MODULE */
 
 static struct hpsb_protocol_driver video1394_driver = {
        .name = VIDEO1394_DRIVER_NAME,
index 9dea14db724ca1a186f8d7f765b94c28ed0db3a3..5f9d860925a17c7f8799880ba096fe2ccd41dc3c 100644 (file)
@@ -149,6 +149,15 @@ config INPUT_APMPOWER
          To compile this driver as a module, choose M here: the
          module will be called apm-power.
 
+config XEN_KBDDEV_FRONTEND
+       tristate "Xen virtual keyboard and mouse support"
+       depends on XEN_FBDEV_FRONTEND
+       default y
+       help
+         This driver implements the front-end of the Xen virtual
+         keyboard and mouse device driver.  It communicates with a back-end
+         in another domain.
+
 comment "Input Device Drivers"
 
 source "drivers/input/keyboard/Kconfig"
index 2ae87b19caa891cfe21ce1b49bc5923e15909a99..98c4f9a778768d40af7a4942624caf8f46f13260 100644 (file)
@@ -23,3 +23,5 @@ obj-$(CONFIG_INPUT_TOUCHSCREEN)       += touchscreen/
 obj-$(CONFIG_INPUT_MISC)       += misc/
 
 obj-$(CONFIG_INPUT_APMPOWER)   += apm-power.o
+
+obj-$(CONFIG_XEN_KBDDEV_FRONTEND)      += xen-kbdfront.o
index 490918a5d1922789f1f206c2746579c6c78a7009..0d3ce7a50fb1a345ab10a8ff1926dc4635475a90 100644 (file)
@@ -73,7 +73,7 @@ static void input_polled_device_work(struct work_struct *work)
 
 static int input_open_polled_device(struct input_dev *input)
 {
-       struct input_polled_dev *dev = input->private;
+       struct input_polled_dev *dev = input_get_drvdata(input);
        int error;
 
        error = input_polldev_start_workqueue();
@@ -91,7 +91,7 @@ static int input_open_polled_device(struct input_dev *input)
 
 static void input_close_polled_device(struct input_dev *input)
 {
-       struct input_polled_dev *dev = input->private;
+       struct input_polled_dev *dev = input_get_drvdata(input);
 
        cancel_delayed_work_sync(&dev->work);
        input_polldev_stop_workqueue();
@@ -151,10 +151,10 @@ int input_register_polled_device(struct input_polled_dev *dev)
 {
        struct input_dev *input = dev->input;
 
+       input_set_drvdata(input, dev);
        INIT_DELAYED_WORK(&dev->work, input_polled_device_work);
        if (!dev->poll_interval)
                dev->poll_interval = 500;
-       input->private = dev;
        input->open = input_open_polled_device;
        input->close = input_close_polled_device;
 
index 7c662ee594a3e9fdf85761765ae6776ddecf05d9..be5c14a5a0a4f8afcff50c18ee6ecd4343d44411 100644 (file)
@@ -193,6 +193,18 @@ config JOYSTICK_TWIDJOY
          To compile this driver as a module, choose M here: the
          module will be called twidjoy.
 
+config JOYSTICK_ZHENHUA
+       tristate "5-byte Zhenhua RC transmitter"
+       select SERIO
+       help
+         Say Y here if you have a Zhen Hua PPM-4CH transmitter, as supplied
+         with ready-to-fly micro electric indoor helicopters such as the
+         EasyCopter, Lama, MiniCopter, DragonFly or Jabo, and want to use it
+         via a serial cable as a joystick.
+
+         To compile this driver as a module, choose M here: the
+         module will be called zhenhua.
+
 config JOYSTICK_DB9
        tristate "Multisystem, Sega Genesis, Saturn joysticks and gamepads"
        depends on PARPORT
index e855abb0cc5175e0ffc9692e9e3f5b20efb937ef..fdbf8c4c2876083686f8fc868427776792f80997 100644 (file)
@@ -15,6 +15,7 @@ obj-$(CONFIG_JOYSTICK_GF2K)           += gf2k.o
 obj-$(CONFIG_JOYSTICK_GRIP)            += grip.o
 obj-$(CONFIG_JOYSTICK_GRIP_MP)         += grip_mp.o
 obj-$(CONFIG_JOYSTICK_GUILLEMOT)       += guillemot.o
+obj-$(CONFIG_JOYSTICK_IFORCE)          += iforce/
 obj-$(CONFIG_JOYSTICK_INTERACT)                += interact.o
 obj-$(CONFIG_JOYSTICK_JOYDUMP)         += joydump.o
 obj-$(CONFIG_JOYSTICK_MAGELLAN)                += magellan.o
@@ -27,5 +28,5 @@ obj-$(CONFIG_JOYSTICK_TURBOGRAFX)     += turbografx.o
 obj-$(CONFIG_JOYSTICK_TWIDJOY)         += twidjoy.o
 obj-$(CONFIG_JOYSTICK_WARRIOR)         += warrior.o
 obj-$(CONFIG_JOYSTICK_XPAD)            += xpad.o
+obj-$(CONFIG_JOYSTICK_ZHENHUA)         += zhenhua.o
 
-obj-$(CONFIG_JOYSTICK_IFORCE)          += iforce/
index 2854c8fc334b01d98942f62a3c655dad5d669e8d..4b07bdadb81e16d9f7f0392ee6a942a81de0c78b 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * X-Box gamepad - v0.0.6
+ * X-Box gamepad driver
  *
  * Copyright (c) 2002 Marko Friedemann <mfr@bmx-chemnitz.de>
  *               2004 Oliver Schwartz <Oliver.Schwartz@gmx.de>,
@@ -68,6 +68,8 @@
  *  - dance pads will map D-PAD to buttons, not axes
  *  - pass the module parameter 'dpad_to_buttons' to force
  *    the D-PAD to map to buttons if your pad is not detected
+ *
+ * Later changes can be tracked in SCM.
  */
 
 #include <linux/kernel.h>
@@ -77,7 +79,6 @@
 #include <linux/module.h>
 #include <linux/usb/input.h>
 
-#define DRIVER_VERSION "v0.0.6"
 #define DRIVER_AUTHOR "Marko Friedemann <mfr@bmx-chemnitz.de>"
 #define DRIVER_DESC "X-Box pad driver"
 
    but we map them to axes when possible to simplify things */
 #define MAP_DPAD_TO_BUTTONS    0
 #define MAP_DPAD_TO_AXES       1
-#define MAP_DPAD_UNKNOWN       -1
+#define MAP_DPAD_UNKNOWN       2
 
 #define XTYPE_XBOX        0
 #define XTYPE_XBOX360     1
+#define XTYPE_XBOX360W    2
+#define XTYPE_UNKNOWN     3
 
 static int dpad_to_buttons;
 module_param(dpad_to_buttons, bool, S_IRUGO);
@@ -107,8 +110,10 @@ static const struct xpad_device {
        { 0x045e, 0x0289, "Microsoft X-Box pad v2 (US)", MAP_DPAD_TO_AXES, XTYPE_XBOX },
        { 0x045e, 0x0285, "Microsoft X-Box pad (Japan)", MAP_DPAD_TO_AXES, XTYPE_XBOX },
        { 0x045e, 0x0287, "Microsoft Xbox Controller S", MAP_DPAD_TO_AXES, XTYPE_XBOX },
+       { 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
        { 0x0c12, 0x8809, "RedOctane Xbox Dance Pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
        { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX },
+       { 0x046d, 0xc242, "Logitech Chillstream Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX360 },
        { 0x046d, 0xca84, "Logitech Xbox Cordless Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX },
        { 0x046d, 0xca88, "Logitech Compact Controller for Xbox", MAP_DPAD_TO_AXES, XTYPE_XBOX },
        { 0x05fd, 0x1007, "Mad Catz Controller (unverified)", MAP_DPAD_TO_AXES, XTYPE_XBOX },
@@ -135,18 +140,26 @@ static const struct xpad_device {
        { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX },
        { 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX },
        { 0x12ab, 0x8809, "Xbox DDR dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
+       { 0x1430, 0x4748, "RedOctane Guitar Hero X-plorer", MAP_DPAD_TO_AXES, XTYPE_XBOX360 },
        { 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
        { 0x045e, 0x028e, "Microsoft X-Box 360 pad", MAP_DPAD_TO_AXES, XTYPE_XBOX360 },
        { 0xffff, 0xffff, "Chinese-made Xbox Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX },
-       { 0x0000, 0x0000, "Generic X-Box pad", MAP_DPAD_UNKNOWN, XTYPE_XBOX }
+       { 0x0000, 0x0000, "Generic X-Box pad", MAP_DPAD_UNKNOWN, XTYPE_UNKNOWN }
 };
 
-static const signed short xpad_btn[] = {
-       BTN_A, BTN_B, BTN_C, BTN_X, BTN_Y, BTN_Z,       /* "analog" buttons */
+/* buttons shared with xbox and xbox360 */
+static const signed short xpad_common_btn[] = {
+       BTN_A, BTN_B, BTN_X, BTN_Y,                     /* "analog" buttons */
        BTN_START, BTN_BACK, BTN_THUMBL, BTN_THUMBR,    /* start/back/sticks */
        -1                                              /* terminating entry */
 };
 
+/* original xbox controllers only */
+static const signed short xpad_btn[] = {
+       BTN_C, BTN_Z,           /* "analog" buttons */
+       -1                      /* terminating entry */
+};
+
 /* only used if MAP_DPAD_TO_BUTTONS */
 static const signed short xpad_btn_pad[] = {
        BTN_LEFT, BTN_RIGHT,            /* d-pad left, right */
@@ -173,12 +186,27 @@ static const signed short xpad_abs_pad[] = {
        -1                      /* terminating entry */
 };
 
-/* Xbox 360 has a vendor-specific (sub)class, so we cannot match it with only
- * USB_INTERFACE_INFO, more to that this device has 4 InterfaceProtocols,
- * but we need only one of them. */
+/* The Xbox 360 has a vendor-specific class, so we cannot match it with only
+ * USB_INTERFACE_INFO (which the USB subsystem also specifically refuses);
+ * instead we match against the vendor id as well. Wired Xbox 360 devices
+ * have protocol 1, wireless controllers have protocol 129. */
+#define XPAD_XBOX360_VENDOR_PROTOCOL(vend,pr) \
+       .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO, \
+       .idVendor = (vend), \
+       .bInterfaceClass = USB_CLASS_VENDOR_SPEC, \
+       .bInterfaceSubClass = 93, \
+       .bInterfaceProtocol = (pr)
+#define XPAD_XBOX360_VENDOR(vend) \
+       { XPAD_XBOX360_VENDOR_PROTOCOL(vend,1) }, \
+       { XPAD_XBOX360_VENDOR_PROTOCOL(vend,129) }
+
 static struct usb_device_id xpad_table [] = {
        { USB_INTERFACE_INFO('X', 'B', 0) },    /* X-Box USB-IF not approved class */
-       { USB_DEVICE_INTERFACE_PROTOCOL(0x045e, 0x028e, 1) },   /* X-Box 360 controller */
+       XPAD_XBOX360_VENDOR(0x045e),            /* Microsoft X-Box 360 controllers */
+       XPAD_XBOX360_VENDOR(0x046d),            /* Logitech X-Box 360 style controllers */
+       XPAD_XBOX360_VENDOR(0x0738),            /* Mad Catz X-Box 360 controllers */
+       XPAD_XBOX360_VENDOR(0x0e6f),            /* 0x0e6f X-Box 360 controllers */
+       XPAD_XBOX360_VENDOR(0x1430),            /* RedOctane X-Box 360 controllers */
        { }
 };
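
For reference, XPAD_XBOX360_VENDOR(0x045e) above expands to two usb_device_id entries, one per interface protocol; written out by hand with the same fields the macro fills in:

	{
		.match_flags = USB_DEVICE_ID_MATCH_VENDOR |
			       USB_DEVICE_ID_MATCH_INT_INFO,
		.idVendor = 0x045e,
		.bInterfaceClass = USB_CLASS_VENDOR_SPEC,
		.bInterfaceSubClass = 93,
		.bInterfaceProtocol = 1,	/* wired pad */
	},
	{
		.match_flags = USB_DEVICE_ID_MATCH_VENDOR |
			       USB_DEVICE_ID_MATCH_INT_INFO,
		.idVendor = 0x045e,
		.bInterfaceClass = USB_CLASS_VENDOR_SPEC,
		.bInterfaceSubClass = 93,
		.bInterfaceProtocol = 129,	/* wireless receiver */
	},
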
 
@@ -188,10 +216,15 @@ struct usb_xpad {
        struct input_dev *dev;          /* input device interface */
        struct usb_device *udev;        /* usb device */
 
+       int pad_present;
+
        struct urb *irq_in;             /* urb for interrupt in report */
        unsigned char *idata;           /* input data */
        dma_addr_t idata_dma;
 
+       struct urb *bulk_out;
+       unsigned char *bdata;
+
 #if defined(CONFIG_JOYSTICK_XPAD_FF) || defined(CONFIG_JOYSTICK_XPAD_LEDS)
        struct urb *irq_out;            /* urb for interrupt out report */
        unsigned char *odata;           /* output data */
@@ -227,13 +260,13 @@ static void xpad_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *d
        input_report_abs(dev, ABS_X,
                         (__s16) le16_to_cpup((__le16 *)(data + 12)));
        input_report_abs(dev, ABS_Y,
-                        (__s16) le16_to_cpup((__le16 *)(data + 14)));
+                        ~(__s16) le16_to_cpup((__le16 *)(data + 14)));
 
        /* right stick */
        input_report_abs(dev, ABS_RX,
                         (__s16) le16_to_cpup((__le16 *)(data + 16)));
        input_report_abs(dev, ABS_RY,
-                        (__s16) le16_to_cpup((__le16 *)(data + 18)));
+                        ~(__s16) le16_to_cpup((__le16 *)(data + 18)));
 
        /* triggers left/right */
        input_report_abs(dev, ABS_Z, data[10]);
@@ -321,13 +354,13 @@ static void xpad360_process_packet(struct usb_xpad *xpad,
        input_report_abs(dev, ABS_X,
                         (__s16) le16_to_cpup((__le16 *)(data + 6)));
        input_report_abs(dev, ABS_Y,
-                        (__s16) le16_to_cpup((__le16 *)(data + 8)));
+                        ~(__s16) le16_to_cpup((__le16 *)(data + 8)));
 
        /* right stick */
        input_report_abs(dev, ABS_RX,
                         (__s16) le16_to_cpup((__le16 *)(data + 10)));
        input_report_abs(dev, ABS_RY,
-                        (__s16) le16_to_cpup((__le16 *)(data + 12)));
+                        ~(__s16) le16_to_cpup((__le16 *)(data + 12)));
 
        /* triggers left/right */
        input_report_abs(dev, ABS_Z, data[4]);
@@ -336,6 +369,39 @@ static void xpad360_process_packet(struct usb_xpad *xpad,
        input_sync(dev);
 }
 
+/*
+ * xpad360w_process_packet
+ *
+ * Completes a request by converting the data into events for the
+ * input subsystem. This is the variant for the Xbox 360 wireless controller.
+ *
+ * Byte.Bit
+ * 00.1 - Status change: The controller or headset has connected/disconnected
+ *                       Bits 01.7 and 01.6 are valid
+ * 01.7 - Controller present
+ * 01.6 - Headset present
+ * 01.1 - Pad state (Bytes 4+) valid
+ *
+ */
+
+static void xpad360w_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *data)
+{
+       /* Presence change */
+       if (data[0] & 0x08) {
+               if (data[1] & 0x80) {
+                       xpad->pad_present = 1;
+                       usb_submit_urb(xpad->bulk_out, GFP_ATOMIC);
+               } else
+                       xpad->pad_present = 0;
+       }
+
+       /* Valid pad data */
+       if (!(data[1] & 0x1))
+               return;
+
+       xpad360_process_packet(xpad, cmd, &data[4]);
+}
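
To make the status-byte layout documented above a bit more concrete, here is a small standalone sketch that applies the same two tests as xpad360w_process_packet() to made-up sample packets; only the bit masks are taken from the driver, the sample byte values themselves are invented.

#include <stdio.h>

/* Mirrors the presence / valid-data tests from xpad360w_process_packet(). */
static void describe_packet(const unsigned char *data)
{
        if (data[0] & 0x08)     /* status change reported */
                printf("controller %s\n",
                       (data[1] & 0x80) ? "connected" : "disconnected");

        if (data[1] & 0x01)     /* pad state from byte 4 onward is valid */
                printf("pad data follows at byte 4\n");
}

int main(void)
{
        unsigned char arrived[4] = { 0x08, 0x80, 0x00, 0x00 }; /* sample only */
        unsigned char removed[4] = { 0x08, 0x00, 0x00, 0x00 }; /* sample only */

        describe_packet(arrived);
        describe_packet(removed);
        return 0;
}
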
+
 static void xpad_irq_in(struct urb *urb)
 {
        struct usb_xpad *xpad = urb->context;
@@ -360,10 +426,16 @@ static void xpad_irq_in(struct urb *urb)
                goto exit;
        }
 
-       if (xpad->xtype == XTYPE_XBOX360)
+       switch (xpad->xtype) {
+       case XTYPE_XBOX360:
                xpad360_process_packet(xpad, 0, xpad->idata);
-       else
+               break;
+       case XTYPE_XBOX360W:
+               xpad360w_process_packet(xpad, 0, xpad->idata);
+               break;
+       default:
                xpad_process_packet(xpad, 0, xpad->idata);
+       }
 
 exit:
        retval = usb_submit_urb (urb, GFP_ATOMIC);
@@ -403,6 +475,23 @@ exit:
                    __FUNCTION__, retval);
 }
 
+static void xpad_bulk_out(struct urb *urb)
+{
+       switch (urb->status) {
+       case 0:
+               /* success */
+               break;
+       case -ECONNRESET:
+       case -ENOENT:
+       case -ESHUTDOWN:
+               /* this urb is terminated, clean up */
+               dbg("%s - urb shutting down with status: %d", __FUNCTION__, urb->status);
+               break;
+       default:
+               dbg("%s - nonzero urb status received: %d", __FUNCTION__, urb->status);
+       }
+}
+
 static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
 {
        struct usb_endpoint_descriptor *ep_irq_out;
@@ -412,7 +501,7 @@ static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
                return 0;
 
        xpad->odata = usb_buffer_alloc(xpad->udev, XPAD_PKT_LEN,
-                                      GFP_ATOMIC, &xpad->odata_dma );
+                                      GFP_KERNEL, &xpad->odata_dma);
        if (!xpad->odata)
                goto fail1;
 
@@ -473,6 +562,7 @@ static int xpad_play_effect(struct input_dev *dev, void *data,
                xpad->odata[5] = 0x00;
                xpad->odata[6] = 0x00;
                xpad->odata[7] = 0x00;
+               xpad->irq_out->transfer_buffer_length = 8;
                usb_submit_urb(xpad->irq_out, GFP_KERNEL);
        }
 
@@ -481,6 +571,9 @@ static int xpad_play_effect(struct input_dev *dev, void *data,
 
 static int xpad_init_ff(struct usb_xpad *xpad)
 {
+       if (xpad->xtype != XTYPE_XBOX360)
+               return 0;
+
        input_set_capability(xpad->dev, EV_FF, FF_RUMBLE);
 
        return input_ff_create_memless(xpad->dev, NULL, xpad_play_effect);
@@ -506,6 +599,7 @@ static void xpad_send_led_command(struct usb_xpad *xpad, int command)
                xpad->odata[0] = 0x01;
                xpad->odata[1] = 0x03;
                xpad->odata[2] = command;
+               xpad->irq_out->transfer_buffer_length = 3;
                usb_submit_urb(xpad->irq_out, GFP_KERNEL);
                mutex_unlock(&xpad->odata_mutex);
        }
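
The hunk above sets transfer_buffer_length explicitly because the LED request is only three bytes long. A minimal sketch of that packet layout follows; the command value passed in is an arbitrary sample, and what individual command values mean on the controller is not spelled out by this patch.

#include <stdio.h>

/* Builds the same 3-byte LED request that xpad_send_led_command() queues. */
static void fill_led_packet(unsigned char *buf, unsigned char command)
{
        buf[0] = 0x01;
        buf[1] = 0x03;
        buf[2] = command;
}

int main(void)
{
        unsigned char packet[3];

        fill_led_packet(packet, 2);     /* 2 is just a sample command value */
        printf("%02x %02x %02x\n", packet[0], packet[1], packet[2]);
        return 0;
}
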
@@ -578,6 +672,10 @@ static int xpad_open(struct input_dev *dev)
 {
        struct usb_xpad *xpad = input_get_drvdata(dev);
 
+       /* URB was submitted in probe */
+       if(xpad->xtype == XTYPE_XBOX360W)
+               return 0;
+
        xpad->irq_in->dev = xpad->udev;
        if (usb_submit_urb(xpad->irq_in, GFP_KERNEL))
                return -EIO;
@@ -589,7 +687,8 @@ static void xpad_close(struct input_dev *dev)
 {
        struct usb_xpad *xpad = input_get_drvdata(dev);
 
-       usb_kill_urb(xpad->irq_in);
+       if(xpad->xtype != XTYPE_XBOX360W)
+               usb_kill_urb(xpad->irq_in);
        xpad_stop_output(xpad);
 }
 
@@ -636,7 +735,7 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
                goto fail1;
 
        xpad->idata = usb_buffer_alloc(udev, XPAD_PKT_LEN,
-                                      GFP_ATOMIC, &xpad->idata_dma);
+                                      GFP_KERNEL, &xpad->idata_dma);
        if (!xpad->idata)
                goto fail1;
 
@@ -648,7 +747,16 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
        xpad->dpad_mapping = xpad_device[i].dpad_mapping;
        xpad->xtype = xpad_device[i].xtype;
        if (xpad->dpad_mapping == MAP_DPAD_UNKNOWN)
-               xpad->dpad_mapping = dpad_to_buttons;
+               xpad->dpad_mapping = !dpad_to_buttons;
+       if (xpad->xtype == XTYPE_UNKNOWN) {
+               if (intf->cur_altsetting->desc.bInterfaceClass == USB_CLASS_VENDOR_SPEC) {
+                       if (intf->cur_altsetting->desc.bInterfaceProtocol == 129)
+                               xpad->xtype = XTYPE_XBOX360W;
+                       else
+                               xpad->xtype = XTYPE_XBOX360;
+               } else
+                       xpad->xtype = XTYPE_XBOX;
+       }
        xpad->dev = input_dev;
        usb_make_path(udev, xpad->phys, sizeof(xpad->phys));
        strlcat(xpad->phys, "/input0", sizeof(xpad->phys));
@@ -666,11 +774,14 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
        input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
 
        /* set up buttons */
-       for (i = 0; xpad_btn[i] >= 0; i++)
-               set_bit(xpad_btn[i], input_dev->keybit);
-       if (xpad->xtype == XTYPE_XBOX360)
+       for (i = 0; xpad_common_btn[i] >= 0; i++)
+               set_bit(xpad_common_btn[i], input_dev->keybit);
+       if ((xpad->xtype == XTYPE_XBOX360) || (xpad->xtype == XTYPE_XBOX360W))
                for (i = 0; xpad360_btn[i] >= 0; i++)
                        set_bit(xpad360_btn[i], input_dev->keybit);
+       else
+               for (i = 0; xpad_btn[i] >= 0; i++)
+                       set_bit(xpad_btn[i], input_dev->keybit);
        if (xpad->dpad_mapping == MAP_DPAD_TO_BUTTONS)
                for (i = 0; xpad_btn_pad[i] >= 0; i++)
                        set_bit(xpad_btn_pad[i], input_dev->keybit);
@@ -707,8 +818,57 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
                goto fail4;
 
        usb_set_intfdata(intf, xpad);
+
+       /*
+        * Submit the int URB immediately rather than waiting for open
+        * because we get status messages from the device whether
+        * or not any controllers are attached.  In fact, it's
+        * exactly the message that a controller has arrived that
+        * we're waiting for.
+        */
+       if (xpad->xtype == XTYPE_XBOX360W) {
+               xpad->irq_in->dev = xpad->udev;
+               error = usb_submit_urb(xpad->irq_in, GFP_KERNEL);
+               if (error)
+                       goto fail4;
+
+               /*
+                * Set up the message that sets the LEDs on the
+                * controller when it shows up
+                */
+               xpad->bulk_out = usb_alloc_urb(0, GFP_KERNEL);
+               if(!xpad->bulk_out)
+                       goto fail5;
+
+               xpad->bdata = kzalloc(XPAD_PKT_LEN, GFP_KERNEL);
+               if(!xpad->bdata)
+                       goto fail6;
+
+               xpad->bdata[2] = 0x08;
+               switch (intf->cur_altsetting->desc.bInterfaceNumber) {
+               case 0:
+                       xpad->bdata[3] = 0x42;
+                       break;
+               case 2:
+                       xpad->bdata[3] = 0x43;
+                       break;
+               case 4:
+                       xpad->bdata[3] = 0x44;
+                       break;
+               case 6:
+                       xpad->bdata[3] = 0x45;
+               }
+
+               ep_irq_in = &intf->cur_altsetting->endpoint[1].desc;
+               usb_fill_bulk_urb(xpad->bulk_out, udev,
+                               usb_sndbulkpipe(udev, ep_irq_in->bEndpointAddress),
+                               xpad->bdata, XPAD_PKT_LEN, xpad_bulk_out, xpad);
+       }
+
        return 0;
 
+ fail6:        usb_free_urb(xpad->bulk_out);
+ fail5:        usb_kill_urb(xpad->irq_in);
  fail4:        usb_free_urb(xpad->irq_in);
  fail3:        xpad_deinit_output(xpad);
  fail2:        usb_buffer_free(udev, XPAD_PKT_LEN, xpad->idata, xpad->idata_dma);
@@ -727,6 +887,11 @@ static void xpad_disconnect(struct usb_interface *intf)
                xpad_led_disconnect(xpad);
                input_unregister_device(xpad->dev);
                xpad_deinit_output(xpad);
+               if (xpad->xtype == XTYPE_XBOX360W) {
+                       usb_kill_urb(xpad->bulk_out);
+                       usb_free_urb(xpad->bulk_out);
+                       usb_kill_urb(xpad->irq_in);
+               }
                usb_free_urb(xpad->irq_in);
                usb_buffer_free(xpad->udev, XPAD_PKT_LEN,
                                xpad->idata, xpad->idata_dma);
@@ -745,7 +910,7 @@ static int __init usb_xpad_init(void)
 {
        int result = usb_register(&xpad_driver);
        if (result == 0)
-               info(DRIVER_DESC ":" DRIVER_VERSION);
+               info(DRIVER_DESC);
        return result;
 }
 
diff --git a/drivers/input/joystick/zhenhua.c b/drivers/input/joystick/zhenhua.c
new file mode 100644 (file)
index 0000000..b585312
--- /dev/null
@@ -0,0 +1,243 @@
+/*
+ *  derived from "twidjoy.c"
+ *
+ *  Copyright (c) 2008 Martin Kebert
+ *  Copyright (c) 2001 Arndt Schoenewald
+ *  Copyright (c) 2000-2001 Vojtech Pavlik
+ *  Copyright (c) 2000 Mark Fletcher
+ *
+ */
+
+/*
+ * Driver for using a 4CH RC transmitter that speaks the Zhen Hua 5-byte
+ * protocol (Walkera Lama, EasyCopter etc.) as a joystick under Linux.
+ *
+ * RC transmitters using the Zhen Hua 5-byte protocol are cheap four-channel
+ * transmitters for controlling RC planes or helicopters, and they can be
+ * connected to a serial port.
+ * Data coming from the transmitter arrives in this order:
+ * 1. byte = synchronisation byte
+ * 2. byte = X axis
+ * 3. byte = Y axis
+ * 4. byte = RZ axis
+ * 5. byte = Z axis
+ * (and this is repeated)
+ *
+ * For questions or feedback regarding this driver module please contact:
+ * Martin Kebert <gkmarty@gmail.com> - but I am not a C-programmer nor kernel
+ * coder :-(
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/serio.h>
+#include <linux/init.h>
+
+#define DRIVER_DESC    "RC transmitter with 5-byte Zhen Hua protocol joystick driver"
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+
+/*
+ * Constants.
+ */
+
+#define ZHENHUA_MAX_LENGTH 5
+
+/*
+ * Zhen Hua data.
+ */
+
+struct zhenhua {
+       struct input_dev *dev;
+       int idx;
+       unsigned char data[ZHENHUA_MAX_LENGTH];
+       char phys[32];
+};
+
+
+/* bits in all incoming bytes need to be "reversed" */
+static int zhenhua_bitreverse(int x)
+{
+       x = ((x & 0xaa) >> 1) | ((x & 0x55) << 1);
+       x = ((x & 0xcc) >> 2) | ((x & 0x33) << 2);
+       x = ((x & 0xf0) >> 4) | ((x & 0x0f) << 4);
+       return x;
+}
+
+/*
+ * zhenhua_process_packet() decodes packets the driver receives from the
+ * RC transmitter. It updates the data accordingly.
+ */
+
+static void zhenhua_process_packet(struct zhenhua *zhenhua)
+{
+       struct input_dev *dev = zhenhua->dev;
+       unsigned char *data = zhenhua->data;
+
+       input_report_abs(dev, ABS_Y, data[1]);
+       input_report_abs(dev, ABS_X, data[2]);
+       input_report_abs(dev, ABS_RZ, data[3]);
+       input_report_abs(dev, ABS_Z, data[4]);
+
+       input_sync(dev);
+}
+
+/*
+ * zhenhua_interrupt() is called by the low level driver when characters
+ * are ready for us. We then buffer them for further processing, or call the
+ * packet processing routine.
+ */
+
+static irqreturn_t zhenhua_interrupt(struct serio *serio, unsigned char data, unsigned int flags)
+{
+       struct zhenhua *zhenhua = serio_get_drvdata(serio);
+
+       /* All Zhen Hua packets are 5 bytes. The fact that the first byte
+        * is always 0xf7 and all others are in range 0x32 - 0xc8 (50-200)
+        * can be used to check and regain sync. */
+
+       if (data == 0xef)
+               zhenhua->idx = 0;       /* this byte starts a new packet */
+       else if (zhenhua->idx == 0)
+               return IRQ_HANDLED;     /* wrong MSB -- ignore this byte */
+
+       if (zhenhua->idx < ZHENHUA_MAX_LENGTH)
+               zhenhua->data[zhenhua->idx++] = zhenhua_bitreverse(data);
+
+       if (zhenhua->idx == ZHENHUA_MAX_LENGTH) {
+               zhenhua_process_packet(zhenhua);
+               zhenhua->idx = 0;
+       }
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * zhenhua_disconnect() is the opposite of zhenhua_connect()
+ */
+
+static void zhenhua_disconnect(struct serio *serio)
+{
+       struct zhenhua *zhenhua = serio_get_drvdata(serio);
+
+       serio_close(serio);
+       serio_set_drvdata(serio, NULL);
+       input_unregister_device(zhenhua->dev);
+       kfree(zhenhua);
+}
+
+/*
+ * zhenhua_connect() is the routine that is called when someone adds a
+ * new serio device. It looks for a Zhen Hua transmitter and, if found, registers
+ * it as an input device.
+ */
+
+static int zhenhua_connect(struct serio *serio, struct serio_driver *drv)
+{
+       struct zhenhua *zhenhua;
+       struct input_dev *input_dev;
+       int err = -ENOMEM;
+
+       zhenhua = kzalloc(sizeof(struct zhenhua), GFP_KERNEL);
+       input_dev = input_allocate_device();
+       if (!zhenhua || !input_dev)
+               goto fail1;
+
+       zhenhua->dev = input_dev;
+       snprintf(zhenhua->phys, sizeof(zhenhua->phys), "%s/input0", serio->phys);
+
+       input_dev->name = "Zhen Hua 5-byte device";
+       input_dev->phys = zhenhua->phys;
+       input_dev->id.bustype = BUS_RS232;
+       input_dev->id.vendor = SERIO_ZHENHUA;
+       input_dev->id.product = 0x0001;
+       input_dev->id.version = 0x0100;
+       input_dev->dev.parent = &serio->dev;
+
+       input_dev->evbit[0] = BIT(EV_ABS);
+       input_set_abs_params(input_dev, ABS_X, 50, 200, 0, 0);
+       input_set_abs_params(input_dev, ABS_Y, 50, 200, 0, 0);
+       input_set_abs_params(input_dev, ABS_Z, 50, 200, 0, 0);
+       input_set_abs_params(input_dev, ABS_RZ, 50, 200, 0, 0);
+
+       serio_set_drvdata(serio, zhenhua);
+
+       err = serio_open(serio, drv);
+       if (err)
+               goto fail2;
+
+       err = input_register_device(zhenhua->dev);
+       if (err)
+               goto fail3;
+
+       return 0;
+
+ fail3:        serio_close(serio);
+ fail2:        serio_set_drvdata(serio, NULL);
+ fail1:        input_free_device(input_dev);
+       kfree(zhenhua);
+       return err;
+}
+
+/*
+ * The serio driver structure.
+ */
+
+static struct serio_device_id zhenhua_serio_ids[] = {
+       {
+               .type   = SERIO_RS232,
+               .proto  = SERIO_ZHENHUA,
+               .id     = SERIO_ANY,
+               .extra  = SERIO_ANY,
+       },
+       { 0 }
+};
+
+MODULE_DEVICE_TABLE(serio, zhenhua_serio_ids);
+
+static struct serio_driver zhenhua_drv = {
+       .driver         = {
+               .name   = "zhenhua",
+       },
+       .description    = DRIVER_DESC,
+       .id_table       = zhenhua_serio_ids,
+       .interrupt      = zhenhua_interrupt,
+       .connect        = zhenhua_connect,
+       .disconnect     = zhenhua_disconnect,
+};
+
+/*
+ * The functions for inserting/removing us as a module.
+ */
+
+static int __init zhenhua_init(void)
+{
+       return serio_register_driver(&zhenhua_drv);
+}
+
+static void __exit zhenhua_exit(void)
+{
+       serio_unregister_driver(&zhenhua_drv);
+}
+
+module_init(zhenhua_init);
+module_exit(zhenhua_exit);
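
As a standalone illustration of the 5-byte frame format and bit reversal described in the header comment of this new driver, the sketch below decodes one invented frame; the sync byte and the 50-200 value range match the driver, the remaining raw byte values are made up, and the axis assignment follows zhenhua_process_packet().

#include <stdio.h>

/* Same bit reversal as zhenhua_bitreverse() above. */
static unsigned char bitrev8(unsigned char x)
{
        x = (unsigned char)(((x & 0xaa) >> 1) | ((x & 0x55) << 1));
        x = (unsigned char)(((x & 0xcc) >> 2) | ((x & 0x33) << 2));
        x = (unsigned char)(((x & 0xf0) >> 4) | ((x & 0x0f) << 4));
        return x;
}

int main(void)
{
        /* Invented raw frame: 0xef is the sync byte (0xf7 once reversed). */
        unsigned char raw[5] = { 0xef, 0x0a, 0x8a, 0x4a, 0xca };
        unsigned char frame[5];
        int i;

        for (i = 0; i < 5; i++)
                frame[i] = bitrev8(raw[i]);

        /* zhenhua_process_packet() reports bytes 1..4 as Y, X, RZ, Z. */
        printf("Y=%u X=%u RZ=%u Z=%u\n",
               frame[1], frame[2], frame[3], frame[4]);
        return 0;
}
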
index 72abc196ce66580000b25e0800e5b4908fc03383..a293e8b3f508a4ef2303bbe5a03391bc8a3694e2 100644 (file)
@@ -156,11 +156,15 @@ static int __devexit aaedkbd_remove(struct platform_device *pdev)
        return 0;
 }
 
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:aaed2000-keyboard");
+
 static struct platform_driver aaedkbd_driver = {
        .probe          = aaedkbd_probe,
        .remove         = __devexit_p(aaedkbd_remove),
        .driver         = {
                .name   = "aaed2000-keyboard",
+               .owner  = THIS_MODULE,
        },
 };
 
index 05e3494cf8b87f4bb80fc9b904cda36099e6b6e5..54ed8e2e1c021c540f2c06adaef5ae009f3d056b 100644 (file)
@@ -312,6 +312,8 @@ static int __devinit bfin_kpad_probe(struct platform_device *pdev)
 
        bfin_write_KPAD_CTL(bfin_read_KPAD_CTL() | KPAD_EN);
 
+       device_init_wakeup(&pdev->dev, 1);
+
        printk(KERN_ERR DRV_NAME
                ": Blackfin BF54x Keypad registered IRQ %d\n", bf54x_kpad->irq);
 
@@ -354,12 +356,40 @@ static int __devexit bfin_kpad_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM
+static int bfin_kpad_suspend(struct platform_device *pdev, pm_message_t state)
+{
+       struct bf54x_kpad *bf54x_kpad = platform_get_drvdata(pdev);
+
+       if (device_may_wakeup(&pdev->dev))
+               enable_irq_wake(bf54x_kpad->irq);
+
+       return 0;
+}
+
+static int bfin_kpad_resume(struct platform_device *pdev)
+{
+       struct bf54x_kpad *bf54x_kpad = platform_get_drvdata(pdev);
+
+       if (device_may_wakeup(&pdev->dev))
+               disable_irq_wake(bf54x_kpad->irq);
+
+       return 0;
+}
+#else
+# define bfin_kpad_suspend NULL
+# define bfin_kpad_resume  NULL
+#endif
+
 struct platform_driver bfin_kpad_device_driver = {
-       .probe          = bfin_kpad_probe,
-       .remove         = __devexit_p(bfin_kpad_remove),
        .driver         = {
                .name   = DRV_NAME,
-       }
+               .owner  = THIS_MODULE,
+       },
+       .probe          = bfin_kpad_probe,
+       .remove         = __devexit_p(bfin_kpad_remove),
+       .suspend        = bfin_kpad_suspend,
+       .resume         = bfin_kpad_resume,
 };
 
 static int __init bfin_kpad_init(void)
@@ -378,3 +408,4 @@ module_exit(bfin_kpad_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
 MODULE_DESCRIPTION("Keypad driver for BF54x Processors");
+MODULE_ALIAS("platform:bf54x-keys");
index 5d6cc7f1dc947d74c9de6a955080fb388c087c6d..29fbec6218b92cfc12f70c5e19a9df03dc301e7c 100644 (file)
@@ -393,6 +393,7 @@ static struct platform_driver corgikbd_driver = {
        .resume         = corgikbd_resume,
        .driver         = {
                .name   = "corgi-keyboard",
+               .owner  = THIS_MODULE,
        },
 };
 
@@ -412,3 +413,4 @@ module_exit(corgikbd_exit);
 MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
 MODULE_DESCRIPTION("Corgi Keyboard Driver");
 MODULE_LICENSE("GPLv2");
+MODULE_ALIAS("platform:corgi-keyboard");
index 6a9ca4bdcb74acfa1dd9abc5bfec06bd2abeda62..bbd00c3fe98ca18999b19c011a8c0040f3e9fb9b 100644 (file)
@@ -43,10 +43,11 @@ static irqreturn_t gpio_keys_isr(int irq, void *dev_id)
 
                        input_event(input, type, button->code, !!state);
                        input_sync(input);
+                       return IRQ_HANDLED;
                }
        }
 
-       return IRQ_HANDLED;
+       return IRQ_NONE;
 }
 
 static int __devinit gpio_keys_probe(struct platform_device *pdev)
@@ -213,6 +214,7 @@ struct platform_driver gpio_keys_device_driver = {
        .resume         = gpio_keys_resume,
        .driver         = {
                .name   = "gpio-keys",
+               .owner  = THIS_MODULE,
        }
 };
 
@@ -232,3 +234,4 @@ module_exit(gpio_keys_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Phil Blundell <pb@handhelds.org>");
 MODULE_DESCRIPTION("Keyboard driver for CPU GPIOs");
+MODULE_ALIAS("platform:gpio-keys");
index a23633a2e1b46654979633556ef8564d1e160aa3..9387da343f9753edce0133f2d33436d9d1ddf663 100644 (file)
@@ -254,6 +254,7 @@ static int __devexit jornada680kbd_remove(struct platform_device *pdev)
 static struct platform_driver jornada680kbd_driver = {
        .driver = {
                .name   = "jornada680_kbd",
+               .owner  = THIS_MODULE,
        },
        .probe  = jornada680kbd_probe,
        .remove = __devexit_p(jornada680kbd_remove),
@@ -275,3 +276,4 @@ module_exit(jornada680kbd_exit);
 MODULE_AUTHOR("Kristoffer Ericson <kristoffer.ericson@gmail.com>");
 MODULE_DESCRIPTION("HP Jornada 620/660/680/690 Keyboard Driver");
 MODULE_LICENSE("GPLv2");
+MODULE_ALIAS("platform:jornada680_kbd");
index 986f93cfc6b8a0211020e8978f374c9c29b272d2..a1164a0c7736c4b557daa942bd3014e2861c8dc2 100644 (file)
@@ -162,9 +162,13 @@ static int __devexit jornada720_kbd_remove(struct platform_device *pdev)
        return 0;
 }
 
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:jornada720_kbd");
+
 static struct platform_driver jornada720_kbd_driver = {
        .driver  = {
                .name    = "jornada720_kbd",
+               .owner  = THIS_MODULE,
         },
        .probe   = jornada720_kbd_probe,
        .remove  = __devexit_p(jornada720_kbd_remove),
index 5a0ca18d6755827b77d776af407364b1a8b2d30d..9caed30f3bbba29203377d45e61e633a21cfec7f 100644 (file)
@@ -1,14 +1,12 @@
 /*
- *  Copyright (c) 2005 John Lenz
+ * LoCoMo keyboard driver for Linux-based ARM PDAs:
+ *     - SHARP Zaurus Collie (SL-5500)
+ *     - SHARP Zaurus Poodle (SL-5600)
  *
+ * Copyright (c) 2005 John Lenz
  * Based on from xtkbd.c
- */
-
-/*
- * LoCoMo keyboard driver for Linux/ARM
- */
-
-/*
+ *
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
@@ -47,7 +45,8 @@ MODULE_LICENSE("GPL");
 #define KEY_CONTACT            KEY_F18
 #define KEY_CENTER             KEY_F15
 
-static unsigned char locomokbd_keycode[LOCOMOKBD_NUMKEYS] = {
+static const unsigned char
+locomokbd_keycode[LOCOMOKBD_NUMKEYS] __devinitconst = {
        0, KEY_ESC, KEY_ACTIVITY, 0, 0, 0, 0, 0, 0, 0,                          /* 0 - 9 */
        0, 0, 0, 0, 0, 0, 0, KEY_MENU, KEY_HOME, KEY_CONTACT,                   /* 10 - 19 */
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,                                           /* 20 - 29 */
@@ -67,22 +66,21 @@ static unsigned char locomokbd_keycode[LOCOMOKBD_NUMKEYS] = {
 #define KB_COLS                        8
 #define KB_ROWMASK(r)          (1 << (r))
 #define SCANCODE(c,r)          ( ((c)<<4) + (r) + 1 )
-#define        NR_SCANCODES            128
 
 #define KB_DELAY               8
 #define SCAN_INTERVAL          (HZ/10)
-#define LOCOMOKBD_PRESSED      1
 
 struct locomokbd {
        unsigned char keycode[LOCOMOKBD_NUMKEYS];
        struct input_dev *input;
        char phys[32];
 
-       struct locomo_dev *ldev;
        unsigned long base;
        spinlock_t lock;
 
        struct timer_list timer;
+       unsigned long suspend_jiffies;
+       unsigned int count_cancel;
 };
 
 /* helper functions for reading the keyboard matrix */
@@ -128,7 +126,7 @@ static inline void locomokbd_reset_col(unsigned long membase, int col)
 /* Scan the hardware keyboard and push any changes up through the input layer */
 static void locomokbd_scankeyboard(struct locomokbd *locomokbd)
 {
-       unsigned int row, col, rowd, scancode;
+       unsigned int row, col, rowd;
        unsigned long flags;
        unsigned int num_pressed;
        unsigned long membase = locomokbd->base;
@@ -145,13 +143,33 @@ static void locomokbd_scankeyboard(struct locomokbd *locomokbd)
 
                rowd = ~locomo_readl(membase + LOCOMO_KIB);
                for (row = 0; row < KB_ROWS; row++) {
+                       unsigned int scancode, pressed, key;
+
                        scancode = SCANCODE(col, row);
-                       if (rowd & KB_ROWMASK(row)) {
-                               num_pressed += 1;
-                               input_report_key(locomokbd->input, locomokbd->keycode[scancode], 1);
-                       } else {
-                               input_report_key(locomokbd->input, locomokbd->keycode[scancode], 0);
-                       }
+                       pressed = rowd & KB_ROWMASK(row);
+                       key = locomokbd->keycode[scancode];
+
+                       input_report_key(locomokbd->input, key, pressed);
+                       if (likely(!pressed))
+                               continue;
+
+                       num_pressed++;
+
+                       /* The "Cancel/ESC" key is labeled "On/Off" on
+                        * Collie and Poodle and should suspend the device
+                        * if it was pressed for more than a second. */
+                       if (unlikely(key == KEY_ESC)) {
+                               if (!time_after(jiffies,
+                                       locomokbd->suspend_jiffies + HZ))
+                                       continue;
+                               if (locomokbd->count_cancel++
+                                       != (HZ/SCAN_INTERVAL + 1))
+                                       continue;
+                               input_event(locomokbd->input, EV_PWR,
+                                       KEY_SUSPEND, 1);
+                               locomokbd->suspend_jiffies = jiffies;
+                       } else
+                               locomokbd->count_cancel = 0;
                }
                locomokbd_reset_col(membase, col);
        }
@@ -162,6 +180,8 @@ static void locomokbd_scankeyboard(struct locomokbd *locomokbd)
        /* if any keys are pressed, enable the timer */
        if (num_pressed)
                mod_timer(&locomokbd->timer, jiffies + SCAN_INTERVAL);
+       else
+               locomokbd->count_cancel = 0;
 
        spin_unlock_irqrestore(&locomokbd->lock, flags);
 }
@@ -186,10 +206,11 @@ static irqreturn_t locomokbd_interrupt(int irq, void *dev_id)
 static void locomokbd_timer_callback(unsigned long data)
 {
        struct locomokbd *locomokbd = (struct locomokbd *) data;
+
        locomokbd_scankeyboard(locomokbd);
 }
 
-static int locomokbd_probe(struct locomo_dev *dev)
+static int __devinit locomokbd_probe(struct locomo_dev *dev)
 {
        struct locomokbd *locomokbd;
        struct input_dev *input_dev;
@@ -211,7 +232,6 @@ static int locomokbd_probe(struct locomo_dev *dev)
                goto err_free_mem;
        }
 
-       locomokbd->ldev = dev;
        locomo_set_drvdata(dev, locomokbd);
 
        locomokbd->base = (unsigned long) dev->mapbase;
@@ -222,6 +242,8 @@ static int locomokbd_probe(struct locomo_dev *dev)
        locomokbd->timer.function = locomokbd_timer_callback;
        locomokbd->timer.data = (unsigned long) locomokbd;
 
+       locomokbd->suspend_jiffies = jiffies;
+
        locomokbd->input = input_dev;
        strcpy(locomokbd->phys, "locomokbd/input0");
 
@@ -233,9 +255,10 @@ static int locomokbd_probe(struct locomo_dev *dev)
        input_dev->id.version = 0x0100;
        input_dev->dev.parent = &dev->dev;
 
-       input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
+       input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) |
+                               BIT_MASK(EV_PWR);
        input_dev->keycode = locomokbd->keycode;
-       input_dev->keycodesize = sizeof(unsigned char);
+       input_dev->keycodesize = sizeof(locomokbd_keycode[0]);
        input_dev->keycodemax = ARRAY_SIZE(locomokbd_keycode);
 
        memcpy(locomokbd->keycode, locomokbd_keycode, sizeof(locomokbd->keycode));
@@ -268,7 +291,7 @@ static int locomokbd_probe(struct locomo_dev *dev)
        return err;
 }
 
-static int locomokbd_remove(struct locomo_dev *dev)
+static int __devexit locomokbd_remove(struct locomo_dev *dev)
 {
        struct locomokbd *locomokbd = locomo_get_drvdata(dev);
 
@@ -292,7 +315,7 @@ static struct locomo_driver keyboard_driver = {
        },
        .devid  = LOCOMO_DEVID_KEYBOARD,
        .probe  = locomokbd_probe,
-       .remove = locomokbd_remove,
+       .remove = __devexit_p(locomokbd_remove),
 };
 
 static int __init locomokbd_init(void)
index babc913d5492b617b388ee3eb7781ce0facd535e..10afd2068068aa35fe5ae1f84d0f034cf746e625 100644 (file)
@@ -352,6 +352,9 @@ static int __init omap_kp_probe(struct platform_device *pdev)
                        }
                        omap_set_gpio_direction(row_gpios[row_idx], 1);
                }
+       } else {
+               col_idx = 0;
+               row_idx = 0;
        }
 
        setup_timer(&omap_kp->timer, omap_kp_timer, (unsigned long)omap_kp);
@@ -415,10 +418,10 @@ err4:
 err3:
        device_remove_file(&pdev->dev, &dev_attr_enable);
 err2:
-       for (i = row_idx-1; i >=0; i--)
+       for (i = row_idx - 1; i >=0; i--)
                omap_free_gpio(row_gpios[i]);
 err1:
-       for (i = col_idx-1; i >=0; i--)
+       for (i = col_idx - 1; i >=0; i--)
                omap_free_gpio(col_gpios[i]);
 
        kfree(omap_kp);
@@ -464,6 +467,7 @@ static struct platform_driver omap_kp_driver = {
        .resume         = omap_kp_resume,
        .driver         = {
                .name   = "omap-keypad",
+               .owner  = THIS_MODULE,
        },
 };
 
@@ -484,3 +488,4 @@ module_exit(omap_kp_exit);
 MODULE_AUTHOR("Timo Teräs");
 MODULE_DESCRIPTION("OMAP Keypad Driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:omap-keypad");
index 4e651c11c1dad71c64fe033381447cb31212f079..3dea0c5077a9ad623b35dfb7a5c627f6da5a5d28 100644 (file)
@@ -545,6 +545,9 @@ static int __devexit pxa27x_keypad_remove(struct platform_device *pdev)
        return 0;
 }
 
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:pxa27x-keypad");
+
 static struct platform_driver pxa27x_keypad_driver = {
        .probe          = pxa27x_keypad_probe,
        .remove         = __devexit_p(pxa27x_keypad_remove),
@@ -552,6 +555,7 @@ static struct platform_driver pxa27x_keypad_driver = {
        .resume         = pxa27x_keypad_resume,
        .driver         = {
                .name   = "pxa27x-keypad",
+               .owner  = THIS_MODULE,
        },
 };
 
index 0be74bfc58fe0ae084f6f7ec1e88a38eeabf0554..61e401bc91099abe18d200d9af486aac8a7706a8 100644 (file)
@@ -495,3 +495,4 @@ module_exit(spitzkbd_exit);
 MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
 MODULE_DESCRIPTION("Spitz Keyboard Driver");
 MODULE_LICENSE("GPLv2");
+MODULE_ALIAS("platform:spitz-keyboard");
index 3884d1e3f070f33a74c68da0f13f27d94aa4cb53..94e444b4ee1523c4539b687383eaa9c35a52fcfe 100644 (file)
@@ -52,7 +52,7 @@ KEY_X, KEY_F, KEY_SPACE, KEY_APOSTROPHE, TOSA_KEY_MAIL, KEY_LEFT, KEY_DOWN, KEY_
 struct tosakbd {
        unsigned int keycode[ARRAY_SIZE(tosakbd_keycode)];
        struct input_dev *input;
-
+       int suspended;
        spinlock_t lock; /* protect kbd scanning */
        struct timer_list timer;
 };
@@ -133,6 +133,9 @@ static void tosakbd_scankeyboard(struct platform_device *dev)
 
        spin_lock_irqsave(&tosakbd->lock, flags);
 
+       if (tosakbd->suspended)
+               goto out;
+
        for (col = 0; col < TOSA_KEY_STROBE_NUM; col++) {
                /*
                 * Discharge the output driver capacitance
@@ -174,6 +177,7 @@ static void tosakbd_scankeyboard(struct platform_device *dev)
        if (num_pressed)
                mod_timer(&tosakbd->timer, jiffies + SCAN_INTERVAL);
 
+ out:
        spin_unlock_irqrestore(&tosakbd->lock, flags);
 }
 
@@ -200,6 +204,7 @@ static irqreturn_t tosakbd_interrupt(int irq, void *__dev)
 static void tosakbd_timer_callback(unsigned long __dev)
 {
        struct platform_device *dev = (struct platform_device *)__dev;
+
        tosakbd_scankeyboard(dev);
 }
 
@@ -207,6 +212,13 @@ static void tosakbd_timer_callback(unsigned long __dev)
 static int tosakbd_suspend(struct platform_device *dev, pm_message_t state)
 {
        struct tosakbd *tosakbd = platform_get_drvdata(dev);
+       unsigned long flags;
+
+       spin_lock_irqsave(&tosakbd->lock, flags);
+       PGSR1 = (PGSR1 & ~TOSA_GPIO_LOW_STROBE_BIT);
+       PGSR2 = (PGSR2 & ~TOSA_GPIO_HIGH_STROBE_BIT);
+       tosakbd->suspended = 1;
+       spin_unlock_irqrestore(&tosakbd->lock, flags);
 
        del_timer_sync(&tosakbd->timer);
 
@@ -215,6 +227,9 @@ static int tosakbd_suspend(struct platform_device *dev, pm_message_t state)
 
 static int tosakbd_resume(struct platform_device *dev)
 {
+       struct tosakbd *tosakbd = platform_get_drvdata(dev);
+
+       tosakbd->suspended = 0;
        tosakbd_scankeyboard(dev);
 
        return 0;
@@ -365,8 +380,8 @@ fail:
        return error;
 }
 
-static int __devexit tosakbd_remove(struct platform_device *dev) {
-
+static int __devexit tosakbd_remove(struct platform_device *dev)
+{
        int i;
        struct tosakbd *tosakbd = platform_get_drvdata(dev);
 
@@ -394,6 +409,7 @@ static struct platform_driver tosakbd_driver = {
        .resume         = tosakbd_resume,
        .driver         = {
                .name   = "tosa-keyboard",
+               .owner  = THIS_MODULE,
        },
 };
 
@@ -413,3 +429,4 @@ module_exit(tosakbd_exit);
 MODULE_AUTHOR("Dirk Opfer <Dirk@Opfer-Online.de>");
 MODULE_DESCRIPTION("Tosa Keyboard Driver");
 MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:tosa-keyboard");
index 5511ef006a66c591cf59df2ffde421001f282758..6a1f48b76e3298649130c8d341123bcbf2e0f131 100644 (file)
@@ -148,6 +148,9 @@ static int __devexit cobalt_buttons_remove(struct platform_device *pdev)
        return 0;
 }
 
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:Cobalt buttons");
+
 static struct platform_driver cobalt_buttons_driver = {
        .probe  = cobalt_buttons_probe,
        .remove = __devexit_p(cobalt_buttons_remove),
index 0936d6ba015c5a1221ee1021e7f6844670944dc4..3392901848711f4f5053b4d042387ce5d69a2611 100644 (file)
@@ -171,10 +171,14 @@ static int __devexit gpio_mouse_remove(struct platform_device *pdev)
        return 0;
 }
 
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:gpio_mouse");
+
 struct platform_driver gpio_mouse_device_driver = {
        .remove         = __devexit_p(gpio_mouse_remove),
        .driver         = {
                .name   = "gpio_mouse",
+               .owner  = THIS_MODULE,
        }
 };
 
index b88569e21d60e017625a073e44c765601d8e1017..ec4b6610f730ea7d18a1e5e924201be4ac0a1123 100644 (file)
@@ -88,6 +88,16 @@ config SERIO_RPCKBD
          To compile this driver as a module, choose M here: the
          module will be called rpckbd.
 
+config SERIO_AT32PSIF
+       tristate "AVR32 PSIF PS/2 keyboard and mouse controller"
+       depends on AVR32
+       help
+         Say Y here if you want to use the PSIF peripheral on AVR32 devices
+         and connect a PS/2 keyboard and/or mouse to it.
+
+         To compile this driver as a module, choose M here: the module will
+         be called at32psif.
+
 config SERIO_AMBAKMI
        tristate "AMBA KMI keyboard controller"
        depends on ARM_AMBA
index 4155197867a33be98b0f106cff9eb243c2e98440..38b886887cbc2459aeae5d76c850b3031780061b 100644 (file)
@@ -12,6 +12,7 @@ obj-$(CONFIG_SERIO_CT82C710)  += ct82c710.o
 obj-$(CONFIG_SERIO_RPCKBD)     += rpckbd.o
 obj-$(CONFIG_SERIO_SA1111)     += sa1111ps2.o
 obj-$(CONFIG_SERIO_AMBAKMI)    += ambakmi.o
+obj-$(CONFIG_SERIO_AT32PSIF)   += at32psif.o
 obj-$(CONFIG_SERIO_Q40KBD)     += q40kbd.o
 obj-$(CONFIG_SERIO_GSCPS2)     += gscps2.o
 obj-$(CONFIG_HP_SDC)           += hp_sdc.o
diff --git a/drivers/input/serio/at32psif.c b/drivers/input/serio/at32psif.c
new file mode 100644 (file)
index 0000000..41fda8c
--- /dev/null
@@ -0,0 +1,375 @@
+/*
+ * Copyright (C) 2007 Atmel Corporation
+ *
+ * Driver for the AT32AP700X PS/2 controller (PSIF).
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/serio.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+
+/* PSIF register offsets */
+#define PSIF_CR                                0x00
+#define PSIF_RHR                       0x04
+#define PSIF_THR                       0x08
+#define PSIF_SR                                0x10
+#define PSIF_IER                       0x14
+#define PSIF_IDR                       0x18
+#define PSIF_IMR                       0x1c
+#define PSIF_PSR                       0x24
+
+/* Bitfields in control register. */
+#define PSIF_CR_RXDIS_OFFSET           1
+#define PSIF_CR_RXDIS_SIZE             1
+#define PSIF_CR_RXEN_OFFSET            0
+#define PSIF_CR_RXEN_SIZE              1
+#define PSIF_CR_SWRST_OFFSET           15
+#define PSIF_CR_SWRST_SIZE             1
+#define PSIF_CR_TXDIS_OFFSET           9
+#define PSIF_CR_TXDIS_SIZE             1
+#define PSIF_CR_TXEN_OFFSET            8
+#define PSIF_CR_TXEN_SIZE              1
+
+/* Bitfields in interrupt disable, enable, mask and status register. */
+#define PSIF_NACK_OFFSET               8
+#define PSIF_NACK_SIZE                 1
+#define PSIF_OVRUN_OFFSET              5
+#define PSIF_OVRUN_SIZE                        1
+#define PSIF_PARITY_OFFSET             9
+#define PSIF_PARITY_SIZE               1
+#define PSIF_RXRDY_OFFSET              4
+#define PSIF_RXRDY_SIZE                        1
+#define PSIF_TXEMPTY_OFFSET            1
+#define PSIF_TXEMPTY_SIZE              1
+#define PSIF_TXRDY_OFFSET              0
+#define PSIF_TXRDY_SIZE                        1
+
+/* Bitfields in prescale register. */
+#define PSIF_PSR_PRSCV_OFFSET          0
+#define PSIF_PSR_PRSCV_SIZE            12
+
+/* Bitfields in receive hold register. */
+#define PSIF_RHR_RXDATA_OFFSET         0
+#define PSIF_RHR_RXDATA_SIZE           8
+
+/* Bitfields in transmit hold register. */
+#define PSIF_THR_TXDATA_OFFSET         0
+#define PSIF_THR_TXDATA_SIZE           8
+
+/* Bit manipulation macros */
+#define PSIF_BIT(name)                                 \
+       (1 << PSIF_##name##_OFFSET)
+
+#define PSIF_BF(name, value)                           \
+       (((value) & ((1 << PSIF_##name##_SIZE) - 1))    \
+        << PSIF_##name##_OFFSET)
+
+#define PSIF_BFEXT(name, value)                                \
+       (((value) >> PSIF_##name##_OFFSET)              \
+        & ((1 << PSIF_##name##_SIZE) - 1))
+
+#define PSIF_BFINS(name, value, old)                   \
+       (((old) & ~(((1 << PSIF_##name##_SIZE) - 1)     \
+                   << PSIF_##name##_OFFSET))           \
+        | PSIF_BF(name, value))
+
+/* Register access macros */
+#define psif_readl(port, reg)                          \
+       __raw_readl((port)->regs + PSIF_##reg)
+
+#define psif_writel(port, reg, value)                  \
+       __raw_writel((value), (port)->regs + PSIF_##reg)
+
+struct psif {
+       struct platform_device  *pdev;
+       struct clk              *pclk;
+       struct serio            *io;
+       void __iomem            *regs;
+       unsigned int            irq;
+       unsigned int            open;
+       /* Prevent concurrent writes to PSIF THR. */
+       spinlock_t              lock;
+};
+
+static irqreturn_t psif_interrupt(int irq, void *_ptr)
+{
+       struct psif *psif = _ptr;
+       int retval = IRQ_NONE;
+       unsigned int io_flags = 0;
+       unsigned long status;
+
+       status = psif_readl(psif, SR);
+
+       if (status & PSIF_BIT(RXRDY)) {
+               unsigned char val = (unsigned char) psif_readl(psif, RHR);
+
+               if (status & PSIF_BIT(PARITY))
+                       io_flags |= SERIO_PARITY;
+               if (status & PSIF_BIT(OVRUN))
+                       dev_err(&psif->pdev->dev, "overrun read error\n");
+
+               serio_interrupt(psif->io, val, io_flags);
+
+               retval = IRQ_HANDLED;
+       }
+
+       return retval;
+}
+
+static int psif_write(struct serio *io, unsigned char val)
+{
+       struct psif *psif = io->port_data;
+       unsigned long flags;
+       int timeout = 10;
+       int retval = 0;
+
+       spin_lock_irqsave(&psif->lock, flags);
+
+       while (!(psif_readl(psif, SR) & PSIF_BIT(TXEMPTY)) && timeout--)
+               msleep(10);
+
+       if (timeout >= 0) {
+               psif_writel(psif, THR, val);
+       } else {
+               dev_dbg(&psif->pdev->dev, "timeout writing to THR\n");
+               retval = -EBUSY;
+       }
+
+       spin_unlock_irqrestore(&psif->lock, flags);
+
+       return retval;
+}
+
+static int psif_open(struct serio *io)
+{
+       struct psif *psif = io->port_data;
+       int retval;
+
+       retval = clk_enable(psif->pclk);
+       if (retval)
+               goto out;
+
+       psif_writel(psif, CR, PSIF_BIT(CR_TXEN) | PSIF_BIT(CR_RXEN));
+       psif_writel(psif, IER, PSIF_BIT(RXRDY));
+
+       psif->open = 1;
+out:
+       return retval;
+}
+
+static void psif_close(struct serio *io)
+{
+       struct psif *psif = io->port_data;
+
+       psif->open = 0;
+
+       psif_writel(psif, IDR, ~0UL);
+       psif_writel(psif, CR, PSIF_BIT(CR_TXDIS) | PSIF_BIT(CR_RXDIS));
+
+       clk_disable(psif->pclk);
+}
+
+static void psif_set_prescaler(struct psif *psif)
+{
+       unsigned long prscv;
+       unsigned long rate = clk_get_rate(psif->pclk);
+
+       /* PRSCV = Pulse length (100 us) * PSIF module frequency. */
+       prscv = 100 * (rate / 1000000UL);
+
+       if (prscv > ((1<<PSIF_PSR_PRSCV_SIZE) - 1)) {
+               prscv = (1<<PSIF_PSR_PRSCV_SIZE) - 1;
+               dev_dbg(&psif->pdev->dev, "pclk too fast, "
+                               "prescaler set to max\n");
+       }
+
+       clk_enable(psif->pclk);
+       psif_writel(psif, PSR, prscv);
+       clk_disable(psif->pclk);
+}
+
+static int __init psif_probe(struct platform_device *pdev)
+{
+       struct resource *regs;
+       struct psif *psif;
+       struct serio *io;
+       struct clk *pclk;
+       int irq;
+       int ret;
+
+       psif = kzalloc(sizeof(struct psif), GFP_KERNEL);
+       if (!psif) {
+               dev_dbg(&pdev->dev, "out of memory\n");
+               ret = -ENOMEM;
+               goto out;
+       }
+       psif->pdev = pdev;
+
+       io = kzalloc(sizeof(struct serio), GFP_KERNEL);
+       if (!io) {
+               dev_dbg(&pdev->dev, "out of memory\n");
+               ret = -ENOMEM;
+               goto out_free_psif;
+       }
+       psif->io = io;
+
+       regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!regs) {
+               dev_dbg(&pdev->dev, "no mmio resources defined\n");
+               ret = -ENOMEM;
+               goto out_free_io;
+       }
+
+       psif->regs = ioremap(regs->start, regs->end - regs->start + 1);
+       if (!psif->regs) {
+               ret = -ENOMEM;
+               dev_dbg(&pdev->dev, "could not map I/O memory\n");
+               goto out_free_io;
+       }
+
+       pclk = clk_get(&pdev->dev, "pclk");
+       if (IS_ERR(pclk)) {
+               dev_dbg(&pdev->dev, "could not get peripheral clock\n");
+               ret = PTR_ERR(pclk);
+               goto out_iounmap;
+       }
+       psif->pclk = pclk;
+
+       /* Reset the PSIF so it starts in a known state. */
+       ret = clk_enable(pclk);
+       if (ret) {
+               dev_dbg(&pdev->dev, "could not enable pclk\n");
+               goto out_put_clk;
+       }
+       psif_writel(psif, CR, PSIF_BIT(CR_SWRST));
+       clk_disable(pclk);
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               dev_dbg(&pdev->dev, "could not get irq\n");
+               ret = -ENXIO;
+               goto out_put_clk;
+       }
+       ret = request_irq(irq, psif_interrupt, IRQF_SHARED, "at32psif", psif);
+       if (ret) {
+               dev_dbg(&pdev->dev, "could not request irq %d\n", irq);
+               goto out_put_clk;
+       }
+       psif->irq = irq;
+
+       io->id.type     = SERIO_8042;
+       io->write       = psif_write;
+       io->open        = psif_open;
+       io->close       = psif_close;
+       snprintf(io->name, sizeof(io->name), "AVR32 PS/2 port%d", pdev->id);
+       snprintf(io->phys, sizeof(io->phys), "at32psif/serio%d", pdev->id);
+       io->port_data   = psif;
+       io->dev.parent  = &pdev->dev;
+
+       psif_set_prescaler(psif);
+
+       spin_lock_init(&psif->lock);
+       serio_register_port(psif->io);
+       platform_set_drvdata(pdev, psif);
+
+       dev_info(&pdev->dev, "Atmel AVR32 PSIF PS/2 driver on 0x%08x irq %d\n",
+                       (int)psif->regs, psif->irq);
+
+       return 0;
+
+out_put_clk:
+       clk_put(psif->pclk);
+out_iounmap:
+       iounmap(psif->regs);
+out_free_io:
+       kfree(io);
+out_free_psif:
+       kfree(psif);
+out:
+       return ret;
+}
+
+static int __exit psif_remove(struct platform_device *pdev)
+{
+       struct psif *psif = platform_get_drvdata(pdev);
+
+       psif_writel(psif, IDR, ~0UL);
+       psif_writel(psif, CR, PSIF_BIT(CR_TXDIS) | PSIF_BIT(CR_RXDIS));
+
+       serio_unregister_port(psif->io);
+       iounmap(psif->regs);
+       free_irq(psif->irq, psif);
+       clk_put(psif->pclk);
+       kfree(psif);
+
+       platform_set_drvdata(pdev, NULL);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM
+static int psif_suspend(struct platform_device *pdev, pm_message_t state)
+{
+       struct psif *psif = platform_get_drvdata(pdev);
+
+       if (psif->open) {
+               psif_writel(psif, CR, PSIF_BIT(CR_RXDIS) | PSIF_BIT(CR_TXDIS));
+               clk_disable(psif->pclk);
+       }
+
+       return 0;
+}
+
+static int psif_resume(struct platform_device *pdev)
+{
+       struct psif *psif = platform_get_drvdata(pdev);
+
+       if (psif->open) {
+               clk_enable(psif->pclk);
+               psif_set_prescaler(psif);
+               psif_writel(psif, CR, PSIF_BIT(CR_RXEN) | PSIF_BIT(CR_TXEN));
+       }
+
+       return 0;
+}
+#else
+#define psif_suspend   NULL
+#define psif_resume    NULL
+#endif
+
+static struct platform_driver psif_driver = {
+       .remove         = __exit_p(psif_remove),
+       .driver         = {
+               .name   = "atmel_psif",
+       },
+       .suspend        = psif_suspend,
+       .resume         = psif_resume,
+};
+
+static int __init psif_init(void)
+{
+       return platform_driver_probe(&psif_driver, psif_probe);
+}
+
+static void __exit psif_exit(void)
+{
+       platform_driver_unregister(&psif_driver);
+}
+
+module_init(psif_init);
+module_exit(psif_exit);
+
+MODULE_AUTHOR("Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>");
+MODULE_DESCRIPTION("Atmel AVR32 PSIF PS/2 driver");
+MODULE_LICENSE("GPL");
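
The prescaler computation in psif_set_prescaler() above boils down to the 100 us pulse length multiplied by the peripheral clock in MHz, clamped to the 12-bit PRSCV field. A standalone sketch of that arithmetic with sample clock rates (in the driver the real rate comes from clk_get_rate()):

#include <stdio.h>

#define PRSCV_SIZE 12   /* width of the PRSCV field, as defined above */

/* Same arithmetic as psif_set_prescaler(): 100 us pulse * pclk in MHz. */
static unsigned long psif_prescaler(unsigned long pclk_hz)
{
        unsigned long prscv = 100 * (pclk_hz / 1000000UL);
        unsigned long max = (1UL << PRSCV_SIZE) - 1;

        return prscv > max ? max : prscv;
}

int main(void)
{
        printf("30 MHz pclk -> PRSCV %lu\n", psif_prescaler(30000000UL));
        printf("60 MHz pclk -> PRSCV %lu\n", psif_prescaler(60000000UL));
        return 0;
}
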
index 60931aceb8282f42e217f1dd9eb2c451b2e55479..5ece9f56babc50ff1d0018e0dccec9832a19444a 100644 (file)
@@ -370,10 +370,10 @@ static int i8042_pnp_kbd_probe(struct pnp_dev *dev, const struct pnp_device_id *
        if (pnp_irq_valid(dev,0))
                i8042_pnp_kbd_irq = pnp_irq(dev, 0);
 
-       strncpy(i8042_pnp_kbd_name, did->id, sizeof(i8042_pnp_kbd_name));
+       strlcpy(i8042_pnp_kbd_name, did->id, sizeof(i8042_pnp_kbd_name));
        if (strlen(pnp_dev_name(dev))) {
-               strncat(i8042_pnp_kbd_name, ":", sizeof(i8042_pnp_kbd_name));
-               strncat(i8042_pnp_kbd_name, pnp_dev_name(dev), sizeof(i8042_pnp_kbd_name));
+               strlcat(i8042_pnp_kbd_name, ":", sizeof(i8042_pnp_kbd_name));
+               strlcat(i8042_pnp_kbd_name, pnp_dev_name(dev), sizeof(i8042_pnp_kbd_name));
        }
 
        i8042_pnp_kbd_devices++;
@@ -391,10 +391,10 @@ static int i8042_pnp_aux_probe(struct pnp_dev *dev, const struct pnp_device_id *
        if (pnp_irq_valid(dev, 0))
                i8042_pnp_aux_irq = pnp_irq(dev, 0);
 
-       strncpy(i8042_pnp_aux_name, did->id, sizeof(i8042_pnp_aux_name));
+       strlcpy(i8042_pnp_aux_name, did->id, sizeof(i8042_pnp_aux_name));
        if (strlen(pnp_dev_name(dev))) {
-               strncat(i8042_pnp_aux_name, ":", sizeof(i8042_pnp_aux_name));
-               strncat(i8042_pnp_aux_name, pnp_dev_name(dev), sizeof(i8042_pnp_aux_name));
+               strlcat(i8042_pnp_aux_name, ":", sizeof(i8042_pnp_aux_name));
+               strlcat(i8042_pnp_aux_name, pnp_dev_name(dev), sizeof(i8042_pnp_aux_name));
        }
 
        i8042_pnp_aux_devices++;
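
The i8042 hunks above switch from strncat() to strlcat() because the two interpret their size argument differently: strncat() limits how many bytes may be appended regardless of how full the buffer already is, while strlcat() takes the total size of the destination buffer and never writes past it. A small userspace illustration follows; since strlcat() is a BSD/kernel routine rather than standard C, the sketch carries its own minimal work-alike.

#include <stdio.h>
#include <string.h>

/* Minimal strlcat() work-alike: "size" is the TOTAL destination size. */
static size_t my_strlcat(char *dst, const char *src, size_t size)
{
        size_t dlen = strnlen(dst, size);
        size_t i;

        if (dlen == size)
                return size + strlen(src);
        for (i = 0; src[i] != '\0' && dlen + i < size - 1; i++)
                dst[dlen + i] = src[i];
        dst[dlen + i] = '\0';
        return dlen + strlen(src);
}

int main(void)
{
        char name[8] = "PNP";

        /*
         * strncat(name, suffix, sizeof(name)) could append up to 8 more
         * bytes and overrun the buffer; the strlcat() form truncates
         * safely within the 8-byte buffer instead.
         */
        my_strlcat(name, ":SomeLongDeviceName", sizeof(name));
        printf("%s\n", name);   /* prints the safely truncated string */
        return 0;
}
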
index 49f84315cb32953ff7cee9066635800a9940a8b8..34c59d9c6205b47cb06f7fbfd77d7efe0e0bcf5a 100644 (file)
@@ -45,6 +45,7 @@
 MODULE_AUTHOR("Vojtech Pavlik, Russell King");
 MODULE_DESCRIPTION("Acorn RiscPC PS/2 keyboard controller driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:kart");
 
 static int rpckbd_write(struct serio *port, unsigned char val)
 {
@@ -140,6 +141,7 @@ static struct platform_driver rpckbd_driver = {
        .remove         = __devexit_p(rpckbd_remove),
        .driver         = {
                .name   = "kart",
+               .owner  = THIS_MODULE,
        },
 };
 
index d371c0bdc0bdbad09fd9e56e87f2526e5218f0ca..effb49ea24aa4c6ec337e14cfa4336e4fd153345 100644 (file)
@@ -25,14 +25,14 @@ config TABLET_USB_ACECAD
          module will be called acecad.
 
 config TABLET_USB_AIPTEK
-       tristate "Aiptek 6000U/8000U tablet support (USB)"
+       tristate "Aiptek 6000U/8000U and Genius G_PEN tablet support (USB)"
        depends on USB_ARCH_HAS_HCD
        select USB
        help
-         Say Y here if you want to use the USB version of the Aiptek 6000U
-         or Aiptek 8000U tablet.  Make sure to say Y to "Mouse support"
-         (CONFIG_INPUT_MOUSEDEV) and/or "Event interface support"
-         (CONFIG_INPUT_EVDEV) as well.
+         Say Y here if you want to use the USB version of the Aiptek 6000U,
+         Aiptek 8000U or Genius G-PEN 560 tablet.  Make sure to say Y to
+         "Mouse support" (CONFIG_INPUT_MOUSEDEV) and/or "Event interface
+         support" (CONFIG_INPUT_EVDEV) as well.
 
          To compile this driver as a module, choose M here: the
          module will be called aiptek.
index 94683f58c9e18ec5b933d7721ab3b07467ae9a1c..1d759f6f807670479171b21bdc20da2d17a46072 100644 (file)
  */
 
 #define USB_VENDOR_ID_AIPTEK                           0x08ca
+#define USB_VENDOR_ID_KYE                              0x0458
 #define USB_REQ_GET_REPORT                             0x01
 #define USB_REQ_SET_REPORT                             0x09
 
@@ -832,6 +833,7 @@ static const struct usb_device_id aiptek_ids[] = {
        {USB_DEVICE(USB_VENDOR_ID_AIPTEK, 0x22)},
        {USB_DEVICE(USB_VENDOR_ID_AIPTEK, 0x23)},
        {USB_DEVICE(USB_VENDOR_ID_AIPTEK, 0x24)},
+       {USB_DEVICE(USB_VENDOR_ID_KYE, 0x5003)},
        {}
 };
 
index acf9830698cb7ab3cadd94c077270bd49d12cff1..706619d06f71e6c596acb8419866ecca0146feb9 100644 (file)
@@ -101,8 +101,11 @@ struct wacom {
        dma_addr_t data_dma;
        struct input_dev *dev;
        struct usb_device *usbdev;
+       struct usb_interface *intf;
        struct urb *irq;
        struct wacom_wac * wacom_wac;
+       struct mutex lock;
+       int open:1;
        char phys[32];
 };
 
index 41caaef8e2d72c3a8e098086f9f0edbe44be48c8..71cc0c140790462f29b2e1513c38806bfa65d090 100644 (file)
@@ -70,6 +70,7 @@ static void wacom_sys_irq(struct urb *urb)
                input_sync(get_input_dev(&wcombo));
 
  exit:
+       usb_mark_last_busy(wacom->usbdev);
        retval = usb_submit_urb (urb, GFP_ATOMIC);
        if (retval)
                err ("%s - usb_submit_urb failed with result %d",
@@ -124,10 +125,25 @@ static int wacom_open(struct input_dev *dev)
 {
        struct wacom *wacom = input_get_drvdata(dev);
 
+       mutex_lock(&wacom->lock);
+
        wacom->irq->dev = wacom->usbdev;
-       if (usb_submit_urb(wacom->irq, GFP_KERNEL))
+
+       if (usb_autopm_get_interface(wacom->intf) < 0) {
+               mutex_unlock(&wacom->lock);
                return -EIO;
+       }
+
+       if (usb_submit_urb(wacom->irq, GFP_KERNEL)) {
+               usb_autopm_put_interface(wacom->intf);
+               mutex_unlock(&wacom->lock);
+               return -EIO;
+       }
+
+       wacom->open = 1;
+       wacom->intf->needs_remote_wakeup = 1;
 
+       mutex_unlock(&wacom->lock);
        return 0;
 }
 
@@ -135,7 +151,11 @@ static void wacom_close(struct input_dev *dev)
 {
        struct wacom *wacom = input_get_drvdata(dev);
 
+       mutex_lock(&wacom->lock);
        usb_kill_urb(wacom->irq);
+       wacom->open = 0;
+       wacom->intf->needs_remote_wakeup = 0;
+       mutex_unlock(&wacom->lock);
 }
 
 void input_dev_mo(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
@@ -243,6 +263,8 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
 
        wacom->usbdev = dev;
        wacom->dev = input_dev;
+       wacom->intf = intf;
+       mutex_init(&wacom->lock);
        usb_make_path(dev, wacom->phys, sizeof(wacom->phys));
        strlcat(wacom->phys, "/input0", sizeof(wacom->phys));
 
@@ -304,23 +326,57 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
 
 static void wacom_disconnect(struct usb_interface *intf)
 {
-       struct wacom *wacom = usb_get_intfdata (intf);
+       struct wacom *wacom = usb_get_intfdata(intf);
 
        usb_set_intfdata(intf, NULL);
-       if (wacom) {
-               usb_kill_urb(wacom->irq);
-               input_unregister_device(wacom->dev);
-               usb_free_urb(wacom->irq);
-               usb_buffer_free(interface_to_usbdev(intf), 10, wacom->wacom_wac->data, wacom->data_dma);
-               kfree(wacom->wacom_wac);
-               kfree(wacom);
-       }
+
+       usb_kill_urb(wacom->irq);
+       input_unregister_device(wacom->dev);
+       usb_free_urb(wacom->irq);
+       usb_buffer_free(interface_to_usbdev(intf), 10, wacom->wacom_wac->data, wacom->data_dma);
+       kfree(wacom->wacom_wac);
+       kfree(wacom);
+}
+
+static int wacom_suspend(struct usb_interface *intf, pm_message_t message)
+{
+       struct wacom *wacom = usb_get_intfdata(intf);
+
+       mutex_lock(&wacom->lock);
+       usb_kill_urb(wacom->irq);
+       mutex_unlock(&wacom->lock);
+
+       return 0;
+}
+
+static int wacom_resume(struct usb_interface *intf)
+{
+       struct wacom *wacom = usb_get_intfdata(intf);
+       int rv;
+
+       mutex_lock(&wacom->lock);
+       if (wacom->open)
+               rv = usb_submit_urb(wacom->irq, GFP_NOIO);
+       else
+               rv = 0;
+       mutex_unlock(&wacom->lock);
+
+       return rv;
+}
+
+static int wacom_reset_resume(struct usb_interface *intf)
+{
+       return wacom_resume(intf);
 }
 
 static struct usb_driver wacom_driver = {
        .name =         "wacom",
        .probe =        wacom_probe,
        .disconnect =   wacom_disconnect,
+       .suspend =      wacom_suspend,
+       .resume =       wacom_resume,
+       .reset_resume = wacom_reset_resume,
+       .supports_autosuspend = 1,
 };
 
 static int __init wacom_init(void)
index ffe33842143f1178eff883568e51b19fc1eca136..192513e1f04cdc71529eeae8d75663e4866ab39b 100644 (file)
@@ -649,6 +649,7 @@ static struct wacom_features wacom_features[] = {
        { "Wacom Intuos3 6x11",  10, 54204, 31750, 1023, 63, INTUOS3 },
        { "Wacom Intuos3 4x6",   10, 31496, 19685, 1023, 63, INTUOS3S },
        { "Wacom Cintiq 21UX",   10, 87200, 65600, 1023, 63, CINTIQ },
+       { "Wacom Cintiq 20WSX",  10, 86680, 54180, 1023, 63, WACOM_BEE },
        { "Wacom Cintiq 12WX",   10, 53020, 33440, 1023, 63, WACOM_BEE },
        { "Wacom Intuos2 6x8",   10, 20320, 16240, 1023, 31, INTUOS },
        { }
@@ -702,6 +703,7 @@ static struct usb_device_id wacom_ids[] = {
        { USB_DEVICE(USB_VENDOR_ID_WACOM, 0xB5) },
        { USB_DEVICE(USB_VENDOR_ID_WACOM, 0xB7) },
        { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x3F) },
+       { USB_DEVICE(USB_VENDOR_ID_WACOM, 0xC5) },
        { USB_DEVICE(USB_VENDOR_ID_WACOM, 0xC6) },
        { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x47) },
        { }
index 90e8e92dfe4792ca662e60bb94cdf30c2ee62c0d..565ec711c2eefa6e5dc0f75dc4be5d1f6569a962 100644 (file)
@@ -185,6 +185,59 @@ config TOUCHSCREEN_UCB1400
          To compile this driver as a module, choose M here: the
          module will be called ucb1400_ts.
 
+config TOUCHSCREEN_WM97XX
+       tristate "Support for WM97xx AC97 touchscreen controllers"
+       depends on AC97_BUS
+       help
+         Say Y here if you have a Wolfson Microelectronics WM97xx
+         touchscreen connected to your system. Note that this option
+         only enables the core driver; you will also need to select
+         support for the appropriate chip below.
+
+         If unsure, say N.
+
+         To compile this driver as a module, choose M here: the
+         module will be called wm97xx-ts.
+
+config TOUCHSCREEN_WM9705
+       bool "WM9705 Touchscreen interface support"
+       depends on TOUCHSCREEN_WM97XX
+       help
+         Say Y here if you have a Wolfson Microelectronics WM9705
+         touchscreen controller connected to your system.
+
+         If unsure, say N.
+
+config TOUCHSCREEN_WM9712
+       bool "WM9712 Touchscreen interface support"
+       depends on TOUCHSCREEN_WM97XX
+       help
+         Say Y here if you have a Wolfson Microelectronics WM9712
+         touchscreen controller connected to your system.
+
+         If unsure, say N.
+
+config TOUCHSCREEN_WM9713
+       bool "WM9713 Touchscreen interface support"
+       depends on TOUCHSCREEN_WM97XX
+       help
+         Say Y here if you have a Wolfson Microelectronics WM9713 touchscreen
+         controller connected to your system.
+
+         If unsure, say N.
+
+config TOUCHSCREEN_WM97XX_MAINSTONE
+       tristate "WM97xx Mainstone accelerated touch"
+       depends on TOUCHSCREEN_WM97XX && ARCH_PXA
+       help
+         Say Y here for streaming mode support with WM97xx touchscreens
+         on Mainstone systems.
+
+         If unsure, say N.
+
+         To compile this driver as a module, choose M here: the
+         module will be called mainstone-wm97xx.
+
 config TOUCHSCREEN_USB_COMPOSITE
        tristate "USB Touchscreen Driver"
        depends on USB_ARCH_HAS_HCD
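
The WM97xx entries above split touchscreen support into a tristate core (wm97xx-ts), boolean per-codec options, and an optional Mainstone glue module. A minimal configuration sketch, assuming a PXA/Mainstone target (illustrative only, not part of the patch):

    CONFIG_TOUCHSCREEN_WM97XX=m
    CONFIG_TOUCHSCREEN_WM9712=y
    CONFIG_TOUCHSCREEN_WM97XX_MAINSTONE=m

With this selection the WM9712 codec support is linked into the wm97xx-ts module and the accelerated Mainstone glue is built as mainstone-wm97xx, as wired up in the touchscreen Makefile hunk immediately below.
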
index 35d4097df35a64ddbe988376340f51c27199acf8..3c096d75651d4eb2756e302f7d865c117da85078 100644 (file)
@@ -4,6 +4,8 @@
 
 # Each configuration option enables a list of files.
 
+wm97xx-ts-y := wm97xx-core.o
+
 obj-$(CONFIG_TOUCHSCREEN_ADS7846)      += ads7846.o
 obj-$(CONFIG_TOUCHSCREEN_BITSY)                += h3600_ts_input.o
 obj-$(CONFIG_TOUCHSCREEN_CORGI)                += corgi_ts.o
@@ -19,3 +21,8 @@ obj-$(CONFIG_TOUCHSCREEN_PENMOUNT)    += penmount.o
 obj-$(CONFIG_TOUCHSCREEN_TOUCHRIGHT)   += touchright.o
 obj-$(CONFIG_TOUCHSCREEN_TOUCHWIN)     += touchwin.o
 obj-$(CONFIG_TOUCHSCREEN_UCB1400)      += ucb1400_ts.o
+obj-$(CONFIG_TOUCHSCREEN_WM97XX)       += wm97xx-ts.o
+wm97xx-ts-$(CONFIG_TOUCHSCREEN_WM9705) += wm9705.o
+wm97xx-ts-$(CONFIG_TOUCHSCREEN_WM9712) += wm9712.o
+wm97xx-ts-$(CONFIG_TOUCHSCREEN_WM9713) += wm9713.o
+obj-$(CONFIG_TOUCHSCREEN_WM97XX_MAINSTONE)     += mainstone-wm97xx.o
index 39573b91c8de4be826ff4b2a44a3c5f6f71c36c4..907a45fe9d40f5a828bd002ecd8d05d1562d353a 100644 (file)
@@ -80,6 +80,7 @@ struct ads7846 {
 #endif
 
        u16                     model;
+       u16                     vref_mv;
        u16                     vref_delay_usecs;
        u16                     x_plate_ohms;
        u16                     pressure_max;
@@ -177,9 +178,6 @@ struct ads7846 {
  * The range is GND..vREF. The ads7843 and ads7835 must use external vREF;
  * ads7846 lets that pin be unconnected, to use internal vREF.
  */
-static unsigned vREF_mV;
-module_param(vREF_mV, uint, 0);
-MODULE_PARM_DESC(vREF_mV, "external vREF voltage, in milliVolts");
 
 struct ser_req {
        u8                      ref_on;
@@ -206,7 +204,6 @@ static int ads7846_read12_ser(struct device *dev, unsigned command)
        struct ads7846          *ts = dev_get_drvdata(dev);
        struct ser_req          *req = kzalloc(sizeof *req, GFP_KERNEL);
        int                     status;
-       int                     uninitialized_var(sample);
        int                     use_internal;
 
        if (!req)
@@ -263,13 +260,13 @@ static int ads7846_read12_ser(struct device *dev, unsigned command)
 
        if (status == 0) {
                /* on-wire is a must-ignore bit, a BE12 value, then padding */
-               sample = be16_to_cpu(req->sample);
-               sample = sample >> 3;
-               sample &= 0x0fff;
+               status = be16_to_cpu(req->sample);
+               status = status >> 3;
+               status &= 0x0fff;
        }
 
        kfree(req);
-       return status ? status : sample;
+       return status;
 }
 
 #if defined(CONFIG_HWMON) || defined(CONFIG_HWMON_MODULE)
@@ -310,7 +307,7 @@ static inline unsigned vaux_adjust(struct ads7846 *ts, ssize_t v)
        unsigned retval = v;
 
        /* external resistors may scale vAUX into 0..vREF */
-       retval *= vREF_mV;
+       retval *= ts->vref_mv;
        retval = retval >> 12;
        return retval;
 }
@@ -368,14 +365,14 @@ static int ads784x_hwmon_register(struct spi_device *spi, struct ads7846 *ts)
        /* hwmon sensors need a reference voltage */
        switch (ts->model) {
        case 7846:
-               if (!vREF_mV) {
+               if (!ts->vref_mv) {
                        dev_dbg(&spi->dev, "assuming 2.5V internal vREF\n");
-                       vREF_mV = 2500;
+                       ts->vref_mv = 2500;
                }
                break;
        case 7845:
        case 7843:
-               if (!vREF_mV) {
+               if (!ts->vref_mv) {
                        dev_warn(&spi->dev,
                                "external vREF for ADS%d not specified\n",
                                ts->model);
@@ -868,6 +865,7 @@ static int __devinit ads7846_probe(struct spi_device *spi)
 
        ts->spi = spi;
        ts->input = input_dev;
+       ts->vref_mv = pdata->vref_mv;
 
        hrtimer_init(&ts->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        ts->timer.function = ads7846_timer;
index a22576779acdc3c7e79c4148ca3cf89c358e0261..4e9d8eece2e003f8674d90a8f6ac0caaa447934f 100644 (file)
@@ -362,6 +362,7 @@ static struct platform_driver corgits_driver = {
        .resume         = corgits_resume,
        .driver         = {
                .name   = "corgi-ts",
+               .owner  = THIS_MODULE,
        },
 };
 
@@ -381,3 +382,4 @@ module_exit(corgits_exit);
 MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
 MODULE_DESCRIPTION("Corgi TouchScreen Driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:corgi-ts");
index 42a1c9a1940ece796c099c1cc24a77f3baddc637..742242111bf1b20b5c57ea3ba2b3ec6c99d7c37c 100644 (file)
@@ -160,11 +160,15 @@ static int __devexit jornada720_ts_remove(struct platform_device *pdev)
        return 0;
 }
 
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:jornada_ts");
+
 static struct platform_driver jornada720_ts_driver = {
        .probe          = jornada720_ts_probe,
        .remove         = __devexit_p(jornada720_ts_remove),
        .driver         = {
                .name   = "jornada_ts",
+               .owner  = THIS_MODULE,
        },
 };
 
diff --git a/drivers/input/touchscreen/mainstone-wm97xx.c b/drivers/input/touchscreen/mainstone-wm97xx.c
new file mode 100644 (file)
index 0000000..a79f029
--- /dev/null
@@ -0,0 +1,302 @@
+/*
+ * mainstone-wm97xx.c  --  Mainstone Continuous Touch screen driver for
+ *                         Wolfson WM97xx AC97 Codecs.
+ *
+ * Copyright 2004, 2007 Wolfson Microelectronics PLC.
+ * Author: Liam Girdwood
+ *         liam.girdwood@wolfsonmicro.com or linux@wolfsonmicro.com
+ * Parts Copyright : Ian Molton <spyro@f2s.com>
+ *                   Andrew Zabolotny <zap@homelink.ru>
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ * Notes:
+ *     This is a wm97xx extended touch driver to capture touch
+ *     data in a continuous manner on the Intel XScale architecture
+ *
+ *  Features:
+ *       - codecs supported:- WM9705, WM9712, WM9713
+ *       - processors supported:- Intel XScale PXA25x, PXA26x, PXA27x
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/wm97xx.h>
+#include <linux/io.h>
+#include <asm/arch/pxa-regs.h>
+
+#define VERSION                "0.13"
+
+struct continuous {
+       u16 id;    /* codec id */
+       u8 code;   /* continuous code */
+       u8 reads;  /* number of coord reads per read cycle */
+       u32 speed; /* number of coords per second */
+};
+
+#define WM_READS(sp) ((sp / HZ) + 1)
+
+static const struct continuous cinfo[] = {
+       {WM9705_ID2, 0, WM_READS(94), 94},
+       {WM9705_ID2, 1, WM_READS(188), 188},
+       {WM9705_ID2, 2, WM_READS(375), 375},
+       {WM9705_ID2, 3, WM_READS(750), 750},
+       {WM9712_ID2, 0, WM_READS(94), 94},
+       {WM9712_ID2, 1, WM_READS(188), 188},
+       {WM9712_ID2, 2, WM_READS(375), 375},
+       {WM9712_ID2, 3, WM_READS(750), 750},
+       {WM9713_ID2, 0, WM_READS(94), 94},
+       {WM9713_ID2, 1, WM_READS(120), 120},
+       {WM9713_ID2, 2, WM_READS(154), 154},
+       {WM9713_ID2, 3, WM_READS(188), 188},
+};
+
+/* continuous speed index */
+static int sp_idx;
+static u16 last, tries;
+
+/*
+ * Pen sampling frequency (Hz) in continuous mode.
+ */
+static int cont_rate = 200;
+module_param(cont_rate, int, 0);
+MODULE_PARM_DESC(cont_rate, "Sampling rate in continuous mode (Hz)");
+
+/*
+ * Pen down detection.
+ *
+ * This driver can either poll or use an interrupt to indicate a pen down
+ * event. If the irq request fails then it will fall back to polling mode.
+ */
+static int pen_int;
+module_param(pen_int, int, 0);
+MODULE_PARM_DESC(pen_int, "Pen down detection (1 = interrupt, 0 = polling)");
+
+/*
+ * Pressure readback.
+ *
+ * Set to 1 to read back pen down pressure
+ */
+static int pressure;
+module_param(pressure, int, 0);
+MODULE_PARM_DESC(pressure, "Pressure readback (1 = pressure, 0 = no pressure)");
+
+/*
+ * AC97 touch data slot.
+ *
+ * The AC97 slot used to read back touch screen data.
+ */
+static int ac97_touch_slot = 5;
+module_param(ac97_touch_slot, int, 0);
+MODULE_PARM_DESC(ac97_touch_slot, "Touch screen data slot AC97 number");
+
+
+/* flush AC97 slot 5 FIFO on pxa machines */
+#ifdef CONFIG_PXA27x
+static void wm97xx_acc_pen_up(struct wm97xx *wm)
+{
+       schedule_timeout_uninterruptible(1);
+
+       while (MISR & (1 << 2))
+               MODR;
+}
+#else
+static void wm97xx_acc_pen_up(struct wm97xx *wm)
+{
+       int count = 16;
+       schedule_timeout_uninterruptible(1);
+
+       while (count--)
+               MODR;
+}
+#endif
+
+static int wm97xx_acc_pen_down(struct wm97xx *wm)
+{
+       u16 x, y, p = 0x100 | WM97XX_ADCSEL_PRES;
+       int reads = 0;
+
+       /* When the AC97 queue has been drained we need to allow time
+        * to buffer up samples otherwise we end up spinning polling
+        * for samples.  The controller can't have a suitably low
+        * threshold set to use the notifications it gives.
+        */
+       schedule_timeout_uninterruptible(1);
+
+       if (tries > 5) {
+               tries = 0;
+               return RC_PENUP;
+       }
+
+       x = MODR;
+       if (x == last) {
+               tries++;
+               return RC_AGAIN;
+       }
+       last = x;
+       do {
+               if (reads)
+                       x = MODR;
+               y = MODR;
+               if (pressure)
+                       p = MODR;
+
+               /* are samples valid */
+               if ((x & WM97XX_ADCSRC_MASK) != WM97XX_ADCSEL_X ||
+                   (y & WM97XX_ADCSRC_MASK) != WM97XX_ADCSEL_Y ||
+                   (p & WM97XX_ADCSRC_MASK) != WM97XX_ADCSEL_PRES)
+                       goto up;
+
+               /* coordinate is good */
+               tries = 0;
+               input_report_abs(wm->input_dev, ABS_X, x & 0xfff);
+               input_report_abs(wm->input_dev, ABS_Y, y & 0xfff);
+               input_report_abs(wm->input_dev, ABS_PRESSURE, p & 0xfff);
+               input_sync(wm->input_dev);
+               reads++;
+       } while (reads < cinfo[sp_idx].reads);
+up:
+       return RC_PENDOWN | RC_AGAIN;
+}
+
+static int wm97xx_acc_startup(struct wm97xx *wm)
+{
+       int idx = 0;
+
+       /* check we have a codec */
+       if (wm->ac97 == NULL)
+               return -ENODEV;
+
+       /* Go you big red fire engine */
+       for (idx = 0; idx < ARRAY_SIZE(cinfo); idx++) {
+               if (wm->id != cinfo[idx].id)
+                       continue;
+               sp_idx = idx;
+               if (cont_rate <= cinfo[idx].speed)
+                       break;
+       }
+       wm->acc_rate = cinfo[sp_idx].code;
+       wm->acc_slot = ac97_touch_slot;
+       dev_info(wm->dev,
+                "mainstone accelerated touchscreen driver, %d samples/sec\n",
+                cinfo[sp_idx].speed);
+
+       /* codec specific irq config */
+       if (pen_int) {
+               switch (wm->id) {
+               case WM9705_ID2:
+                       wm->pen_irq = IRQ_GPIO(4);
+                       set_irq_type(IRQ_GPIO(4), IRQT_BOTHEDGE);
+                       break;
+               case WM9712_ID2:
+               case WM9713_ID2:
+                       /* enable pen down interrupt */
+                       /* use PEN_DOWN GPIO 13 to assert IRQ on GPIO line 2 */
+                       wm->pen_irq = MAINSTONE_AC97_IRQ;
+                       wm97xx_config_gpio(wm, WM97XX_GPIO_13, WM97XX_GPIO_IN,
+                                          WM97XX_GPIO_POL_HIGH,
+                                          WM97XX_GPIO_STICKY,
+                                          WM97XX_GPIO_WAKE);
+                       wm97xx_config_gpio(wm, WM97XX_GPIO_2, WM97XX_GPIO_OUT,
+                                          WM97XX_GPIO_POL_HIGH,
+                                          WM97XX_GPIO_NOTSTICKY,
+                                          WM97XX_GPIO_NOWAKE);
+                       break;
+               default:
+                       dev_err(wm->dev,
+                               "pen down irq not supported on this device\n");
+                       pen_int = 0;
+                       break;
+               }
+       }
+
+       return 0;
+}
+
+static void wm97xx_acc_shutdown(struct wm97xx *wm)
+{
+       /* codec specific deconfig */
+       if (pen_int) {
+               switch (wm->id & 0xffff) {
+               case WM9705_ID2:
+                       wm->pen_irq = 0;
+                       break;
+               case WM9712_ID2:
+               case WM9713_ID2:
+                       /* disable interrupt */
+                       wm->pen_irq = 0;
+                       break;
+               }
+       }
+}
+
+static void wm97xx_irq_enable(struct wm97xx *wm, int enable)
+{
+       if (enable)
+               enable_irq(wm->pen_irq);
+       else
+               disable_irq(wm->pen_irq);
+}
+
+static struct wm97xx_mach_ops mainstone_mach_ops = {
+       .acc_enabled = 1,
+       .acc_pen_up = wm97xx_acc_pen_up,
+       .acc_pen_down = wm97xx_acc_pen_down,
+       .acc_startup = wm97xx_acc_startup,
+       .acc_shutdown = wm97xx_acc_shutdown,
+       .irq_enable = wm97xx_irq_enable,
+       .irq_gpio = WM97XX_GPIO_2,
+};
+
+static int mainstone_wm97xx_probe(struct platform_device *pdev)
+{
+       struct wm97xx *wm = platform_get_drvdata(pdev);
+
+       return wm97xx_register_mach_ops(wm, &mainstone_mach_ops);
+}
+
+static int mainstone_wm97xx_remove(struct platform_device *pdev)
+{
+       struct wm97xx *wm = platform_get_drvdata(pdev);
+
+       wm97xx_unregister_mach_ops(wm);
+       return 0;
+}
+
+static struct platform_driver mainstone_wm97xx_driver = {
+       .probe = mainstone_wm97xx_probe,
+       .remove = mainstone_wm97xx_remove,
+       .driver = {
+               .name = "wm97xx-touch",
+       },
+};
+
+static int __init mainstone_wm97xx_init(void)
+{
+       return platform_driver_register(&mainstone_wm97xx_driver);
+}
+
+static void __exit mainstone_wm97xx_exit(void)
+{
+       platform_driver_unregister(&mainstone_wm97xx_driver);
+}
+
+module_init(mainstone_wm97xx_init);
+module_exit(mainstone_wm97xx_exit);
+
+/* Module information */
+MODULE_AUTHOR("Liam Girdwood <liam.girdwood@wolfsonmicro.com>");
+MODULE_DESCRIPTION("wm97xx continuous touch driver for mainstone");
+MODULE_LICENSE("GPL");
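
The Mainstone glue is configured entirely through the module parameters defined above (cont_rate, pen_int, pressure, ac97_touch_slot). A usage sketch, assuming the driver is built as the mainstone-wm97xx module named in its Kconfig entry (parameter values are illustrative only):

    modprobe mainstone-wm97xx pen_int=1 cont_rate=188 pressure=1

Here pen_int=1 requests interrupt-driven pen-down detection (the driver falls back to polling if the IRQ cannot be claimed), cont_rate=188 asks for roughly 188 coordinate samples per second in continuous mode, and pressure=1 enables pressure readback.
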
index 607f9933aa1fd6d91f423fd62e821b17dc243b1d..bce018e45bce4781cb7b61519485502ff8c22171 100644 (file)
@@ -427,10 +427,6 @@ static int ucb1400_detect_irq(struct ucb1400 *ucb)
        unsigned long mask, timeout;
 
        mask = probe_irq_on();
-       if (!mask) {
-               probe_irq_off(mask);
-               return -EBUSY;
-       }
 
        /* Enable the ADC interrupt. */
        ucb1400_reg_write(ucb, UCB_IE_RIS, UCB_IE_ADC);
index 63f9664a066f8465398f4526ca07ee4fbbbf4684..3a0a8ca570767000569ebab0befa31cec6d3b3c1 100644 (file)
@@ -396,9 +396,12 @@ static int gunze_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
 static int dmc_tsc10_init(struct usbtouch_usb *usbtouch)
 {
        struct usb_device *dev = usbtouch->udev;
-       int ret;
-       unsigned char buf[2];
+       int ret = -ENOMEM;
+       unsigned char *buf;
 
+       buf = kmalloc(2, GFP_KERNEL);
+       if (!buf)
+               goto err_nobuf;
        /* reset */
        buf[0] = buf[1] = 0xFF;
        ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0),
@@ -406,9 +409,11 @@ static int dmc_tsc10_init(struct usbtouch_usb *usbtouch)
                              USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
                              0, 0, buf, 2, USB_CTRL_SET_TIMEOUT);
        if (ret < 0)
-               return ret;
-       if (buf[0] != 0x06 || buf[1] != 0x00)
-               return -ENODEV;
+               goto err_out;
+       if (buf[0] != 0x06 || buf[1] != 0x00) {
+               ret = -ENODEV;
+               goto err_out;
+       }
 
        /* set coordinate output rate */
        buf[0] = buf[1] = 0xFF;
@@ -417,20 +422,22 @@ static int dmc_tsc10_init(struct usbtouch_usb *usbtouch)
                              USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
                              TSC10_RATE_150, 0, buf, 2, USB_CTRL_SET_TIMEOUT);
        if (ret < 0)
-               return ret;
+               goto err_out;
        if ((buf[0] != 0x06 || buf[1] != 0x00) &&
-           (buf[0] != 0x15 || buf[1] != 0x01))
-               return -ENODEV;
+           (buf[0] != 0x15 || buf[1] != 0x01)) {
+               ret = -ENODEV;
+               goto err_out;
+       }
 
        /* start sending data */
        ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0),
                              TSC10_CMD_DATA1,
                              USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
                              0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
-       if (ret < 0)
-               return ret;
-
-       return 0;
+err_out:
+       kfree(buf);
+err_nobuf:
+       return ret;
 }
 
 
diff --git a/drivers/input/touchscreen/wm9705.c b/drivers/input/touchscreen/wm9705.c
new file mode 100644 (file)
index 0000000..978e1a1
--- /dev/null
@@ -0,0 +1,353 @@
+/*
+ * wm9705.c  --  Codec driver for Wolfson WM9705 AC97 Codec.
+ *
+ * Copyright 2003, 2004, 2005, 2006, 2007 Wolfson Microelectronics PLC.
+ * Author: Liam Girdwood
+ *         liam.girdwood@wolfsonmicro.com or linux@wolfsonmicro.com
+ * Parts Copyright : Ian Molton <spyro@f2s.com>
+ *                   Andrew Zabolotny <zap@homelink.ru>
+ *                   Russell King <rmk@arm.linux.org.uk>
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/input.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/wm97xx.h>
+
+#define TS_NAME                        "wm97xx"
+#define WM9705_VERSION         "1.00"
+#define DEFAULT_PRESSURE       0xb0c0
+
+/*
+ * Module parameters
+ */
+
+/*
+ * Set current used for pressure measurement.
+ *
+ * Set pil = 2 to use 400uA
+ *     pil = 1 to use 200uA and
+ *     pil = 0 to disable pressure measurement.
+ *
+ * This is used to increase the range of values returned by the adc
+ * when measuring touchpanel pressure.
+ */
+static int pil;
+module_param(pil, int, 0);
+MODULE_PARM_DESC(pil, "Set current used for pressure measurement.");
+
+/*
+ * Set threshold for pressure measurement.
+ *
+ * Pen down pressure below threshold is ignored.
+ */
+static int pressure = DEFAULT_PRESSURE & 0xfff;
+module_param(pressure, int, 0);
+MODULE_PARM_DESC(pressure, "Set threshold for pressure measurement.");
+
+/*
+ * Set adc sample delay.
+ *
+ * For accurate touchpanel measurements, some settling time may be
+ * required between the switch matrix applying a voltage across the
+ * touchpanel plate and the ADC sampling the signal.
+ *
+ * This delay can be set by setting delay = n, where n is the array
+ * position of the delay in the array delay_table below.
+ * Long delays > 1ms are supported for completeness, but are not
+ * recommended.
+ */
+static int delay = 4;
+module_param(delay, int, 0);
+MODULE_PARM_DESC(delay, "Set adc sample delay.");
+
+/*
+ * Pen detect comparator threshold.
+ *
+ * 0 to Vmid in 15 steps, 0 = use zero power comparator with Vmid threshold
+ * i.e. 1 =  Vmid/15 threshold
+ *      15 =  Vmid/1 threshold
+ *
+ * Adjust this value if you are having problems with pen detect not
+ * detecting any down events.
+ */
+static int pdd = 8;
+module_param(pdd, int, 0);
+MODULE_PARM_DESC(pdd, "Set pen detect comparator threshold");
+
+/*
+ * Set adc mask function.
+ *
+ * Sources of glitch noise, such as signals driving an LCD display, may feed
+ * through to the touch screen plates and affect measurement accuracy. In
+ * order to minimise this, a signal may be applied to the MASK pin to delay or
+ * synchronise the sampling.
+ *
+ * 0 = No delay or sync
+ * 1 = High on pin stops conversions
+ * 2 = Edge triggered, edge on pin delays conversion by delay param (above)
+ * 3 = Edge triggered, edge on pin starts conversion after delay param
+ */
+static int mask;
+module_param(mask, int, 0);
+MODULE_PARM_DESC(mask, "Set adc mask function.");
+
+/*
+ * ADC sample delay times in uS
+ */
+static const int delay_table[] = {
+       21,    /* 1 AC97 Link frames */
+       42,    /* 2                  */
+       84,    /* 4                  */
+       167,   /* 8                  */
+       333,   /* 16                 */
+       667,   /* 32                 */
+       1000,  /* 48                 */
+       1333,  /* 64                 */
+       2000,  /* 96                 */
+       2667,  /* 128                */
+       3333,  /* 160                */
+       4000,  /* 192                */
+       4667,  /* 224                */
+       5333,  /* 256                */
+       6000,  /* 288                */
+       0      /* No delay, switch matrix always on */
+};
+
+/*
+ * Delay after issuing a POLL command.
+ *
+ * The delay is 3 AC97 link frames + the touchpanel settling delay
+ */
+static inline void poll_delay(int d)
+{
+       udelay(3 * AC97_LINK_FRAME + delay_table[d]);
+}
+
+/*
+ * set up the physical settings of the WM9705
+ */
+static void wm9705_phy_init(struct wm97xx *wm)
+{
+       u16 dig1 = 0, dig2 = WM97XX_RPR;
+
+       /*
+       * mute VIDEO and AUX as they share X and Y touchscreen
+       * inputs on the WM9705
+       */
+       wm97xx_reg_write(wm, AC97_AUX, 0x8000);
+       wm97xx_reg_write(wm, AC97_VIDEO, 0x8000);
+
+       /* touchpanel pressure current */
+       if (pil == 2) {
+               dig2 |= WM9705_PIL;
+               dev_dbg(wm->dev,
+                       "setting pressure measurement current to 400uA.");
+       } else if (pil)
+               dev_dbg(wm->dev,
+                       "setting pressure measurement current to 200uA.");
+       if (!pil)
+               pressure = 0;
+
+       /* polling mode sample settling delay */
+       if (delay != 4) {
+               if (delay < 0 || delay > 15) {
+                       dev_dbg(wm->dev, "supplied delay out of range.");
+                       delay = 4;
+               }
+       }
+       dig1 &= 0xff0f;
+       dig1 |= WM97XX_DELAY(delay);
+       dev_dbg(wm->dev, "setting adc sample delay to %d u Secs.",
+               delay_table[delay]);
+
+       /* WM9705 pdd */
+       dig2 |= (pdd & 0x000f);
+       dev_dbg(wm->dev, "setting pdd to Vmid/%d", 1 - (pdd & 0x000f));
+
+       /* mask */
+       dig2 |= ((mask & 0x3) << 4);
+
+       wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER1, dig1);
+       wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER2, dig2);
+}
+
+static void wm9705_dig_enable(struct wm97xx *wm, int enable)
+{
+       if (enable) {
+               wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER2,
+                                wm->dig[2] | WM97XX_PRP_DET_DIG);
+               wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD); /* dummy read */
+       } else
+               wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER2,
+                                wm->dig[2] & ~WM97XX_PRP_DET_DIG);
+}
+
+static void wm9705_aux_prepare(struct wm97xx *wm)
+{
+       memcpy(wm->dig_save, wm->dig, sizeof(wm->dig));
+       wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER1, 0);
+       wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER2, WM97XX_PRP_DET_DIG);
+}
+
+static void wm9705_dig_restore(struct wm97xx *wm)
+{
+       wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER1, wm->dig_save[1]);
+       wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER2, wm->dig_save[2]);
+}
+
+static inline int is_pden(struct wm97xx *wm)
+{
+       return wm->dig[2] & WM9705_PDEN;
+}
+
+/*
+ * Read a sample from the WM9705 adc in polling mode.
+ */
+static int wm9705_poll_sample(struct wm97xx *wm, int adcsel, int *sample)
+{
+       int timeout = 5 * delay;
+
+       if (!wm->pen_probably_down) {
+               u16 data = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
+               if (!(data & WM97XX_PEN_DOWN))
+                       return RC_PENUP;
+               wm->pen_probably_down = 1;
+       }
+
+       /* set up digitiser */
+       if (adcsel & 0x8000)
+               adcsel = ((adcsel & 0x7fff) + 3) << 12;
+
+       if (wm->mach_ops && wm->mach_ops->pre_sample)
+               wm->mach_ops->pre_sample(adcsel);
+       wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER1,
+                        adcsel | WM97XX_POLL | WM97XX_DELAY(delay));
+
+       /* wait 3 AC97 time slots + delay for conversion */
+       poll_delay(delay);
+
+       /* wait for POLL to go low */
+       while ((wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER1) & WM97XX_POLL)
+              && timeout) {
+               udelay(AC97_LINK_FRAME);
+               timeout--;
+       }
+
+       if (timeout == 0) {
+               /* If PDEN is set, we can get a timeout when pen goes up */
+               if (is_pden(wm))
+                       wm->pen_probably_down = 0;
+               else
+                       dev_dbg(wm->dev, "adc sample timeout");
+               return RC_PENUP;
+       }
+
+       *sample = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
+       if (wm->mach_ops && wm->mach_ops->post_sample)
+               wm->mach_ops->post_sample(adcsel);
+
+       /* check we have correct sample */
+       if ((*sample & WM97XX_ADCSEL_MASK) != adcsel) {
+               dev_dbg(wm->dev, "adc wrong sample, read %x got %x", adcsel,
+               *sample & WM97XX_ADCSEL_MASK);
+               return RC_PENUP;
+       }
+
+       if (!(*sample & WM97XX_PEN_DOWN)) {
+               wm->pen_probably_down = 0;
+               return RC_PENUP;
+       }
+
+       return RC_VALID;
+}
+
+/*
+ * Sample the WM9705 touchscreen in polling mode
+ */
+static int wm9705_poll_touch(struct wm97xx *wm, struct wm97xx_data *data)
+{
+       int rc;
+
+       rc = wm9705_poll_sample(wm, WM97XX_ADCSEL_X, &data->x);
+       if (rc != RC_VALID)
+               return rc;
+       rc = wm9705_poll_sample(wm, WM97XX_ADCSEL_Y, &data->y);
+       if (rc != RC_VALID)
+               return rc;
+       if (pil) {
+               rc = wm9705_poll_sample(wm, WM97XX_ADCSEL_PRES, &data->p);
+               if (rc != RC_VALID)
+                       return rc;
+       } else
+               data->p = DEFAULT_PRESSURE;
+
+       return RC_VALID;
+}
+
+/*
+ * Enable WM9705 continuous mode, i.e. touch data is streamed across
+ * an AC97 slot
+ */
+static int wm9705_acc_enable(struct wm97xx *wm, int enable)
+{
+       u16 dig1, dig2;
+       int ret = 0;
+
+       dig1 = wm->dig[1];
+       dig2 = wm->dig[2];
+
+       if (enable) {
+               /* continuous mode */
+               if (wm->mach_ops->acc_startup &&
+                   (ret = wm->mach_ops->acc_startup(wm)) < 0)
+                       return ret;
+               dig1 &= ~(WM97XX_CM_RATE_MASK | WM97XX_ADCSEL_MASK |
+                         WM97XX_DELAY_MASK | WM97XX_SLT_MASK);
+               dig1 |= WM97XX_CTC | WM97XX_COO | WM97XX_SLEN |
+                       WM97XX_DELAY(delay) |
+                       WM97XX_SLT(wm->acc_slot) |
+                       WM97XX_RATE(wm->acc_rate);
+               if (pil)
+                       dig1 |= WM97XX_ADCSEL_PRES;
+               dig2 |= WM9705_PDEN;
+       } else {
+               dig1 &= ~(WM97XX_CTC | WM97XX_COO | WM97XX_SLEN);
+               dig2 &= ~WM9705_PDEN;
+               if (wm->mach_ops->acc_shutdown)
+                       wm->mach_ops->acc_shutdown(wm);
+       }
+
+       wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER1, dig1);
+       wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER2, dig2);
+
+       return ret;
+}
+
+struct wm97xx_codec_drv wm9705_codec = {
+       .id = WM9705_ID2,
+       .name = "wm9705",
+       .poll_sample = wm9705_poll_sample,
+       .poll_touch = wm9705_poll_touch,
+       .acc_enable = wm9705_acc_enable,
+       .phy_init = wm9705_phy_init,
+       .dig_enable = wm9705_dig_enable,
+       .dig_restore = wm9705_dig_restore,
+       .aux_prepare = wm9705_aux_prepare,
+};
+EXPORT_SYMBOL_GPL(wm9705_codec);
+
+/* Module information */
+MODULE_AUTHOR("Liam Girdwood <liam.girdwood@wolfsonmicro.com>");
+MODULE_DESCRIPTION("WM9705 Touch Screen Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/wm9712.c b/drivers/input/touchscreen/wm9712.c
new file mode 100644 (file)
index 0000000..0b6e4cf
--- /dev/null
@@ -0,0 +1,462 @@
+/*
+ * wm9712.c  --  Codec driver for Wolfson WM9712 AC97 Codecs.
+ *
+ * Copyright 2003, 2004, 2005, 2006, 2007 Wolfson Microelectronics PLC.
+ * Author: Liam Girdwood
+ *         liam.girdwood@wolfsonmicro.com or linux@wolfsonmicro.com
+ * Parts Copyright : Ian Molton <spyro@f2s.com>
+ *                   Andrew Zabolotny <zap@homelink.ru>
+ *                   Russell King <rmk@arm.linux.org.uk>
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/input.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/wm97xx.h>
+
+#define TS_NAME                        "wm97xx"
+#define WM9712_VERSION         "1.00"
+#define DEFAULT_PRESSURE       0xb0c0
+
+/*
+ * Module parameters
+ */
+
+/*
+ * Set internal pull up for pen detect.
+ *
+ * Pull up is in the range 1.02k (least sensitive) to 64k (most sensitive)
+ * i.e. pull up resistance = 64k Ohms / rpu.
+ *
+ * Adjust this value if you are having problems with pen detect not
+ * detecting any down event.
+ */
+static int rpu = 8;
+module_param(rpu, int, 0);
+MODULE_PARM_DESC(rpu, "Set internal pull up resistor for pen detect.");
+
+/*
+ * Set current used for pressure measurement.
+ *
+ * Set pil = 2 to use 400uA
+ *     pil = 1 to use 200uA and
+ *     pil = 0 to disable pressure measurement.
+ *
+ * This is used to increase the range of values returned by the adc
+ * when measuring touchpanel pressure.
+ */
+static int pil;
+module_param(pil, int, 0);
+MODULE_PARM_DESC(pil, "Set current used for pressure measurement.");
+
+/*
+ * Set threshold for pressure measurement.
+ *
+ * Pen down pressure below threshold is ignored.
+ */
+static int pressure = DEFAULT_PRESSURE & 0xfff;
+module_param(pressure, int, 0);
+MODULE_PARM_DESC(pressure, "Set threshold for pressure measurement.");
+
+/*
+ * Set adc sample delay.
+ *
+ * For accurate touchpanel measurements, some settling time may be
+ * required between the switch matrix applying a voltage across the
+ * touchpanel plate and the ADC sampling the signal.
+ *
+ * This delay can be set by setting delay = n, where n is the array
+ * position of the delay in the array delay_table below.
+ * Long delays > 1ms are supported for completeness, but are not
+ * recommended.
+ */
+static int delay = 3;
+module_param(delay, int, 0);
+MODULE_PARM_DESC(delay, "Set adc sample delay.");
+
+/*
+ * Set five_wire = 1 to use a 5 wire touchscreen.
+ *
+ * NOTE: Five wire mode does not allow for readback of pressure.
+ */
+static int five_wire;
+module_param(five_wire, int, 0);
+MODULE_PARM_DESC(five_wire, "Set to '1' to use 5-wire touchscreen.");
+
+/*
+ * Set adc mask function.
+ *
+ * Sources of glitch noise, such as signals driving an LCD display, may feed
+ * through to the touch screen plates and affect measurement accuracy. In
+ * order to minimise this, a signal may be applied to the MASK pin to delay or
+ * synchronise the sampling.
+ *
+ * 0 = No delay or sync
+ * 1 = High on pin stops conversions
+ * 2 = Edge triggered, edge on pin delays conversion by delay param (above)
+ * 3 = Edge triggered, edge on pin starts conversion after delay param
+ */
+static int mask;
+module_param(mask, int, 0);
+MODULE_PARM_DESC(mask, "Set adc mask function.");
+
+/*
+ * Coordinate Polling Enable.
+ *
+ * Set to 1 to enable coordinate polling, i.e. x,y[,p] are sampled together
+ * for every poll.
+ */
+static int coord;
+module_param(coord, int, 0);
+MODULE_PARM_DESC(coord, "Polling coordinate mode");
+
+/*
+ * ADC sample delay times in uS
+ */
+static const int delay_table[] = {
+       21,    /* 1 AC97 Link frames */
+       42,    /* 2 */
+       84,    /* 4 */
+       167,   /* 8 */
+       333,   /* 16 */
+       667,   /* 32 */
+       1000,  /* 48 */
+       1333,  /* 64 */
+       2000,  /* 96 */
+       2667,  /* 128 */
+       3333,  /* 160 */
+       4000,  /* 192 */
+       4667,  /* 224 */
+       5333,  /* 256 */
+       6000,  /* 288 */
+       0      /* No delay, switch matrix always on */
+};
+
+/*
+ * Delay after issuing a POLL command.
+ *
+ * The delay is 3 AC97 link frames + the touchpanel settling delay
+ */
+static inline void poll_delay(int d)
+{
+       udelay(3 * AC97_LINK_FRAME + delay_table[d]);
+}
+
+/*
+ * set up the physical settings of the WM9712
+ */
+static void wm9712_phy_init(struct wm97xx *wm)
+{
+       u16 dig1 = 0;
+       u16 dig2 = WM97XX_RPR | WM9712_RPU(1);
+
+       /* WM9712 rpu */
+       if (rpu) {
+               dig2 &= 0xffc0;
+               dig2 |= WM9712_RPU(rpu);
+               dev_dbg(wm->dev, "setting pen detect pull-up to %d Ohms",
+                       64000 / rpu);
+       }
+
+       /* touchpanel pressure current */
+       if (pil == 2) {
+               dig2 |= WM9712_PIL;
+               dev_dbg(wm->dev,
+                       "setting pressure measurement current to 400uA.");
+       } else if (pil)
+               dev_dbg(wm->dev,
+                       "setting pressure measurement current to 200uA.");
+       if (!pil)
+               pressure = 0;
+
+       /* WM9712 five wire */
+       if (five_wire) {
+               dig2 |= WM9712_45W;
+               dev_dbg(wm->dev, "setting 5-wire touchscreen mode.");
+       }
+
+       /* polling mode sample settling delay */
+       if (delay < 0 || delay > 15) {
+               dev_dbg(wm->dev, "supplied delay out of range.");
+               delay = 4;
+       }
+       dig1 &= 0xff0f;
+       dig1 |= WM97XX_DELAY(delay);
+       dev_dbg(wm->dev, "setting adc sample delay to %d u Secs.",
+               delay_table[delay]);
+
+       /* mask */
+       dig2 |= ((mask & 0x3) << 6);
+       if (mask) {
+               u16 reg;
+               /* Set GPIO4 as Mask Pin*/
+               reg = wm97xx_reg_read(wm, AC97_MISC_AFE);
+               wm97xx_reg_write(wm, AC97_MISC_AFE, reg | WM97XX_GPIO_4);
+               reg = wm97xx_reg_read(wm, AC97_GPIO_CFG);
+               wm97xx_reg_write(wm, AC97_GPIO_CFG, reg | WM97XX_GPIO_4);
+       }
+
+       /* wait - coord mode */
+       if (coord)
+               dig2 |= WM9712_WAIT;
+
+       wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER1, dig1);
+       wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER2, dig2);
+}
+
+static void wm9712_dig_enable(struct wm97xx *wm, int enable)
+{
+       u16 dig2 = wm->dig[2];
+
+       if (enable) {
+               wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER2,
+                                dig2 | WM97XX_PRP_DET_DIG);
+               wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD); /* dummy read */
+       } else
+               wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER2,
+                                dig2 & ~WM97XX_PRP_DET_DIG);
+}
+
+static void wm9712_aux_prepare(struct wm97xx *wm)
+{
+       memcpy(wm->dig_save, wm->dig, sizeof(wm->dig));
+       wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER1, 0);
+       wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER2, WM97XX_PRP_DET_DIG);
+}
+
+static void wm9712_dig_restore(struct wm97xx *wm)
+{
+       wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER1, wm->dig_save[1]);
+       wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER2, wm->dig_save[2]);
+}
+
+static inline int is_pden(struct wm97xx *wm)
+{
+       return wm->dig[2] & WM9712_PDEN;
+}
+
+/*
+ * Read a sample from the WM9712 adc in polling mode.
+ */
+static int wm9712_poll_sample(struct wm97xx *wm, int adcsel, int *sample)
+{
+       int timeout = 5 * delay;
+
+       if (!wm->pen_probably_down) {
+               u16 data = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
+               if (!(data & WM97XX_PEN_DOWN))
+                       return RC_PENUP;
+               wm->pen_probably_down = 1;
+       }
+
+       /* set up digitiser */
+       if (adcsel & 0x8000)
+               adcsel = ((adcsel & 0x7fff) + 3) << 12;
+
+       if (wm->mach_ops && wm->mach_ops->pre_sample)
+               wm->mach_ops->pre_sample(adcsel);
+       wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER1,
+                        adcsel | WM97XX_POLL | WM97XX_DELAY(delay));
+
+       /* wait 3 AC97 time slots + delay for conversion */
+       poll_delay(delay);
+
+       /* wait for POLL to go low */
+       while ((wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER1) & WM97XX_POLL)
+              && timeout) {
+               udelay(AC97_LINK_FRAME);
+               timeout--;
+       }
+
+       if (timeout <= 0) {
+               /* If PDEN is set, we can get a timeout when pen goes up */
+               if (is_pden(wm))
+                       wm->pen_probably_down = 0;
+               else
+                       dev_dbg(wm->dev, "adc sample timeout");
+               return RC_PENUP;
+       }
+
+       *sample = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
+       if (wm->mach_ops && wm->mach_ops->post_sample)
+               wm->mach_ops->post_sample(adcsel);
+
+       /* check we have correct sample */
+       if ((*sample & WM97XX_ADCSEL_MASK) != adcsel) {
+               dev_dbg(wm->dev, "adc wrong sample, read %x got %x", adcsel,
+               *sample & WM97XX_ADCSEL_MASK);
+               return RC_PENUP;
+       }
+
+       if (!(*sample & WM97XX_PEN_DOWN)) {
+               wm->pen_probably_down = 0;
+               return RC_PENUP;
+       }
+
+       return RC_VALID;
+}
+
+/*
+ * Read a coord from the WM9712 adc in polling mode.
+ */
+static int wm9712_poll_coord(struct wm97xx *wm, struct wm97xx_data *data)
+{
+       int timeout = 5 * delay;
+
+       if (!wm->pen_probably_down) {
+               u16 data_rd = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
+               if (!(data_rd & WM97XX_PEN_DOWN))
+                       return RC_PENUP;
+               wm->pen_probably_down = 1;
+       }
+
+       /* set up digitiser */
+       if (wm->mach_ops && wm->mach_ops->pre_sample)
+               wm->mach_ops->pre_sample(WM97XX_ADCSEL_X | WM97XX_ADCSEL_Y);
+
+       wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER1,
+               WM97XX_COO | WM97XX_POLL | WM97XX_DELAY(delay));
+
+       /* wait 3 AC97 time slots + delay for conversion and read x */
+       poll_delay(delay);
+       data->x = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
+       /* wait for POLL to go low */
+       while ((wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER1) & WM97XX_POLL)
+              && timeout) {
+               udelay(AC97_LINK_FRAME);
+               timeout--;
+       }
+
+       if (timeout <= 0) {
+               /* If PDEN is set, we can get a timeout when pen goes up */
+               if (is_pden(wm))
+                       wm->pen_probably_down = 0;
+               else
+                       dev_dbg(wm->dev, "adc sample timeout");
+               return RC_PENUP;
+       }
+
+       /* read back y data */
+       data->y = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
+       if (pil)
+               data->p = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
+       else
+               data->p = DEFAULT_PRESSURE;
+
+       if (wm->mach_ops && wm->mach_ops->post_sample)
+               wm->mach_ops->post_sample(WM97XX_ADCSEL_X | WM97XX_ADCSEL_Y);
+
+       /* check we have correct sample */
+       if (!(data->x & WM97XX_ADCSEL_X) || !(data->y & WM97XX_ADCSEL_Y))
+               goto err;
+       if (pil && !(data->p & WM97XX_ADCSEL_PRES))
+               goto err;
+
+       if (!(data->x & WM97XX_PEN_DOWN) || !(data->y & WM97XX_PEN_DOWN)) {
+               wm->pen_probably_down = 0;
+               return RC_PENUP;
+       }
+       return RC_VALID;
+err:
+       return 0;
+}
+
+/*
+ * Sample the WM9712 touchscreen in polling mode
+ */
+static int wm9712_poll_touch(struct wm97xx *wm, struct wm97xx_data *data)
+{
+       int rc;
+
+       if (coord) {
+               rc = wm9712_poll_coord(wm, data);
+               if (rc != RC_VALID)
+                       return rc;
+       } else {
+               rc = wm9712_poll_sample(wm, WM97XX_ADCSEL_X, &data->x);
+               if (rc != RC_VALID)
+                       return rc;
+
+               rc = wm9712_poll_sample(wm, WM97XX_ADCSEL_Y, &data->y);
+               if (rc != RC_VALID)
+                       return rc;
+
+               if (pil && !five_wire) {
+                       rc = wm9712_poll_sample(wm, WM97XX_ADCSEL_PRES,
+                                               &data->p);
+                       if (rc != RC_VALID)
+                               return rc;
+               } else
+                       data->p = DEFAULT_PRESSURE;
+       }
+       return RC_VALID;
+}
+
+/*
+ * Enable WM9712 continuous mode, i.e. touch data is streamed across
+ * an AC97 slot
+ */
+static int wm9712_acc_enable(struct wm97xx *wm, int enable)
+{
+       u16 dig1, dig2;
+       int ret = 0;
+
+       dig1 = wm->dig[1];
+       dig2 = wm->dig[2];
+
+       if (enable) {
+               /* continuous mode */
+               if (wm->mach_ops->acc_startup) {
+                       ret = wm->mach_ops->acc_startup(wm);
+                       if (ret < 0)
+                               return ret;
+               }
+               dig1 &= ~(WM97XX_CM_RATE_MASK | WM97XX_ADCSEL_MASK |
+                       WM97XX_DELAY_MASK | WM97XX_SLT_MASK);
+               dig1 |= WM97XX_CTC | WM97XX_COO | WM97XX_SLEN |
+                       WM97XX_DELAY(delay) |
+                       WM97XX_SLT(wm->acc_slot) |
+                       WM97XX_RATE(wm->acc_rate);
+               if (pil)
+                       dig1 |= WM97XX_ADCSEL_PRES;
+               dig2 |= WM9712_PDEN;
+       } else {
+               dig1 &= ~(WM97XX_CTC | WM97XX_COO | WM97XX_SLEN);
+               dig2 &= ~WM9712_PDEN;
+               if (wm->mach_ops->acc_shutdown)
+                       wm->mach_ops->acc_shutdown(wm);
+       }
+
+       wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER1, dig1);
+       wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER2, dig2);
+
+       return 0;
+}
+
+struct wm97xx_codec_drv wm9712_codec = {
+       .id = WM9712_ID2,
+       .name = "wm9712",
+       .poll_sample = wm9712_poll_sample,
+       .poll_touch = wm9712_poll_touch,
+       .acc_enable = wm9712_acc_enable,
+       .phy_init = wm9712_phy_init,
+       .dig_enable = wm9712_dig_enable,
+       .dig_restore = wm9712_dig_restore,
+       .aux_prepare = wm9712_aux_prepare,
+};
+EXPORT_SYMBOL_GPL(wm9712_codec);
+
+/* Module information */
+MODULE_AUTHOR("Liam Girdwood <liam.girdwood@wolfsonmicro.com>");
+MODULE_DESCRIPTION("WM9712 Touch Screen Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/wm9713.c b/drivers/input/touchscreen/wm9713.c
new file mode 100644 (file)
index 0000000..01278bd
--- /dev/null
@@ -0,0 +1,460 @@
+/*
+ * wm9713.c  --  Codec touch driver for Wolfson WM9713 AC97 Codec.
+ *
+ * Copyright 2003, 2004, 2005, 2006, 2007, 2008 Wolfson Microelectronics PLC.
+ * Author: Liam Girdwood
+ *         liam.girdwood@wolfsonmicro.com or linux@wolfsonmicro.com
+ * Parts Copyright : Ian Molton <spyro@f2s.com>
+ *                   Andrew Zabolotny <zap@homelink.ru>
+ *                   Russell King <rmk@arm.linux.org.uk>
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/input.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/wm97xx.h>
+
+#define TS_NAME                        "wm97xx"
+#define WM9713_VERSION         "1.00"
+#define DEFAULT_PRESSURE       0xb0c0
+
+/*
+ * Module parameters
+ */
+
+/*
+ * Set internal pull up for pen detect.
+ *
+ * Pull up is in the range 1.02k (least sensitive) to 64k (most sensitive)
+ * i.e. pull up resistance = 64k Ohms / rpu.
+ *
+ * Adjust this value if you are having problems with pen detect not
+ * detecting any down event.
+ */
+static int rpu = 8;
+module_param(rpu, int, 0);
+MODULE_PARM_DESC(rpu, "Set internal pull up resistor for pen detect.");
+
+/*
+ * Set current used for pressure measurement.
+ *
+ * Set pil = 2 to use 400uA
+ *     pil = 1 to use 200uA and
+ *     pil = 0 to disable pressure measurement.
+ *
+ * This is used to increase the range of values returned by the adc
+ * when measuring touchpanel pressure.
+ */
+static int pil;
+module_param(pil, int, 0);
+MODULE_PARM_DESC(pil, "Set current used for pressure measurement.");
+
+/*
+ * Set threshold for pressure measurement.
+ *
+ * Pen down pressure below threshold is ignored.
+ */
+static int pressure = DEFAULT_PRESSURE & 0xfff;
+module_param(pressure, int, 0);
+MODULE_PARM_DESC(pressure, "Set threshold for pressure measurement.");
+
+/*
+ * Set adc sample delay.
+ *
+ * For accurate touchpanel measurements, some settling time may be
+ * required between the switch matrix applying a voltage across the
+ * touchpanel plate and the ADC sampling the signal.
+ *
+ * This delay can be set by setting delay = n, where n is the array
+ * position of the delay in the array delay_table below.
+ * Long delays > 1ms are supported for completeness, but are not
+ * recommended.
+ */
+static int delay = 4;
+module_param(delay, int, 0);
+MODULE_PARM_DESC(delay, "Set adc sample delay.");
+
+/*
+ * Set adc mask function.
+ *
+ * Sources of glitch noise, such as signals driving an LCD display, may feed
+ * through to the touch screen plates and affect measurement accuracy. In
+ * order to minimise this, a signal may be applied to the MASK pin to delay or
+ * synchronise the sampling.
+ *
+ * 0 = No delay or sync
+ * 1 = High on pin stops conversions
+ * 2 = Edge triggered, edge on pin delays conversion by delay param (above)
+ * 3 = Edge triggered, edge on pin starts conversion after delay param
+ */
+static int mask;
+module_param(mask, int, 0);
+MODULE_PARM_DESC(mask, "Set adc mask function.");
+
+/*
+ * Coordinate Polling Enable.
+ *
+ * Set to 1 to enable coordinate polling, i.e. x,y[,p] are sampled together
+ * for every poll.
+ */
+static int coord;
+module_param(coord, int, 0);
+MODULE_PARM_DESC(coord, "Polling coordinate mode");
+
+/*
+ * ADC sample delay times in uS
+ */
+static const int delay_table[] = {
+       21,    /* 1 AC97 Link frames */
+       42,    /* 2 */
+       84,    /* 4 */
+       167,   /* 8 */
+       333,   /* 16 */
+       667,   /* 32 */
+       1000,  /* 48 */
+       1333,  /* 64 */
+       2000,  /* 96 */
+       2667,  /* 128 */
+       3333,  /* 160 */
+       4000,  /* 192 */
+       4667,  /* 224 */
+       5333,  /* 256 */
+       6000,  /* 288 */
+       0      /* No delay, switch matrix always on */
+};
+
+/*
+ * Delay after issuing a POLL command.
+ *
+ * The delay is 3 AC97 link frames + the touchpanel settling delay
+ */
+static inline void poll_delay(int d)
+{
+       udelay(3 * AC97_LINK_FRAME + delay_table[d]);
+}
+
+/*
+ * set up the physical settings of the WM9713
+ */
+static void wm9713_phy_init(struct wm97xx *wm)
+{
+       u16 dig1 = 0, dig2, dig3;
+
+       /* default values */
+       dig2 = WM97XX_DELAY(4) | WM97XX_SLT(5);
+       dig3 = WM9712_RPU(1);
+
+       /* rpu */
+       if (rpu) {
+               dig3 &= 0xffc0;
+               dig3 |= WM9712_RPU(rpu);
+               dev_info(wm->dev, "setting pen detect pull-up to %d Ohms\n",
+                        64000 / rpu);
+       }
+
+       /* touchpanel pressure */
+       if (pil == 2) {
+               dig3 |= WM9712_PIL;
+               dev_info(wm->dev,
+                        "setting pressure measurement current to 400uA.");
+       } else if (pil)
+               dev_info(wm->dev,
+                        "setting pressure measurement current to 200uA.");
+       if (!pil)
+               pressure = 0;
+
+       /* sample settling delay */
+       if (delay < 0 || delay > 15) {
+               dev_info(wm->dev, "supplied delay out of range.");
+               delay = 4;
+               dev_info(wm->dev, "setting adc sample delay to %d u Secs.",
+                        delay_table[delay]);
+       }
+       dig2 &= 0xff0f;
+       dig2 |= WM97XX_DELAY(delay);
+
+       /* mask */
+       dig3 |= ((mask & 0x3) << 4);
+       if (coord)
+               dig3 |= WM9713_WAIT;
+
+       wm->misc = wm97xx_reg_read(wm, 0x5a);
+
+       wm97xx_reg_write(wm, AC97_WM9713_DIG1, dig1);
+       wm97xx_reg_write(wm, AC97_WM9713_DIG2, dig2);
+       wm97xx_reg_write(wm, AC97_WM9713_DIG3, dig3);
+       wm97xx_reg_write(wm, AC97_GPIO_STICKY, 0x0);
+}
+
+static void wm9713_dig_enable(struct wm97xx *wm, int enable)
+{
+       u16 val;
+
+       if (enable) {
+               val = wm97xx_reg_read(wm, AC97_EXTENDED_MID);
+               wm97xx_reg_write(wm, AC97_EXTENDED_MID, val & 0x7fff);
+               wm97xx_reg_write(wm, AC97_WM9713_DIG3, wm->dig[2] |
+                                WM97XX_PRP_DET_DIG);
+               wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD); /* dummy read */
+       } else {
+               wm97xx_reg_write(wm, AC97_WM9713_DIG3, wm->dig[2] &
+                                       ~WM97XX_PRP_DET_DIG);
+               val = wm97xx_reg_read(wm, AC97_EXTENDED_MID);
+               wm97xx_reg_write(wm, AC97_EXTENDED_MID, val | 0x8000);
+       }
+}
+
+static void wm9713_dig_restore(struct wm97xx *wm)
+{
+       wm97xx_reg_write(wm, AC97_WM9713_DIG1, wm->dig_save[0]);
+       wm97xx_reg_write(wm, AC97_WM9713_DIG2, wm->dig_save[1]);
+       wm97xx_reg_write(wm, AC97_WM9713_DIG3, wm->dig_save[2]);
+}
+
+static void wm9713_aux_prepare(struct wm97xx *wm)
+{
+       memcpy(wm->dig_save, wm->dig, sizeof(wm->dig));
+       wm97xx_reg_write(wm, AC97_WM9713_DIG1, 0);
+       wm97xx_reg_write(wm, AC97_WM9713_DIG2, 0);
+       wm97xx_reg_write(wm, AC97_WM9713_DIG3, WM97XX_PRP_DET_DIG);
+}
+
+static inline int is_pden(struct wm97xx *wm)
+{
+       return wm->dig[2] & WM9713_PDEN;
+}
+
+/*
+ * Read a sample from the WM9713 adc in polling mode.
+ */
+static int wm9713_poll_sample(struct wm97xx *wm, int adcsel, int *sample)
+{
+       u16 dig1;
+       int timeout = 5 * delay;
+
+       if (!wm->pen_probably_down) {
+               u16 data = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
+               if (!(data & WM97XX_PEN_DOWN))
+                       return RC_PENUP;
+               wm->pen_probably_down = 1;
+       }
+
+       /* set up digitiser */
+       if (adcsel & 0x8000)
+               adcsel = 1 << ((adcsel & 0x7fff) + 3);
+
+       dig1 = wm97xx_reg_read(wm, AC97_WM9713_DIG1);
+       dig1 &= ~WM9713_ADCSEL_MASK;
+
+       if (wm->mach_ops && wm->mach_ops->pre_sample)
+               wm->mach_ops->pre_sample(adcsel);
+       wm97xx_reg_write(wm, AC97_WM9713_DIG1, dig1 | adcsel | WM9713_POLL);
+
+       /* wait 3 AC97 time slots + delay for conversion */
+       poll_delay(delay);
+
+       /* wait for POLL to go low */
+       while ((wm97xx_reg_read(wm, AC97_WM9713_DIG1) & WM9713_POLL) &&
+               timeout) {
+               udelay(AC97_LINK_FRAME);
+               timeout--;
+       }
+
+       if (timeout <= 0) {
+               /* If PDEN is set, we can get a timeout when pen goes up */
+               if (is_pden(wm))
+                       wm->pen_probably_down = 0;
+               else
+                       dev_dbg(wm->dev, "adc sample timeout");
+               return RC_PENUP;
+       }
+
+       *sample = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
+       if (wm->mach_ops && wm->mach_ops->post_sample)
+               wm->mach_ops->post_sample(adcsel);
+
+       /* check we have correct sample */
+       if ((*sample & WM97XX_ADCSRC_MASK) != ffs(adcsel >> 1) << 12) {
+               dev_dbg(wm->dev, "adc wrong sample, read %x got %x", adcsel,
+                       *sample & WM97XX_ADCSRC_MASK);
+               return RC_PENUP;
+       }
+
+       if (!(*sample & WM97XX_PEN_DOWN)) {
+               wm->pen_probably_down = 0;
+               return RC_PENUP;
+       }
+
+       return RC_VALID;
+}
+
+/*
+ * Read a coordinate from the WM9713 adc in polling mode.
+ */
+static int wm9713_poll_coord(struct wm97xx *wm, struct wm97xx_data *data)
+{
+       u16 dig1;
+       int timeout = 5 * delay;
+
+       if (!wm->pen_probably_down) {
+               u16 val = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
+               if (!(val & WM97XX_PEN_DOWN))
+                       return RC_PENUP;
+               wm->pen_probably_down = 1;
+       }
+
+       /* set up digitiser */
+       dig1 = wm97xx_reg_read(wm, AC97_WM9713_DIG1);
+       dig1 &= ~WM9713_ADCSEL_MASK;
+       if (pil)
+               dig1 |= WM9713_ADCSEL_PRES;
+
+       if (wm->mach_ops && wm->mach_ops->pre_sample)
+               wm->mach_ops->pre_sample(WM97XX_ADCSEL_X | WM97XX_ADCSEL_Y);
+       wm97xx_reg_write(wm, AC97_WM9713_DIG1,
+                        dig1 | WM9713_POLL | WM9713_COO);
+
+       /* wait 3 AC97 time slots + delay for conversion */
+       poll_delay(delay);
+       data->x = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
+       /* wait for POLL to go low */
+       while ((wm97xx_reg_read(wm, AC97_WM9713_DIG1) & WM9713_POLL)
+              && timeout) {
+               udelay(AC97_LINK_FRAME);
+               timeout--;
+       }
+
+       if (timeout <= 0) {
+               /* If PDEN is set, we can get a timeout when pen goes up */
+               if (is_pden(wm))
+                       wm->pen_probably_down = 0;
+               else
+                       dev_dbg(wm->dev, "adc sample timeout");
+               return RC_PENUP;
+       }
+
+       /* read back data */
+       data->y = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
+       if (pil)
+               data->p = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
+       else
+               data->p = DEFAULT_PRESSURE;
+
+       if (wm->mach_ops && wm->mach_ops->post_sample)
+               wm->mach_ops->post_sample(WM97XX_ADCSEL_X | WM97XX_ADCSEL_Y);
+
+       /* check we have correct sample */
+       if (!(data->x & WM97XX_ADCSEL_X) || !(data->y & WM97XX_ADCSEL_Y))
+               goto err;
+       if (pil && !(data->p & WM97XX_ADCSEL_PRES))
+               goto err;
+
+       if (!(data->x & WM97XX_PEN_DOWN) || !(data->y & WM97XX_PEN_DOWN)) {
+               wm->pen_probably_down = 0;
+               return RC_PENUP;
+       }
+       return RC_VALID;
+err:
+       return 0;
+}
+
+/*
+ * Sample the WM9713 touchscreen in polling mode
+ */
+static int wm9713_poll_touch(struct wm97xx *wm, struct wm97xx_data *data)
+{
+       int rc;
+
+       if (coord) {
+               rc = wm9713_poll_coord(wm, data);
+               if (rc != RC_VALID)
+                       return rc;
+       } else {
+               rc = wm9713_poll_sample(wm, WM9713_ADCSEL_X, &data->x);
+               if (rc != RC_VALID)
+                       return rc;
+               rc = wm9713_poll_sample(wm, WM9713_ADCSEL_Y, &data->y);
+               if (rc != RC_VALID)
+                       return rc;
+               if (pil) {
+                       rc = wm9713_poll_sample(wm, WM9713_ADCSEL_PRES,
+                                               &data->p);
+                       if (rc != RC_VALID)
+                               return rc;
+               } else
+                       data->p = DEFAULT_PRESSURE;
+       }
+       return RC_VALID;
+}
+
+/*
+ * Enable WM9713 continuous mode, i.e. touch data is streamed across
+ * an AC97 slot
+ */
+static int wm9713_acc_enable(struct wm97xx *wm, int enable)
+{
+       u16 dig1, dig2, dig3;
+       int ret = 0;
+
+       dig1 = wm->dig[0];
+       dig2 = wm->dig[1];
+       dig3 = wm->dig[2];
+
+       if (enable) {
+               /* continuous mode */
+               if (wm->mach_ops->acc_startup &&
+                       (ret = wm->mach_ops->acc_startup(wm)) < 0)
+                       return ret;
+
+               dig1 &= ~WM9713_ADCSEL_MASK;
+               dig1 |= WM9713_CTC | WM9713_COO | WM9713_ADCSEL_X |
+                       WM9713_ADCSEL_Y;
+               if (pil)
+                       dig1 |= WM9713_ADCSEL_PRES;
+               dig2 &= ~(WM97XX_DELAY_MASK | WM97XX_SLT_MASK  |
+                       WM97XX_CM_RATE_MASK);
+               dig2 |= WM97XX_SLEN | WM97XX_DELAY(delay) |
+               WM97XX_SLT(wm->acc_slot) | WM97XX_RATE(wm->acc_rate);
+               dig3 |= WM9713_PDEN;
+       } else {
+               dig1 &= ~(WM9713_CTC | WM9713_COO);
+               dig2 &= ~WM97XX_SLEN;
+               dig3 &= ~WM9713_PDEN;
+               if (wm->mach_ops->acc_shutdown)
+                       wm->mach_ops->acc_shutdown(wm);
+       }
+
+       wm97xx_reg_write(wm, AC97_WM9713_DIG1, dig1);
+       wm97xx_reg_write(wm, AC97_WM9713_DIG2, dig2);
+       wm97xx_reg_write(wm, AC97_WM9713_DIG3, dig3);
+
+       return ret;
+}
+
+struct wm97xx_codec_drv wm9713_codec = {
+       .id = WM9713_ID2,
+       .name = "wm9713",
+       .poll_sample = wm9713_poll_sample,
+       .poll_touch = wm9713_poll_touch,
+       .acc_enable = wm9713_acc_enable,
+       .phy_init = wm9713_phy_init,
+       .dig_enable = wm9713_dig_enable,
+       .dig_restore = wm9713_dig_restore,
+       .aux_prepare = wm9713_aux_prepare,
+};
+EXPORT_SYMBOL_GPL(wm9713_codec);
+
+/* Module information */
+MODULE_AUTHOR("Liam Girdwood <liam.girdwood@wolfsonmicro.com>");
+MODULE_DESCRIPTION("WM9713 Touch Screen Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
new file mode 100644 (file)
index 0000000..e9c7ea4
--- /dev/null
@@ -0,0 +1,829 @@
+/*
+ * wm97xx-core.c  --  Touch screen driver core for Wolfson WM9705, WM9712
+ *                    and WM9713 AC97 Codecs.
+ *
+ * Copyright 2003, 2004, 2005, 2006, 2007, 2008 Wolfson Microelectronics PLC.
+ * Author: Liam Girdwood
+ *         liam.girdwood@wolfsonmicro.com or linux@wolfsonmicro.com
+ * Parts Copyright : Ian Molton <spyro@f2s.com>
+ *                   Andrew Zabolotny <zap@homelink.ru>
+ *                   Russell King <rmk@arm.linux.org.uk>
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ * Notes:
+ *
+ *  Features:
+ *       - supports WM9705, WM9712, WM9713
+ *       - polling mode
+ *       - continuous mode (arch-dependent)
+ *       - adjustable rpu/dpp settings
+ *       - adjustable pressure current
+ *       - adjustable sample settle delay
+ *       - 4 and 5 wire touchscreens (5 wire is WM9712 only)
+ *       - pen down detection
+ *       - battery monitor
+ *       - sample AUX adcs
+ *       - power management
+ *       - codec GPIO
+ *       - codec event notification
+ * Todo
+ *       - Support for async sampling control for noisy LCDs.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/proc_fs.h>
+#include <linux/pm.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/workqueue.h>
+#include <linux/wm97xx.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+
+#define TS_NAME                        "wm97xx"
+#define WM_CORE_VERSION                "1.00"
+#define DEFAULT_PRESSURE       0xb0c0
+
+
+/*
+ * Touchscreen absolute values
+ *
+ * These parameters are used to help the input layer discard out of
+ * range readings and reduce jitter etc.
+ *
+ *   o min, max:- indicate the min and max values your touch screen returns
+ *   o fuzz:- use a higher number to reduce jitter
+ *
+ * The default values correspond to Mainstone II in QVGA mode
+ *
+ * Please read
+ * Documentation/input/input-programming.txt for more details.
+ */
+
+static int abs_x[3] = {350, 3900, 5};
+module_param_array(abs_x, int, NULL, 0);
+MODULE_PARM_DESC(abs_x, "Touchscreen absolute X min, max, fuzz");
+
+static int abs_y[3] = {320, 3750, 40};
+module_param_array(abs_y, int, NULL, 0);
+MODULE_PARM_DESC(abs_y, "Touchscreen absolute Y min, max, fuzz");
+
+static int abs_p[3] = {0, 150, 4};
+module_param_array(abs_p, int, NULL, 0);
+MODULE_PARM_DESC(abs_p, "Touchscreen absolute Pressure min, max, fuzz");
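
A panel with a different active area can override these defaults when the
module is loaded, e.g. by passing abs_x=150,3900,5 abs_y=200,3800,40
abs_p=0,200,4 as wm97xx-core module parameters (illustrative values, not
taken from this patch).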
+
+/*
+ * wm97xx IO access, all IO locking done by AC97 layer
+ */
+int wm97xx_reg_read(struct wm97xx *wm, u16 reg)
+{
+       if (wm->ac97)
+               return wm->ac97->bus->ops->read(wm->ac97, reg);
+       else
+               return -1;
+}
+EXPORT_SYMBOL_GPL(wm97xx_reg_read);
+
+void wm97xx_reg_write(struct wm97xx *wm, u16 reg, u16 val)
+{
+       /* cache digitiser registers */
+       if (reg >= AC97_WM9713_DIG1 && reg <= AC97_WM9713_DIG3)
+               wm->dig[(reg - AC97_WM9713_DIG1) >> 1] = val;
+
+       /* cache gpio regs */
+       if (reg >= AC97_GPIO_CFG && reg <= AC97_MISC_AFE)
+               wm->gpio[(reg - AC97_GPIO_CFG) >> 1] = val;
+
+       /* wm9713 irq reg */
+       if (reg == 0x5a)
+               wm->misc = val;
+
+       if (wm->ac97)
+               wm->ac97->bus->ops->write(wm->ac97, reg, val);
+}
+EXPORT_SYMBOL_GPL(wm97xx_reg_write);
+
+/**
+ * wm97xx_read_aux_adc - Read the aux adc.
+ * @wm: wm97xx device.
+ * @adcsel: codec ADC to be read
+ *
+ * Reads the selected AUX ADC.
+ */
+
+int wm97xx_read_aux_adc(struct wm97xx *wm, u16 adcsel)
+{
+       int power_adc = 0, auxval;
+       u16 power = 0;
+
+       /* get codec */
+       mutex_lock(&wm->codec_mutex);
+
+       /* When the touchscreen is not in use, we may have to power up
+        * the AUX ADC before we can sample the AUX inputs.
+        */
+       if (wm->id == WM9713_ID2 &&
+           (power = wm97xx_reg_read(wm, AC97_EXTENDED_MID)) & 0x8000) {
+               power_adc = 1;
+               wm97xx_reg_write(wm, AC97_EXTENDED_MID, power & 0x7fff);
+       }
+
+       /* Prepare the codec for AUX reading */
+       wm->codec->aux_prepare(wm);
+
+       /* Turn polling mode on to read AUX ADC */
+       wm->pen_probably_down = 1;
+       wm->codec->poll_sample(wm, adcsel, &auxval);
+
+       if (power_adc)
+               wm97xx_reg_write(wm, AC97_EXTENDED_MID, power | 0x8000);
+
+       wm->codec->dig_restore(wm);
+
+       wm->pen_probably_down = 0;
+
+       mutex_unlock(&wm->codec_mutex);
+       return auxval & 0xfff;
+}
+EXPORT_SYMBOL_GPL(wm97xx_read_aux_adc);
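
A minimal usage sketch for this export (the consumer, the AUX selector and
the scaling are assumptions for illustration; WM97XX_AUX_ID1 is taken to be
one of the AUX selectors from <linux/wm97xx.h>, and the full-scale voltage
is board specific):

static int example_read_batt_mv(struct wm97xx *wm)
{
        /* returns the 12-bit conversion result for the selected AUX input */
        int raw = wm97xx_read_aux_adc(wm, WM97XX_AUX_ID1);

        return (raw * 3300) / 4096;     /* assumed 3.3 V full scale */
}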
+
+/**
+ * wm97xx_get_gpio - Get the status of a codec GPIO.
+ * @wm: wm97xx device.
+ * @gpio: gpio
+ *
+ * Get the status of a codec GPIO pin
+ */
+
+enum wm97xx_gpio_status wm97xx_get_gpio(struct wm97xx *wm, u32 gpio)
+{
+       u16 status;
+       enum wm97xx_gpio_status ret;
+
+       mutex_lock(&wm->codec_mutex);
+       status = wm97xx_reg_read(wm, AC97_GPIO_STATUS);
+
+       if (status & gpio)
+               ret = WM97XX_GPIO_HIGH;
+       else
+               ret = WM97XX_GPIO_LOW;
+
+       mutex_unlock(&wm->codec_mutex);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(wm97xx_get_gpio);
+
+/**
+ * wm97xx_set_gpio - Set the status of a codec GPIO.
+ * @wm: wm97xx device.
+ * @gpio: gpio
+ *
+ * Set the status of a codec GPIO pin
+ */
+
+void wm97xx_set_gpio(struct wm97xx *wm, u32 gpio,
+                               enum wm97xx_gpio_status status)
+{
+       u16 reg;
+
+       mutex_lock(&wm->codec_mutex);
+       reg = wm97xx_reg_read(wm, AC97_GPIO_STATUS);
+
+       if (status & WM97XX_GPIO_HIGH)
+               reg |= gpio;
+       else
+               reg &= ~gpio;
+
+       if (wm->id == WM9712_ID2)
+               wm97xx_reg_write(wm, AC97_GPIO_STATUS, reg << 1);
+       else
+               wm97xx_reg_write(wm, AC97_GPIO_STATUS, reg);
+       mutex_unlock(&wm->codec_mutex);
+}
+EXPORT_SYMBOL_GPL(wm97xx_set_gpio);
+
+/*
+ * Codec GPIO pin configuration: this sets pin direction, polarity,
+ * stickiness and wake-up.
+ */
+void wm97xx_config_gpio(struct wm97xx *wm, u32 gpio, enum wm97xx_gpio_dir dir,
+                  enum wm97xx_gpio_pol pol, enum wm97xx_gpio_sticky sticky,
+                  enum wm97xx_gpio_wake wake)
+{
+       u16 reg;
+
+       mutex_lock(&wm->codec_mutex);
+       reg = wm97xx_reg_read(wm, AC97_GPIO_POLARITY);
+
+       if (pol == WM97XX_GPIO_POL_HIGH)
+               reg |= gpio;
+       else
+               reg &= ~gpio;
+
+       wm97xx_reg_write(wm, AC97_GPIO_POLARITY, reg);
+       reg = wm97xx_reg_read(wm, AC97_GPIO_STICKY);
+
+       if (sticky == WM97XX_GPIO_STICKY)
+               reg |= gpio;
+       else
+               reg &= ~gpio;
+
+       wm97xx_reg_write(wm, AC97_GPIO_STICKY, reg);
+       reg = wm97xx_reg_read(wm, AC97_GPIO_WAKEUP);
+
+       if (wake == WM97XX_GPIO_WAKE)
+               reg |= gpio;
+       else
+               reg &= ~gpio;
+
+       wm97xx_reg_write(wm, AC97_GPIO_WAKEUP, reg);
+       reg = wm97xx_reg_read(wm, AC97_GPIO_CFG);
+
+       if (dir == WM97XX_GPIO_IN)
+               reg |= gpio;
+       else
+               reg &= ~gpio;
+
+       wm97xx_reg_write(wm, AC97_GPIO_CFG, reg);
+       mutex_unlock(&wm->codec_mutex);
+}
+EXPORT_SYMBOL_GPL(wm97xx_config_gpio);
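
A minimal sketch of how a board driver might use the three GPIO exports
above (the function is hypothetical; WM97XX_GPIO_2/WM97XX_GPIO_3 and the
OUT/NOTSTICKY/NOWAKE enum values are assumed to come from <linux/wm97xx.h>):

static void example_codec_gpio(struct wm97xx *wm)
{
        /* drive GPIO 2 as an active-high, non-sticky, non-waking output */
        wm97xx_config_gpio(wm, WM97XX_GPIO_2, WM97XX_GPIO_OUT,
                           WM97XX_GPIO_POL_HIGH, WM97XX_GPIO_NOTSTICKY,
                           WM97XX_GPIO_NOWAKE);
        wm97xx_set_gpio(wm, WM97XX_GPIO_2, WM97XX_GPIO_HIGH);

        /* read another codec GPIO back */
        if (wm97xx_get_gpio(wm, WM97XX_GPIO_3) == WM97XX_GPIO_HIGH)
                dev_dbg(wm->dev, "codec GPIO 3 is high\n");
}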
+
+/*
+ * Configure the WM97XX_PRP value to use while the system is suspended.
+ * If a value other than 0 is set then WM97xx pen detection will be
+ * left enabled in the configured mode while the system is suspended,
+ * provided the device has users and wakeup has not been disabled via
+ * the wakeup sysfs entries.
+ *
+ * @wm:   WM97xx device to configure
+ * @mode: WM97XX_PRP value to configure while suspended
+ */
+void wm97xx_set_suspend_mode(struct wm97xx *wm, u16 mode)
+{
+       wm->suspend_mode = mode;
+       device_init_wakeup(&wm->input_dev->dev, mode != 0);
+}
+EXPORT_SYMBOL_GPL(wm97xx_set_suspend_mode);
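
A minimal sketch of a board hook using this export to keep pen detection
alive across suspend so a touch can wake the system (the function name is
hypothetical; WM97XX_PRP_DET_DIG is the same mask used by wm97xx_suspend()
later in this file):

static void example_enable_touch_wakeup(struct wm97xx *wm)
{
        /* keep pen-down detection and the digitiser powered in suspend */
        wm97xx_set_suspend_mode(wm, WM97XX_PRP_DET_DIG);
}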
+
+/*
+ * Handle a pen down interrupt.
+ */
+static void wm97xx_pen_irq_worker(struct work_struct *work)
+{
+       struct wm97xx *wm = container_of(work, struct wm97xx, pen_event_work);
+       int pen_was_down = wm->pen_is_down;
+
+       /* do we need to enable the touch panel reader */
+       if (wm->id == WM9705_ID2) {
+               if (wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD) &
+                                       WM97XX_PEN_DOWN)
+                       wm->pen_is_down = 1;
+               else
+                       wm->pen_is_down = 0;
+       } else {
+               u16 status, pol;
+               mutex_lock(&wm->codec_mutex);
+               status = wm97xx_reg_read(wm, AC97_GPIO_STATUS);
+               pol = wm97xx_reg_read(wm, AC97_GPIO_POLARITY);
+
+               if (WM97XX_GPIO_13 & pol & status) {
+                       wm->pen_is_down = 1;
+                       wm97xx_reg_write(wm, AC97_GPIO_POLARITY, pol &
+                                               ~WM97XX_GPIO_13);
+               } else {
+                       wm->pen_is_down = 0;
+                       wm97xx_reg_write(wm, AC97_GPIO_POLARITY, pol |
+                                        WM97XX_GPIO_13);
+               }
+
+               if (wm->id == WM9712_ID2)
+                       wm97xx_reg_write(wm, AC97_GPIO_STATUS, (status &
+                                               ~WM97XX_GPIO_13) << 1);
+               else
+                       wm97xx_reg_write(wm, AC97_GPIO_STATUS, status &
+                                               ~WM97XX_GPIO_13);
+               mutex_unlock(&wm->codec_mutex);
+       }
+
+       /* If the system is not using continuous mode or it provides a
+        * pen down operation then we need to schedule polls while the
+        * pen is down.  Otherwise the machine driver is responsible
+        * for scheduling reads.
+        */
+       if (!wm->mach_ops->acc_enabled || wm->mach_ops->acc_pen_down) {
+               if (wm->pen_is_down && !pen_was_down) {
+                       /* Data is not available immediately on pen down */
+                       queue_delayed_work(wm->ts_workq, &wm->ts_reader, 1);
+               }
+
+               /* Let ts_reader report the pen up for debounce. */
+               if (!wm->pen_is_down && pen_was_down)
+                       wm->pen_is_down = 1;
+       }
+
+       if (!wm->pen_is_down && wm->mach_ops->acc_enabled)
+               wm->mach_ops->acc_pen_up(wm);
+
+       wm->mach_ops->irq_enable(wm, 1);
+}
+
+/*
+ * Codec PENDOWN irq handler
+ *
+ * We have to disable the codec interrupt in the handler because it
+ * can take up to 1 ms to clear the interrupt source. We schedule a task
+ * in a work queue to do the actual interaction with the chip.  The
+ * interrupt is then enabled again in the slow handler when the source
+ * has been cleared.
+ */
+static irqreturn_t wm97xx_pen_interrupt(int irq, void *dev_id)
+{
+       struct wm97xx *wm = dev_id;
+
+       if (!work_pending(&wm->pen_event_work)) {
+               wm->mach_ops->irq_enable(wm, 0);
+               queue_work(wm->ts_workq, &wm->pen_event_work);
+       }
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * initialise pen IRQ handler and workqueue
+ */
+static int wm97xx_init_pen_irq(struct wm97xx *wm)
+{
+       u16 reg;
+
+       /* If an interrupt is supplied an IRQ enable operation must also be
+        * provided. */
+       BUG_ON(!wm->mach_ops->irq_enable);
+
+       if (request_irq(wm->pen_irq, wm97xx_pen_interrupt,
+                       IRQF_SHARED | IRQF_SAMPLE_RANDOM,
+                       "wm97xx-pen", wm)) {
+               dev_err(wm->dev,
+                       "Failed to register pen down interrupt, polling");
+               wm->pen_irq = 0;
+               return -EINVAL;
+       }
+
+       /* Configure GPIO as interrupt source on WM971x */
+       if (wm->id != WM9705_ID2) {
+               BUG_ON(!wm->mach_ops->irq_gpio);
+               reg = wm97xx_reg_read(wm, AC97_MISC_AFE);
+               wm97xx_reg_write(wm, AC97_MISC_AFE,
+                               reg & ~(wm->mach_ops->irq_gpio));
+               reg = wm97xx_reg_read(wm, 0x5a);
+               wm97xx_reg_write(wm, 0x5a, reg & ~0x0001);
+       }
+
+       return 0;
+}
+
+static int wm97xx_read_samples(struct wm97xx *wm)
+{
+       struct wm97xx_data data;
+       int rc;
+
+       mutex_lock(&wm->codec_mutex);
+
+       if (wm->mach_ops && wm->mach_ops->acc_enabled)
+               rc = wm->mach_ops->acc_pen_down(wm);
+       else
+               rc = wm->codec->poll_touch(wm, &data);
+
+       if (rc & RC_PENUP) {
+               if (wm->pen_is_down) {
+                       wm->pen_is_down = 0;
+                       dev_dbg(wm->dev, "pen up\n");
+                       input_report_abs(wm->input_dev, ABS_PRESSURE, 0);
+                       input_sync(wm->input_dev);
+               } else if (!(rc & RC_AGAIN)) {
+                       /* We need high frequency updates only while
+                        * the pen is down; the user will never be able
+                        * to touch the screen faster than a few times
+                        * per second.  On the other hand, when the user
+                        * is actively working with the touchscreen we
+                        * don't want to lose the quick response.  So we
+                        * will slowly increase the sleep time after the
+                        * pen goes up and quickly restore it to ~one
+                        * task switch when the pen goes down again.
+                        */
+                       if (wm->ts_reader_interval < HZ / 10)
+                               wm->ts_reader_interval++;
+               }
+
+       } else if (rc & RC_VALID) {
+               dev_dbg(wm->dev,
+                       "pen down: x=%x:%d, y=%x:%d, pressure=%x:%d\n",
+                       data.x >> 12, data.x & 0xfff, data.y >> 12,
+                       data.y & 0xfff, data.p >> 12, data.p & 0xfff);
+               input_report_abs(wm->input_dev, ABS_X, data.x & 0xfff);
+               input_report_abs(wm->input_dev, ABS_Y, data.y & 0xfff);
+               input_report_abs(wm->input_dev, ABS_PRESSURE, data.p & 0xfff);
+               input_sync(wm->input_dev);
+               wm->pen_is_down = 1;
+               wm->ts_reader_interval = wm->ts_reader_min_interval;
+       } else if (rc & RC_PENDOWN) {
+               dev_dbg(wm->dev, "pen down\n");
+               wm->pen_is_down = 1;
+               wm->ts_reader_interval = wm->ts_reader_min_interval;
+       }
+
+       mutex_unlock(&wm->codec_mutex);
+       return rc;
+}
+
+/*
+ * The touchscreen sample reader.
+ */
+static void wm97xx_ts_reader(struct work_struct *work)
+{
+       int rc;
+       struct wm97xx *wm = container_of(work, struct wm97xx, ts_reader.work);
+
+       BUG_ON(!wm->codec);
+
+       do {
+               rc = wm97xx_read_samples(wm);
+       } while (rc & RC_AGAIN);
+
+       if (wm->pen_is_down || !wm->pen_irq)
+               queue_delayed_work(wm->ts_workq, &wm->ts_reader,
+                                  wm->ts_reader_interval);
+}
+
+/**
+ * wm97xx_ts_input_open - Open the touch screen input device.
+ * @idev:      Input device to be opened.
+ *
+ * Called by the input sub system to open a wm97xx touchscreen device.
+ * Starts the touchscreen thread and touch digitiser.
+ */
+static int wm97xx_ts_input_open(struct input_dev *idev)
+{
+       struct wm97xx *wm = input_get_drvdata(idev);
+
+       wm->ts_workq = create_singlethread_workqueue("kwm97xx");
+       if (wm->ts_workq == NULL) {
+               dev_err(wm->dev,
+                       "Failed to create workqueue\n");
+               return -EINVAL;
+       }
+
+       /* start digitiser */
+       if (wm->mach_ops && wm->mach_ops->acc_enabled)
+               wm->codec->acc_enable(wm, 1);
+       wm->codec->dig_enable(wm, 1);
+
+       INIT_DELAYED_WORK(&wm->ts_reader, wm97xx_ts_reader);
+       INIT_WORK(&wm->pen_event_work, wm97xx_pen_irq_worker);
+
+       wm->ts_reader_min_interval = HZ >= 100 ? HZ / 100 : 1;
+       if (wm->ts_reader_min_interval < 1)
+               wm->ts_reader_min_interval = 1;
+       wm->ts_reader_interval = wm->ts_reader_min_interval;
+
+       wm->pen_is_down = 0;
+       if (wm->pen_irq)
+               wm97xx_init_pen_irq(wm);
+       else
+               dev_err(wm->dev, "No IRQ specified\n");
+
+       /* If we either don't have an interrupt for pen down events or
+        * failed to acquire it then we need to poll.
+        */
+       if (wm->pen_irq == 0)
+               queue_delayed_work(wm->ts_workq, &wm->ts_reader,
+                                  wm->ts_reader_interval);
+
+       return 0;
+}
+
+/**
+ * wm97xx_ts_input_close - Close the touch screen input device.
+ * @idev:      Input device to be closed.
+ *
+ * Called by the input sub system to close a wm97xx touchscreen
+ * device.  Kills the touchscreen thread and stops the touch
+ * digitiser.
+ */
+
+static void wm97xx_ts_input_close(struct input_dev *idev)
+{
+       struct wm97xx *wm = input_get_drvdata(idev);
+       u16 reg;
+
+       if (wm->pen_irq) {
+               /* Return the interrupt to GPIO usage (disabling it) */
+               if (wm->id != WM9705_ID2) {
+                       BUG_ON(!wm->mach_ops->irq_gpio);
+                       reg = wm97xx_reg_read(wm, AC97_MISC_AFE);
+                       wm97xx_reg_write(wm, AC97_MISC_AFE,
+                                        reg | wm->mach_ops->irq_gpio);
+               }
+
+               free_irq(wm->pen_irq, wm);
+       }
+
+       wm->pen_is_down = 0;
+
+       /* Balance out interrupt disables/enables */
+       if (cancel_work_sync(&wm->pen_event_work))
+               wm->mach_ops->irq_enable(wm, 1);
+
+       /* ts_reader rearms itself so we need to explicitly stop it
+        * before we destroy the workqueue.
+        */
+       cancel_delayed_work_sync(&wm->ts_reader);
+
+       destroy_workqueue(wm->ts_workq);
+
+       /* stop digitiser */
+       wm->codec->dig_enable(wm, 0);
+       if (wm->mach_ops && wm->mach_ops->acc_enabled)
+               wm->codec->acc_enable(wm, 0);
+}
+
+static int wm97xx_probe(struct device *dev)
+{
+       struct wm97xx *wm;
+       int ret = 0, id = 0;
+
+       wm = kzalloc(sizeof(struct wm97xx), GFP_KERNEL);
+       if (!wm)
+               return -ENOMEM;
+       mutex_init(&wm->codec_mutex);
+
+       wm->dev = dev;
+       dev->driver_data = wm;
+       wm->ac97 = to_ac97_t(dev);
+
+       /* check that we have a supported codec */
+       id = wm97xx_reg_read(wm, AC97_VENDOR_ID1);
+       if (id != WM97XX_ID1) {
+               dev_err(dev, "Device with vendor %04x is not a wm97xx\n", id);
+               ret = -ENODEV;
+               goto alloc_err;
+       }
+
+       wm->id = wm97xx_reg_read(wm, AC97_VENDOR_ID2);
+
+       dev_info(wm->dev, "detected a wm97%02x codec\n", wm->id & 0xff);
+
+       switch (wm->id & 0xff) {
+#ifdef CONFIG_TOUCHSCREEN_WM9705
+       case 0x05:
+               wm->codec = &wm9705_codec;
+               break;
+#endif
+#ifdef CONFIG_TOUCHSCREEN_WM9712
+       case 0x12:
+               wm->codec = &wm9712_codec;
+               break;
+#endif
+#ifdef CONFIG_TOUCHSCREEN_WM9713
+       case 0x13:
+               wm->codec = &wm9713_codec;
+               break;
+#endif
+       default:
+               dev_err(wm->dev, "Support for wm97%02x not compiled in.\n",
+                       wm->id & 0xff);
+               ret = -ENODEV;
+               goto alloc_err;
+       }
+
+       wm->input_dev = input_allocate_device();
+       if (wm->input_dev == NULL) {
+               ret = -ENOMEM;
+               goto alloc_err;
+       }
+
+       /* set up touch configuration */
+       wm->input_dev->name = "wm97xx touchscreen";
+       wm->input_dev->open = wm97xx_ts_input_open;
+       wm->input_dev->close = wm97xx_ts_input_close;
+       set_bit(EV_ABS, wm->input_dev->evbit);
+       set_bit(ABS_X, wm->input_dev->absbit);
+       set_bit(ABS_Y, wm->input_dev->absbit);
+       set_bit(ABS_PRESSURE, wm->input_dev->absbit);
+       input_set_abs_params(wm->input_dev, ABS_X, abs_x[0], abs_x[1],
+                            abs_x[2], 0);
+       input_set_abs_params(wm->input_dev, ABS_Y, abs_y[0], abs_y[1],
+                            abs_y[2], 0);
+       input_set_abs_params(wm->input_dev, ABS_PRESSURE, abs_p[0], abs_p[1],
+                            abs_p[2], 0);
+       input_set_drvdata(wm->input_dev, wm);
+       wm->input_dev->dev.parent = dev;
+       ret = input_register_device(wm->input_dev);
+       if (ret < 0)
+               goto dev_alloc_err;
+
+       /* set up physical characteristics */
+       wm->codec->phy_init(wm);
+
+       /* load gpio cache */
+       wm->gpio[0] = wm97xx_reg_read(wm, AC97_GPIO_CFG);
+       wm->gpio[1] = wm97xx_reg_read(wm, AC97_GPIO_POLARITY);
+       wm->gpio[2] = wm97xx_reg_read(wm, AC97_GPIO_STICKY);
+       wm->gpio[3] = wm97xx_reg_read(wm, AC97_GPIO_WAKEUP);
+       wm->gpio[4] = wm97xx_reg_read(wm, AC97_GPIO_STATUS);
+       wm->gpio[5] = wm97xx_reg_read(wm, AC97_MISC_AFE);
+
+       /* register our battery device */
+       wm->battery_dev = platform_device_alloc("wm97xx-battery", -1);
+       if (!wm->battery_dev) {
+               ret = -ENOMEM;
+               goto batt_err;
+       }
+       platform_set_drvdata(wm->battery_dev, wm);
+       wm->battery_dev->dev.parent = dev;
+       ret = platform_device_add(wm->battery_dev);
+       if (ret < 0)
+               goto batt_reg_err;
+
+       /* register our extended touch device (for machine specific
+        * extensions) */
+       wm->touch_dev = platform_device_alloc("wm97xx-touch", -1);
+       if (!wm->touch_dev) {
+               ret = -ENOMEM;
+               goto touch_err;
+       }
+       platform_set_drvdata(wm->touch_dev, wm);
+       wm->touch_dev->dev.parent = dev;
+       ret = platform_device_add(wm->touch_dev);
+       if (ret < 0)
+               goto touch_reg_err;
+
+       return ret;
+
+ touch_reg_err:
+       platform_device_put(wm->touch_dev);
+ touch_err:
+       platform_device_unregister(wm->battery_dev);
+       wm->battery_dev = NULL;
+ batt_reg_err:
+       platform_device_put(wm->battery_dev);
+ batt_err:
+       input_unregister_device(wm->input_dev);
+       wm->input_dev = NULL;
+ dev_alloc_err:
+       input_free_device(wm->input_dev);
+ alloc_err:
+       kfree(wm);
+
+       return ret;
+}
+
+static int wm97xx_remove(struct device *dev)
+{
+       struct wm97xx *wm = dev_get_drvdata(dev);
+
+       platform_device_unregister(wm->battery_dev);
+       platform_device_unregister(wm->touch_dev);
+       input_unregister_device(wm->input_dev);
+       kfree(wm);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM
+static int wm97xx_suspend(struct device *dev, pm_message_t state)
+{
+       struct wm97xx *wm = dev_get_drvdata(dev);
+       u16 reg;
+       int suspend_mode;
+
+       if (device_may_wakeup(&wm->input_dev->dev))
+               suspend_mode = wm->suspend_mode;
+       else
+               suspend_mode = 0;
+
+       if (wm->input_dev->users)
+               cancel_delayed_work_sync(&wm->ts_reader);
+
+       /* Power down the digitiser (bypassing the cache for resume) */
+       reg = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER2);
+       reg &= ~WM97XX_PRP_DET_DIG;
+       if (wm->input_dev->users)
+               reg |= suspend_mode;
+       wm->ac97->bus->ops->write(wm->ac97, AC97_WM97XX_DIGITISER2, reg);
+
+       /* WM9713 has an additional power bit - turn it off if there
+        * are no users or if suspend mode is zero. */
+       if (wm->id == WM9713_ID2 &&
+           (!wm->input_dev->users || !suspend_mode)) {
+               reg = wm97xx_reg_read(wm, AC97_EXTENDED_MID) | 0x8000;
+               wm97xx_reg_write(wm, AC97_EXTENDED_MID, reg);
+       }
+
+       return 0;
+}
+
+static int wm97xx_resume(struct device *dev)
+{
+       struct wm97xx *wm = dev_get_drvdata(dev);
+
+       /* restore digitiser and gpios */
+       if (wm->id == WM9713_ID2) {
+               wm97xx_reg_write(wm, AC97_WM9713_DIG1, wm->dig[0]);
+               wm97xx_reg_write(wm, 0x5a, wm->misc);
+               if (wm->input_dev->users) {
+                       u16 reg;
+                       reg = wm97xx_reg_read(wm, AC97_EXTENDED_MID) & 0x7fff;
+                       wm97xx_reg_write(wm, AC97_EXTENDED_MID, reg);
+               }
+       }
+
+       wm97xx_reg_write(wm, AC97_WM9713_DIG2, wm->dig[1]);
+       wm97xx_reg_write(wm, AC97_WM9713_DIG3, wm->dig[2]);
+
+       wm97xx_reg_write(wm, AC97_GPIO_CFG, wm->gpio[0]);
+       wm97xx_reg_write(wm, AC97_GPIO_POLARITY, wm->gpio[1]);
+       wm97xx_reg_write(wm, AC97_GPIO_STICKY, wm->gpio[2]);
+       wm97xx_reg_write(wm, AC97_GPIO_WAKEUP, wm->gpio[3]);
+       wm97xx_reg_write(wm, AC97_GPIO_STATUS, wm->gpio[4]);
+       wm97xx_reg_write(wm, AC97_MISC_AFE, wm->gpio[5]);
+
+       if (wm->input_dev->users && !wm->pen_irq) {
+               wm->ts_reader_interval = wm->ts_reader_min_interval;
+               queue_delayed_work(wm->ts_workq, &wm->ts_reader,
+                                  wm->ts_reader_interval);
+       }
+
+       return 0;
+}
+
+#else
+#define wm97xx_suspend         NULL
+#define wm97xx_resume          NULL
+#endif
+
+/*
+ * Machine specific operations
+ */
+int wm97xx_register_mach_ops(struct wm97xx *wm,
+                            struct wm97xx_mach_ops *mach_ops)
+{
+       mutex_lock(&wm->codec_mutex);
+       if (wm->mach_ops) {
+               mutex_unlock(&wm->codec_mutex);
+               return -EINVAL;
+       }
+       wm->mach_ops = mach_ops;
+       mutex_unlock(&wm->codec_mutex);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(wm97xx_register_mach_ops);
+
+void wm97xx_unregister_mach_ops(struct wm97xx *wm)
+{
+       mutex_lock(&wm->codec_mutex);
+       wm->mach_ops = NULL;
+       mutex_unlock(&wm->codec_mutex);
+}
+EXPORT_SYMBOL_GPL(wm97xx_unregister_mach_ops);
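
A minimal sketch of the machine-driver side of this interface (names are
hypothetical; only the irq_enable callback required by wm97xx_init_pen_irq()
is filled in, and WM97XX_GPIO_2 is assumed to be the board's pen-down GPIO
mask):

static void example_irq_enable(struct wm97xx *wm, int enable)
{
        if (enable)
                enable_irq(wm->pen_irq);
        else
                disable_irq_nosync(wm->pen_irq);
}

static struct wm97xx_mach_ops example_mach_ops = {
        .irq_enable     = example_irq_enable,
        .irq_gpio       = WM97XX_GPIO_2,
};

static int example_mach_probe(struct wm97xx *wm)
{
        /* a real machine driver would also set wm->pen_irq before the
         * input device is opened, and may provide the acc_* callbacks
         * for continuous (accelerated) sampling */
        return wm97xx_register_mach_ops(wm, &example_mach_ops);
}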
+
+static struct device_driver wm97xx_driver = {
+       .name =         "ac97",
+       .bus =          &ac97_bus_type,
+       .owner =        THIS_MODULE,
+       .probe =        wm97xx_probe,
+       .remove =       wm97xx_remove,
+       .suspend =      wm97xx_suspend,
+       .resume =       wm97xx_resume,
+};
+
+static int __init wm97xx_init(void)
+{
+       return driver_register(&wm97xx_driver);
+}
+
+static void __exit wm97xx_exit(void)
+{
+       driver_unregister(&wm97xx_driver);
+}
+
+module_init(wm97xx_init);
+module_exit(wm97xx_exit);
+
+/* Module information */
+MODULE_AUTHOR("Liam Girdwood <liam.girdwood@wolfsonmicro.com>");
+MODULE_DESCRIPTION("WM97xx Core - Touch Screen / AUX ADC / GPIO Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/xen-kbdfront.c b/drivers/input/xen-kbdfront.c
new file mode 100644 (file)
index 0000000..0f47f46
--- /dev/null
@@ -0,0 +1,340 @@
+/*
+ * Xen para-virtual input device
+ *
+ * Copyright (C) 2005 Anthony Liguori <aliguori@us.ibm.com>
+ * Copyright (C) 2006-2008 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
+ *
+ *  Based on linux/drivers/input/mouse/sermouse.c
+ *
+ *  This file is subject to the terms and conditions of the GNU General Public
+ *  License. See the file COPYING in the main directory of this archive for
+ *  more details.
+ */
+
+/*
+ * TODO:
+ *
+ * Switch to grant tables together with xen-fbfront.c.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/input.h>
+#include <asm/xen/hypervisor.h>
+#include <xen/events.h>
+#include <xen/page.h>
+#include <xen/interface/io/fbif.h>
+#include <xen/interface/io/kbdif.h>
+#include <xen/xenbus.h>
+
+struct xenkbd_info {
+       struct input_dev *kbd;
+       struct input_dev *ptr;
+       struct xenkbd_page *page;
+       int irq;
+       struct xenbus_device *xbdev;
+       char phys[32];
+};
+
+static int xenkbd_remove(struct xenbus_device *);
+static int xenkbd_connect_backend(struct xenbus_device *, struct xenkbd_info *);
+static void xenkbd_disconnect_backend(struct xenkbd_info *);
+
+/*
+ * Note: if you need to send out events, see xenfb_do_update() for how
+ * to do that.
+ */
+
+static irqreturn_t input_handler(int irq, void *dev_id)
+{
+       struct xenkbd_info *info = dev_id;
+       struct xenkbd_page *page = info->page;
+       __u32 cons, prod;
+
+       prod = page->in_prod;
+       if (prod == page->in_cons)
+               return IRQ_HANDLED;
+       rmb();                  /* ensure we see ring contents up to prod */
+       for (cons = page->in_cons; cons != prod; cons++) {
+               union xenkbd_in_event *event;
+               struct input_dev *dev;
+               event = &XENKBD_IN_RING_REF(page, cons);
+
+               dev = info->ptr;
+               switch (event->type) {
+               case XENKBD_TYPE_MOTION:
+                       input_report_rel(dev, REL_X, event->motion.rel_x);
+                       input_report_rel(dev, REL_Y, event->motion.rel_y);
+                       break;
+               case XENKBD_TYPE_KEY:
+                       dev = NULL;
+                       if (test_bit(event->key.keycode, info->kbd->keybit))
+                               dev = info->kbd;
+                       if (test_bit(event->key.keycode, info->ptr->keybit))
+                               dev = info->ptr;
+                       if (dev)
+                               input_report_key(dev, event->key.keycode,
+                                                event->key.pressed);
+                       else
+                               printk(KERN_WARNING
+                                      "xenkbd: unhandled keycode 0x%x\n",
+                                      event->key.keycode);
+                       break;
+               case XENKBD_TYPE_POS:
+                       input_report_abs(dev, ABS_X, event->pos.abs_x);
+                       input_report_abs(dev, ABS_Y, event->pos.abs_y);
+                       break;
+               }
+               if (dev)
+                       input_sync(dev);
+       }
+       mb();                   /* ensure we got ring contents */
+       page->in_cons = cons;
+       notify_remote_via_irq(info->irq);
+
+       return IRQ_HANDLED;
+}
+
+static int __devinit xenkbd_probe(struct xenbus_device *dev,
+                                 const struct xenbus_device_id *id)
+{
+       int ret, i;
+       struct xenkbd_info *info;
+       struct input_dev *kbd, *ptr;
+
+       info = kzalloc(sizeof(*info), GFP_KERNEL);
+       if (!info) {
+               xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
+               return -ENOMEM;
+       }
+       dev->dev.driver_data = info;
+       info->xbdev = dev;
+       info->irq = -1;
+       snprintf(info->phys, sizeof(info->phys), "xenbus/%s", dev->nodename);
+
+       info->page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+       if (!info->page)
+               goto error_nomem;
+
+       /* keyboard */
+       kbd = input_allocate_device();
+       if (!kbd)
+               goto error_nomem;
+       kbd->name = "Xen Virtual Keyboard";
+       kbd->phys = info->phys;
+       kbd->id.bustype = BUS_PCI;
+       kbd->id.vendor = 0x5853;
+       kbd->id.product = 0xffff;
+       kbd->evbit[0] = BIT(EV_KEY);
+       for (i = KEY_ESC; i < KEY_UNKNOWN; i++)
+               set_bit(i, kbd->keybit);
+       for (i = KEY_OK; i < KEY_MAX; i++)
+               set_bit(i, kbd->keybit);
+
+       ret = input_register_device(kbd);
+       if (ret) {
+               input_free_device(kbd);
+               xenbus_dev_fatal(dev, ret, "input_register_device(kbd)");
+               goto error;
+       }
+       info->kbd = kbd;
+
+       /* pointing device */
+       ptr = input_allocate_device();
+       if (!ptr)
+               goto error_nomem;
+       ptr->name = "Xen Virtual Pointer";
+       ptr->phys = info->phys;
+       ptr->id.bustype = BUS_PCI;
+       ptr->id.vendor = 0x5853;
+       ptr->id.product = 0xfffe;
+       ptr->evbit[0] = BIT(EV_KEY) | BIT(EV_REL) | BIT(EV_ABS);
+       for (i = BTN_LEFT; i <= BTN_TASK; i++)
+               set_bit(i, ptr->keybit);
+       ptr->relbit[0] = BIT(REL_X) | BIT(REL_Y);
+       input_set_abs_params(ptr, ABS_X, 0, XENFB_WIDTH, 0, 0);
+       input_set_abs_params(ptr, ABS_Y, 0, XENFB_HEIGHT, 0, 0);
+
+       ret = input_register_device(ptr);
+       if (ret) {
+               input_free_device(ptr);
+               xenbus_dev_fatal(dev, ret, "input_register_device(ptr)");
+               goto error;
+       }
+       info->ptr = ptr;
+
+       ret = xenkbd_connect_backend(dev, info);
+       if (ret < 0)
+               goto error;
+
+       return 0;
+
+ error_nomem:
+       ret = -ENOMEM;
+       xenbus_dev_fatal(dev, ret, "allocating device memory");
+ error:
+       xenkbd_remove(dev);
+       return ret;
+}
+
+static int xenkbd_resume(struct xenbus_device *dev)
+{
+       struct xenkbd_info *info = dev->dev.driver_data;
+
+       xenkbd_disconnect_backend(info);
+       memset(info->page, 0, PAGE_SIZE);
+       return xenkbd_connect_backend(dev, info);
+}
+
+static int xenkbd_remove(struct xenbus_device *dev)
+{
+       struct xenkbd_info *info = dev->dev.driver_data;
+
+       xenkbd_disconnect_backend(info);
+       if (info->kbd)
+               input_unregister_device(info->kbd);
+       if (info->ptr)
+               input_unregister_device(info->ptr);
+       free_page((unsigned long)info->page);
+       kfree(info);
+       return 0;
+}
+
+static int xenkbd_connect_backend(struct xenbus_device *dev,
+                                 struct xenkbd_info *info)
+{
+       int ret, evtchn;
+       struct xenbus_transaction xbt;
+
+       ret = xenbus_alloc_evtchn(dev, &evtchn);
+       if (ret)
+               return ret;
+       ret = bind_evtchn_to_irqhandler(evtchn, input_handler,
+                                       0, dev->devicetype, info);
+       if (ret < 0) {
+               xenbus_free_evtchn(dev, evtchn);
+               xenbus_dev_fatal(dev, ret, "bind_evtchn_to_irqhandler");
+               return ret;
+       }
+       info->irq = ret;
+
+ again:
+       ret = xenbus_transaction_start(&xbt);
+       if (ret) {
+               xenbus_dev_fatal(dev, ret, "starting transaction");
+               return ret;
+       }
+       ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
+                           virt_to_mfn(info->page));
+       if (ret)
+               goto error_xenbus;
+       ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
+                           evtchn);
+       if (ret)
+               goto error_xenbus;
+       ret = xenbus_transaction_end(xbt, 0);
+       if (ret) {
+               if (ret == -EAGAIN)
+                       goto again;
+               xenbus_dev_fatal(dev, ret, "completing transaction");
+               return ret;
+       }
+
+       xenbus_switch_state(dev, XenbusStateInitialised);
+       return 0;
+
+ error_xenbus:
+       xenbus_transaction_end(xbt, 1);
+       xenbus_dev_fatal(dev, ret, "writing xenstore");
+       return ret;
+}
+
+static void xenkbd_disconnect_backend(struct xenkbd_info *info)
+{
+       if (info->irq >= 0)
+               unbind_from_irqhandler(info->irq, info);
+       info->irq = -1;
+}
+
+static void xenkbd_backend_changed(struct xenbus_device *dev,
+                                  enum xenbus_state backend_state)
+{
+       struct xenkbd_info *info = dev->dev.driver_data;
+       int ret, val;
+
+       switch (backend_state) {
+       case XenbusStateInitialising:
+       case XenbusStateInitialised:
+       case XenbusStateUnknown:
+       case XenbusStateClosed:
+               break;
+
+       case XenbusStateInitWait:
+InitWait:
+               ret = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+                                  "feature-abs-pointer", "%d", &val);
+               if (ret < 0)
+                       val = 0;
+               if (val) {
+                       ret = xenbus_printf(XBT_NIL, info->xbdev->nodename,
+                                           "request-abs-pointer", "1");
+                       if (ret)
+                               printk(KERN_WARNING
+                                      "xenkbd: can't request abs-pointer");
+               }
+               xenbus_switch_state(dev, XenbusStateConnected);
+               break;
+
+       case XenbusStateConnected:
+               /*
+                * Work around xenbus race condition: If backend goes
+                * through InitWait to Connected fast enough, we can
+                * get Connected twice here.
+                */
+               if (dev->state != XenbusStateConnected)
+                       goto InitWait; /* no InitWait seen yet, fudge it */
+               break;
+
+       case XenbusStateClosing:
+               xenbus_frontend_closed(dev);
+               break;
+       }
+}
+
+static struct xenbus_device_id xenkbd_ids[] = {
+       { "vkbd" },
+       { "" }
+};
+
+static struct xenbus_driver xenkbd = {
+       .name = "vkbd",
+       .owner = THIS_MODULE,
+       .ids = xenkbd_ids,
+       .probe = xenkbd_probe,
+       .remove = xenkbd_remove,
+       .resume = xenkbd_resume,
+       .otherend_changed = xenkbd_backend_changed,
+};
+
+static int __init xenkbd_init(void)
+{
+       if (!is_running_on_xen())
+               return -ENODEV;
+
+       /* Nothing to do if running in dom0. */
+       if (is_initial_xendomain())
+               return -ENODEV;
+
+       return xenbus_register_frontend(&xenkbd);
+}
+
+static void __exit xenkbd_cleanup(void)
+{
+       xenbus_unregister_driver(&xenkbd);
+}
+
+module_init(xenkbd_init);
+module_exit(xenkbd_cleanup);
+
+MODULE_LICENSE("GPL");
index 89302309da92d56f397096ad8412a4e7fa2ef1d4..f972ff377b6362483f1bd2c312cba5c780165f86 100644 (file)
@@ -103,6 +103,9 @@ int mac_hid_mouse_emulate_buttons(int caller, unsigned int keycode, int down)
        return 0;
 }
 
+static struct lock_class_key emumousebtn_event_class;
+static struct lock_class_key emumousebtn_mutex_class;
+
 static int emumousebtn_input_register(void)
 {
        int ret;
@@ -111,6 +114,9 @@ static int emumousebtn_input_register(void)
        if (!emumousebtn)
                return -ENOMEM;
 
+       lockdep_set_class(&emumousebtn->event_lock, &emumousebtn_event_class);
+       lockdep_set_class(&emumousebtn->mutex, &emumousebtn_mutex_class);
+
        emumousebtn->name = "Macintosh mouse button emulation";
        emumousebtn->id.bustype = BUS_ADB;
        emumousebtn->id.vendor = 0x0001;
index d9aa7edb87801ec0e7f36b7614f4a23f97827e5c..7be09eeea293c307c6213bc689840f5c1ac90c4d 100644 (file)
@@ -3,10 +3,10 @@
 #
 
 dm-mod-objs    := dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \
-                  dm-ioctl.o dm-io.o kcopyd.o
+                  dm-ioctl.o dm-io.o dm-kcopyd.o
 dm-multipath-objs := dm-hw-handler.o dm-path-selector.o dm-mpath.o
 dm-snapshot-objs := dm-snap.o dm-exception-store.o
-dm-mirror-objs := dm-log.o dm-raid1.o
+dm-mirror-objs := dm-raid1.o
 dm-rdac-objs   := dm-mpath-rdac.o
 dm-hp-sw-objs  := dm-mpath-hp-sw.o
 md-mod-objs     := md.o bitmap.o
@@ -39,7 +39,7 @@ obj-$(CONFIG_DM_MULTIPATH_EMC)        += dm-emc.o
 obj-$(CONFIG_DM_MULTIPATH_HP)  += dm-hp-sw.o
 obj-$(CONFIG_DM_MULTIPATH_RDAC)        += dm-rdac.o
 obj-$(CONFIG_DM_SNAPSHOT)      += dm-snapshot.o
-obj-$(CONFIG_DM_MIRROR)                += dm-mirror.o
+obj-$(CONFIG_DM_MIRROR)                += dm-mirror.o dm-log.o
 obj-$(CONFIG_DM_ZERO)          += dm-zero.o
 
 quiet_cmd_unroll = UNROLL  $@
index 5bbce29f143ac45f6d358a9b26217b3d30ee1ccd..41f408068a7c9c2329c61eb7e3eb47a82af6e280 100644 (file)
@@ -9,13 +9,13 @@
 
 #include "dm.h"
 #include "dm-snap.h"
-#include "dm-io.h"
-#include "kcopyd.h"
 
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
+#include <linux/dm-io.h>
+#include <linux/dm-kcopyd.h>
 
 #define DM_MSG_PREFIX "snapshots"
 #define DM_CHUNK_SIZE_DEFAULT_SECTORS 32       /* 16KB */
@@ -131,7 +131,7 @@ struct pstore {
 
 static unsigned sectors_to_pages(unsigned sectors)
 {
-       return sectors / (PAGE_SIZE >> 9);
+       return DIV_ROUND_UP(sectors, PAGE_SIZE >> 9);
 }
 
 static int alloc_area(struct pstore *ps)
@@ -159,7 +159,7 @@ static void free_area(struct pstore *ps)
 }
 
 struct mdata_req {
-       struct io_region *where;
+       struct dm_io_region *where;
        struct dm_io_request *io_req;
        struct work_struct work;
        int result;
@@ -177,7 +177,7 @@ static void do_metadata(struct work_struct *work)
  */
 static int chunk_io(struct pstore *ps, uint32_t chunk, int rw, int metadata)
 {
-       struct io_region where = {
+       struct dm_io_region where = {
                .bdev = ps->snap->cow->bdev,
                .sector = ps->snap->chunk_size * chunk,
                .count = ps->snap->chunk_size,
index 8f25f628ef16d5e680129656d3a1885c115a432d..4789c42d9a3ac503d53ba6ea77fe7fadbee21682 100644 (file)
@@ -5,13 +5,14 @@
  * This file is released under the GPL.
  */
 
-#include "dm-io.h"
+#include "dm.h"
 
 #include <linux/bio.h>
 #include <linux/mempool.h>
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/dm-io.h>
 
 struct dm_io_client {
        mempool_t *pool;
@@ -20,7 +21,7 @@ struct dm_io_client {
 
 /* FIXME: can we shrink this ? */
 struct io {
-       unsigned long error;
+       unsigned long error_bits;
        atomic_t count;
        struct task_struct *sleeper;
        struct dm_io_client *client;
@@ -107,14 +108,14 @@ static inline unsigned bio_get_region(struct bio *bio)
 static void dec_count(struct io *io, unsigned int region, int error)
 {
        if (error)
-               set_bit(region, &io->error);
+               set_bit(region, &io->error_bits);
 
        if (atomic_dec_and_test(&io->count)) {
                if (io->sleeper)
                        wake_up_process(io->sleeper);
 
                else {
-                       unsigned long r = io->error;
+                       unsigned long r = io->error_bits;
                        io_notify_fn fn = io->callback;
                        void *context = io->context;
 
@@ -271,7 +272,7 @@ static void km_dp_init(struct dpages *dp, void *data)
 /*-----------------------------------------------------------------
  * IO routines that accept a list of pages.
  *---------------------------------------------------------------*/
-static void do_region(int rw, unsigned int region, struct io_region *where,
+static void do_region(int rw, unsigned region, struct dm_io_region *where,
                      struct dpages *dp, struct io *io)
 {
        struct bio *bio;
@@ -320,7 +321,7 @@ static void do_region(int rw, unsigned int region, struct io_region *where,
 }
 
 static void dispatch_io(int rw, unsigned int num_regions,
-                       struct io_region *where, struct dpages *dp,
+                       struct dm_io_region *where, struct dpages *dp,
                        struct io *io, int sync)
 {
        int i;
@@ -347,17 +348,17 @@ static void dispatch_io(int rw, unsigned int num_regions,
 }
 
 static int sync_io(struct dm_io_client *client, unsigned int num_regions,
-                  struct io_region *where, int rw, struct dpages *dp,
+                  struct dm_io_region *where, int rw, struct dpages *dp,
                   unsigned long *error_bits)
 {
        struct io io;
 
-       if (num_regions > 1 && rw != WRITE) {
+       if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
                WARN_ON(1);
                return -EIO;
        }
 
-       io.error = 0;
+       io.error_bits = 0;
        atomic_set(&io.count, 1); /* see dispatch_io() */
        io.sleeper = current;
        io.client = client;
@@ -378,25 +379,25 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
                return -EINTR;
 
        if (error_bits)
-               *error_bits = io.error;
+               *error_bits = io.error_bits;
 
-       return io.error ? -EIO : 0;
+       return io.error_bits ? -EIO : 0;
 }
 
 static int async_io(struct dm_io_client *client, unsigned int num_regions,
-                   struct io_region *where, int rw, struct dpages *dp,
+                   struct dm_io_region *where, int rw, struct dpages *dp,
                    io_notify_fn fn, void *context)
 {
        struct io *io;
 
-       if (num_regions > 1 && rw != WRITE) {
+       if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
                WARN_ON(1);
                fn(1, context);
                return -EIO;
        }
 
        io = mempool_alloc(client->pool, GFP_NOIO);
-       io->error = 0;
+       io->error_bits = 0;
        atomic_set(&io->count, 1); /* see dispatch_io() */
        io->sleeper = NULL;
        io->client = client;
@@ -435,10 +436,15 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
 }
 
 /*
- * New collapsed (a)synchronous interface
+ * New collapsed (a)synchronous interface.
+ *
+ * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
+ * the queue with blk_unplug() some time later or set the BIO_RW_SYNC bit in
+ * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
+ * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
  */
 int dm_io(struct dm_io_request *io_req, unsigned num_regions,
-         struct io_region *where, unsigned long *sync_error_bits)
+         struct dm_io_region *where, unsigned long *sync_error_bits)
 {
        int r;
        struct dpages dp;
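
Given the new comment above dm_io() about asynchronous requests needing
either a later blk_unplug() or BIO_RW_SYNC, a minimal sketch of an
asynchronous caller that opts for the sync bit may help (the wrapper and
its arguments are hypothetical; only dm_io_region, dm_io_request and
dm_io() come from this interface):

static int example_async_write(struct dm_io_client *client,
                               struct block_device *bdev, sector_t sector,
                               unsigned count, struct page_list *pl,
                               io_notify_fn fn, void *context)
{
        struct dm_io_region where = {
                .bdev   = bdev,
                .sector = sector,
                .count  = count,
        };
        struct dm_io_request io_req = {
                /* set BIO_RW_SYNC so the bio is not left waiting for an unplug */
                .bi_rw          = WRITE | (1 << BIO_RW_SYNC),
                .mem.type       = DM_IO_PAGE_LIST,
                .mem.ptr.pl     = pl,
                .mem.offset     = 0,
                .notify.fn      = fn,
                .notify.context = context,
                .client         = client,
        };

        /* one region, asynchronous: error bits are reported through fn() */
        return dm_io(&io_req, 1, &where, NULL);
}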
similarity index 71%
rename from drivers/md/kcopyd.c
rename to drivers/md/dm-kcopyd.c
index e76b52ade69027f467fe3f16cf1b10443f4711bc..996802b8a4522e50126365d070f21458f9b68be2 100644 (file)
@@ -9,9 +9,8 @@
  * completion notification.
  */
 
-#include <asm/types.h>
+#include <linux/types.h>
 #include <asm/atomic.h>
-
 #include <linux/blkdev.h>
 #include <linux/fs.h>
 #include <linux/init.h>
 #include <linux/vmalloc.h>
 #include <linux/workqueue.h>
 #include <linux/mutex.h>
+#include <linux/dm-kcopyd.h>
 
-#include "kcopyd.h"
-
-static struct workqueue_struct *_kcopyd_wq;
-static struct work_struct _kcopyd_work;
-
-static void wake(void)
-{
-       queue_work(_kcopyd_wq, &_kcopyd_work);
-}
+#include "dm.h"
 
 /*-----------------------------------------------------------------
  * Each kcopyd client has its own little pool of preallocated
  * pages for kcopyd io.
  *---------------------------------------------------------------*/
-struct kcopyd_client {
-       struct list_head list;
-
+struct dm_kcopyd_client {
        spinlock_t lock;
        struct page_list *pages;
        unsigned int nr_pages;
@@ -50,8 +40,32 @@ struct kcopyd_client {
 
        wait_queue_head_t destroyq;
        atomic_t nr_jobs;
+
+       mempool_t *job_pool;
+
+       struct workqueue_struct *kcopyd_wq;
+       struct work_struct kcopyd_work;
+
+/*
+ * We maintain three lists of jobs:
+ *
+ * i)   jobs waiting for pages
+ * ii)  jobs that have pages, and are waiting for the io to be issued.
+ * iii) jobs that have completed.
+ *
+ * All three of these are protected by job_lock.
+ */
+       spinlock_t job_lock;
+       struct list_head complete_jobs;
+       struct list_head io_jobs;
+       struct list_head pages_jobs;
 };
 
+static void wake(struct dm_kcopyd_client *kc)
+{
+       queue_work(kc->kcopyd_wq, &kc->kcopyd_work);
+}
+
 static struct page_list *alloc_pl(void)
 {
        struct page_list *pl;
@@ -75,7 +89,7 @@ static void free_pl(struct page_list *pl)
        kfree(pl);
 }
 
-static int kcopyd_get_pages(struct kcopyd_client *kc,
+static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
                            unsigned int nr, struct page_list **pages)
 {
        struct page_list *pl;
@@ -98,7 +112,7 @@ static int kcopyd_get_pages(struct kcopyd_client *kc,
        return 0;
 }
 
-static void kcopyd_put_pages(struct kcopyd_client *kc, struct page_list *pl)
+static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
 {
        struct page_list *cursor;
 
@@ -126,7 +140,7 @@ static void drop_pages(struct page_list *pl)
        }
 }
 
-static int client_alloc_pages(struct kcopyd_client *kc, unsigned int nr)
+static int client_alloc_pages(struct dm_kcopyd_client *kc, unsigned int nr)
 {
        unsigned int i;
        struct page_list *pl = NULL, *next;
@@ -147,7 +161,7 @@ static int client_alloc_pages(struct kcopyd_client *kc, unsigned int nr)
        return 0;
 }
 
-static void client_free_pages(struct kcopyd_client *kc)
+static void client_free_pages(struct dm_kcopyd_client *kc)
 {
        BUG_ON(kc->nr_free_pages != kc->nr_pages);
        drop_pages(kc->pages);
@@ -161,7 +175,7 @@ static void client_free_pages(struct kcopyd_client *kc)
  * ever having to do io (which could cause a deadlock).
  *---------------------------------------------------------------*/
 struct kcopyd_job {
-       struct kcopyd_client *kc;
+       struct dm_kcopyd_client *kc;
        struct list_head list;
        unsigned long flags;
 
@@ -175,13 +189,13 @@ struct kcopyd_job {
         * Either READ or WRITE
         */
        int rw;
-       struct io_region source;
+       struct dm_io_region source;
 
        /*
         * The destinations for the transfer.
         */
        unsigned int num_dests;
-       struct io_region dests[KCOPYD_MAX_REGIONS];
+       struct dm_io_region dests[DM_KCOPYD_MAX_REGIONS];
 
        sector_t offset;
        unsigned int nr_pages;
@@ -191,7 +205,7 @@ struct kcopyd_job {
         * Set this to ensure you are notified when the job has
         * completed.  'context' is for callback to use.
         */
-       kcopyd_notify_fn fn;
+       dm_kcopyd_notify_fn fn;
        void *context;
 
        /*
@@ -207,47 +221,19 @@ struct kcopyd_job {
 #define MIN_JOBS 512
 
 static struct kmem_cache *_job_cache;
-static mempool_t *_job_pool;
 
-/*
- * We maintain three lists of jobs:
- *
- * i)   jobs waiting for pages
- * ii)  jobs that have pages, and are waiting for the io to be issued.
- * iii) jobs that have completed.
- *
- * All three of these are protected by job_lock.
- */
-static DEFINE_SPINLOCK(_job_lock);
-
-static LIST_HEAD(_complete_jobs);
-static LIST_HEAD(_io_jobs);
-static LIST_HEAD(_pages_jobs);
-
-static int jobs_init(void)
+int __init dm_kcopyd_init(void)
 {
        _job_cache = KMEM_CACHE(kcopyd_job, 0);
        if (!_job_cache)
                return -ENOMEM;
 
-       _job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
-       if (!_job_pool) {
-               kmem_cache_destroy(_job_cache);
-               return -ENOMEM;
-       }
-
        return 0;
 }
 
-static void jobs_exit(void)
+void dm_kcopyd_exit(void)
 {
-       BUG_ON(!list_empty(&_complete_jobs));
-       BUG_ON(!list_empty(&_io_jobs));
-       BUG_ON(!list_empty(&_pages_jobs));
-
-       mempool_destroy(_job_pool);
        kmem_cache_destroy(_job_cache);
-       _job_pool = NULL;
        _job_cache = NULL;
 }
 
@@ -255,18 +241,19 @@ static void jobs_exit(void)
  * Functions to push and pop a job onto the head of a given job
  * list.
  */
-static struct kcopyd_job *pop(struct list_head *jobs)
+static struct kcopyd_job *pop(struct list_head *jobs,
+                             struct dm_kcopyd_client *kc)
 {
        struct kcopyd_job *job = NULL;
        unsigned long flags;
 
-       spin_lock_irqsave(&_job_lock, flags);
+       spin_lock_irqsave(&kc->job_lock, flags);
 
        if (!list_empty(jobs)) {
                job = list_entry(jobs->next, struct kcopyd_job, list);
                list_del(&job->list);
        }
-       spin_unlock_irqrestore(&_job_lock, flags);
+       spin_unlock_irqrestore(&kc->job_lock, flags);
 
        return job;
 }
@@ -274,10 +261,11 @@ static struct kcopyd_job *pop(struct list_head *jobs)
 static void push(struct list_head *jobs, struct kcopyd_job *job)
 {
        unsigned long flags;
+       struct dm_kcopyd_client *kc = job->kc;
 
-       spin_lock_irqsave(&_job_lock, flags);
+       spin_lock_irqsave(&kc->job_lock, flags);
        list_add_tail(&job->list, jobs);
-       spin_unlock_irqrestore(&_job_lock, flags);
+       spin_unlock_irqrestore(&kc->job_lock, flags);
 }
 
 /*
@@ -294,11 +282,11 @@ static int run_complete_job(struct kcopyd_job *job)
        void *context = job->context;
        int read_err = job->read_err;
        unsigned long write_err = job->write_err;
-       kcopyd_notify_fn fn = job->fn;
-       struct kcopyd_client *kc = job->kc;
+       dm_kcopyd_notify_fn fn = job->fn;
+       struct dm_kcopyd_client *kc = job->kc;
 
        kcopyd_put_pages(kc, job->pages);
-       mempool_free(job, _job_pool);
+       mempool_free(job, kc->job_pool);
        fn(read_err, write_err, context);
 
        if (atomic_dec_and_test(&kc->nr_jobs))
@@ -310,6 +298,7 @@ static int run_complete_job(struct kcopyd_job *job)
 static void complete_io(unsigned long error, void *context)
 {
        struct kcopyd_job *job = (struct kcopyd_job *) context;
+       struct dm_kcopyd_client *kc = job->kc;
 
        if (error) {
                if (job->rw == WRITE)
@@ -317,22 +306,22 @@ static void complete_io(unsigned long error, void *context)
                else
                        job->read_err = 1;
 
-               if (!test_bit(KCOPYD_IGNORE_ERROR, &job->flags)) {
-                       push(&_complete_jobs, job);
-                       wake();
+               if (!test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
+                       push(&kc->complete_jobs, job);
+                       wake(kc);
                        return;
                }
        }
 
        if (job->rw == WRITE)
-               push(&_complete_jobs, job);
+               push(&kc->complete_jobs, job);
 
        else {
                job->rw = WRITE;
-               push(&_io_jobs, job);
+               push(&kc->io_jobs, job);
        }
 
-       wake();
+       wake(kc);
 }
 
 /*
@@ -343,7 +332,7 @@ static int run_io_job(struct kcopyd_job *job)
 {
        int r;
        struct dm_io_request io_req = {
-               .bi_rw = job->rw,
+               .bi_rw = job->rw | (1 << BIO_RW_SYNC),
                .mem.type = DM_IO_PAGE_LIST,
                .mem.ptr.pl = job->pages,
                .mem.offset = job->offset,
@@ -369,7 +358,7 @@ static int run_pages_job(struct kcopyd_job *job)
        r = kcopyd_get_pages(job->kc, job->nr_pages, &job->pages);
        if (!r) {
                /* this job is ready for io */
-               push(&_io_jobs, job);
+               push(&job->kc->io_jobs, job);
                return 0;
        }
 
@@ -384,12 +373,13 @@ static int run_pages_job(struct kcopyd_job *job)
  * Run through a list for as long as possible.  Returns the count
  * of successful jobs.
  */
-static int process_jobs(struct list_head *jobs, int (*fn) (struct kcopyd_job *))
+static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
+                       int (*fn) (struct kcopyd_job *))
 {
        struct kcopyd_job *job;
        int r, count = 0;
 
-       while ((job = pop(jobs))) {
+       while ((job = pop(jobs, kc))) {
 
                r = fn(job);
 
@@ -399,7 +389,7 @@ static int process_jobs(struct list_head *jobs, int (*fn) (struct kcopyd_job *))
                                job->write_err = (unsigned long) -1L;
                        else
                                job->read_err = 1;
-                       push(&_complete_jobs, job);
+                       push(&kc->complete_jobs, job);
                        break;
                }
 
@@ -421,8 +411,11 @@ static int process_jobs(struct list_head *jobs, int (*fn) (struct kcopyd_job *))
 /*
  * kcopyd does this every time it's woken up.
  */
-static void do_work(struct work_struct *ignored)
+static void do_work(struct work_struct *work)
 {
+       struct dm_kcopyd_client *kc = container_of(work,
+                                       struct dm_kcopyd_client, kcopyd_work);
+
        /*
         * The order that these are called is *very* important.
         * complete jobs can free some pages for pages jobs.
@@ -430,9 +423,9 @@ static void do_work(struct work_struct *ignored)
         * list.  io jobs call wake when they complete and it all
         * starts again.
         */
-       process_jobs(&_complete_jobs, run_complete_job);
-       process_jobs(&_pages_jobs, run_pages_job);
-       process_jobs(&_io_jobs, run_io_job);
+       process_jobs(&kc->complete_jobs, kc, run_complete_job);
+       process_jobs(&kc->pages_jobs, kc, run_pages_job);
+       process_jobs(&kc->io_jobs, kc, run_io_job);
 }
 
 /*
@@ -442,9 +435,10 @@ static void do_work(struct work_struct *ignored)
  */
 static void dispatch_job(struct kcopyd_job *job)
 {
-       atomic_inc(&job->kc->nr_jobs);
-       push(&_pages_jobs, job);
-       wake();
+       struct dm_kcopyd_client *kc = job->kc;
+       atomic_inc(&kc->nr_jobs);
+       push(&kc->pages_jobs, job);
+       wake(kc);
 }
 
 #define SUB_JOB_SIZE 128
@@ -469,7 +463,7 @@ static void segment_complete(int read_err, unsigned long write_err,
         * Only dispatch more work if there hasn't been an error.
         */
        if ((!job->read_err && !job->write_err) ||
-           test_bit(KCOPYD_IGNORE_ERROR, &job->flags)) {
+           test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
                /* get the next chunk of work */
                progress = job->progress;
                count = job->source.count - progress;
@@ -484,7 +478,8 @@ static void segment_complete(int read_err, unsigned long write_err,
 
        if (count) {
                int i;
-               struct kcopyd_job *sub_job = mempool_alloc(_job_pool, GFP_NOIO);
+               struct kcopyd_job *sub_job = mempool_alloc(job->kc->job_pool,
+                                                          GFP_NOIO);
 
                *sub_job = *job;
                sub_job->source.sector += progress;
@@ -508,7 +503,7 @@ static void segment_complete(int read_err, unsigned long write_err,
                 * after we've completed.
                 */
                job->fn(read_err, write_err, job->context);
-               mempool_free(job, _job_pool);
+               mempool_free(job, job->kc->job_pool);
        }
 }
 
@@ -526,16 +521,16 @@ static void split_job(struct kcopyd_job *job)
                segment_complete(0, 0u, job);
 }
 
-int kcopyd_copy(struct kcopyd_client *kc, struct io_region *from,
-               unsigned int num_dests, struct io_region *dests,
-               unsigned int flags, kcopyd_notify_fn fn, void *context)
+int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
+                  unsigned int num_dests, struct dm_io_region *dests,
+                  unsigned int flags, dm_kcopyd_notify_fn fn, void *context)
 {
        struct kcopyd_job *job;
 
        /*
         * Allocate a new job.
         */
-       job = mempool_alloc(_job_pool, GFP_NOIO);
+       job = mempool_alloc(kc->job_pool, GFP_NOIO);
 
        /*
         * set up for the read.
@@ -569,6 +564,7 @@ int kcopyd_copy(struct kcopyd_client *kc, struct io_region *from,
 
        return 0;
 }
+EXPORT_SYMBOL(dm_kcopyd_copy);
 
 /*
  * Cancels a kcopyd job, eg. someone might be deactivating a
@@ -583,126 +579,76 @@ int kcopyd_cancel(struct kcopyd_job *job, int block)
 #endif  /*  0  */
 
 /*-----------------------------------------------------------------
- * Unit setup
+ * Client setup
  *---------------------------------------------------------------*/
-static DEFINE_MUTEX(_client_lock);
-static LIST_HEAD(_clients);
-
-static void client_add(struct kcopyd_client *kc)
+int dm_kcopyd_client_create(unsigned int nr_pages,
+                           struct dm_kcopyd_client **result)
 {
-       mutex_lock(&_client_lock);
-       list_add(&kc->list, &_clients);
-       mutex_unlock(&_client_lock);
-}
-
-static void client_del(struct kcopyd_client *kc)
-{
-       mutex_lock(&_client_lock);
-       list_del(&kc->list);
-       mutex_unlock(&_client_lock);
-}
-
-static DEFINE_MUTEX(kcopyd_init_lock);
-static int kcopyd_clients = 0;
+       int r = -ENOMEM;
+       struct dm_kcopyd_client *kc;
 
-static int kcopyd_init(void)
-{
-       int r;
-
-       mutex_lock(&kcopyd_init_lock);
-
-       if (kcopyd_clients) {
-               /* Already initialized. */
-               kcopyd_clients++;
-               mutex_unlock(&kcopyd_init_lock);
-               return 0;
-       }
-
-       r = jobs_init();
-       if (r) {
-               mutex_unlock(&kcopyd_init_lock);
-               return r;
-       }
-
-       _kcopyd_wq = create_singlethread_workqueue("kcopyd");
-       if (!_kcopyd_wq) {
-               jobs_exit();
-               mutex_unlock(&kcopyd_init_lock);
+       kc = kmalloc(sizeof(*kc), GFP_KERNEL);
+       if (!kc)
                return -ENOMEM;
-       }
-
-       kcopyd_clients++;
-       INIT_WORK(&_kcopyd_work, do_work);
-       mutex_unlock(&kcopyd_init_lock);
-       return 0;
-}
 
-static void kcopyd_exit(void)
-{
-       mutex_lock(&kcopyd_init_lock);
-       kcopyd_clients--;
-       if (!kcopyd_clients) {
-               jobs_exit();
-               destroy_workqueue(_kcopyd_wq);
-               _kcopyd_wq = NULL;
-       }
-       mutex_unlock(&kcopyd_init_lock);
-}
-
-int kcopyd_client_create(unsigned int nr_pages, struct kcopyd_client **result)
-{
-       int r = 0;
-       struct kcopyd_client *kc;
+       spin_lock_init(&kc->lock);
+       spin_lock_init(&kc->job_lock);
+       INIT_LIST_HEAD(&kc->complete_jobs);
+       INIT_LIST_HEAD(&kc->io_jobs);
+       INIT_LIST_HEAD(&kc->pages_jobs);
 
-       r = kcopyd_init();
-       if (r)
-               return r;
+       kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
+       if (!kc->job_pool)
+               goto bad_slab;
 
-       kc = kmalloc(sizeof(*kc), GFP_KERNEL);
-       if (!kc) {
-               kcopyd_exit();
-               return -ENOMEM;
-       }
+       INIT_WORK(&kc->kcopyd_work, do_work);
+       kc->kcopyd_wq = create_singlethread_workqueue("kcopyd");
+       if (!kc->kcopyd_wq)
+               goto bad_workqueue;
 
-       spin_lock_init(&kc->lock);
        kc->pages = NULL;
        kc->nr_pages = kc->nr_free_pages = 0;
        r = client_alloc_pages(kc, nr_pages);
-       if (r) {
-               kfree(kc);
-               kcopyd_exit();
-               return r;
-       }
+       if (r)
+               goto bad_client_pages;
 
        kc->io_client = dm_io_client_create(nr_pages);
        if (IS_ERR(kc->io_client)) {
                r = PTR_ERR(kc->io_client);
-               client_free_pages(kc);
-               kfree(kc);
-               kcopyd_exit();
-               return r;
+               goto bad_io_client;
        }
 
        init_waitqueue_head(&kc->destroyq);
        atomic_set(&kc->nr_jobs, 0);
 
-       client_add(kc);
        *result = kc;
        return 0;
+
+bad_io_client:
+       client_free_pages(kc);
+bad_client_pages:
+       destroy_workqueue(kc->kcopyd_wq);
+bad_workqueue:
+       mempool_destroy(kc->job_pool);
+bad_slab:
+       kfree(kc);
+
+       return r;
 }
+EXPORT_SYMBOL(dm_kcopyd_client_create);
 
-void kcopyd_client_destroy(struct kcopyd_client *kc)
+void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc)
 {
        /* Wait for completion of all jobs submitted by this client. */
        wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));
 
+       BUG_ON(!list_empty(&kc->complete_jobs));
+       BUG_ON(!list_empty(&kc->io_jobs));
+       BUG_ON(!list_empty(&kc->pages_jobs));
+       destroy_workqueue(kc->kcopyd_wq);
        dm_io_client_destroy(kc->io_client);
        client_free_pages(kc);
-       client_del(kc);
+       mempool_destroy(kc->job_pool);
        kfree(kc);
-       kcopyd_exit();
 }
-
-EXPORT_SYMBOL(kcopyd_client_create);
-EXPORT_SYMBOL(kcopyd_client_destroy);
-EXPORT_SYMBOL(kcopyd_copy);
+EXPORT_SYMBOL(dm_kcopyd_client_destroy);
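
For reference, a minimal sketch of how a caller might drive the renamed per-client interface exported above; the dm_example_* names and the 64-page reservation are illustrative assumptions, not part of this patch (a real target would keep the client for its whole lifetime, as mirror_ctr()/mirror_dtr() do below).

#include <linux/blkdev.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>

/* Completion callback: read_err is a boolean, write_err a per-destination bitset. */
static void dm_example_copy_done(int read_err, unsigned long write_err, void *context)
{
	/* ... resume whatever was waiting on the copy ... */
}

static int dm_example_copy(struct block_device *src_bdev, struct block_device *dst_bdev,
			   sector_t sector, sector_t count, void *context)
{
	struct dm_kcopyd_client *kc;
	struct dm_io_region from, to;
	unsigned long flags = 0;
	int r;

	/* Each client now owns its own mempool, workqueue and job lists. */
	r = dm_kcopyd_client_create(64, &kc);
	if (r)
		return r;

	from.bdev = src_bdev;
	from.sector = sector;
	from.count = count;

	to.bdev = dst_bdev;
	to.sector = sector;
	to.count = count;

	set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);
	r = dm_kcopyd_copy(kc, &from, 1, &to, flags, dm_example_copy_done, context);

	/* Destroy waits for all jobs submitted by this client to complete. */
	dm_kcopyd_client_destroy(kc);
	return r;
}
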
index 2a74b2142f502a6e2ea944e39b598d90cbb96cac..67a6f31b7fc3e2d39a3dfbf6813d6b163eae6707 100644 (file)
@@ -1,5 +1,6 @@
 /*
  * Copyright (C) 2003 Sistina Software
+ * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
  *
  * This file is released under the LGPL.
  */
@@ -8,64 +9,58 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/vmalloc.h>
+#include <linux/dm-io.h>
+#include <linux/dm-dirty-log.h>
 
-#include "dm-log.h"
-#include "dm-io.h"
+#include "dm.h"
 
-#define DM_MSG_PREFIX "mirror log"
+#define DM_MSG_PREFIX "dirty region log"
 
-static LIST_HEAD(_log_types);
-static DEFINE_SPINLOCK(_lock);
+struct dm_dirty_log_internal {
+       struct dm_dirty_log_type *type;
 
-int dm_register_dirty_log_type(struct dirty_log_type *type)
-{
-       spin_lock(&_lock);
-       type->use_count = 0;
-       list_add(&type->list, &_log_types);
-       spin_unlock(&_lock);
+       struct list_head list;
+       long use;
+};
 
-       return 0;
-}
+static LIST_HEAD(_log_types);
+static DEFINE_SPINLOCK(_lock);
 
-int dm_unregister_dirty_log_type(struct dirty_log_type *type)
+static struct dm_dirty_log_internal *__find_dirty_log_type(const char *name)
 {
-       spin_lock(&_lock);
-
-       if (type->use_count)
-               DMWARN("Attempt to unregister a log type that is still in use");
-       else
-               list_del(&type->list);
+       struct dm_dirty_log_internal *log_type;
 
-       spin_unlock(&_lock);
+       list_for_each_entry(log_type, &_log_types, list)
+               if (!strcmp(name, log_type->type->name))
+                       return log_type;
 
-       return 0;
+       return NULL;
 }
 
-static struct dirty_log_type *_get_type(const char *type_name)
+static struct dm_dirty_log_internal *_get_dirty_log_type(const char *name)
 {
-       struct dirty_log_type *type;
+       struct dm_dirty_log_internal *log_type;
 
        spin_lock(&_lock);
-       list_for_each_entry (type, &_log_types, list)
-               if (!strcmp(type_name, type->name)) {
-                       if (!type->use_count && !try_module_get(type->module)){
-                               spin_unlock(&_lock);
-                               return NULL;
-                       }
-                       type->use_count++;
-                       spin_unlock(&_lock);
-                       return type;
-               }
+
+       log_type = __find_dirty_log_type(name);
+       if (log_type) {
+               if (!log_type->use && !try_module_get(log_type->type->module))
+                       log_type = NULL;
+               else
+                       log_type->use++;
+       }
 
        spin_unlock(&_lock);
-       return NULL;
+
+       return log_type;
 }
 
 /*
  * get_type
  * @type_name
  *
- * Attempt to retrieve the dirty_log_type by name.  If not already
+ * Attempt to retrieve the dm_dirty_log_type by name.  If not already
  * available, attempt to load the appropriate module.
  *
  * Log modules are named "dm-log-" followed by the 'type_name'.
@@ -78,14 +73,17 @@ static struct dirty_log_type *_get_type(const char *type_name)
  *
  * Returns: dirty_log_type* on success, NULL on failure
  */
-static struct dirty_log_type *get_type(const char *type_name)
+static struct dm_dirty_log_type *get_type(const char *type_name)
 {
        char *p, *type_name_dup;
-       struct dirty_log_type *type;
+       struct dm_dirty_log_internal *log_type;
+
+       if (!type_name)
+               return NULL;
 
-       type = _get_type(type_name);
-       if (type)
-               return type;
+       log_type = _get_dirty_log_type(type_name);
+       if (log_type)
+               return log_type->type;
 
        type_name_dup = kstrdup(type_name, GFP_KERNEL);
        if (!type_name_dup) {
@@ -95,34 +93,106 @@ static struct dirty_log_type *get_type(const char *type_name)
        }
 
        while (request_module("dm-log-%s", type_name_dup) ||
-              !(type = _get_type(type_name))) {
+              !(log_type = _get_dirty_log_type(type_name))) {
                p = strrchr(type_name_dup, '-');
                if (!p)
                        break;
                p[0] = '\0';
        }
 
-       if (!type)
+       if (!log_type)
                DMWARN("Module for logging type \"%s\" not found.", type_name);
 
        kfree(type_name_dup);
 
-       return type;
+       return log_type ? log_type->type : NULL;
 }
 
-static void put_type(struct dirty_log_type *type)
+static void put_type(struct dm_dirty_log_type *type)
 {
+       struct dm_dirty_log_internal *log_type;
+
+       if (!type)
+               return;
+
        spin_lock(&_lock);
-       if (!--type->use_count)
+       log_type = __find_dirty_log_type(type->name);
+       if (!log_type)
+               goto out;
+
+       if (!--log_type->use)
                module_put(type->module);
+
+       BUG_ON(log_type->use < 0);
+
+out:
        spin_unlock(&_lock);
 }
 
-struct dirty_log *dm_create_dirty_log(const char *type_name, struct dm_target *ti,
-                                     unsigned int argc, char **argv)
+static struct dm_dirty_log_internal *_alloc_dirty_log_type(struct dm_dirty_log_type *type)
 {
-       struct dirty_log_type *type;
-       struct dirty_log *log;
+       struct dm_dirty_log_internal *log_type = kzalloc(sizeof(*log_type),
+                                                        GFP_KERNEL);
+
+       if (log_type)
+               log_type->type = type;
+
+       return log_type;
+}
+
+int dm_dirty_log_type_register(struct dm_dirty_log_type *type)
+{
+       struct dm_dirty_log_internal *log_type = _alloc_dirty_log_type(type);
+       int r = 0;
+
+       if (!log_type)
+               return -ENOMEM;
+
+       spin_lock(&_lock);
+       if (!__find_dirty_log_type(type->name))
+               list_add(&log_type->list, &_log_types);
+       else {
+               kfree(log_type);
+               r = -EEXIST;
+       }
+       spin_unlock(&_lock);
+
+       return r;
+}
+EXPORT_SYMBOL(dm_dirty_log_type_register);
+
+int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type)
+{
+       struct dm_dirty_log_internal *log_type;
+
+       spin_lock(&_lock);
+
+       log_type = __find_dirty_log_type(type->name);
+       if (!log_type) {
+               spin_unlock(&_lock);
+               return -EINVAL;
+       }
+
+       if (log_type->use) {
+               spin_unlock(&_lock);
+               return -ETXTBSY;
+       }
+
+       list_del(&log_type->list);
+
+       spin_unlock(&_lock);
+       kfree(log_type);
+
+       return 0;
+}
+EXPORT_SYMBOL(dm_dirty_log_type_unregister);
+
+struct dm_dirty_log *dm_dirty_log_create(const char *type_name,
+                                        struct dm_target *ti,
+                                        unsigned int argc, char **argv)
+{
+       struct dm_dirty_log_type *type;
+       struct dm_dirty_log *log;
 
        log = kmalloc(sizeof(*log), GFP_KERNEL);
        if (!log)
@@ -143,13 +213,15 @@ struct dirty_log *dm_create_dirty_log(const char *type_name, struct dm_target *t
 
        return log;
 }
+EXPORT_SYMBOL(dm_dirty_log_create);
 
-void dm_destroy_dirty_log(struct dirty_log *log)
+void dm_dirty_log_destroy(struct dm_dirty_log *log)
 {
        log->type->dtr(log);
        put_type(log->type);
        kfree(log);
 }
+EXPORT_SYMBOL(dm_dirty_log_destroy);
 
 /*-----------------------------------------------------------------
  * Persistent and core logs share a lot of their implementation.
@@ -207,7 +279,7 @@ struct log_c {
        struct dm_dev *log_dev;
        struct log_header header;
 
-       struct io_region header_location;
+       struct dm_io_region header_location;
        struct log_header *disk_header;
 };
 
@@ -215,7 +287,7 @@ struct log_c {
  * The touched member needs to be updated every time we access
  * one of the bitsets.
  */
-static  inline int log_test_bit(uint32_t *bs, unsigned bit)
+static inline int log_test_bit(uint32_t *bs, unsigned bit)
 {
        return ext2_test_bit(bit, (unsigned long *) bs) ? 1 : 0;
 }
@@ -302,7 +374,7 @@ static inline int write_header(struct log_c *log)
  * argv contains region_size followed optionally by [no]sync
  *--------------------------------------------------------------*/
 #define BYTE_SHIFT 3
-static int create_log_context(struct dirty_log *log, struct dm_target *ti,
+static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
                              unsigned int argc, char **argv,
                              struct dm_dev *dev)
 {
@@ -315,7 +387,7 @@ static int create_log_context(struct dirty_log *log, struct dm_target *ti,
        int r;
 
        if (argc < 1 || argc > 2) {
-               DMWARN("wrong number of arguments to mirror log");
+               DMWARN("wrong number of arguments to dirty region log");
                return -EINVAL;
        }
 
@@ -325,8 +397,8 @@ static int create_log_context(struct dirty_log *log, struct dm_target *ti,
                else if (!strcmp(argv[1], "nosync"))
                        sync = NOSYNC;
                else {
-                       DMWARN("unrecognised sync argument to mirror log: %s",
-                              argv[1]);
+                       DMWARN("unrecognised sync argument to "
+                              "dirty region log: %s", argv[1]);
                        return -EINVAL;
                }
        }
@@ -434,7 +506,7 @@ static int create_log_context(struct dirty_log *log, struct dm_target *ti,
        return 0;
 }
 
-static int core_ctr(struct dirty_log *log, struct dm_target *ti,
+static int core_ctr(struct dm_dirty_log *log, struct dm_target *ti,
                    unsigned int argc, char **argv)
 {
        return create_log_context(log, ti, argc, argv, NULL);
@@ -447,7 +519,7 @@ static void destroy_log_context(struct log_c *lc)
        kfree(lc);
 }
 
-static void core_dtr(struct dirty_log *log)
+static void core_dtr(struct dm_dirty_log *log)
 {
        struct log_c *lc = (struct log_c *) log->context;
 
@@ -460,14 +532,14 @@ static void core_dtr(struct dirty_log *log)
  *
  * argv contains log_device region_size followed optionally by [no]sync
  *--------------------------------------------------------------*/
-static int disk_ctr(struct dirty_log *log, struct dm_target *ti,
+static int disk_ctr(struct dm_dirty_log *log, struct dm_target *ti,
                    unsigned int argc, char **argv)
 {
        int r;
        struct dm_dev *dev;
 
        if (argc < 2 || argc > 3) {
-               DMWARN("wrong number of arguments to disk mirror log");
+               DMWARN("wrong number of arguments to disk dirty region log");
                return -EINVAL;
        }
 
@@ -485,7 +557,7 @@ static int disk_ctr(struct dirty_log *log, struct dm_target *ti,
        return 0;
 }
 
-static void disk_dtr(struct dirty_log *log)
+static void disk_dtr(struct dm_dirty_log *log)
 {
        struct log_c *lc = (struct log_c *) log->context;
 
@@ -514,7 +586,7 @@ static void fail_log_device(struct log_c *lc)
        dm_table_event(lc->ti->table);
 }
 
-static int disk_resume(struct dirty_log *log)
+static int disk_resume(struct dm_dirty_log *log)
 {
        int r;
        unsigned i;
@@ -524,7 +596,7 @@ static int disk_resume(struct dirty_log *log)
        /* read the disk header */
        r = read_header(lc);
        if (r) {
-               DMWARN("%s: Failed to read header on mirror log device",
+               DMWARN("%s: Failed to read header on dirty region log device",
                       lc->log_dev->name);
                fail_log_device(lc);
                /*
@@ -562,7 +634,7 @@ static int disk_resume(struct dirty_log *log)
        /* write the new header */
        r = write_header(lc);
        if (r) {
-               DMWARN("%s: Failed to write header on mirror log device",
+               DMWARN("%s: Failed to write header on dirty region log device",
                       lc->log_dev->name);
                fail_log_device(lc);
        }
@@ -570,38 +642,38 @@ static int disk_resume(struct dirty_log *log)
        return r;
 }
 
-static uint32_t core_get_region_size(struct dirty_log *log)
+static uint32_t core_get_region_size(struct dm_dirty_log *log)
 {
        struct log_c *lc = (struct log_c *) log->context;
        return lc->region_size;
 }
 
-static int core_resume(struct dirty_log *log)
+static int core_resume(struct dm_dirty_log *log)
 {
        struct log_c *lc = (struct log_c *) log->context;
        lc->sync_search = 0;
        return 0;
 }
 
-static int core_is_clean(struct dirty_log *log, region_t region)
+static int core_is_clean(struct dm_dirty_log *log, region_t region)
 {
        struct log_c *lc = (struct log_c *) log->context;
        return log_test_bit(lc->clean_bits, region);
 }
 
-static int core_in_sync(struct dirty_log *log, region_t region, int block)
+static int core_in_sync(struct dm_dirty_log *log, region_t region, int block)
 {
        struct log_c *lc = (struct log_c *) log->context;
        return log_test_bit(lc->sync_bits, region);
 }
 
-static int core_flush(struct dirty_log *log)
+static int core_flush(struct dm_dirty_log *log)
 {
        /* no op */
        return 0;
 }
 
-static int disk_flush(struct dirty_log *log)
+static int disk_flush(struct dm_dirty_log *log)
 {
        int r;
        struct log_c *lc = (struct log_c *) log->context;
@@ -619,19 +691,19 @@ static int disk_flush(struct dirty_log *log)
        return r;
 }
 
-static void core_mark_region(struct dirty_log *log, region_t region)
+static void core_mark_region(struct dm_dirty_log *log, region_t region)
 {
        struct log_c *lc = (struct log_c *) log->context;
        log_clear_bit(lc, lc->clean_bits, region);
 }
 
-static void core_clear_region(struct dirty_log *log, region_t region)
+static void core_clear_region(struct dm_dirty_log *log, region_t region)
 {
        struct log_c *lc = (struct log_c *) log->context;
        log_set_bit(lc, lc->clean_bits, region);
 }
 
-static int core_get_resync_work(struct dirty_log *log, region_t *region)
+static int core_get_resync_work(struct dm_dirty_log *log, region_t *region)
 {
        struct log_c *lc = (struct log_c *) log->context;
 
@@ -654,7 +726,7 @@ static int core_get_resync_work(struct dirty_log *log, region_t *region)
        return 1;
 }
 
-static void core_set_region_sync(struct dirty_log *log, region_t region,
+static void core_set_region_sync(struct dm_dirty_log *log, region_t region,
                                 int in_sync)
 {
        struct log_c *lc = (struct log_c *) log->context;
@@ -669,7 +741,7 @@ static void core_set_region_sync(struct dirty_log *log, region_t region,
        }
 }
 
-static region_t core_get_sync_count(struct dirty_log *log)
+static region_t core_get_sync_count(struct dm_dirty_log *log)
 {
         struct log_c *lc = (struct log_c *) log->context;
 
@@ -680,7 +752,7 @@ static region_t core_get_sync_count(struct dirty_log *log)
        if (lc->sync != DEFAULTSYNC) \
                DMEMIT("%ssync ", lc->sync == NOSYNC ? "no" : "")
 
-static int core_status(struct dirty_log *log, status_type_t status,
+static int core_status(struct dm_dirty_log *log, status_type_t status,
                       char *result, unsigned int maxlen)
 {
        int sz = 0;
@@ -700,7 +772,7 @@ static int core_status(struct dirty_log *log, status_type_t status,
        return sz;
 }
 
-static int disk_status(struct dirty_log *log, status_type_t status,
+static int disk_status(struct dm_dirty_log *log, status_type_t status,
                       char *result, unsigned int maxlen)
 {
        int sz = 0;
@@ -722,7 +794,7 @@ static int disk_status(struct dirty_log *log, status_type_t status,
        return sz;
 }
 
-static struct dirty_log_type _core_type = {
+static struct dm_dirty_log_type _core_type = {
        .name = "core",
        .module = THIS_MODULE,
        .ctr = core_ctr,
@@ -740,7 +812,7 @@ static struct dirty_log_type _core_type = {
        .status = core_status,
 };
 
-static struct dirty_log_type _disk_type = {
+static struct dm_dirty_log_type _disk_type = {
        .name = "disk",
        .module = THIS_MODULE,
        .ctr = disk_ctr,
@@ -763,26 +835,28 @@ int __init dm_dirty_log_init(void)
 {
        int r;
 
-       r = dm_register_dirty_log_type(&_core_type);
+       r = dm_dirty_log_type_register(&_core_type);
        if (r)
                DMWARN("couldn't register core log");
 
-       r = dm_register_dirty_log_type(&_disk_type);
+       r = dm_dirty_log_type_register(&_disk_type);
        if (r) {
                DMWARN("couldn't register disk type");
-               dm_unregister_dirty_log_type(&_core_type);
+               dm_dirty_log_type_unregister(&_core_type);
        }
 
        return r;
 }
 
-void dm_dirty_log_exit(void)
+void __exit dm_dirty_log_exit(void)
 {
-       dm_unregister_dirty_log_type(&_disk_type);
-       dm_unregister_dirty_log_type(&_core_type);
+       dm_dirty_log_type_unregister(&_disk_type);
+       dm_dirty_log_type_unregister(&_core_type);
 }
 
-EXPORT_SYMBOL(dm_register_dirty_log_type);
-EXPORT_SYMBOL(dm_unregister_dirty_log_type);
-EXPORT_SYMBOL(dm_create_dirty_log);
-EXPORT_SYMBOL(dm_destroy_dirty_log);
+module_init(dm_dirty_log_init);
+module_exit(dm_dirty_log_exit);
+
+MODULE_DESCRIPTION(DM_NAME " dirty region log");
+MODULE_AUTHOR("Joe Thornber, Heinz Mauelshagen <dm-devel@redhat.com>");
+MODULE_LICENSE("GPL");
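
A hedged sketch of how an out-of-tree module might register a log type against the new dm_dirty_log_type_register()/unregister() entry points shown above; the example_* names are illustrative and only the constructor/destructor hooks are stubbed in, the remaining callbacks would be filled in exactly as _core_type is in this file.

#include <linux/module.h>
#include <linux/dm-dirty-log.h>

static int example_log_ctr(struct dm_dirty_log *log, struct dm_target *ti,
			   unsigned int argc, char **argv)
{
	return 0;
}

static void example_log_dtr(struct dm_dirty_log *log)
{
}

static struct dm_dirty_log_type example_log_type = {
	.name   = "example",	/* loaded on demand as module "dm-log-example" */
	.module = THIS_MODULE,
	.ctr    = example_log_ctr,
	.dtr    = example_log_dtr,
	/* ... flush, mark_region, clear_region, etc. as in _core_type ... */
};

static int __init example_log_init(void)
{
	/* Returns -EEXIST if a type with the same name is already registered. */
	return dm_dirty_log_type_register(&example_log_type);
}

static void __exit example_log_exit(void)
{
	/* Returns -ETXTBSY while the type is still in use. */
	dm_dirty_log_type_unregister(&example_log_type);
}

module_init(example_log_init);
module_exit(example_log_exit);
MODULE_LICENSE("GPL");
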
index 762cb086bb7f6e864fca4998f407644e8add96cc..ff05fe89308313ab5792a6b3dc9146db3e585c6a 100644 (file)
@@ -7,9 +7,6 @@
 #include "dm.h"
 #include "dm-bio-list.h"
 #include "dm-bio-record.h"
-#include "dm-io.h"
-#include "dm-log.h"
-#include "kcopyd.h"
 
 #include <linux/ctype.h>
 #include <linux/init.h>
@@ -22,6 +19,9 @@
 #include <linux/workqueue.h>
 #include <linux/log2.h>
 #include <linux/hardirq.h>
+#include <linux/dm-io.h>
+#include <linux/dm-dirty-log.h>
+#include <linux/dm-kcopyd.h>
 
 #define DM_MSG_PREFIX "raid1"
 #define DM_IO_PAGES 64
@@ -74,7 +74,7 @@ struct region_hash {
        unsigned region_shift;
 
        /* holds persistent region state */
-       struct dirty_log *log;
+       struct dm_dirty_log *log;
 
        /* hash table */
        rwlock_t hash_lock;
@@ -133,7 +133,7 @@ struct mirror_set {
        struct dm_target *ti;
        struct list_head list;
        struct region_hash rh;
-       struct kcopyd_client *kcopyd_client;
+       struct dm_kcopyd_client *kcopyd_client;
        uint64_t features;
 
        spinlock_t lock;        /* protects the lists */
@@ -154,6 +154,9 @@ struct mirror_set {
 
        struct workqueue_struct *kmirrord_wq;
        struct work_struct kmirrord_work;
+       struct timer_list timer;
+       unsigned long timer_pending;
+
        struct work_struct trigger_event;
 
        unsigned int nr_mirrors;
@@ -178,13 +181,32 @@ static void wake(struct mirror_set *ms)
        queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
 }
 
+static void delayed_wake_fn(unsigned long data)
+{
+       struct mirror_set *ms = (struct mirror_set *) data;
+
+       clear_bit(0, &ms->timer_pending);
+       wake(ms);
+}
+
+static void delayed_wake(struct mirror_set *ms)
+{
+       if (test_and_set_bit(0, &ms->timer_pending))
+               return;
+
+       ms->timer.expires = jiffies + HZ / 5;
+       ms->timer.data = (unsigned long) ms;
+       ms->timer.function = delayed_wake_fn;
+       add_timer(&ms->timer);
+}
+
 /* FIXME move this */
 static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);
 
 #define MIN_REGIONS 64
 #define MAX_RECOVERY 1
 static int rh_init(struct region_hash *rh, struct mirror_set *ms,
-                  struct dirty_log *log, uint32_t region_size,
+                  struct dm_dirty_log *log, uint32_t region_size,
                   region_t nr_regions)
 {
        unsigned int nr_buckets, max_buckets;
@@ -249,7 +271,7 @@ static void rh_exit(struct region_hash *rh)
        }
 
        if (rh->log)
-               dm_destroy_dirty_log(rh->log);
+               dm_dirty_log_destroy(rh->log);
        if (rh->region_pool)
                mempool_destroy(rh->region_pool);
        vfree(rh->buckets);
@@ -405,24 +427,22 @@ static void rh_update_states(struct region_hash *rh)
        write_lock_irq(&rh->hash_lock);
        spin_lock(&rh->region_lock);
        if (!list_empty(&rh->clean_regions)) {
-               list_splice(&rh->clean_regions, &clean);
-               INIT_LIST_HEAD(&rh->clean_regions);
+               list_splice_init(&rh->clean_regions, &clean);
 
                list_for_each_entry(reg, &clean, list)
                        list_del(&reg->hash_list);
        }
 
        if (!list_empty(&rh->recovered_regions)) {
-               list_splice(&rh->recovered_regions, &recovered);
-               INIT_LIST_HEAD(&rh->recovered_regions);
+               list_splice_init(&rh->recovered_regions, &recovered);
 
                list_for_each_entry (reg, &recovered, list)
                        list_del(&reg->hash_list);
        }
 
        if (!list_empty(&rh->failed_recovered_regions)) {
-               list_splice(&rh->failed_recovered_regions, &failed_recovered);
-               INIT_LIST_HEAD(&rh->failed_recovered_regions);
+               list_splice_init(&rh->failed_recovered_regions,
+                                &failed_recovered);
 
                list_for_each_entry(reg, &failed_recovered, list)
                        list_del(&reg->hash_list);
@@ -790,7 +810,7 @@ static int recover(struct mirror_set *ms, struct region *reg)
 {
        int r;
        unsigned int i;
-       struct io_region from, to[KCOPYD_MAX_REGIONS], *dest;
+       struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
        struct mirror *m;
        unsigned long flags = 0;
 
@@ -822,9 +842,9 @@ static int recover(struct mirror_set *ms, struct region *reg)
        }
 
        /* hand to kcopyd */
-       set_bit(KCOPYD_IGNORE_ERROR, &flags);
-       r = kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to, flags,
-                       recovery_complete, reg);
+       set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);
+       r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
+                          flags, recovery_complete, reg);
 
        return r;
 }
@@ -833,7 +853,7 @@ static void do_recovery(struct mirror_set *ms)
 {
        int r;
        struct region *reg;
-       struct dirty_log *log = ms->rh.log;
+       struct dm_dirty_log *log = ms->rh.log;
 
        /*
         * Start quiescing some regions.
@@ -909,7 +929,7 @@ static void map_bio(struct mirror *m, struct bio *bio)
        bio->bi_sector = map_sector(m, bio);
 }
 
-static void map_region(struct io_region *io, struct mirror *m,
+static void map_region(struct dm_io_region *io, struct mirror *m,
                       struct bio *bio)
 {
        io->bdev = m->dev->bdev;
@@ -951,7 +971,7 @@ static void read_callback(unsigned long error, void *context)
 /* Asynchronous read. */
 static void read_async_bio(struct mirror *m, struct bio *bio)
 {
-       struct io_region io;
+       struct dm_io_region io;
        struct dm_io_request io_req = {
                .bi_rw = READ,
                .mem.type = DM_IO_BVEC,
@@ -1019,7 +1039,7 @@ static void __bio_mark_nosync(struct mirror_set *ms,
 {
        unsigned long flags;
        struct region_hash *rh = &ms->rh;
-       struct dirty_log *log = ms->rh.log;
+       struct dm_dirty_log *log = ms->rh.log;
        struct region *reg;
        region_t region = bio_to_region(rh, bio);
        int recovering = 0;
@@ -1107,7 +1127,7 @@ out:
 static void do_write(struct mirror_set *ms, struct bio *bio)
 {
        unsigned int i;
-       struct io_region io[ms->nr_mirrors], *dest = io;
+       struct dm_io_region io[ms->nr_mirrors], *dest = io;
        struct mirror *m;
        struct dm_io_request io_req = {
                .bi_rw = WRITE,
@@ -1182,6 +1202,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
                spin_lock_irq(&ms->lock);
                bio_list_merge(&ms->failures, &sync);
                spin_unlock_irq(&ms->lock);
+               wake(ms);
        } else
                while ((bio = bio_list_pop(&sync)))
                        do_write(ms, bio);
@@ -1241,7 +1262,7 @@ static void do_failures(struct mirror_set *ms, struct bio_list *failures)
        bio_list_merge(&ms->failures, failures);
        spin_unlock_irq(&ms->lock);
 
-       wake(ms);
+       delayed_wake(ms);
 }
 
 static void trigger_event(struct work_struct *work)
@@ -1255,7 +1276,7 @@ static void trigger_event(struct work_struct *work)
 /*-----------------------------------------------------------------
  * kmirrord
  *---------------------------------------------------------------*/
-static int _do_mirror(struct work_struct *work)
+static void do_mirror(struct work_struct *work)
 {
        struct mirror_set *ms =container_of(work, struct mirror_set,
                                            kmirrord_work);
@@ -1277,23 +1298,7 @@ static int _do_mirror(struct work_struct *work)
        do_writes(ms, &writes);
        do_failures(ms, &failures);
 
-       return (ms->failures.head) ? 1 : 0;
-}
-
-static void do_mirror(struct work_struct *work)
-{
-       /*
-        * If _do_mirror returns 1, we give it
-        * another shot.  This helps for cases like
-        * 'suspend' where we call flush_workqueue
-        * and expect all work to be finished.  If
-        * a failure happens during a suspend, we
-        * couldn't issue a 'wake' because it would
-        * not be honored.  Therefore, we return '1'
-        * from _do_mirror, and retry here.
-        */
-       while (_do_mirror(work))
-               schedule();
+       dm_table_unplug_all(ms->ti->table);
 }
 
 
@@ -1303,7 +1308,7 @@ static void do_mirror(struct work_struct *work)
 static struct mirror_set *alloc_context(unsigned int nr_mirrors,
                                        uint32_t region_size,
                                        struct dm_target *ti,
-                                       struct dirty_log *dl)
+                                       struct dm_dirty_log *dl)
 {
        size_t len;
        struct mirror_set *ms = NULL;
@@ -1403,12 +1408,12 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
 /*
  * Create dirty log: log_type #log_params <log_params>
  */
-static struct dirty_log *create_dirty_log(struct dm_target *ti,
+static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
                                          unsigned int argc, char **argv,
                                          unsigned int *args_used)
 {
        unsigned int param_count;
-       struct dirty_log *dl;
+       struct dm_dirty_log *dl;
 
        if (argc < 2) {
                ti->error = "Insufficient mirror log arguments";
@@ -1427,7 +1432,7 @@ static struct dirty_log *create_dirty_log(struct dm_target *ti,
                return NULL;
        }
 
-       dl = dm_create_dirty_log(argv[0], ti, param_count, argv + 2);
+       dl = dm_dirty_log_create(argv[0], ti, param_count, argv + 2);
        if (!dl) {
                ti->error = "Error creating mirror dirty log";
                return NULL;
@@ -1435,7 +1440,7 @@ static struct dirty_log *create_dirty_log(struct dm_target *ti,
 
        if (!_check_region_size(ti, dl->type->get_region_size(dl))) {
                ti->error = "Invalid region size";
-               dm_destroy_dirty_log(dl);
+               dm_dirty_log_destroy(dl);
                return NULL;
        }
 
@@ -1496,7 +1501,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        int r;
        unsigned int nr_mirrors, m, args_used;
        struct mirror_set *ms;
-       struct dirty_log *dl;
+       struct dm_dirty_log *dl;
 
        dl = create_dirty_log(ti, argc, argv, &args_used);
        if (!dl)
@@ -1506,9 +1511,9 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        argc -= args_used;
 
        if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
-           nr_mirrors < 2 || nr_mirrors > KCOPYD_MAX_REGIONS + 1) {
+           nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) {
                ti->error = "Invalid number of mirrors";
-               dm_destroy_dirty_log(dl);
+               dm_dirty_log_destroy(dl);
                return -EINVAL;
        }
 
@@ -1516,13 +1521,13 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
        if (argc < nr_mirrors * 2) {
                ti->error = "Too few mirror arguments";
-               dm_destroy_dirty_log(dl);
+               dm_dirty_log_destroy(dl);
                return -EINVAL;
        }
 
        ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
        if (!ms) {
-               dm_destroy_dirty_log(dl);
+               dm_dirty_log_destroy(dl);
                return -ENOMEM;
        }
 
@@ -1547,6 +1552,8 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                goto err_free_context;
        }
        INIT_WORK(&ms->kmirrord_work, do_mirror);
+       init_timer(&ms->timer);
+       ms->timer_pending = 0;
        INIT_WORK(&ms->trigger_event, trigger_event);
 
        r = parse_features(ms, argc, argv, &args_used);
@@ -1571,7 +1578,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                goto err_destroy_wq;
        }
 
-       r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
+       r = dm_kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
        if (r)
                goto err_destroy_wq;
 
@@ -1589,8 +1596,9 @@ static void mirror_dtr(struct dm_target *ti)
 {
        struct mirror_set *ms = (struct mirror_set *) ti->private;
 
+       del_timer_sync(&ms->timer);
        flush_workqueue(ms->kmirrord_wq);
-       kcopyd_client_destroy(ms->kcopyd_client);
+       dm_kcopyd_client_destroy(ms->kcopyd_client);
        destroy_workqueue(ms->kmirrord_wq);
        free_context(ms, ti, ms->nr_mirrors);
 }
@@ -1734,7 +1742,7 @@ out:
 static void mirror_presuspend(struct dm_target *ti)
 {
        struct mirror_set *ms = (struct mirror_set *) ti->private;
-       struct dirty_log *log = ms->rh.log;
+       struct dm_dirty_log *log = ms->rh.log;
 
        atomic_set(&ms->suspend, 1);
 
@@ -1763,7 +1771,7 @@ static void mirror_presuspend(struct dm_target *ti)
 static void mirror_postsuspend(struct dm_target *ti)
 {
        struct mirror_set *ms = ti->private;
-       struct dirty_log *log = ms->rh.log;
+       struct dm_dirty_log *log = ms->rh.log;
 
        if (log->type->postsuspend && log->type->postsuspend(log))
                /* FIXME: need better error handling */
@@ -1773,7 +1781,7 @@ static void mirror_postsuspend(struct dm_target *ti)
 static void mirror_resume(struct dm_target *ti)
 {
        struct mirror_set *ms = ti->private;
-       struct dirty_log *log = ms->rh.log;
+       struct dm_dirty_log *log = ms->rh.log;
 
        atomic_set(&ms->suspend, 0);
        if (log->type->resume && log->type->resume(log))
@@ -1811,7 +1819,7 @@ static int mirror_status(struct dm_target *ti, status_type_t type,
 {
        unsigned int m, sz = 0;
        struct mirror_set *ms = (struct mirror_set *) ti->private;
-       struct dirty_log *log = ms->rh.log;
+       struct dm_dirty_log *log = ms->rh.log;
        char buffer[ms->nr_mirrors + 1];
 
        switch (type) {
@@ -1864,15 +1872,9 @@ static int __init dm_mirror_init(void)
 {
        int r;
 
-       r = dm_dirty_log_init();
-       if (r)
-               return r;
-
        r = dm_register_target(&mirror_target);
-       if (r < 0) {
+       if (r < 0)
                DMERR("Failed to register mirror target");
-               dm_dirty_log_exit();
-       }
 
        return r;
 }
@@ -1884,8 +1886,6 @@ static void __exit dm_mirror_exit(void)
        r = dm_unregister_target(&mirror_target);
        if (r < 0)
                DMERR("unregister failed %d", r);
-
-       dm_dirty_log_exit();
 }
 
 /* Module hooks */
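
The delayed_wake() additions above boil down to a coalescing one-shot timer in front of the per-mirror workqueue; a stripped-down sketch of the same pattern with illustrative example_* names, assuming the timer is init_timer()'d at setup and del_timer_sync()'d at teardown, as mirror_ctr()/mirror_dtr() do.

#include <linux/bitops.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

struct example_ctx {
	struct workqueue_struct *wq;
	struct work_struct work;
	struct timer_list timer;
	unsigned long timer_pending;
};

static void example_wake(struct example_ctx *ctx)
{
	queue_work(ctx->wq, &ctx->work);
}

static void example_delayed_wake_fn(unsigned long data)
{
	struct example_ctx *ctx = (struct example_ctx *) data;

	clear_bit(0, &ctx->timer_pending);
	example_wake(ctx);
}

/* Coalesce bursts of requests into at most one wakeup every HZ/5. */
static void example_delayed_wake(struct example_ctx *ctx)
{
	if (test_and_set_bit(0, &ctx->timer_pending))
		return;	/* a wakeup is already pending */

	ctx->timer.expires = jiffies + HZ / 5;
	ctx->timer.data = (unsigned long) ctx;
	ctx->timer.function = example_delayed_wake_fn;
	add_timer(&ctx->timer);
}
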
index 4dc8a43c034b0242422399a15a7d6a271f97da3d..1ba8a47d61b116646c6ac8ef533cd5b8f64767a7 100644 (file)
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/log2.h>
+#include <linux/dm-kcopyd.h>
 
 #include "dm-snap.h"
 #include "dm-bio-list.h"
-#include "kcopyd.h"
 
 #define DM_MSG_PREFIX "snapshots"
 
@@ -36,9 +36,9 @@
 #define SNAPSHOT_COPY_PRIORITY 2
 
 /*
- * Each snapshot reserves this many pages for io
+ * Reserve 1MB for each snapshot initially (with minimum of 1 page).
  */
-#define SNAPSHOT_PAGES 256
+#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)
 
 static struct workqueue_struct *ksnapd;
 static void flush_queued_bios(struct work_struct *work);
@@ -536,7 +536,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        s->last_percent = 0;
        init_rwsem(&s->lock);
        spin_lock_init(&s->pe_lock);
-       s->table = ti->table;
+       s->ti = ti;
 
        /* Allocate hash table for COW data */
        if (init_hash_tables(s)) {
@@ -558,7 +558,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                goto bad4;
        }
 
-       r = kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
+       r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
        if (r) {
                ti->error = "Could not create kcopyd client";
                goto bad5;
@@ -591,7 +591,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        return 0;
 
  bad6:
-       kcopyd_client_destroy(s->kcopyd_client);
+       dm_kcopyd_client_destroy(s->kcopyd_client);
 
  bad5:
        s->store.destroy(&s->store);
@@ -613,7 +613,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
 static void __free_exceptions(struct dm_snapshot *s)
 {
-       kcopyd_client_destroy(s->kcopyd_client);
+       dm_kcopyd_client_destroy(s->kcopyd_client);
        s->kcopyd_client = NULL;
 
        exit_exception_table(&s->pending, pending_cache);
@@ -699,7 +699,7 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err)
 
        s->valid = 0;
 
-       dm_table_event(s->table);
+       dm_table_event(s->ti->table);
 }
 
 static void get_pending_exception(struct dm_snap_pending_exception *pe)
@@ -824,7 +824,7 @@ static void copy_callback(int read_err, unsigned long write_err, void *context)
 static void start_copy(struct dm_snap_pending_exception *pe)
 {
        struct dm_snapshot *s = pe->snap;
-       struct io_region src, dest;
+       struct dm_io_region src, dest;
        struct block_device *bdev = s->origin->bdev;
        sector_t dev_size;
 
@@ -839,7 +839,7 @@ static void start_copy(struct dm_snap_pending_exception *pe)
        dest.count = src.count;
 
        /* Hand over to kcopyd */
-       kcopyd_copy(s->kcopyd_client,
+       dm_kcopyd_copy(s->kcopyd_client,
                    &src, 1, &dest, 0, copy_callback, pe);
 }
 
@@ -1060,7 +1060,7 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio)
                        goto next_snapshot;
 
                /* Nothing to do if writing beyond end of snapshot */
-               if (bio->bi_sector >= dm_table_get_size(snap->table))
+               if (bio->bi_sector >= dm_table_get_size(snap->ti->table))
                        goto next_snapshot;
 
                /*
index 93bce5d49742e3441fd24b5cd511483b8027c846..24f9fb73b982d0cd148ef15d56428ba6c7ce6877 100644 (file)
@@ -132,7 +132,7 @@ struct exception_store {
 
 struct dm_snapshot {
        struct rw_semaphore lock;
-       struct dm_table *table;
+       struct dm_target *ti;
 
        struct dm_dev *origin;
        struct dm_dev *cow;
@@ -169,7 +169,7 @@ struct dm_snapshot {
        /* The on disk metadata handler */
        struct exception_store store;
 
-       struct kcopyd_client *kcopyd_client;
+       struct dm_kcopyd_client *kcopyd_client;
 
        /* Queue of snapshot writes for ksnapd to flush */
        struct bio_list queued_bios;
index e75b1437b58b67ad8ecac097d6b3c50e9bf0b2ba..51be53344214906593ec54dec6b6e8e1c018f369 100644 (file)
@@ -245,44 +245,6 @@ int dm_table_create(struct dm_table **result, int mode,
        return 0;
 }
 
-int dm_create_error_table(struct dm_table **result, struct mapped_device *md)
-{
-       struct dm_table *t;
-       sector_t dev_size = 1;
-       int r;
-
-       /*
-        * Find current size of device.
-        * Default to 1 sector if inactive.
-        */
-       t = dm_get_table(md);
-       if (t) {
-               dev_size = dm_table_get_size(t);
-               dm_table_put(t);
-       }
-
-       r = dm_table_create(&t, FMODE_READ, 1, md);
-       if (r)
-               return r;
-
-       r = dm_table_add_target(t, "error", 0, dev_size, NULL);
-       if (r)
-               goto out;
-
-       r = dm_table_complete(t);
-       if (r)
-               goto out;
-
-       *result = t;
-
-out:
-       if (r)
-               dm_table_put(t);
-
-       return r;
-}
-EXPORT_SYMBOL_GPL(dm_create_error_table);
-
 static void free_devices(struct list_head *devices)
 {
        struct list_head *tmp, *next;
@@ -954,7 +916,7 @@ void dm_table_presuspend_targets(struct dm_table *t)
        if (!t)
                return;
 
-       return suspend_targets(t, 0);
+       suspend_targets(t, 0);
 }
 
 void dm_table_postsuspend_targets(struct dm_table *t)
@@ -962,7 +924,7 @@ void dm_table_postsuspend_targets(struct dm_table *t)
        if (!t)
                return;
 
-       return suspend_targets(t, 1);
+       suspend_targets(t, 1);
 }
 
 int dm_table_resume_targets(struct dm_table *t)
index 6617ce4af09579285a2be1090adde8c9f61a3fb4..372369b1cc2068c286964883638c2b3a9f2c11c4 100644 (file)
@@ -204,6 +204,7 @@ static int (*_inits[])(void) __initdata = {
        dm_target_init,
        dm_linear_init,
        dm_stripe_init,
+       dm_kcopyd_init,
        dm_interface_init,
 };
 
@@ -212,6 +213,7 @@ static void (*_exits[])(void) = {
        dm_target_exit,
        dm_linear_exit,
        dm_stripe_exit,
+       dm_kcopyd_exit,
        dm_interface_exit,
 };
 
@@ -922,7 +924,7 @@ static void free_minor(int minor)
 /*
  * See if the device with a specific minor # is free.
  */
-static int specific_minor(struct mapped_device *md, int minor)
+static int specific_minor(int minor)
 {
        int r, m;
 
@@ -955,7 +957,7 @@ out:
        return r;
 }
 
-static int next_free_minor(struct mapped_device *md, int *minor)
+static int next_free_minor(int *minor)
 {
        int r, m;
 
@@ -966,9 +968,8 @@ static int next_free_minor(struct mapped_device *md, int *minor)
        spin_lock(&_minor_lock);
 
        r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
-       if (r) {
+       if (r)
                goto out;
-       }
 
        if (m >= (1 << MINORBITS)) {
                idr_remove(&_minor_idr, m);
@@ -991,7 +992,7 @@ static struct block_device_operations dm_blk_dops;
 static struct mapped_device *alloc_dev(int minor)
 {
        int r;
-       struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);
+       struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
        void *old_md;
 
        if (!md) {
@@ -1004,13 +1005,12 @@ static struct mapped_device *alloc_dev(int minor)
 
        /* get a minor number for the dev */
        if (minor == DM_ANY_MINOR)
-               r = next_free_minor(md, &minor);
+               r = next_free_minor(&minor);
        else
-               r = specific_minor(md, minor);
+               r = specific_minor(minor);
        if (r < 0)
                goto bad_minor;
 
-       memset(md, 0, sizeof(*md));
        init_rwsem(&md->io_lock);
        mutex_init(&md->suspend_lock);
        spin_lock_init(&md->pushback_lock);
index b4584a39383bdeda58fc1f16f596447648f1e110..8c03b634e62e4a8106980750fb9d948b2fb9327f 100644 (file)
 #include <linux/blkdev.h>
 #include <linux/hdreg.h>
 
-#define DM_NAME "device-mapper"
-
-#define DMERR(f, arg...) \
-       printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
-#define DMERR_LIMIT(f, arg...) \
-       do { \
-               if (printk_ratelimit()) \
-                       printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " \
-                              f "\n", ## arg); \
-       } while (0)
-
-#define DMWARN(f, arg...) \
-       printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
-#define DMWARN_LIMIT(f, arg...) \
-       do { \
-               if (printk_ratelimit()) \
-                       printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " \
-                              f "\n", ## arg); \
-       } while (0)
-
-#define DMINFO(f, arg...) \
-       printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
-#define DMINFO_LIMIT(f, arg...) \
-       do { \
-               if (printk_ratelimit()) \
-                       printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f \
-                              "\n", ## arg); \
-       } while (0)
-
-#ifdef CONFIG_DM_DEBUG
-#  define DMDEBUG(f, arg...) \
-       printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX " DEBUG: " f "\n", ## arg)
-#  define DMDEBUG_LIMIT(f, arg...) \
-       do { \
-               if (printk_ratelimit()) \
-                       printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX ": " f \
-                              "\n", ## arg); \
-       } while (0)
-#else
-#  define DMDEBUG(f, arg...) do {} while (0)
-#  define DMDEBUG_LIMIT(f, arg...) do {} while (0)
-#endif
-
-#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
-                         0 : scnprintf(result + sz, maxlen - sz, x))
-
-#define SECTOR_SHIFT 9
-
-/*
- * Definitions of return values from target end_io function.
- */
-#define DM_ENDIO_INCOMPLETE    1
-#define DM_ENDIO_REQUEUE       2
-
-/*
- * Definitions of return values from target map function.
- */
-#define DM_MAPIO_SUBMITTED     0
-#define DM_MAPIO_REMAPPED      1
-#define DM_MAPIO_REQUEUE       DM_ENDIO_REQUEUE
-
 /*
  * Suspend feature flags
  */
@@ -136,34 +75,6 @@ static inline int array_too_big(unsigned long fixed, unsigned long obj,
        return (num > (ULONG_MAX - fixed) / obj);
 }
 
-/*
- * Ceiling(n / sz)
- */
-#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))
-
-#define dm_sector_div_up(n, sz) ( \
-{ \
-       sector_t _r = ((n) + (sz) - 1); \
-       sector_div(_r, (sz)); \
-       _r; \
-} \
-)
-
-/*
- * ceiling(n / size) * size
- */
-#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))
-
-static inline sector_t to_sector(unsigned long n)
-{
-       return (n >> 9);
-}
-
-static inline unsigned long to_bytes(sector_t n)
-{
-       return (n << 9);
-}
-
 int dm_split_args(int *argc, char ***argvp, char *input);
 
 /*
@@ -189,4 +100,13 @@ int dm_lock_for_deletion(struct mapped_device *md);
 
 void dm_kobject_uevent(struct mapped_device *md);
 
+/*
+ * Dirty log
+ */
+int dm_dirty_log_init(void);
+void dm_dirty_log_exit(void);
+
+int dm_kcopyd_init(void);
+void dm_kcopyd_exit(void);
+
 #endif
diff --git a/drivers/md/kcopyd.h b/drivers/md/kcopyd.h
deleted file mode 100644 (file)
index 4845f2a..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (C) 2001 Sistina Software
- *
- * This file is released under the GPL.
- *
- * Kcopyd provides a simple interface for copying an area of one
- * block-device to one or more other block-devices, with an asynchronous
- * completion notification.
- */
-
-#ifndef DM_KCOPYD_H
-#define DM_KCOPYD_H
-
-#include "dm-io.h"
-
-/* FIXME: make this configurable */
-#define KCOPYD_MAX_REGIONS 8
-
-#define KCOPYD_IGNORE_ERROR 1
-
-/*
- * To use kcopyd you must first create a kcopyd client object.
- */
-struct kcopyd_client;
-int kcopyd_client_create(unsigned int num_pages, struct kcopyd_client **result);
-void kcopyd_client_destroy(struct kcopyd_client *kc);
-
-/*
- * Submit a copy job to kcopyd.  This is built on top of the
- * previous three fns.
- *
- * read_err is a boolean,
- * write_err is a bitset, with 1 bit for each destination region
- */
-typedef void (*kcopyd_notify_fn)(int read_err, unsigned long write_err,
-                                void *context);
-
-int kcopyd_copy(struct kcopyd_client *kc, struct io_region *from,
-               unsigned int num_dests, struct io_region *dests,
-               unsigned int flags, kcopyd_notify_fn fn, void *context);
-
-#endif
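
For any out-of-tree user of the deleted private header, the conversion to the new public <linux/dm-kcopyd.h> is mechanical; a hedged before/after sketch with illustrative variable names.

/* Before (private drivers/md/kcopyd.h): */
	struct kcopyd_client *kc;
	struct io_region from, to[KCOPYD_MAX_REGIONS];

	kcopyd_client_create(nr_pages, &kc);
	kcopyd_copy(kc, &from, 1, to, 0, fn, context);
	kcopyd_client_destroy(kc);

/* After (public <linux/dm-kcopyd.h>): */
	struct dm_kcopyd_client *kc;
	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS];

	dm_kcopyd_client_create(nr_pages, &kc);
	dm_kcopyd_copy(kc, &from, 1, to, 0, fn, context);
	dm_kcopyd_client_destroy(kc);
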
index e8503341e3b147f5bc242e640bb1596e261695e9..eed06d068fd12f36f64c11a37ea0e3ea7c611d32 100644 (file)
@@ -158,6 +158,12 @@ config MTD_OF_PARTS
          the partition map from the children of the flash node,
          as described in Documentation/powerpc/booting-without-of.txt.
 
+config MTD_AR7_PARTS
+       tristate "TI AR7 partitioning support"
+       depends on MTD_PARTITIONS
+       ---help---
+         TI AR7 partitioning support
+
 comment "User Modules And Translation Layers"
 
 config MTD_CHAR
index 538e33d11d46ce026265bc5c504b6c08222ee198..4b77335715f0cb64bb702178ced7a547e6aaabea 100644 (file)
@@ -11,6 +11,7 @@ obj-$(CONFIG_MTD_CONCAT)      += mtdconcat.o
 obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
 obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
 obj-$(CONFIG_MTD_AFS_PARTS)    += afs.o
+obj-$(CONFIG_MTD_AR7_PARTS)    += ar7part.o
 obj-$(CONFIG_MTD_OF_PARTS)      += ofpart.o
 
 # 'Users' - code which presents functionality to userspace.
diff --git a/drivers/mtd/ar7part.c b/drivers/mtd/ar7part.c
new file mode 100644 (file)
index 0000000..ecf170b
--- /dev/null
@@ -0,0 +1,151 @@
+/*
+ * Copyright © 2007 Eugene Konev <ejka@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ *
+ * TI AR7 flash partition table.
+ * Based on ar7 map by Felix Fietkau <nbd@openwrt.org>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/bootmem.h>
+#include <linux/magic.h>
+
+#define AR7_PARTS      4
+#define ROOT_OFFSET    0xe0000
+
+#define LOADER_MAGIC1  le32_to_cpu(0xfeedfa42)
+#define LOADER_MAGIC2  le32_to_cpu(0xfeed1281)
+
+#ifndef SQUASHFS_MAGIC
+#define SQUASHFS_MAGIC 0x73717368
+#endif
+
+struct ar7_bin_rec {
+       unsigned int checksum;
+       unsigned int length;
+       unsigned int address;
+};
+
+static struct mtd_partition ar7_parts[AR7_PARTS];
+
+static int create_mtd_partitions(struct mtd_info *master,
+                                struct mtd_partition **pparts,
+                                unsigned long origin)
+{
+       struct ar7_bin_rec header;
+       unsigned int offset;
+       size_t len;
+       unsigned int pre_size = master->erasesize, post_size = 0;
+       unsigned int root_offset = ROOT_OFFSET;
+
+       int retries = 10;
+
+       ar7_parts[0].name = "loader";
+       ar7_parts[0].offset = 0;
+       ar7_parts[0].size = master->erasesize;
+       ar7_parts[0].mask_flags = MTD_WRITEABLE;
+
+       ar7_parts[1].name = "config";
+       ar7_parts[1].offset = 0;
+       ar7_parts[1].size = master->erasesize;
+       ar7_parts[1].mask_flags = 0;
+
+       do { /* Try 10 blocks starting from master->erasesize */
+               offset = pre_size;
+               master->read(master, offset,
+                            sizeof(header), &len, (uint8_t *)&header);
+               if (!strncmp((char *)&header, "TIENV0.8", 8))
+                       ar7_parts[1].offset = pre_size;
+               if (header.checksum == LOADER_MAGIC1)
+                       break;
+               if (header.checksum == LOADER_MAGIC2)
+                       break;
+               pre_size += master->erasesize;
+       } while (retries--);
+
+       pre_size = offset;
+
+       if (!ar7_parts[1].offset) {
+               ar7_parts[1].offset = master->size - master->erasesize;
+               post_size = master->erasesize;
+       }
+
+       switch (header.checksum) {
+       case LOADER_MAGIC1:
+               while (header.length) {
+                       offset += sizeof(header) + header.length;
+                       master->read(master, offset, sizeof(header),
+                                    &len, (uint8_t *)&header);
+               }
+               root_offset = offset + sizeof(header) + 4;
+               break;
+       case LOADER_MAGIC2:
+               while (header.length) {
+                       offset += sizeof(header) + header.length;
+                       master->read(master, offset, sizeof(header),
+                                    &len, (uint8_t *)&header);
+               }
+               root_offset = offset + sizeof(header) + 4 + 0xff;
+               root_offset &= ~(uint32_t)0xff;
+               break;
+       default:
+               printk(KERN_WARNING "Unknown magic: %08x\n", header.checksum);
+               break;
+       }
+
+       master->read(master, root_offset,
+               sizeof(header), &len, (u8 *)&header);
+       if (header.checksum != SQUASHFS_MAGIC) {
+               root_offset += master->erasesize - 1;
+               root_offset &= ~(master->erasesize - 1);
+       }
+
+       ar7_parts[2].name = "linux";
+       ar7_parts[2].offset = pre_size;
+       ar7_parts[2].size = master->size - pre_size - post_size;
+       ar7_parts[2].mask_flags = 0;
+
+       ar7_parts[3].name = "rootfs";
+       ar7_parts[3].offset = root_offset;
+       ar7_parts[3].size = master->size - root_offset - post_size;
+       ar7_parts[3].mask_flags = 0;
+
+       *pparts = ar7_parts;
+       return AR7_PARTS;
+}
+
+static struct mtd_part_parser ar7_parser = {
+       .owner = THIS_MODULE,
+       .parse_fn = create_mtd_partitions,
+       .name = "ar7part",
+};
+
+static int __init ar7_parser_init(void)
+{
+       return register_mtd_parser(&ar7_parser);
+}
+
+module_init(ar7_parser_init);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Felix Fietkau <nbd@openwrt.org>, "
+               "Eugene Konev <ejka@openwrt.org>");
+MODULE_DESCRIPTION("MTD partitioning for TI AR7");
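
The parser registers under the name "ar7part", so a board map driver picks it up through the generic partition-probe list. A sketch of such a caller (not part of this commit; the function name and probe order are made up):

	static const char *probe_types[] = { "cmdlinepart", "ar7part", NULL };

	static int ar7_add_partitions(struct mtd_info *master)
	{
		struct mtd_partition *parts;
		int nr;

		/* asks each parser in turn; ar7part returns AR7_PARTS on success */
		nr = parse_mtd_partitions(master, probe_types, &parts, 0);
		if (nr <= 0)
			return nr ? nr : -ENODEV;

		return add_mtd_partitions(master, parts, nr);
	}
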
index 0080452531d6a35df3a651e94d46d0f5ac5df003..e812df607a5c9aef596e96c4800881cb2df15967 100644 (file)
@@ -384,7 +384,7 @@ read_pri_intelext(struct map_info *map, __u16 adr)
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
-                                       __FUNCTION__);
+                                       __func__);
                                return NULL;
                        }
                        goto again;
@@ -619,6 +619,9 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                  sizeof(struct cfi_intelext_blockinfo);
                }
 
+               if (!numparts)
+                       numparts = 1;
+
                /* Programming Region info */
                if (extp->MinorVersion >= '4') {
                        struct cfi_intelext_programming_regioninfo *prinfo;
@@ -641,7 +644,7 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                if ((1 << partshift) < mtd->erasesize) {
                        printk( KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
-                               __FUNCTION__, numparts);
+                               __func__, numparts);
                        return -EINVAL;
                }
 
@@ -1071,10 +1074,10 @@ static int __xipram xip_wait_for_operation(
                        chip->state = newstate;
                        map_write(map, CMD(0xff), adr);
                        (void) map_read(map, adr);
-                       asm volatile (".rep 8; nop; .endr");
+                       xip_iprefetch();
                        local_irq_enable();
                        spin_unlock(chip->mutex);
-                       asm volatile (".rep 8; nop; .endr");
+                       xip_iprefetch();
                        cond_resched();
 
                        /*
@@ -2013,7 +2016,7 @@ static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
 
 #ifdef DEBUG_LOCK_BITS
        printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
-              __FUNCTION__, ofs, len);
+              __func__, ofs, len);
        cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
                ofs, len, NULL);
 #endif
@@ -2023,7 +2026,7 @@ static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
 
 #ifdef DEBUG_LOCK_BITS
        printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
-              __FUNCTION__, ret);
+              __func__, ret);
        cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
                ofs, len, NULL);
 #endif
@@ -2037,7 +2040,7 @@ static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
 
 #ifdef DEBUG_LOCK_BITS
        printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
-              __FUNCTION__, ofs, len);
+              __func__, ofs, len);
        cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
                ofs, len, NULL);
 #endif
@@ -2047,7 +2050,7 @@ static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
 
 #ifdef DEBUG_LOCK_BITS
        printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
-              __FUNCTION__, ret);
+              __func__, ret);
        cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
                ofs, len, NULL);
 #endif
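
These hunks replace the open-coded ".rep 8; nop; .endr" instruction-prefetch flush with the xip_iprefetch() helper. Its definition is not part of this diff; a plausible shape, stated as an assumption, is a thin per-arch wrapper around the same eight-nop sequence:

	/* Assumed per-arch definition (not shown in this commit): */
	#ifndef xip_iprefetch
	#define xip_iprefetch()	asm volatile (".rep 8; nop; .endr")
	#endif
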
index 458d477614d680c0fb718e20e16ee6692fc71a8a..f7fcc6389533c3221dac34ab72878605d4f94e0b 100644 (file)
@@ -220,6 +220,28 @@ static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
        mtd->flags |= MTD_POWERUP_LOCK;
 }
 
+static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param)
+{
+       struct map_info *map = mtd->priv;
+       struct cfi_private *cfi = map->fldrv_priv;
+
+       if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
+               cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
+               pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name);
+       }
+}
+
+static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param)
+{
+       struct map_info *map = mtd->priv;
+       struct cfi_private *cfi = map->fldrv_priv;
+
+       if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
+               cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
+               pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name);
+       }
+}
+
 static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
 #ifdef AMD_BOOTLOC_BUG
@@ -231,6 +253,10 @@ static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
        { CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
        { CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
+       { CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors, NULL, },
+       { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors, NULL, },
+       { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors, NULL, },
+       { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors, NULL, },
 #if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
 #endif
@@ -723,10 +749,10 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
                        chip->erase_suspended = 1;
                        map_write(map, CMD(0xf0), adr);
                        (void) map_read(map, adr);
-                       asm volatile (".rep 8; nop; .endr");
+                       xip_iprefetch();
                        local_irq_enable();
                        spin_unlock(chip->mutex);
-                       asm volatile (".rep 8; nop; .endr");
+                       xip_iprefetch();
                        cond_resched();
 
                        /*
index 492e2ab27420ff76bcc532c961452eeb75a59e32..1b720cc571f315dfbb1ec2a7faed160835bbb00b 100644 (file)
@@ -445,7 +445,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
  retry:
 
 #ifdef DEBUG_CFI_FEATURES
-       printk("%s: chip->state[%d]\n", __FUNCTION__, chip->state);
+       printk("%s: chip->state[%d]\n", __func__, chip->state);
 #endif
        spin_lock_bh(chip->mutex);
 
@@ -463,7 +463,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
                map_write(map, CMD(0x70), cmd_adr);
                 chip->state = FL_STATUS;
 #ifdef DEBUG_CFI_FEATURES
-        printk("%s: 1 status[%x]\n", __FUNCTION__, map_read(map, cmd_adr));
+       printk("%s: 1 status[%x]\n", __func__, map_read(map, cmd_adr));
 #endif
 
        case FL_STATUS:
@@ -591,7 +591,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
         /* check for errors: 'lock bit', 'VPP', 'dead cell'/'unerased cell' or 'incorrect cmd' -- saw */
         if (map_word_bitsset(map, status, CMD(0x3a))) {
 #ifdef DEBUG_CFI_FEATURES
-               printk("%s: 2 status[%lx]\n", __FUNCTION__, status.x[0]);
+               printk("%s: 2 status[%lx]\n", __func__, status.x[0]);
 #endif
                /* clear status */
                map_write(map, CMD(0x50), cmd_adr);
@@ -625,9 +625,9 @@ static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
        ofs = to  - (chipnum << cfi->chipshift);
 
 #ifdef DEBUG_CFI_FEATURES
-        printk("%s: map_bankwidth(map)[%x]\n", __FUNCTION__, map_bankwidth(map));
-        printk("%s: chipnum[%x] wbufsize[%x]\n", __FUNCTION__, chipnum, wbufsize);
-        printk("%s: ofs[%x] len[%x]\n", __FUNCTION__, ofs, len);
+       printk("%s: map_bankwidth(map)[%x]\n", __func__, map_bankwidth(map));
+       printk("%s: chipnum[%x] wbufsize[%x]\n", __func__, chipnum, wbufsize);
+       printk("%s: ofs[%x] len[%x]\n", __func__, ofs, len);
 #endif
 
         /* Write buffer is worth it only if more than one word to write... */
@@ -893,7 +893,8 @@ retry:
        return ret;
 }
 
-int cfi_staa_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
+static int cfi_staa_erase_varsize(struct mtd_info *mtd,
+                                 struct erase_info *instr)
 {      struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long adr, len;
index f651b6ef1c5d6c44805b066fab343eb965f952d0..a4463a91ce31877881beb14427bf8bc6b6f5fd5a 100644 (file)
@@ -39,7 +39,7 @@ struct mtd_info *cfi_probe(struct map_info *map);
 #define xip_allowed(base, map) \
 do { \
        (void) map_read(map, base); \
-       asm volatile (".rep 8; nop; .endr"); \
+       xip_iprefetch(); \
        local_irq_enable(); \
 } while (0)
 
@@ -232,6 +232,11 @@ static int __xipram cfi_chip_setup(struct map_info *map,
        cfi->mfr = cfi_read_query16(map, base);
        cfi->id = cfi_read_query16(map, base + ofs_factor);
 
+       /* Get AMD/Spansion extended JEDEC ID */
+       if (cfi->mfr == CFI_MFR_AMD && (cfi->id & 0xff) == 0x7e)
+               cfi->id = cfi_read_query(map, base + 0xe * ofs_factor) << 8 |
+                         cfi_read_query(map, base + 0xf * ofs_factor);
+
        /* Put it back into Read Mode */
        cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
        /* ... even if it's an Intel chip */
index 2e51496c248e0b718273ec3715d5c16b2c689ff8..72e0022a47bf50e360686068b66e6d52dbe895a6 100644 (file)
@@ -65,7 +65,7 @@ __xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* n
 
 #ifdef CONFIG_MTD_XIP
        (void) map_read(map, base);
-       asm volatile (".rep 8; nop; .endr");
+       xip_iprefetch();
        local_irq_enable();
 #endif
 
index 4be51a86a85cbbe711eaca3973ab9716692735ae..aa07575eb28834adfd09683a95ed87e1a5e6ccc6 100644 (file)
 #define M29F800AB      0x0058
 #define M29W800DT      0x00D7
 #define M29W800DB      0x005B
+#define M29W400DT      0x00EE
+#define M29W400DB      0x00EF
 #define M29W160DT      0x22C4
 #define M29W160DB      0x2249
 #define M29W040B       0x00E3
 #define SST49LF030A    0x001C
 #define SST49LF040A    0x0051
 #define SST49LF080A    0x005B
+#define SST36VF3203    0x7354
 
 /* Toshiba */
 #define TC58FVT160     0x00C2
@@ -1113,7 +1116,7 @@ static const struct amd_flash_info jedec_table[] = {
                .regions        = {
                        ERASEINFO(0x10000,8),
                }
-        }, {
+       }, {
                .mfr_id         = MANUFACTURER_MACRONIX,
                .dev_id         = MX29F016,
                .name           = "Macronix MX29F016",
@@ -1125,7 +1128,7 @@ static const struct amd_flash_info jedec_table[] = {
                .regions        = {
                        ERASEINFO(0x10000,32),
                }
-        }, {
+       }, {
                .mfr_id         = MANUFACTURER_MACRONIX,
                .dev_id         = MX29F004T,
                .name           = "Macronix MX29F004T",
@@ -1140,7 +1143,7 @@ static const struct amd_flash_info jedec_table[] = {
                        ERASEINFO(0x02000,2),
                        ERASEINFO(0x04000,1),
                }
-        }, {
+       }, {
                .mfr_id         = MANUFACTURER_MACRONIX,
                .dev_id         = MX29F004B,
                .name           = "Macronix MX29F004B",
@@ -1218,7 +1221,7 @@ static const struct amd_flash_info jedec_table[] = {
                .regions        = {
                        ERASEINFO(0x40000,16),
                }
-        }, {
+       }, {
                .mfr_id         = MANUFACTURER_SST,
                .dev_id         = SST39LF512,
                .name           = "SST 39LF512",
@@ -1230,7 +1233,7 @@ static const struct amd_flash_info jedec_table[] = {
                .regions        = {
                        ERASEINFO(0x01000,16),
                }
-        }, {
+       }, {
                .mfr_id         = MANUFACTURER_SST,
                .dev_id         = SST39LF010,
                .name           = "SST 39LF010",
@@ -1242,7 +1245,7 @@ static const struct amd_flash_info jedec_table[] = {
                .regions        = {
                        ERASEINFO(0x01000,32),
                }
-        }, {
+       }, {
                .mfr_id         = MANUFACTURER_SST,
                .dev_id         = SST29EE020,
                .name           = "SST 29EE020",
@@ -1276,7 +1279,7 @@ static const struct amd_flash_info jedec_table[] = {
                .regions        = {
                        ERASEINFO(0x01000,64),
                }
-        }, {
+       }, {
                .mfr_id         = MANUFACTURER_SST,
                .dev_id         = SST39LF040,
                .name           = "SST 39LF040",
@@ -1288,7 +1291,7 @@ static const struct amd_flash_info jedec_table[] = {
                .regions        = {
                        ERASEINFO(0x01000,128),
                }
-        }, {
+       }, {
                .mfr_id         = MANUFACTURER_SST,
                .dev_id         = SST39SF010A,
                .name           = "SST 39SF010A",
@@ -1300,7 +1303,7 @@ static const struct amd_flash_info jedec_table[] = {
                .regions        = {
                        ERASEINFO(0x01000,32),
                }
-        }, {
+       }, {
                .mfr_id         = MANUFACTURER_SST,
                .dev_id         = SST39SF020A,
                .name           = "SST 39SF020A",
@@ -1411,6 +1414,18 @@ static const struct amd_flash_info jedec_table[] = {
                        ERASEINFO(0x1000,256),
                        ERASEINFO(0x1000,256)
                }
+       }, {
+               .mfr_id         = MANUFACTURER_SST,
+               .dev_id         = SST36VF3203,
+               .name           = "SST 36VF3203",
+               .devtypes       = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+               .uaddr          = MTD_UADDR_0x0AAA_0x0555,
+               .dev_size       = SIZE_4MiB,
+               .cmd_set        = P_ID_AMD_STD,
+               .nr_regions     = 1,
+               .regions        = {
+                       ERASEINFO(0x10000,64),
+               }
        }, {
                .mfr_id         = MANUFACTURER_ST,
                .dev_id         = M29F800AB,
@@ -1426,7 +1441,7 @@ static const struct amd_flash_info jedec_table[] = {
                        ERASEINFO(0x08000,1),
                        ERASEINFO(0x10000,15),
                }
-       }, {
+       }, {
                .mfr_id         = MANUFACTURER_ST,      /* FIXME - CFI device? */
                .dev_id         = M29W800DT,
                .name           = "ST M29W800DT",
@@ -1456,6 +1471,36 @@ static const struct amd_flash_info jedec_table[] = {
                        ERASEINFO(0x08000,1),
                        ERASEINFO(0x10000,15)
                }
+       },  {
+               .mfr_id         = MANUFACTURER_ST,
+               .dev_id         = M29W400DT,
+               .name           = "ST M29W400DT",
+               .devtypes       = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+               .uaddr          = MTD_UADDR_0x0AAA_0x0555,
+               .dev_size       = SIZE_512KiB,
+               .cmd_set        = P_ID_AMD_STD,
+               .nr_regions     = 4,
+               .regions        = {
+                       ERASEINFO(0x04000,7),
+                       ERASEINFO(0x02000,1),
+                       ERASEINFO(0x08000,2),
+                       ERASEINFO(0x10000,1)
+               }
+       }, {
+               .mfr_id         = MANUFACTURER_ST,
+               .dev_id         = M29W400DB,
+               .name           = "ST M29W400DB",
+               .devtypes       = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+               .uaddr          = MTD_UADDR_0x0AAA_0x0555,
+               .dev_size       = SIZE_512KiB,
+               .cmd_set        = P_ID_AMD_STD,
+               .nr_regions     = 4,
+               .regions        = {
+                       ERASEINFO(0x04000,1),
+                       ERASEINFO(0x02000,2),
+                       ERASEINFO(0x08000,1),
+                       ERASEINFO(0x10000,7)
+               }
        }, {
                .mfr_id         = MANUFACTURER_ST,      /* FIXME - CFI device? */
                .dev_id         = M29W160DT,
@@ -1486,7 +1531,7 @@ static const struct amd_flash_info jedec_table[] = {
                        ERASEINFO(0x08000,1),
                        ERASEINFO(0x10000,31)
                }
-        }, {
+       }, {
                .mfr_id         = MANUFACTURER_ST,
                .dev_id         = M29W040B,
                .name           = "ST M29W040B",
@@ -1498,7 +1543,7 @@ static const struct amd_flash_info jedec_table[] = {
                .regions        = {
                        ERASEINFO(0x10000,8),
                }
-        }, {
+       }, {
                .mfr_id         = MANUFACTURER_ST,
                .dev_id         = M50FW040,
                .name           = "ST M50FW040",
@@ -1510,7 +1555,7 @@ static const struct amd_flash_info jedec_table[] = {
                .regions        = {
                        ERASEINFO(0x10000,8),
                }
-        }, {
+       }, {
                .mfr_id         = MANUFACTURER_ST,
                .dev_id         = M50FW080,
                .name           = "ST M50FW080",
@@ -1522,7 +1567,7 @@ static const struct amd_flash_info jedec_table[] = {
                .regions        = {
                        ERASEINFO(0x10000,16),
                }
-        }, {
+       }, {
                .mfr_id         = MANUFACTURER_ST,
                .dev_id         = M50FW016,
                .name           = "ST M50FW016",
index b44292abd9f7bd68c94228ac11200b3549dc1711..e472a0e9de9d913228cfa47bc8351b9f360c9e12 100644 (file)
@@ -119,7 +119,8 @@ static struct mtd_partition * newpart(char *s,
                char *p;
 
                name = ++s;
-               if ((p = strchr(name, delim)) == 0)
+               p = strchr(name, delim);
+               if (!p)
                {
                        printk(KERN_ERR ERRP "no closing %c found in partition name\n", delim);
                        return NULL;
@@ -159,9 +160,10 @@ static struct mtd_partition * newpart(char *s,
                        return NULL;
                }
                /* more partitions follow, parse them */
-               if ((parts = newpart(s + 1, &s, num_parts,
-                                    this_part + 1, &extra_mem, extra_mem_size)) == 0)
-                 return NULL;
+               parts = newpart(s + 1, &s, num_parts, this_part + 1,
+                               &extra_mem, extra_mem_size);
+               if (!parts)
+                       return NULL;
        }
        else
        {       /* this is the last partition: allocate space for all */
@@ -308,9 +310,6 @@ static int parse_cmdline_partitions(struct mtd_info *master,
        struct cmdline_mtd_partition *part;
        char *mtd_id = master->name;
 
-       if(!cmdline)
-               return -EINVAL;
-
        /* parse command line */
        if (!cmdline_parsed)
                mtdpart_setup_real(cmdline);
@@ -341,7 +340,7 @@ static int parse_cmdline_partitions(struct mtd_info *master,
                        return part->num_parts;
                }
        }
-       return -EINVAL;
+       return 0;
 }
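
For reference, the parser above consumes the standard mtdparts= command-line syntax; an illustrative string (device name and sizes invented) that mtdpart_setup_real()/newpart() would split into three partitions:

	/* Illustrative only: not taken from this commit. */
	static const char example_cmdline[] =
		"mtdparts=physmap-flash.0:256k(boot)ro,64k(env),-(rootfs)";

With the change above, a master device whose name has no matching mtdparts entry now yields 0 parsed partitions instead of -EINVAL, presumably so callers can fall back to other parsers or register the whole device.
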
 
 
index 811d56fd890f6da79135d854ae52fc8d581d167f..35ed1103dbb22096c183eb0a903072ec8ef73e1a 100644 (file)
@@ -77,6 +77,13 @@ config MTD_M25P80
          if you want to specify device partitioning or to use a device which
          doesn't support the JEDEC ID instruction.
 
+config M25PXX_USE_FAST_READ
+       bool "Use FAST_READ OPCode allowing SPI CLK <= 50MHz"
+       depends on MTD_M25P80
+       default y
+       help
+         This option enables FAST_READ access supported by ST M25Pxx.
+
 config MTD_SLRAM
        tristate "Uncached system RAM"
        help
index ad1880c67518eead88feb416b72a598fb86a9ba6..519d942e7940c6167672c2d53829530e3e7d9fcc 100644 (file)
@@ -305,7 +305,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
        }
        list_add(&dev->list, &blkmtd_device_list);
        INFO("mtd%d: [%s] erase_size = %dKiB [%d]", dev->mtd.index,
-                       dev->mtd.name + strlen("blkmtd: "),
+                       dev->mtd.name + strlen("block2mtd: "),
                        dev->mtd.erasesize >> 10, dev->mtd.erasesize);
        return dev;
 
@@ -366,9 +366,9 @@ static inline void kill_final_newline(char *str)
 }
 
 
-#define parse_err(fmt, args...) do {           \
-       ERROR("block2mtd: " fmt "\n", ## args); \
-       return 0;                               \
+#define parse_err(fmt, args...) do {   \
+       ERROR(fmt, ## args);            \
+       return 0;                       \
 } while (0)
 
 #ifndef MODULE
@@ -473,7 +473,7 @@ static void __devexit block2mtd_exit(void)
                block2mtd_sync(&dev->mtd);
                del_mtd_device(&dev->mtd);
                INFO("mtd%d: [%s] removed", dev->mtd.index,
-                               dev->mtd.name + strlen("blkmtd: "));
+                               dev->mtd.name + strlen("block2mtd: "));
                list_del(&dev->list);
                block2mtd_free_device(dev);
        }
index 99fd210feaece71fa8f39658e3fa16d4bdebdcfb..1d324e5c412d547834f05eab936e181f92085c67 100644 (file)
@@ -275,7 +275,7 @@ static __u8 read8 (__u32 offset)
 {
    volatile __u8 *data = (__u8 *) (FLASH_OFFSET + offset);
 #ifdef LART_DEBUG
-   printk (KERN_DEBUG "%s(): 0x%.8x -> 0x%.2x\n",__FUNCTION__,offset,*data);
+   printk (KERN_DEBUG "%s(): 0x%.8x -> 0x%.2x\n", __func__, offset, *data);
 #endif
    return (*data);
 }
@@ -284,7 +284,7 @@ static __u32 read32 (__u32 offset)
 {
    volatile __u32 *data = (__u32 *) (FLASH_OFFSET + offset);
 #ifdef LART_DEBUG
-   printk (KERN_DEBUG "%s(): 0x%.8x -> 0x%.8x\n",__FUNCTION__,offset,*data);
+   printk (KERN_DEBUG "%s(): 0x%.8x -> 0x%.8x\n", __func__, offset, *data);
 #endif
    return (*data);
 }
@@ -294,7 +294,7 @@ static void write32 (__u32 x,__u32 offset)
    volatile __u32 *data = (__u32 *) (FLASH_OFFSET + offset);
    *data = x;
 #ifdef LART_DEBUG
-   printk (KERN_DEBUG "%s(): 0x%.8x <- 0x%.8x\n",__FUNCTION__,offset,*data);
+   printk (KERN_DEBUG "%s(): 0x%.8x <- 0x%.8x\n", __func__, offset, *data);
 #endif
 }
 
@@ -337,7 +337,7 @@ static inline int erase_block (__u32 offset)
    __u32 status;
 
 #ifdef LART_DEBUG
-   printk (KERN_DEBUG "%s(): 0x%.8x\n",__FUNCTION__,offset);
+   printk (KERN_DEBUG "%s(): 0x%.8x\n", __func__, offset);
 #endif
 
    /* erase and confirm */
@@ -371,7 +371,7 @@ static int flash_erase (struct mtd_info *mtd,struct erase_info *instr)
    int i,first;
 
 #ifdef LART_DEBUG
-   printk (KERN_DEBUG "%s(addr = 0x%.8x, len = %d)\n",__FUNCTION__,instr->addr,instr->len);
+   printk (KERN_DEBUG "%s(addr = 0x%.8x, len = %d)\n", __func__, instr->addr, instr->len);
 #endif
 
    /* sanity checks */
@@ -442,7 +442,7 @@ static int flash_erase (struct mtd_info *mtd,struct erase_info *instr)
 static int flash_read (struct mtd_info *mtd,loff_t from,size_t len,size_t *retlen,u_char *buf)
 {
 #ifdef LART_DEBUG
-   printk (KERN_DEBUG "%s(from = 0x%.8x, len = %d)\n",__FUNCTION__,(__u32) from,len);
+   printk (KERN_DEBUG "%s(from = 0x%.8x, len = %d)\n", __func__, (__u32)from, len);
 #endif
 
    /* sanity checks */
@@ -488,7 +488,7 @@ static inline int write_dword (__u32 offset,__u32 x)
    __u32 status;
 
 #ifdef LART_DEBUG
-   printk (KERN_DEBUG "%s(): 0x%.8x <- 0x%.8x\n",__FUNCTION__,offset,x);
+   printk (KERN_DEBUG "%s(): 0x%.8x <- 0x%.8x\n", __func__, offset, x);
 #endif
 
    /* setup writing */
@@ -524,7 +524,7 @@ static int flash_write (struct mtd_info *mtd,loff_t to,size_t len,size_t *retlen
    int i,n;
 
 #ifdef LART_DEBUG
-   printk (KERN_DEBUG "%s(to = 0x%.8x, len = %d)\n",__FUNCTION__,(__u32) to,len);
+   printk (KERN_DEBUG "%s(to = 0x%.8x, len = %d)\n", __func__, (__u32)to, len);
 #endif
 
    *retlen = 0;
index 98df5bcc02f3b42ea6f3aa5417e3437bd32e9d21..25efd331ef28ce35b4a48f0463f594e221d94aa7 100644 (file)
@@ -33,7 +33,7 @@
 /* Flash opcodes. */
 #define        OPCODE_WREN             0x06    /* Write enable */
 #define        OPCODE_RDSR             0x05    /* Read status register */
-#define        OPCODE_READ             0x03    /* Read data bytes (low frequency) */
+#define        OPCODE_NORM_READ        0x03    /* Read data bytes (low frequency) */
 #define        OPCODE_FAST_READ        0x0b    /* Read data bytes (high frequency) */
 #define        OPCODE_PP               0x02    /* Page program (up to 256 bytes) */
 #define        OPCODE_BE_4K            0x20    /* Erase 4KiB block */
 
 /* Define max times to check status register before we give up. */
 #define        MAX_READY_WAIT_COUNT    100000
+#define        CMD_SIZE                4
 
+#ifdef CONFIG_M25PXX_USE_FAST_READ
+#define OPCODE_READ    OPCODE_FAST_READ
+#define FAST_READ_DUMMY_BYTE 1
+#else
+#define OPCODE_READ    OPCODE_NORM_READ
+#define FAST_READ_DUMMY_BYTE 0
+#endif
 
 #ifdef CONFIG_MTD_PARTITIONS
 #define        mtd_has_partitions()    (1)
@@ -68,7 +76,7 @@ struct m25p {
        struct mtd_info         mtd;
        unsigned                partitioned:1;
        u8                      erase_opcode;
-       u8                      command[4];
+       u8                      command[CMD_SIZE + FAST_READ_DUMMY_BYTE];
 };
 
 static inline struct m25p *mtd_to_m25p(struct mtd_info *mtd)
@@ -151,7 +159,7 @@ static int wait_till_ready(struct m25p *flash)
 static int erase_sector(struct m25p *flash, u32 offset)
 {
        DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %dKiB at 0x%08x\n",
-                       flash->spi->dev.bus_id, __FUNCTION__,
+                       flash->spi->dev.bus_id, __func__,
                        flash->mtd.erasesize / 1024, offset);
 
        /* Wait until finished previous write command. */
@@ -167,7 +175,7 @@ static int erase_sector(struct m25p *flash, u32 offset)
        flash->command[2] = offset >> 8;
        flash->command[3] = offset;
 
-       spi_write(flash->spi, flash->command, sizeof(flash->command));
+       spi_write(flash->spi, flash->command, CMD_SIZE);
 
        return 0;
 }
@@ -188,7 +196,7 @@ static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr)
        u32 addr,len;
 
        DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %d\n",
-                       flash->spi->dev.bus_id, __FUNCTION__, "at",
+                       flash->spi->dev.bus_id, __func__, "at",
                        (u32)instr->addr, instr->len);
 
        /* sanity checks */
@@ -240,7 +248,7 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
        struct spi_message m;
 
        DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
-                       flash->spi->dev.bus_id, __FUNCTION__, "from",
+                       flash->spi->dev.bus_id, __func__, "from",
                        (u32)from, len);
 
        /* sanity checks */
@@ -253,8 +261,12 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
        spi_message_init(&m);
        memset(t, 0, (sizeof t));
 
+       /* NOTE:
+        * OPCODE_FAST_READ (if available) is faster, but it
+        * requires one extra dummy byte after the address.
+        */
        t[0].tx_buf = flash->command;
-       t[0].len = sizeof(flash->command);
+       t[0].len = CMD_SIZE + FAST_READ_DUMMY_BYTE;
        spi_message_add_tail(&t[0], &m);
 
        t[1].rx_buf = buf;
@@ -287,7 +299,7 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
 
        spi_sync(flash->spi, &m);
 
-       *retlen = m.actual_length - sizeof(flash->command);
+       *retlen = m.actual_length - CMD_SIZE - FAST_READ_DUMMY_BYTE;
 
        mutex_unlock(&flash->lock);
 
@@ -308,7 +320,7 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
        struct spi_message m;
 
        DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
-                       flash->spi->dev.bus_id, __FUNCTION__, "to",
+                       flash->spi->dev.bus_id, __func__, "to",
                        (u32)to, len);
 
        if (retlen)
@@ -325,7 +337,7 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
        memset(t, 0, (sizeof t));
 
        t[0].tx_buf = flash->command;
-       t[0].len = sizeof(flash->command);
+       t[0].len = CMD_SIZE;
        spi_message_add_tail(&t[0], &m);
 
        t[1].tx_buf = buf;
@@ -354,7 +366,7 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
 
                spi_sync(flash->spi, &m);
 
-               *retlen = m.actual_length - sizeof(flash->command);
+               *retlen = m.actual_length - CMD_SIZE;
        } else {
                u32 i;
 
@@ -364,7 +376,7 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
                t[1].len = page_size;
                spi_sync(flash->spi, &m);
 
-               *retlen = m.actual_length - sizeof(flash->command);
+               *retlen = m.actual_length - CMD_SIZE;
 
                /* write everything in PAGESIZE chunks */
                for (i = page_size; i < len; i += page_size) {
@@ -387,8 +399,7 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
                        spi_sync(flash->spi, &m);
 
                        if (retlen)
-                               *retlen += m.actual_length
-                                       - sizeof(flash->command);
+                               *retlen += m.actual_length - CMD_SIZE;
                }
        }
 
@@ -435,6 +446,7 @@ static struct flash_info __devinitdata m25p_data [] = {
        { "at25fs040",  0x1f6604, 64 * 1024, 8, SECT_4K, },
 
        { "at25df041a", 0x1f4401, 64 * 1024, 8, SECT_4K, },
+       { "at25df641",  0x1f4800, 64 * 1024, 128, SECT_4K, },
 
        { "at26f004",   0x1f0400, 64 * 1024, 8, SECT_4K, },
        { "at26df081a", 0x1f4501, 64 * 1024, 16, SECT_4K, },
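
With CONFIG_M25PXX_USE_FAST_READ set, the read path above sends the four command/address bytes plus one dummy byte before data is clocked in. A small sketch of that header, mirroring the CMD_SIZE/FAST_READ_DUMMY_BYTE arithmetic in the diff (function name and fixed-size buffer are illustrative):

	/* Illustrative: build the FAST_READ header the way the driver sizes it. */
	static void build_fast_read_cmd(u8 cmd[5], u32 from)
	{
		cmd[0] = 0x0b;		/* OPCODE_FAST_READ */
		cmd[1] = from >> 16;	/* 24-bit address, most significant byte first */
		cmd[2] = from >> 8;
		cmd[3] = from;
		cmd[4] = 0;		/* dummy byte clocked out before the data phase */
	}
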
index e427c82d5f4cb046ff4234e4ce8919062569fda3..bf485ff49457303cc3307655d266085ff23ea098 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/init.h>
 #include <linux/mtd/compatmac.h>
 #include <linux/mtd/mtd.h>
+#include <linux/mtd/mtdram.h>
 
 static unsigned long total_size = CONFIG_MTDRAM_TOTAL_SIZE;
 static unsigned long erase_size = CONFIG_MTDRAM_ERASE_SIZE;
index 180298b92a7af9a80c229a9969a30852b233fb90..5f960182da957eaa8c4a12295fe93b66677e4287 100644 (file)
@@ -282,7 +282,7 @@ static int phram_setup(const char *val, struct kernel_param *kp)
 }
 
 module_param_call(phram, phram_setup, NULL, NULL, 000);
-MODULE_PARM_DESC(phram,"Memory region to map. \"map=<name>,<start>,<length>\"");
+MODULE_PARM_DESC(phram, "Memory region to map. \"phram=<name>,<start>,<length>\"");
 
 
 static int __init init_phram(void)
index c815d0f38577281f3b033570d2ac1607a361e15f..4a79b187b568870ad207eaa94e905aec5133e390 100644 (file)
@@ -136,8 +136,6 @@ typedef struct partition_t {
 #endif
 } partition_t;
 
-void ftl_freepart(partition_t *part);
-
 /* Partition state flags */
 #define FTL_FORMATTED  0x01
 
@@ -1014,7 +1012,7 @@ static int ftl_writesect(struct mtd_blktrans_dev *dev,
 
 /*====================================================================*/
 
-void ftl_freepart(partition_t *part)
+static void ftl_freepart(partition_t *part)
 {
        vfree(part->VirtualBlockMap);
        part->VirtualBlockMap = NULL;
@@ -1069,7 +1067,7 @@ static void ftl_remove_dev(struct mtd_blktrans_dev *dev)
        kfree(dev);
 }
 
-struct mtd_blktrans_ops ftl_tr = {
+static struct mtd_blktrans_ops ftl_tr = {
        .name           = "ftl",
        .major          = FTL_MAJOR,
        .part_bits      = PART_BITS,
index b8917beeb65099262da1e316282d663e43933e39..c551d2f0779c2e8c141c3a18c3f5bae05686dd84 100644 (file)
 
 char inftlmountrev[]="$Revision: 1.18 $";
 
-extern int inftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
-                         size_t *retlen, uint8_t *buf);
-extern int inftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
-                          size_t *retlen, uint8_t *buf);
-
 /*
  * find_boot_record: Find the INFTL Media Header and its Spare copy which
  *     contains the various device information of the INFTL partition and
index 12c253664eb223e524d31b2fb6bc800f9286cd44..1bd69aa9e22acd2905a28e0e5c76d27d0bd00c94 100644 (file)
@@ -21,6 +21,9 @@ config MTD_PHYSMAP
          particular board as well as the bus width, either statically
          with config options or at run-time.
 
+         To compile this driver as a module, choose M here: the
+         module will be called physmap.
+
 config MTD_PHYSMAP_START
        hex "Physical start address of flash mapping"
        depends on MTD_PHYSMAP
index fc3b2672d1e2a58d18bdd248af1f8362f8b76321..1f492062f8ca7fb18f62f7fbf35ee3ce54a6e758 100644 (file)
@@ -137,7 +137,7 @@ static int bast_flash_probe(struct platform_device *pdev)
        if (info->map.size > AREA_MAXSIZE)
                info->map.size = AREA_MAXSIZE;
 
-       pr_debug("%s: area %08lx, size %ld\n", __FUNCTION__,
+       pr_debug("%s: area %08lx, size %ld\n", __func__,
                 info->map.phys, info->map.size);
 
        info->area = request_mem_region(res->start, info->map.size,
@@ -149,7 +149,7 @@ static int bast_flash_probe(struct platform_device *pdev)
        }
 
        info->map.virt = ioremap(res->start, info->map.size);
-       pr_debug("%s: virt at %08x\n", __FUNCTION__, (int)info->map.virt);
+       pr_debug("%s: virt at %08x\n", __func__, (int)info->map.virt);
 
        if (info->map.virt == 0) {
                printk(KERN_ERR PFX "failed to ioremap() region\n");
@@ -223,3 +223,4 @@ module_exit(bast_flash_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
 MODULE_DESCRIPTION("BAST MTD Map driver");
+MODULE_ALIAS("platform:bast-nor");
index 688ef495888a6ba1efeef5ccff6f9a9646c4a102..59d8fb49270aa93a4d477d2fee8b7cb21ba9438a 100644 (file)
@@ -28,6 +28,9 @@
 
 #define ROM_PROBE_STEP_SIZE (64*1024)
 
+#define DEV_CK804 1
+#define DEV_MCP55 2
+
 struct ck804xrom_window {
        void __iomem *virt;
        unsigned long phys;
@@ -45,8 +48,9 @@ struct ck804xrom_map_info {
        char map_name[sizeof(MOD_NAME) + 2 + ADDRESS_NAME_LEN];
 };
 
-
-/* The 2 bits controlling the window size are often set to allow reading
+/*
+ * The following applies to ck804 only:
+ * The 2 bits controlling the window size are often set to allow reading
  * the BIOS, but too small to allow writing, since the lock registers are
  * 4MiB lower in the address space than the data.
  *
@@ -58,10 +62,17 @@ struct ck804xrom_map_info {
  * If only the 7 Bit is set, it is a 4MiB window.  Otherwise, a
  * 64KiB window.
  *
+ * The following applies to mcp55 only:
+ * The 15 bits controlling the window size are distributed as follows: 
+ * byte @0x88: bit 0..7
+ * byte @0x8c: bit 8..15
+ * word @0x90: bit 16..30
+ * If all bits are enabled, we have a 16? MiB window
+ * Please set win_size_bits to 0x7fffffff if you actually want to do something
  */
 static uint win_size_bits = 0;
 module_param(win_size_bits, uint, 0);
-MODULE_PARM_DESC(win_size_bits, "ROM window size bits override for 0x88 byte, normally set by BIOS.");
+MODULE_PARM_DESC(win_size_bits, "ROM window size bits override, normally set by BIOS.");
 
 static struct ck804xrom_window ck804xrom_window = {
        .maps = LIST_HEAD_INIT(ck804xrom_window.maps),
@@ -102,10 +113,11 @@ static void ck804xrom_cleanup(struct ck804xrom_window *window)
 
 
 static int __devinit ck804xrom_init_one (struct pci_dev *pdev,
-       const struct pci_device_id *ent)
+                                        const struct pci_device_id *ent)
 {
        static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
        u8 byte;
+       u16 word;
        struct ck804xrom_window *window = &ck804xrom_window;
        struct ck804xrom_map_info *map = NULL;
        unsigned long map_top;
@@ -113,26 +125,42 @@ static int __devinit ck804xrom_init_one (struct pci_dev *pdev,
        /* Remember the pci dev I find the window in */
        window->pdev = pci_dev_get(pdev);
 
-       /* Enable the selected rom window.  This is often incorrectly
-        * set up by the BIOS, and the 4MiB offset for the lock registers
-        * requires the full 5MiB of window space.
-        *
-        * This 'write, then read' approach leaves the bits for
-        * other uses of the hardware info.
-        */
-        pci_read_config_byte(pdev, 0x88, &byte);
-        pci_write_config_byte(pdev, 0x88, byte | win_size_bits );
-
-
-       /* Assume the rom window is properly setup, and find it's size */
-       pci_read_config_byte(pdev, 0x88, &byte);
-
-       if ((byte & ((1<<7)|(1<<6))) == ((1<<7)|(1<<6)))
-               window->phys = 0xffb00000; /* 5MiB */
-       else if ((byte & (1<<7)) == (1<<7))
-               window->phys = 0xffc00000; /* 4MiB */
-       else
-               window->phys = 0xffff0000; /* 64KiB */
+       switch (ent->driver_data) {
+       case DEV_CK804:
+               /* Enable the selected rom window.  This is often incorrectly
+                * set up by the BIOS, and the 4MiB offset for the lock registers
+                * requires the full 5MiB of window space.
+                *
+                * This 'write, then read' approach leaves the bits for
+                * other uses of the hardware info.
+                */
+               pci_read_config_byte(pdev, 0x88, &byte);
+               pci_write_config_byte(pdev, 0x88, byte | win_size_bits );
+
+               /* Assume the rom window is properly setup, and find it's size */
+               pci_read_config_byte(pdev, 0x88, &byte);
+
+               if ((byte & ((1<<7)|(1<<6))) == ((1<<7)|(1<<6)))
+                       window->phys = 0xffb00000; /* 5MiB */
+               else if ((byte & (1<<7)) == (1<<7))
+                       window->phys = 0xffc00000; /* 4MiB */
+               else
+                       window->phys = 0xffff0000; /* 64KiB */
+               break;
+
+       case DEV_MCP55:
+               pci_read_config_byte(pdev, 0x88, &byte);
+               pci_write_config_byte(pdev, 0x88, byte | (win_size_bits & 0xff));
+
+               pci_read_config_byte(pdev, 0x8c, &byte);
+               pci_write_config_byte(pdev, 0x8c, byte | ((win_size_bits & 0xff00) >> 8));
+
+               pci_read_config_word(pdev, 0x90, &word);
+               pci_write_config_word(pdev, 0x90, word | ((win_size_bits & 0x7fff0000) >> 16));
+
+               window->phys = 0xff000000; /* 16MiB, hardcoded for now */
+               break;
+       }
 
        window->size = 0xffffffffUL - window->phys + 1UL;
 
@@ -303,8 +331,15 @@ static void __devexit ck804xrom_remove_one (struct pci_dev *pdev)
 }
 
 static struct pci_device_id ck804xrom_pci_tbl[] = {
-       { PCI_VENDOR_ID_NVIDIA, 0x0051,
-        PCI_ANY_ID, PCI_ANY_ID, }, /* nvidia ck804 */
+       { PCI_VENDOR_ID_NVIDIA, 0x0051, PCI_ANY_ID, PCI_ANY_ID, DEV_CK804 },
+       { PCI_VENDOR_ID_NVIDIA, 0x0360, PCI_ANY_ID, PCI_ANY_ID, DEV_MCP55 },
+       { PCI_VENDOR_ID_NVIDIA, 0x0361, PCI_ANY_ID, PCI_ANY_ID, DEV_MCP55 },
+       { PCI_VENDOR_ID_NVIDIA, 0x0362, PCI_ANY_ID, PCI_ANY_ID, DEV_MCP55 },
+       { PCI_VENDOR_ID_NVIDIA, 0x0363, PCI_ANY_ID, PCI_ANY_ID, DEV_MCP55 },
+       { PCI_VENDOR_ID_NVIDIA, 0x0364, PCI_ANY_ID, PCI_ANY_ID, DEV_MCP55 },
+       { PCI_VENDOR_ID_NVIDIA, 0x0365, PCI_ANY_ID, PCI_ANY_ID, DEV_MCP55 },
+       { PCI_VENDOR_ID_NVIDIA, 0x0366, PCI_ANY_ID, PCI_ANY_ID, DEV_MCP55 },
+       { PCI_VENDOR_ID_NVIDIA, 0x0367, PCI_ANY_ID, PCI_ANY_ID, DEV_MCP55 },
        { 0, }
 };
 
@@ -332,7 +367,7 @@ static int __init init_ck804xrom(void)
                        break;
        }
        if (pdev) {
-               retVal = ck804xrom_init_one(pdev, &ck804xrom_pci_tbl[0]);
+               retVal = ck804xrom_init_one(pdev, id);
                pci_dev_put(pdev);
                return retVal;
        }
index 6946d802e6f67e1c03a577e01fa345d2b794715b..325c8880c4379393b4266c0849ee0bfc85488344 100644 (file)
@@ -190,6 +190,7 @@ static struct platform_driver armflash_driver = {
        .remove         = armflash_remove,
        .driver         = {
                .name   = "armflash",
+               .owner  = THIS_MODULE,
        },
 };
 
@@ -209,3 +210,4 @@ module_exit(armflash_exit);
 MODULE_AUTHOR("ARM Ltd");
 MODULE_DESCRIPTION("ARM Integrator CFI map driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:armflash");
index c26488a1793abd769ede3be3083afb5562c98925..c8396b8574c4624debf97d0846ab0a88dd07867f 100644 (file)
@@ -253,6 +253,7 @@ static struct platform_driver ixp2000_flash_driver = {
        .remove         = ixp2000_flash_remove,
        .driver         = {
                .name   = "IXP2000-Flash",
+               .owner  = THIS_MODULE,
        },
 };
 
@@ -270,4 +271,4 @@ module_init(ixp2000_flash_init);
 module_exit(ixp2000_flash_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>");
-
+MODULE_ALIAS("platform:IXP2000-Flash");
index 7a828e3e6446d7f9254f083d3b02bb2e9e02e9e0..01f19a4714b5b7383506d929208b0637a6cc7c0f 100644 (file)
@@ -275,6 +275,7 @@ static struct platform_driver ixp4xx_flash_driver = {
        .remove         = ixp4xx_flash_remove,
        .driver         = {
                .name   = "IXP4XX-Flash",
+               .owner  = THIS_MODULE,
        },
 };
 
@@ -295,3 +296,4 @@ module_exit(ixp4xx_flash_exit);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("MTD map driver for Intel IXP4xx systems");
 MODULE_AUTHOR("Deepak Saxena");
+MODULE_ALIAS("platform:IXP4XX-Flash");
index e8d9ae535673d68d04b547f029dbdadefd6b3e58..240b0e2d095d6bade93555986d0cd09cc163aa80 100644 (file)
@@ -70,7 +70,7 @@ static void omap_set_vpp(struct map_info *map, int enable)
        }
 }
 
-static int __devinit omapflash_probe(struct platform_device *pdev)
+static int __init omapflash_probe(struct platform_device *pdev)
 {
        int err;
        struct omapflash_info *info;
@@ -130,7 +130,7 @@ out_free_info:
        return err;
 }
 
-static int __devexit omapflash_remove(struct platform_device *pdev)
+static int __exit omapflash_remove(struct platform_device *pdev)
 {
        struct omapflash_info *info = platform_get_drvdata(pdev);
 
@@ -152,16 +152,16 @@ static int __devexit omapflash_remove(struct platform_device *pdev)
 }
 
 static struct platform_driver omapflash_driver = {
-       .probe  = omapflash_probe,
-       .remove = __devexit_p(omapflash_remove),
+       .remove = __exit_p(omapflash_remove),
        .driver = {
                .name   = "omapflash",
+               .owner  = THIS_MODULE,
        },
 };
 
 static int __init omapflash_init(void)
 {
-       return platform_driver_register(&omapflash_driver);
+       return platform_driver_probe(&omapflash_driver, omapflash_probe);
 }
 
 static void __exit omapflash_exit(void)
@@ -174,4 +174,4 @@ module_exit(omapflash_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("MTD NOR map driver for TI OMAP boards");
-
+MODULE_ALIAS("platform:omapflash");
index eaeb56a4070acadbe9633fddad14bc8b69a0f829..1912d968718b38e81752b8c5df587b9081ef92f7 100644 (file)
@@ -33,7 +33,7 @@ MODULE_PARM_DESC(debug, "Set Debug Level 0=quiet, 5=noisy");
 #undef DEBUG
 #define DEBUG(n, format, arg...) \
        if (n <= debug) {        \
-               printk(KERN_DEBUG __FILE__ ":%s(): " format "\n", __FUNCTION__ , ## arg); \
+               printk(KERN_DEBUG __FILE__ ":%s(): " format "\n", __func__ , ## arg); \
        }
 
 #else
index bc4649a17b9d022d44bab69ebb0de575edb9bdd3..183255fcfdcbd288dc38e697b6a6ed8ff3be3321 100644 (file)
@@ -242,6 +242,7 @@ static struct platform_driver physmap_flash_driver = {
        .shutdown       = physmap_flash_shutdown,
        .driver         = {
                .name   = "physmap-flash",
+               .owner  = THIS_MODULE,
        },
 };
 
@@ -319,3 +320,10 @@ module_exit(physmap_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
 MODULE_DESCRIPTION("Generic configurable MTD map driver");
+
+/* legacy platform drivers can't hotplug or coldplug */
+#ifndef PHYSMAP_COMPAT
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:physmap-flash");
+#endif
+
index 894c0b27128982fc5d52a6fc79a7b32efecc320f..f0b10ca0502920240b9fee8e3ff728df1808d451 100644 (file)
@@ -47,6 +47,7 @@ struct platram_info {
        struct mtd_info         *mtd;
        struct map_info          map;
        struct mtd_partition    *partitions;
+       bool                    free_partitions;
        struct resource         *area;
        struct platdata_mtd_ram *pdata;
 };
@@ -98,7 +99,8 @@ static int platram_remove(struct platform_device *pdev)
 #ifdef CONFIG_MTD_PARTITIONS
                if (info->partitions) {
                        del_mtd_partitions(info->mtd);
-                       kfree(info->partitions);
+                       if (info->free_partitions)
+                               kfree(info->partitions);
                }
 #endif
                del_mtd_device(info->mtd);
@@ -176,7 +178,8 @@ static int platram_probe(struct platform_device *pdev)
 
        info->map.phys = res->start;
        info->map.size = (res->end - res->start) + 1;
-       info->map.name = pdata->mapname != NULL ? pdata->mapname : (char *)pdev->name;
+       info->map.name = pdata->mapname != NULL ?
+                       (char *)pdata->mapname : (char *)pdev->name;
        info->map.bankwidth = pdata->bankwidth;
 
        /* register our usage of the memory area */
@@ -203,9 +206,19 @@ static int platram_probe(struct platform_device *pdev)
 
        dev_dbg(&pdev->dev, "initialised map, probing for mtd\n");
 
-       /* probe for the right mtd map driver */
+       /* probe for the right mtd map driver
+        * supplied by the platform_data struct */
+
+       if (pdata->map_probes != 0) {
+               const char **map_probes = pdata->map_probes;
+
+               for ( ; !info->mtd && *map_probes; map_probes++)
+                       info->mtd = do_map_probe(*map_probes , &info->map);
+       }
+       /* fallback to map_ram */
+       else
+               info->mtd = do_map_probe("map_ram", &info->map);
 
-       info->mtd = do_map_probe("map_ram" , &info->map);
        if (info->mtd == NULL) {
                dev_err(&pdev->dev, "failed to probe for map_ram\n");
                err = -ENOMEM;
@@ -220,19 +233,21 @@ static int platram_probe(struct platform_device *pdev)
         * to add this device whole */
 
 #ifdef CONFIG_MTD_PARTITIONS
-       if (pdata->nr_partitions > 0) {
-               const char **probes = { NULL };
-
-               if (pdata->probes)
-                       probes = (const char **)pdata->probes;
-
-               err = parse_mtd_partitions(info->mtd, probes,
+       if (!pdata->nr_partitions) {
+               /* try to probe using the supplied probe type */
+               if (pdata->probes) {
+                       err = parse_mtd_partitions(info->mtd, pdata->probes,
                                           &info->partitions, 0);
-               if (err > 0) {
-                       err = add_mtd_partitions(info->mtd, info->partitions,
-                                                err);
+                       info->free_partitions = 1;
+                       if (err > 0)
+                               err = add_mtd_partitions(info->mtd,
+                                       info->partitions, err);
                }
        }
+       /* use the static mapping */
+       else
+               err = add_mtd_partitions(info->mtd, pdata->partitions,
+                               pdata->nr_partitions);
 #endif /* CONFIG_MTD_PARTITIONS */
 
        if (add_mtd_device(info->mtd)) {
@@ -240,7 +255,9 @@ static int platram_probe(struct platform_device *pdev)
                err = -ENOMEM;
        }
 
-       dev_info(&pdev->dev, "registered mtd device\n");
+       if (!err)
+               dev_info(&pdev->dev, "registered mtd device\n");
+
        return err;
 
  exit_free:
@@ -251,6 +268,9 @@ static int platram_probe(struct platform_device *pdev)
 
 /* device driver info */
 
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:mtd-ram");
+
 static struct platform_driver platram_driver = {
        .probe          = platram_probe,
        .remove         = platram_remove,
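
The probe now honours a probe-type list supplied through the platform data; an illustrative initializer using only the fields referenced in this diff (values and the extra identifiers are made up):

	static const char *sram_probes[] = { "map_ram", NULL };

	static struct platdata_mtd_ram sram_pdata = {
		.mapname	= "onboard-sram",
		.bankwidth	= 2,
		.map_probes	= sram_probes,	/* walked by platram_probe() above */
	};
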
index 02bde8c982ec68978de22a6a424286019189d4d9..f43ba2815cbbda28a27676d054058dc56e2b0369 100644 (file)
@@ -46,7 +46,7 @@ static struct mtd_partition **msp_parts;
 static struct map_info *msp_maps;
 static int fcnt;
 
-#define DEBUG_MARKER printk(KERN_NOTICE "%s[%d]\n",__FUNCTION__,__LINE__)
+#define DEBUG_MARKER printk(KERN_NOTICE "%s[%d]\n", __func__, __LINE__)
 
 int __init init_msp_flash(void)
 {
index f904e6bd02e05856f220bea1fe89afb0566b60c6..c7d5a52a2d559e997c62ba629bdf38f28f72e2c1 100644 (file)
@@ -456,6 +456,7 @@ static struct platform_driver sa1100_mtd_driver = {
        .shutdown       = sa1100_mtd_shutdown,
        .driver         = {
                .name   = "flash",
+               .owner  = THIS_MODULE,
        },
 };
 
@@ -475,3 +476,4 @@ module_exit(sa1100_mtd_exit);
 MODULE_AUTHOR("Nicolas Pitre");
 MODULE_DESCRIPTION("SA1100 CFI map driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:flash");
index 12fe53c0d2fc124eeb6a479f7e94713b8c773507..917dc778f24e12f2c758473c38266804aa517d79 100644 (file)
@@ -92,7 +92,7 @@ int __init init_sharpsl(void)
        parts = sharpsl_partitions;
        nb_parts = ARRAY_SIZE(sharpsl_partitions);
 
-       printk(KERN_NOTICE "Using %s partision definition\n", part_type);
+       printk(KERN_NOTICE "Using %s partition definition\n", part_type);
        add_mtd_partitions(mymtd, parts, nb_parts);
 
        return 0;
index 37e4ded9b60033c2b8dce6005953d77687c3d41c..5217340573140f9f7994e88b4ee0d93e74e013fe 100644 (file)
@@ -124,7 +124,7 @@ int __init init_tqm_mtd(void)
        //request maximum flash size address space
        start_scan_addr = ioremap(flash_addr, flash_size);
        if (!start_scan_addr) {
-               printk(KERN_WARNING "%s:Failed to ioremap address:0x%x\n", __FUNCTION__, flash_addr);
+               printk(KERN_WARNING "%s:Failed to ioremap address:0x%x\n", __func__, flash_addr);
                return -EIO;
        }
 
@@ -132,7 +132,7 @@ int __init init_tqm_mtd(void)
                if(mtd_size >= flash_size)
                        break;
 
-               printk(KERN_INFO "%s: chip probing count %d\n", __FUNCTION__, idx);
+               printk(KERN_INFO "%s: chip probing count %d\n", __func__, idx);
 
                map_banks[idx] = kzalloc(sizeof(struct map_info), GFP_KERNEL);
                if(map_banks[idx] == NULL) {
@@ -178,7 +178,7 @@ int __init init_tqm_mtd(void)
                        mtd_size += mtd_banks[idx]->size;
                        num_banks++;
 
-                       printk(KERN_INFO "%s: bank%d, name:%s, size:%dbytes \n", __FUNCTION__, num_banks,
+                       printk(KERN_INFO "%s: bank%d, name:%s, size:%dbytes \n", __func__, num_banks,
                        mtd_banks[idx]->name, mtd_banks[idx]->size);
                }
        }
index d3cf05012b46a0a9df82888cb39ccd1bfd07f642..5a680e1e61f14dbc2ba684482792f2f40aabbee9 100644 (file)
@@ -35,7 +35,7 @@
 
 #define OOPS_PAGE_SIZE 4096
 
-struct mtdoops_context {
+static struct mtdoops_context {
        int mtd_index;
        struct work_struct work_erase;
        struct work_struct work_write;
index 959fb86cda01e113aca653ef91c646b26f954aa3..5076faf9ca66c54ae300d960f5befccbfc01fb55 100644 (file)
@@ -278,6 +278,54 @@ config MTD_NAND_AT91
        help
          Enables support for NAND Flash / Smart Media Card interface
          on Atmel AT91 processors.
+choice
+       prompt "ECC management for NAND Flash / SmartMedia on AT91"
+       depends on MTD_NAND_AT91
+
+config MTD_NAND_AT91_ECC_HW
+       bool "Hardware ECC"
+       depends on ARCH_AT91SAM9263 || ARCH_AT91SAM9260
+       help
+         Uses hardware ECC provided by the at91sam9260/at91sam9263 chip
+         instead of software ECC.
+         The hardware ECC controller is capable of single bit error
+         correction and 2-bit random detection per page.
+
+         NB : hardware and software ECC schemes are incompatible.
+         If you switch from one to another, you'll have to erase your
+         mtd partition.
+
+         If unsure, say Y
+
+config MTD_NAND_AT91_ECC_SOFT
+       bool "Software ECC"
+       help
+         Uses software ECC.
+
+         NB : hardware and software ECC schemes are incompatible.
+         If you switch from one to another, you'll have to erase your
+         mtd partition.
+
+config MTD_NAND_AT91_ECC_NONE
+       bool "No ECC (testing only, DANGEROUS)"
+       depends on DEBUG_KERNEL
+       help
+         No ECC will be used.
+         This is not a good idea and should be reserved for testing
+         purposes only.
+
+         If unsure, say N
+
+endchoice
+
+config MTD_NAND_PXA3xx
+       bool "Support for NAND flash devices on PXA3xx"
+       depends on MTD_NAND && PXA3xx
+       help
+         This enables the driver for the NAND flash device found on
+         PXA3xx processors
 
 config MTD_NAND_CM_X270
        tristate "Support for NAND Flash on CM-X270 modules"
@@ -330,4 +378,12 @@ config MTD_NAND_FSL_ELBC
          Enabling this option will enable you to use this to control
          external NAND devices.
 
+config MTD_NAND_FSL_UPM
+       tristate "Support for NAND on Freescale UPM"
+       depends on MTD_NAND && OF_GPIO && (PPC_83xx || PPC_85xx)
+       select FSL_LBC
+       help
+         Enables support for NAND Flash chips wired onto Freescale PowerPC
+         processor localbus with User-Programmable Machine support.
+
 endif # MTD_NAND
index 80d575eeee96663ba0ea51a7256949e18179a271..a6e74a46992a7bde250a5ca7a5d9b20eef96ea00 100644 (file)
@@ -27,10 +27,12 @@ obj-$(CONFIG_MTD_NAND_NDFC)         += ndfc.o
 obj-$(CONFIG_MTD_NAND_AT91)            += at91_nand.o
 obj-$(CONFIG_MTD_NAND_CM_X270)         += cmx270_nand.o
 obj-$(CONFIG_MTD_NAND_BASLER_EXCITE)   += excite_nandflash.o
+obj-$(CONFIG_MTD_NAND_PXA3xx)          += pxa3xx_nand.o
 obj-$(CONFIG_MTD_NAND_PLATFORM)                += plat_nand.o
 obj-$(CONFIG_MTD_ALAUDA)               += alauda.o
 obj-$(CONFIG_MTD_NAND_PASEMI)          += pasemi_nand.o
 obj-$(CONFIG_MTD_NAND_ORION)           += orion_nand.o
 obj-$(CONFIG_MTD_NAND_FSL_ELBC)                += fsl_elbc_nand.o
+obj-$(CONFIG_MTD_NAND_FSL_UPM)         += fsl_upm.o
 
 nand-objs := nand_base.o nand_bbt.o
index c9fb2acf4056b3baab4ccb1ab1e980b2af08d026..414ceaecdb3a2058bbba13def192553ed40ce29f 100644 (file)
@@ -9,6 +9,15 @@
  *  Derived from drivers/mtd/spia.c
  *      Copyright (C) 2000 Steven J. Hill (sjhill@cotw.com)
  *
+ *
+ *  Add Hardware ECC support for AT91SAM9260 / AT91SAM9263
+ *     Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright (C) 2007
+ *
+ *     Derived from Das U-Boot source code
+ *                     (u-boot-1.1.5/board/atmel/at91sam9263ek/nand.c)
+ *     (C) Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas
+ *
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
 #include <asm/arch/board.h>
 #include <asm/arch/gpio.h>
 
+#ifdef CONFIG_MTD_NAND_AT91_ECC_HW
+#define hard_ecc       1
+#else
+#define hard_ecc       0
+#endif
+
+#ifdef CONFIG_MTD_NAND_AT91_ECC_NONE
+#define no_ecc         1
+#else
+#define no_ecc         0
+#endif
+
+/* Register access macros */
+#define ecc_readl(add, reg)                            \
+       __raw_readl(add + AT91_ECC_##reg)
+#define ecc_writel(add, reg, value)                    \
+       __raw_writel((value), add + AT91_ECC_##reg)
+
+#include <asm/arch/at91_ecc.h> /* AT91SAM9260/3 ECC registers */
+
+/* oob layout for large page size
+ * bad block info is on bytes 0 and 1
+ * the bytes have to be consecutive to avoid
+ * several NAND_CMD_RNDOUT during read
+ */
+static struct nand_ecclayout at91_oobinfo_large = {
+       .eccbytes = 4,
+       .eccpos = {60, 61, 62, 63},
+       .oobfree = {
+               {2, 58}
+       },
+};
+
+/* oob layout for small page size
+ * bad block info is on bytes 4 and 5
+ * the bytes have to be consecutive to avoid
+ * several NAND_CMD_RNDOUT during read
+ */
+static struct nand_ecclayout at91_oobinfo_small = {
+       .eccbytes = 4,
+       .eccpos = {0, 1, 2, 3},
+       .oobfree = {
+               {6, 10}
+       },
+};
+
 struct at91_nand_host {
        struct nand_chip        nand_chip;
        struct mtd_info         mtd;
        void __iomem            *io_base;
        struct at91_nand_data   *board;
+       struct device           *dev;
+       void __iomem            *ecc;
 };
 
 /*
@@ -44,6 +101,12 @@ static void at91_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
        struct nand_chip *nand_chip = mtd->priv;
        struct at91_nand_host *host = nand_chip->priv;
 
+       if (host->board->enable_pin && (ctrl & NAND_CTRL_CHANGE)) {
+               if (ctrl & NAND_NCE)
+                       at91_set_gpio_value(host->board->enable_pin, 0);
+               else
+                       at91_set_gpio_value(host->board->enable_pin, 1);
+       }
        if (cmd == NAND_CMD_NONE)
                return;
 
@@ -82,8 +145,217 @@ static void at91_nand_disable(struct at91_nand_host *host)
                at91_set_gpio_value(host->board->enable_pin, 1);
 }
 
+/*
+ * write oob for small pages
+ */
+static int at91_nand_write_oob_512(struct mtd_info *mtd,
+               struct nand_chip *chip, int page)
+{
+       int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
+       int eccsize = chip->ecc.size, length = mtd->oobsize;
+       int len, pos, status = 0;
+       const uint8_t *bufpoi = chip->oob_poi;
+
+       pos = eccsize + chunk;
+
+       chip->cmdfunc(mtd, NAND_CMD_SEQIN, pos, page);
+       len = min_t(int, length, chunk);
+       chip->write_buf(mtd, bufpoi, len);
+       bufpoi += len;
+       length -= len;
+       if (length > 0)
+               chip->write_buf(mtd, bufpoi, length);
+
+       chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+       status = chip->waitfunc(mtd, chip);
+
+       return status & NAND_STATUS_FAIL ? -EIO : 0;
+
+}
+
+/*
+ * read oob for small pages
+ */
+static int at91_nand_read_oob_512(struct mtd_info *mtd,
+               struct nand_chip *chip, int page, int sndcmd)
+{
+       if (sndcmd) {
+               chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+               sndcmd = 0;
+       }
+       chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+       return sndcmd;
+}
+
+/*
+ * Calculate HW ECC
+ *
+ * function called after a write
+ *
+ * mtd:        MTD block structure
+ * dat:        raw data (unused)
+ * ecc_code:   buffer for ECC
+ */
+static int at91_nand_calculate(struct mtd_info *mtd,
+               const u_char *dat, unsigned char *ecc_code)
+{
+       struct nand_chip *nand_chip = mtd->priv;
+       struct at91_nand_host *host = nand_chip->priv;
+       uint32_t *eccpos = nand_chip->ecc.layout->eccpos;
+       unsigned int ecc_value;
+
+       /* get the first 2 ECC bytes */
+       ecc_value = ecc_readl(host->ecc, PR);
+
+       ecc_code[eccpos[0]] = ecc_value & 0xFF;
+       ecc_code[eccpos[1]] = (ecc_value >> 8) & 0xFF;
+
+       /* get the last 2 ECC bytes */
+       ecc_value = ecc_readl(host->ecc, NPR) & AT91_ECC_NPARITY;
+
+       ecc_code[eccpos[2]] = ecc_value & 0xFF;
+       ecc_code[eccpos[3]] = (ecc_value >> 8) & 0xFF;
+
+       return 0;
+}
+
+/*
+ * HW ECC read page function
+ *
+ * mtd:        mtd info structure
+ * chip:       nand chip info structure
+ * buf:        buffer to store read data
+ */
+static int at91_nand_read_page(struct mtd_info *mtd,
+               struct nand_chip *chip, uint8_t *buf)
+{
+       int eccsize = chip->ecc.size;
+       int eccbytes = chip->ecc.bytes;
+       uint32_t *eccpos = chip->ecc.layout->eccpos;
+       uint8_t *p = buf;
+       uint8_t *oob = chip->oob_poi;
+       uint8_t *ecc_pos;
+       int stat;
+
+       /* read the page */
+       chip->read_buf(mtd, p, eccsize);
+
+       /* move to ECC position if needed */
+       if (eccpos[0] != 0) {
+               /* This only works on large pages
+                * because the ECC controller waits for
+                * NAND_CMD_RNDOUTSTART after the
+                * NAND_CMD_RNDOUT.
+                * anyway, for small pages, the eccpos[0] == 0
+                */
+               chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
+                               mtd->writesize + eccpos[0], -1);
+       }
+
+       /* the ECC controller needs to read the ECC just after the data */
+       ecc_pos = oob + eccpos[0];
+       chip->read_buf(mtd, ecc_pos, eccbytes);
+
+       /* check if there's an error */
+       stat = chip->ecc.correct(mtd, p, oob, NULL);
+
+       if (stat < 0)
+               mtd->ecc_stats.failed++;
+       else
+               mtd->ecc_stats.corrected += stat;
+
+       /* get back to oob start (end of page) */
+       chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
+
+       /* read the oob */
+       chip->read_buf(mtd, oob, mtd->oobsize);
+
+       return 0;
+}
+
+/*
+ * HW ECC Correction
+ *
+ * function called after a read
+ *
+ * mtd:        MTD block structure
+ * dat:        raw data read from the chip
+ * read_ecc:   ECC from the chip (unused)
+ * isnull:     unused
+ *
+ * Detect and correct a 1 bit error for a page
+ */
+static int at91_nand_correct(struct mtd_info *mtd, u_char *dat,
+               u_char *read_ecc, u_char *isnull)
+{
+       struct nand_chip *nand_chip = mtd->priv;
+       struct at91_nand_host *host = nand_chip->priv;
+       unsigned int ecc_status;
+       unsigned int ecc_word, ecc_bit;
+
+       /* get the status from the Status Register */
+       ecc_status = ecc_readl(host->ecc, SR);
+
+       /* if there's no error */
+       if (likely(!(ecc_status & AT91_ECC_RECERR)))
+               return 0;
+
+       /* get error bit offset (4 bits) */
+       ecc_bit = ecc_readl(host->ecc, PR) & AT91_ECC_BITADDR;
+       /* get word address (12 bits) */
+       ecc_word = ecc_readl(host->ecc, PR) & AT91_ECC_WORDADDR;
+       ecc_word >>= 4;
+
+       /* if there are multiple errors */
+       if (ecc_status & AT91_ECC_MULERR) {
+               /* check if it is a freshly erased block
+                * (filled with 0xff) */
+               if ((ecc_bit == AT91_ECC_BITADDR)
+                               && (ecc_word == (AT91_ECC_WORDADDR >> 4))) {
+                       /* the block has just been erased, return OK */
+                       return 0;
+               }
+               /* it doesn't seem to be a freshly
+                * erased block.
+                * We can't correct so many errors */
+               dev_dbg(host->dev, "at91_nand : multiple errors detected."
+                               " Unable to correct.\n");
+               return -EIO;
+       }
+
+       /* if there's a single bit error : we can correct it */
+       if (ecc_status & AT91_ECC_ECCERR) {
+               /* there's nothing much to do here.
+                * the bit error is on the ECC itself.
+                */
+               dev_dbg(host->dev, "at91_nand : one bit error on ECC code."
+                               " Nothing to correct\n");
+               return 0;
+       }
+
+       dev_dbg(host->dev, "at91_nand : one bit error on data."
+                       " (word offset in the page :"
+                       " 0x%x bit offset : 0x%x)\n",
+                       ecc_word, ecc_bit);
+       /* correct the error */
+       if (nand_chip->options & NAND_BUSWIDTH_16) {
+               /* 16 bits words */
+               ((unsigned short *) dat)[ecc_word] ^= (1 << ecc_bit);
+       } else {
+               /* 8 bits words */
+               dat[ecc_word] ^= (1 << ecc_bit);
+       }
+       dev_dbg(host->dev, "at91_nand : error corrected\n");
+       return 1;
+}
+
+/*
+ * Enable HW ECC: unused
+ */
+static void at91_nand_hwctl(struct mtd_info *mtd, int mode) { ; }
+
 #ifdef CONFIG_MTD_PARTITIONS
-const char *part_probes[] = { "cmdlinepart", NULL };
+static const char *part_probes[] = { "cmdlinepart", NULL };
 #endif
 
 /*
@@ -94,6 +366,8 @@ static int __init at91_nand_probe(struct platform_device *pdev)
        struct at91_nand_host *host;
        struct mtd_info *mtd;
        struct nand_chip *nand_chip;
+       struct resource *regs;
+       struct resource *mem;
        int res;
 
 #ifdef CONFIG_MTD_PARTITIONS
@@ -108,8 +382,13 @@ static int __init at91_nand_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
 
-       host->io_base = ioremap(pdev->resource[0].start,
-                               pdev->resource[0].end - pdev->resource[0].start + 1);
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!mem) {
+               printk(KERN_ERR "at91_nand: can't get I/O resource mem\n");
+               kfree(host);
+               return -ENXIO;
+       }
+
+       host->io_base = ioremap(mem->start, mem->end - mem->start + 1);
        if (host->io_base == NULL) {
                printk(KERN_ERR "at91_nand: ioremap failed\n");
                kfree(host);
@@ -119,6 +398,7 @@ static int __init at91_nand_probe(struct platform_device *pdev)
        mtd = &host->mtd;
        nand_chip = &host->nand_chip;
        host->board = pdev->dev.platform_data;
+       host->dev = &pdev->dev;
 
        nand_chip->priv = host;         /* link the private data structures */
        mtd->priv = nand_chip;
@@ -132,7 +412,32 @@ static int __init at91_nand_probe(struct platform_device *pdev)
        if (host->board->rdy_pin)
                nand_chip->dev_ready = at91_nand_device_ready;
 
+       regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       if (!regs && hard_ecc) {
+               printk(KERN_ERR "at91_nand: can't get I/O resource "
+                               "regs\nFalling back on software ECC\n");
+       }
+
        nand_chip->ecc.mode = NAND_ECC_SOFT;    /* enable ECC */
+       if (no_ecc)
+               nand_chip->ecc.mode = NAND_ECC_NONE;
+       if (hard_ecc && regs) {
+               host->ecc = ioremap(regs->start, regs->end - regs->start + 1);
+               if (host->ecc == NULL) {
+                       printk(KERN_ERR "at91_nand: ioremap failed\n");
+                       res = -EIO;
+                       goto err_ecc_ioremap;
+               }
+               nand_chip->ecc.mode = NAND_ECC_HW_SYNDROME;
+               nand_chip->ecc.calculate = at91_nand_calculate;
+               nand_chip->ecc.correct = at91_nand_correct;
+               nand_chip->ecc.hwctl = at91_nand_hwctl;
+               nand_chip->ecc.read_page = at91_nand_read_page;
+               nand_chip->ecc.bytes = 4;
+               nand_chip->ecc.prepad = 0;
+               nand_chip->ecc.postpad = 0;
+       }
+
        nand_chip->chip_delay = 20;             /* 20us command delay time */
 
        if (host->board->bus_width_16)          /* 16-bit bus width */
@@ -149,8 +454,53 @@ static int __init at91_nand_probe(struct platform_device *pdev)
                }
        }
 
-       /* Scan to find existance of the device */
-       if (nand_scan(mtd, 1)) {
+       /* first scan to find the device and get the page size */
+       if (nand_scan_ident(mtd, 1)) {
+               res = -ENXIO;
+               goto out;
+       }
+
+       if (nand_chip->ecc.mode == NAND_ECC_HW_SYNDROME) {
+               /* ECC is calculated for the whole page (1 step) */
+               nand_chip->ecc.size = mtd->writesize;
+
+               /* set ECC page size and oob layout */
+               switch (mtd->writesize) {
+               case 512:
+                       nand_chip->ecc.layout = &at91_oobinfo_small;
+                       nand_chip->ecc.read_oob = at91_nand_read_oob_512;
+                       nand_chip->ecc.write_oob = at91_nand_write_oob_512;
+                       ecc_writel(host->ecc, MR, AT91_ECC_PAGESIZE_528);
+                       break;
+               case 1024:
+                       nand_chip->ecc.layout = &at91_oobinfo_large;
+                       ecc_writel(host->ecc, MR, AT91_ECC_PAGESIZE_1056);
+                       break;
+               case 2048:
+                       nand_chip->ecc.layout = &at91_oobinfo_large;
+                       ecc_writel(host->ecc, MR, AT91_ECC_PAGESIZE_2112);
+                       break;
+               case 4096:
+                       nand_chip->ecc.layout = &at91_oobinfo_large;
+                       ecc_writel(host->ecc, MR, AT91_ECC_PAGESIZE_4224);
+                       break;
+               default:
+                       /* page size not handled by HW ECC */
+                       /* switching back to soft ECC */
+                       nand_chip->ecc.mode = NAND_ECC_SOFT;
+                       nand_chip->ecc.calculate = NULL;
+                       nand_chip->ecc.correct = NULL;
+                       nand_chip->ecc.hwctl = NULL;
+                       nand_chip->ecc.read_page = NULL;
+                       nand_chip->ecc.postpad = 0;
+                       nand_chip->ecc.prepad = 0;
+                       nand_chip->ecc.bytes = 0;
+                       break;
+               }
+       }
+
+       /* second phase scan */
+       if (nand_scan_tail(mtd)) {
                res = -ENXIO;
                goto out;
        }
@@ -179,9 +529,15 @@ static int __init at91_nand_probe(struct platform_device *pdev)
        if (!res)
                return res;
 
+#ifdef CONFIG_MTD_PARTITIONS
 release:
+#endif
        nand_release(mtd);
+
 out:
+       iounmap(host->ecc);
+
+err_ecc_ioremap:
        at91_nand_disable(host);
        platform_set_drvdata(pdev, NULL);
        iounmap(host->io_base);
@@ -202,6 +558,7 @@ static int __devexit at91_nand_remove(struct platform_device *pdev)
        at91_nand_disable(host);
 
        iounmap(host->io_base);
+       iounmap(host->ecc);
        kfree(host);
 
        return 0;
@@ -233,4 +590,5 @@ module_exit(at91_nand_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Rick Bronson");
-MODULE_DESCRIPTION("NAND/SmartMedia driver for AT91RM9200");
+MODULE_DESCRIPTION("NAND/SmartMedia driver for AT91RM9200 / AT91SAM9");
+MODULE_ALIAS("platform:at91_nand");
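For the hardware-ECC hunk above: at91_nand_correct() locates a single-bit error through the parity register as a word offset (AT91_ECC_WORDADDR) plus a bit offset (AT91_ECC_BITADDR), then flips that bit in the data buffer. A minimal standalone sketch of that repair step follows; the function name and signature are illustrative only and are not part of the patch.

/* Illustrative only -- mirrors the bit-flip at the end of at91_nand_correct() */
#include <stdint.h>

static void flip_bad_bit(void *data, unsigned int ecc_word,
                         unsigned int ecc_bit, int bus_width_16)
{
        if (bus_width_16)
                ((uint16_t *)data)[ecc_word] ^= (uint16_t)(1u << ecc_bit); /* 16-bit words */
        else
                ((uint8_t *)data)[ecc_word] ^= (uint8_t)(1u << ecc_bit);   /* 8-bit words */
}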
index 747042ab094a6b101e558a4f379ff9b2848a81c1..e87a57297328c4a72cde3d01c605e94d0299eee3 100644 (file)
@@ -1,6 +1,6 @@
 /* linux/drivers/mtd/nand/bf5xx_nand.c
  *
- * Copyright 2006-2007 Analog Devices Inc.
+ * Copyright 2006-2008 Analog Devices Inc.
  *     http://blackfin.uclinux.org/
  *     Bryan Wu <bryan.wu@analog.com>
  *
@@ -74,7 +74,7 @@ static int hardware_ecc = 1;
 static int hardware_ecc;
 #endif
 
-static unsigned short bfin_nfc_pin_req[] =
+static const unsigned short bfin_nfc_pin_req[] =
        {P_NAND_CE,
         P_NAND_RB,
         P_NAND_D0,
@@ -581,12 +581,6 @@ static int bf5xx_nand_hw_init(struct bf5xx_nand_info *info)
        bfin_write_NFC_IRQSTAT(val);
        SSYNC();
 
-       if (peripheral_request_list(bfin_nfc_pin_req, DRV_NAME)) {
-               printk(KERN_ERR DRV_NAME
-               ": Requesting Peripherals failed\n");
-               return -EFAULT;
-       }
-
        /* DMA initialization  */
        if (bf5xx_nand_dma_init(info))
                err = -ENXIO;
@@ -654,6 +648,12 @@ static int bf5xx_nand_probe(struct platform_device *pdev)
 
        dev_dbg(&pdev->dev, "(%p)\n", pdev);
 
+       if (peripheral_request_list(bfin_nfc_pin_req, DRV_NAME)) {
+               printk(KERN_ERR DRV_NAME
+               ": Requesting Peripherals failed\n");
+               return -EFAULT;
+       }
+
        if (!plat) {
                dev_err(&pdev->dev, "no platform specific information\n");
                goto exit_error;
@@ -803,3 +803,4 @@ module_exit(bf5xx_nand_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR(DRV_AUTHOR);
 MODULE_DESCRIPTION(DRV_DESC);
+MODULE_ALIAS("platform:" DRV_NAME);
index 8dab69657b1914eefb5ebb0eebf1420f2b78edba..3370a800fd3612fd43f6bb54d4cf8a36cbdf2f14 100644 (file)
@@ -279,7 +279,7 @@ static int is_geode(void)
 
 
 #ifdef CONFIG_MTD_PARTITIONS
-const char *part_probes[] = { "cmdlinepart", NULL };
+static const char *part_probes[] = { "cmdlinepart", NULL };
 #endif
 
 
index 378b7aa638125cfd92929cb537fa2bbd1871cf7f..4b69aacdf5ca45410207dca1f8908413f166e1c1 100644 (file)
@@ -184,11 +184,11 @@ static int fsl_elbc_run_command(struct mtd_info *mtd)
                 in_be32(&lbc->fbar), in_be32(&lbc->fpar),
                 in_be32(&lbc->fbcr), priv->bank);
 
+       ctrl->irq_status = 0;
        /* execute special operation */
        out_be32(&lbc->lsor, priv->bank);
 
        /* wait for FCM complete flag or timeout */
-       ctrl->irq_status = 0;
        wait_event_timeout(ctrl->irq_wait, ctrl->irq_status,
                           FCM_TIMEOUT_MSECS * HZ/1000);
        ctrl->status = ctrl->irq_status;
@@ -346,19 +346,20 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
                ctrl->column = column;
                ctrl->oob = 0;
 
-               fcr = (NAND_CMD_PAGEPROG << FCR_CMD1_SHIFT) |
-                     (NAND_CMD_SEQIN << FCR_CMD2_SHIFT);
-
                if (priv->page_size) {
+                       fcr = (NAND_CMD_SEQIN << FCR_CMD0_SHIFT) |
+                             (NAND_CMD_PAGEPROG << FCR_CMD1_SHIFT);
+
                        out_be32(&lbc->fir,
                                 (FIR_OP_CW0 << FIR_OP0_SHIFT) |
                                 (FIR_OP_CA  << FIR_OP1_SHIFT) |
                                 (FIR_OP_PA  << FIR_OP2_SHIFT) |
                                 (FIR_OP_WB  << FIR_OP3_SHIFT) |
                                 (FIR_OP_CW1 << FIR_OP4_SHIFT));
-
-                       fcr |= NAND_CMD_READ0 << FCR_CMD0_SHIFT;
                } else {
+                       fcr = (NAND_CMD_PAGEPROG << FCR_CMD1_SHIFT) |
+                             (NAND_CMD_SEQIN << FCR_CMD2_SHIFT);
+
                        out_be32(&lbc->fir,
                                 (FIR_OP_CW0 << FIR_OP0_SHIFT) |
                                 (FIR_OP_CM2 << FIR_OP1_SHIFT) |
@@ -480,7 +481,7 @@ static void fsl_elbc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
        struct fsl_elbc_ctrl *ctrl = priv->ctrl;
        unsigned int bufsize = mtd->writesize + mtd->oobsize;
 
-       if (len < 0) {
+       if (len <= 0) {
                dev_err(ctrl->dev, "write_buf of %d bytes", len);
                ctrl->status = 0;
                return;
@@ -495,6 +496,15 @@ static void fsl_elbc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
        }
 
        memcpy_toio(&ctrl->addr[ctrl->index], buf, len);
+       /*
+        * This is a workaround for the weird elbc hangs during nand write.
+        * Scott Wood says: "...perhaps difference in how long it takes a
+        * write to make it through the localbus compared to a write to IMMR
+        * is causing problems, and sync isn't helping for some reason."
+        * Reading back the last byte helps though.
+        */
+       in_8(&ctrl->addr[ctrl->index] + len - 1);
+
        ctrl->index += len;
 }
 
@@ -666,7 +676,7 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
        /* adjust Option Register and ECC to match Flash page size */
        if (mtd->writesize == 512) {
                priv->page_size = 0;
-               clrbits32(&lbc->bank[priv->bank].or, ~OR_FCM_PGS);
+               clrbits32(&lbc->bank[priv->bank].or, OR_FCM_PGS);
        } else if (mtd->writesize == 2048) {
                priv->page_size = 1;
                setbits32(&lbc->bank[priv->bank].or, OR_FCM_PGS);
@@ -687,11 +697,6 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
                return -1;
        }
 
-       /* The default u-boot configuration on MPC8313ERDB causes errors;
-        * more delay is needed.  This should be safe for other boards
-        * as well.
-        */
-       setbits32(&lbc->bank[priv->bank].or, 0x70);
        return 0;
 }
 
@@ -779,6 +784,8 @@ static int fsl_elbc_chip_remove(struct fsl_elbc_mtd *priv)
 
        nand_release(&priv->mtd);
 
+       kfree(priv->mtd.name);
+
        if (priv->vbase)
                iounmap(priv->vbase);
 
@@ -839,6 +846,12 @@ static int fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl,
                goto err;
        }
 
+       priv->mtd.name = kasprintf(GFP_KERNEL, "%x.flash", res.start);
+       if (!priv->mtd.name) {
+               ret = -ENOMEM;
+               goto err;
+       }
+
        ret = fsl_elbc_chip_init(priv);
        if (ret)
                goto err;
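The read-back added to fsl_elbc_write_buf() above flushes posted local-bus writes before the controller is started, per the quoted comment. A minimal sketch of the same write-then-read-back pattern follows; the names are illustrative, and the driver itself uses memcpy_toio() and in_8() on an ioremap()ed buffer rather than a plain pointer.

/* Illustrative only -- not part of the driver above */
#include <stddef.h>

static void copy_then_flush(volatile unsigned char *dst,
                            const unsigned char *src, size_t len)
{
        size_t i;

        if (!len)
                return;

        for (i = 0; i < len; i++)
                dst[i] = src[i];        /* writes may still be posted on the bus */

        (void)dst[len - 1];             /* reading the last byte back forces completion */
}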
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
new file mode 100644 (file)
index 0000000..1ebfd87
--- /dev/null
@@ -0,0 +1,291 @@
+/*
+ * Freescale UPM NAND driver.
+ *
+ * Copyright © 2007-2008  MontaVista Software, Inc.
+ *
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/mtd.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/io.h>
+#include <asm/fsl_lbc.h>
+
+struct fsl_upm_nand {
+       struct device *dev;
+       struct mtd_info mtd;
+       struct nand_chip chip;
+       int last_ctrl;
+#ifdef CONFIG_MTD_PARTITIONS
+       struct mtd_partition *parts;
+#endif
+
+       struct fsl_upm upm;
+       uint8_t upm_addr_offset;
+       uint8_t upm_cmd_offset;
+       void __iomem *io_base;
+       int rnb_gpio;
+       const uint32_t *wait_pattern;
+       const uint32_t *wait_write;
+       int chip_delay;
+};
+
+#define to_fsl_upm_nand(mtd) container_of(mtd, struct fsl_upm_nand, mtd)
+
+static int fun_chip_ready(struct mtd_info *mtd)
+{
+       struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
+
+       if (gpio_get_value(fun->rnb_gpio))
+               return 1;
+
+       dev_vdbg(fun->dev, "busy\n");
+       return 0;
+}
+
+static void fun_wait_rnb(struct fsl_upm_nand *fun)
+{
+       int cnt = 1000000;
+
+       if (fun->rnb_gpio >= 0) {
+               while (--cnt && !fun_chip_ready(&fun->mtd))
+                       cpu_relax();
+       }
+
+       if (!cnt)
+               dev_err(fun->dev, "tired waiting for RNB\n");
+}
+
+static void fun_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+       struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
+
+       if (!(ctrl & fun->last_ctrl)) {
+               fsl_upm_end_pattern(&fun->upm);
+
+               if (cmd == NAND_CMD_NONE)
+                       return;
+
+               fun->last_ctrl = ctrl & (NAND_ALE | NAND_CLE);
+       }
+
+       if (ctrl & NAND_CTRL_CHANGE) {
+               if (ctrl & NAND_ALE)
+                       fsl_upm_start_pattern(&fun->upm, fun->upm_addr_offset);
+               else if (ctrl & NAND_CLE)
+                       fsl_upm_start_pattern(&fun->upm, fun->upm_cmd_offset);
+       }
+
+       fsl_upm_run_pattern(&fun->upm, fun->io_base, cmd);
+
+       if (fun->wait_pattern)
+               fun_wait_rnb(fun);
+}
+
+static uint8_t fun_read_byte(struct mtd_info *mtd)
+{
+       struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
+
+       return in_8(fun->chip.IO_ADDR_R);
+}
+
+static void fun_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+       struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
+       int i;
+
+       for (i = 0; i < len; i++)
+               buf[i] = in_8(fun->chip.IO_ADDR_R);
+}
+
+static void fun_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+       struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
+       int i;
+
+       for (i = 0; i < len; i++) {
+               out_8(fun->chip.IO_ADDR_W, buf[i]);
+               if (fun->wait_write)
+                       fun_wait_rnb(fun);
+       }
+}
+
+static int __devinit fun_chip_init(struct fsl_upm_nand *fun)
+{
+       int ret;
+#ifdef CONFIG_MTD_PARTITIONS
+       static const char *part_types[] = { "cmdlinepart", NULL, };
+#endif
+
+       fun->chip.IO_ADDR_R = fun->io_base;
+       fun->chip.IO_ADDR_W = fun->io_base;
+       fun->chip.cmd_ctrl = fun_cmd_ctrl;
+       fun->chip.chip_delay = fun->chip_delay;
+       fun->chip.read_byte = fun_read_byte;
+       fun->chip.read_buf = fun_read_buf;
+       fun->chip.write_buf = fun_write_buf;
+       fun->chip.ecc.mode = NAND_ECC_SOFT;
+
+       if (fun->rnb_gpio >= 0)
+               fun->chip.dev_ready = fun_chip_ready;
+
+       fun->mtd.priv = &fun->chip;
+       fun->mtd.owner = THIS_MODULE;
+
+       ret = nand_scan(&fun->mtd, 1);
+       if (ret)
+               return ret;
+
+       fun->mtd.name = fun->dev->bus_id;
+
+#ifdef CONFIG_MTD_PARTITIONS
+       ret = parse_mtd_partitions(&fun->mtd, part_types, &fun->parts, 0);
+       if (ret > 0)
+               return add_mtd_partitions(&fun->mtd, fun->parts, ret);
+#endif
+       return add_mtd_device(&fun->mtd);
+}
+
+static int __devinit fun_probe(struct of_device *ofdev,
+                              const struct of_device_id *ofid)
+{
+       struct fsl_upm_nand *fun;
+       struct resource io_res;
+       const uint32_t *prop;
+       int ret;
+       int size;
+
+       fun = kzalloc(sizeof(*fun), GFP_KERNEL);
+       if (!fun)
+               return -ENOMEM;
+
+       ret = of_address_to_resource(ofdev->node, 0, &io_res);
+       if (ret) {
+               dev_err(&ofdev->dev, "can't get IO base\n");
+               goto err1;
+       }
+
+       ret = fsl_upm_find(io_res.start, &fun->upm);
+       if (ret) {
+               dev_err(&ofdev->dev, "can't find UPM\n");
+               goto err1;
+       }
+
+       prop = of_get_property(ofdev->node, "fsl,upm-addr-offset", &size);
+       if (!prop || size != sizeof(uint32_t)) {
+               dev_err(&ofdev->dev, "can't get UPM address offset\n");
+               ret = -EINVAL;
+               goto err2;
+       }
+       fun->upm_addr_offset = *prop;
+
+       prop = of_get_property(ofdev->node, "fsl,upm-cmd-offset", &size);
+       if (!prop || size != sizeof(uint32_t)) {
+               dev_err(&ofdev->dev, "can't get UPM command offset\n");
+               ret = -EINVAL;
+               goto err2;
+       }
+       fun->upm_cmd_offset = *prop;
+
+       fun->rnb_gpio = of_get_gpio(ofdev->node, 0);
+       if (fun->rnb_gpio >= 0) {
+               ret = gpio_request(fun->rnb_gpio, ofdev->dev.bus_id);
+               if (ret) {
+                       dev_err(&ofdev->dev, "can't request RNB gpio\n");
+                       goto err2;
+               }
+               gpio_direction_input(fun->rnb_gpio);
+       } else if (fun->rnb_gpio == -EINVAL) {
+               dev_err(&ofdev->dev, "specified RNB gpio is invalid\n");
+               ret = -EINVAL;
+               goto err2;
+       }
+
+       fun->io_base = devm_ioremap_nocache(&ofdev->dev, io_res.start,
+                                         io_res.end - io_res.start + 1);
+       if (!fun->io_base) {
+               ret = -ENOMEM;
+               goto err2;
+       }
+
+       fun->dev = &ofdev->dev;
+       fun->last_ctrl = NAND_CLE;
+       fun->wait_pattern = of_get_property(ofdev->node, "fsl,wait-pattern",
+                                           NULL);
+       fun->wait_write = of_get_property(ofdev->node, "fsl,wait-write", NULL);
+
+       prop = of_get_property(ofdev->node, "chip-delay", NULL);
+       if (prop)
+               fun->chip_delay = *prop;
+       else
+               fun->chip_delay = 50;
+
+       ret = fun_chip_init(fun);
+       if (ret)
+               goto err2;
+
+       dev_set_drvdata(&ofdev->dev, fun);
+
+       return 0;
+err2:
+       if (fun->rnb_gpio >= 0)
+               gpio_free(fun->rnb_gpio);
+err1:
+       kfree(fun);
+
+       return ret;
+}
+
+static int __devexit fun_remove(struct of_device *ofdev)
+{
+       struct fsl_upm_nand *fun = dev_get_drvdata(&ofdev->dev);
+
+       nand_release(&fun->mtd);
+
+       if (fun->rnb_gpio >= 0)
+               gpio_free(fun->rnb_gpio);
+
+       kfree(fun);
+
+       return 0;
+}
+
+static struct of_device_id of_fun_match[] = {
+       { .compatible = "fsl,upm-nand" },
+       {},
+};
+MODULE_DEVICE_TABLE(of, of_fun_match);
+
+static struct of_platform_driver of_fun_driver = {
+       .name           = "fsl,upm-nand",
+       .match_table    = of_fun_match,
+       .probe          = fun_probe,
+       .remove         = __devexit_p(fun_remove),
+};
+
+static int __init fun_module_init(void)
+{
+       return of_register_platform_driver(&of_fun_driver);
+}
+module_init(fun_module_init);
+
+static void __exit fun_module_exit(void)
+{
+       of_unregister_platform_driver(&of_fun_driver);
+}
+module_exit(fun_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
+MODULE_DESCRIPTION("Driver for NAND chips working through Freescale "
+                  "LocalBus User-Programmable Machine");
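fun_wait_rnb() above bounds its wait on the RNB (ready/busy) GPIO with a fixed spin count and reports an error when the budget runs out. A minimal sketch of that bounded-poll pattern follows, with an illustrative callback standing in for the driver's GPIO read; it is not part of the patch.

/* Illustrative only -- not part of the driver above */
static int wait_until_ready(int (*chip_ready)(void *ctx), void *ctx,
                            unsigned int max_spins)
{
        while (max_spins--) {
                if (chip_ready(ctx))
                        return 0;       /* device reported ready in time */
        }
        return -1;                      /* budget exhausted; caller logs an error */
}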
index 7acb1a0e7409fbf950a06e97f8e58b3a1be38559..ba1bdf787323190377c20aa239317ee278769b5b 100644 (file)
@@ -2229,6 +2229,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
 {
        struct nand_flash_dev *type = NULL;
        int i, dev_id, maf_idx;
+       int tmp_id, tmp_manf;
 
        /* Select the device */
        chip->select_chip(mtd, 0);
@@ -2240,6 +2241,26 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
        *maf_id = chip->read_byte(mtd);
        dev_id = chip->read_byte(mtd);
 
+       /* Try again to make sure, as on some systems the bus-hold or other
+        * interface concerns can cause random data which looks like a
+        * possibly credible NAND flash to appear. If the two results do
+        * not match, ignore the device completely.
+        */
+
+       chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+
+       /* Read manufacturer and device IDs */
+
+       tmp_manf = chip->read_byte(mtd);
+       tmp_id = chip->read_byte(mtd);
+
+       if (tmp_manf != *maf_id || tmp_id != dev_id) {
+               printk(KERN_INFO "%s: second ID read did not match "
+                      "%02x,%02x against %02x,%02x\n", __func__,
+                      *maf_id, dev_id, tmp_manf, tmp_id);
+               return ERR_PTR(-ENODEV);
+       }
+
        /* Lookup the flash id */
        for (i = 0; nand_flash_ids[i].name != NULL; i++) {
                if (dev_id == nand_flash_ids[i].id) {
index 1c0e89f00e8dc0d98bc1882079874c2330d8c677..955959eb02d49f301463e8b7ec998431df3bcfe8 100644 (file)
@@ -317,3 +317,5 @@ module_exit(ndfc_nand_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
 MODULE_DESCRIPTION("Platform driver for NDFC");
+MODULE_ALIAS("platform:ndfc-chip");
+MODULE_ALIAS("platform:ndfc-nand");
index ec5ad28b237ef338859b072157b7a24f00b32507..59e05a1c50cf438086085dec1ebc6ca7ea4c36ef 100644 (file)
@@ -169,3 +169,4 @@ module_exit(orion_nand_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Tzachi Perelstein");
 MODULE_DESCRIPTION("NAND glue for Orion platforms");
+MODULE_ALIAS("platform:orion_nand");
index f6d5c2adc4fd41171c892859b96e85c10f45c369..f674c5427b17c5a7783a38f1b178848070705de9 100644 (file)
@@ -54,6 +54,7 @@ static int __init plat_nand_probe(struct platform_device *pdev)
        data->chip.priv = &data;
        data->mtd.priv = &data->chip;
        data->mtd.owner = THIS_MODULE;
+       data->mtd.name = pdev->dev.bus_id;
 
        data->chip.IO_ADDR_R = data->io_base;
        data->chip.IO_ADDR_W = data->io_base;
@@ -150,3 +151,4 @@ module_exit(plat_nand_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Vitaly Wool");
 MODULE_DESCRIPTION("Simple generic NAND driver");
+MODULE_ALIAS("platform:gen_nand");
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
new file mode 100644 (file)
index 0000000..fceb468
--- /dev/null
@@ -0,0 +1,1249 @@
+/*
+ * drivers/mtd/nand/pxa3xx_nand.c
+ *
+ * Copyright © 2005 Intel Corporation
+ * Copyright © 2006 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <asm/dma.h>
+
+#include <asm/arch/pxa-regs.h>
+#include <asm/arch/pxa3xx_nand.h>
+
+#define        CHIP_DELAY_TIMEOUT      (2 * HZ/10)
+
+/* registers and bit definitions */
+#define NDCR           (0x00) /* Control register */
+#define NDTR0CS0       (0x04) /* Timing Parameter 0 for CS0 */
+#define NDTR1CS0       (0x0C) /* Timing Parameter 1 for CS0 */
+#define NDSR           (0x14) /* Status Register */
+#define NDPCR          (0x18) /* Page Count Register */
+#define NDBDR0         (0x1C) /* Bad Block Register 0 */
+#define NDBDR1         (0x20) /* Bad Block Register 1 */
+#define NDDB           (0x40) /* Data Buffer */
+#define NDCB0          (0x48) /* Command Buffer0 */
+#define NDCB1          (0x4C) /* Command Buffer1 */
+#define NDCB2          (0x50) /* Command Buffer2 */
+
+#define NDCR_SPARE_EN          (0x1 << 31)
+#define NDCR_ECC_EN            (0x1 << 30)
+#define NDCR_DMA_EN            (0x1 << 29)
+#define NDCR_ND_RUN            (0x1 << 28)
+#define NDCR_DWIDTH_C          (0x1 << 27)
+#define NDCR_DWIDTH_M          (0x1 << 26)
+#define NDCR_PAGE_SZ           (0x1 << 24)
+#define NDCR_NCSX              (0x1 << 23)
+#define NDCR_ND_MODE           (0x3 << 21)
+#define NDCR_NAND_MODE         (0x0)
+#define NDCR_CLR_PG_CNT                (0x1 << 20)
+#define NDCR_CLR_ECC           (0x1 << 19)
+#define NDCR_RD_ID_CNT_MASK    (0x7 << 16)
+#define NDCR_RD_ID_CNT(x)      (((x) << 16) & NDCR_RD_ID_CNT_MASK)
+
+#define NDCR_RA_START          (0x1 << 15)
+#define NDCR_PG_PER_BLK                (0x1 << 14)
+#define NDCR_ND_ARB_EN         (0x1 << 12)
+
+#define NDSR_MASK              (0xfff)
+#define NDSR_RDY               (0x1 << 11)
+#define NDSR_CS0_PAGED         (0x1 << 10)
+#define NDSR_CS1_PAGED         (0x1 << 9)
+#define NDSR_CS0_CMDD          (0x1 << 8)
+#define NDSR_CS1_CMDD          (0x1 << 7)
+#define NDSR_CS0_BBD           (0x1 << 6)
+#define NDSR_CS1_BBD           (0x1 << 5)
+#define NDSR_DBERR             (0x1 << 4)
+#define NDSR_SBERR             (0x1 << 3)
+#define NDSR_WRDREQ            (0x1 << 2)
+#define NDSR_RDDREQ            (0x1 << 1)
+#define NDSR_WRCMDREQ          (0x1)
+
+#define NDCB0_AUTO_RS          (0x1 << 25)
+#define NDCB0_CSEL             (0x1 << 24)
+#define NDCB0_CMD_TYPE_MASK    (0x7 << 21)
+#define NDCB0_CMD_TYPE(x)      (((x) << 21) & NDCB0_CMD_TYPE_MASK)
+#define NDCB0_NC               (0x1 << 20)
+#define NDCB0_DBC              (0x1 << 19)
+#define NDCB0_ADDR_CYC_MASK    (0x7 << 16)
+#define NDCB0_ADDR_CYC(x)      (((x) << 16) & NDCB0_ADDR_CYC_MASK)
+#define NDCB0_CMD2_MASK                (0xff << 8)
+#define NDCB0_CMD1_MASK                (0xff)
+#define NDCB0_ADDR_CYC_SHIFT   (16)
+
+/* dma-able I/O address for the NAND data and commands */
+#define NDCB0_DMA_ADDR         (0x43100048)
+#define NDDB_DMA_ADDR          (0x43100040)
+
+/* macros for registers read/write */
+#define nand_writel(info, off, val)    \
+       __raw_writel((val), (info)->mmio_base + (off))
+
+#define nand_readl(info, off)          \
+       __raw_readl((info)->mmio_base + (off))
+
+/* error code and state */
+enum {
+       ERR_NONE        = 0,
+       ERR_DMABUSERR   = -1,
+       ERR_SENDCMD     = -2,
+       ERR_DBERR       = -3,
+       ERR_BBERR       = -4,
+};
+
+enum {
+       STATE_READY     = 0,
+       STATE_CMD_HANDLE,
+       STATE_DMA_READING,
+       STATE_DMA_WRITING,
+       STATE_DMA_DONE,
+       STATE_PIO_READING,
+       STATE_PIO_WRITING,
+};
+
+struct pxa3xx_nand_timing {
+       unsigned int    tCH;  /* Enable signal hold time */
+       unsigned int    tCS;  /* Enable signal setup time */
+       unsigned int    tWH;  /* ND_nWE high duration */
+       unsigned int    tWP;  /* ND_nWE pulse time */
+       unsigned int    tRH;  /* ND_nRE high duration */
+       unsigned int    tRP;  /* ND_nRE pulse width */
+       unsigned int    tR;   /* ND_nWE high to ND_nRE low for read */
+       unsigned int    tWHR; /* ND_nWE high to ND_nRE low for status read */
+       unsigned int    tAR;  /* ND_ALE low to ND_nRE low delay */
+};
+
+struct pxa3xx_nand_cmdset {
+       uint16_t        read1;
+       uint16_t        read2;
+       uint16_t        program;
+       uint16_t        read_status;
+       uint16_t        read_id;
+       uint16_t        erase;
+       uint16_t        reset;
+       uint16_t        lock;
+       uint16_t        unlock;
+       uint16_t        lock_status;
+};
+
+struct pxa3xx_nand_flash {
+       struct pxa3xx_nand_timing *timing; /* NAND Flash timing */
+       struct pxa3xx_nand_cmdset *cmdset;
+
+       uint32_t page_per_block;/* Pages per block (PG_PER_BLK) */
+       uint32_t page_size;     /* Page size in bytes (PAGE_SZ) */
+       uint32_t flash_width;   /* Width of Flash memory (DWIDTH_M) */
+       uint32_t dfc_width;     /* Width of flash controller(DWIDTH_C) */
+       uint32_t num_blocks;    /* Number of physical blocks in Flash */
+       uint32_t chip_id;
+
+       /* NOTE: these are automatically calculated, do not define */
+       size_t          oob_size;
+       size_t          read_id_bytes;
+
+       unsigned int    col_addr_cycles;
+       unsigned int    row_addr_cycles;
+};
+
+struct pxa3xx_nand_info {
+       struct nand_chip        nand_chip;
+
+       struct platform_device   *pdev;
+       struct pxa3xx_nand_flash *flash_info;
+
+       struct clk              *clk;
+       void __iomem            *mmio_base;
+
+       unsigned int            buf_start;
+       unsigned int            buf_count;
+
+       /* DMA information */
+       int                     drcmr_dat;
+       int                     drcmr_cmd;
+
+       unsigned char           *data_buff;
+       dma_addr_t              data_buff_phys;
+       size_t                  data_buff_size;
+       int                     data_dma_ch;
+       struct pxa_dma_desc     *data_desc;
+       dma_addr_t              data_desc_addr;
+
+       uint32_t                reg_ndcr;
+
+       /* saved column/page_addr during CMD_SEQIN */
+       int                     seqin_column;
+       int                     seqin_page_addr;
+
+       /* relate to the command */
+       unsigned int            state;
+
+       int                     use_ecc;        /* use HW ECC ? */
+       int                     use_dma;        /* use DMA ? */
+
+       size_t                  data_size;      /* data size in FIFO */
+       int                     retcode;
+       struct completion       cmd_complete;
+
+       /* generated NDCBx register values */
+       uint32_t                ndcb0;
+       uint32_t                ndcb1;
+       uint32_t                ndcb2;
+};
+
+static int use_dma = 1;
+module_param(use_dma, bool, 0444);
+MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
+
+static struct pxa3xx_nand_cmdset smallpage_cmdset = {
+       .read1          = 0x0000,
+       .read2          = 0x0050,
+       .program        = 0x1080,
+       .read_status    = 0x0070,
+       .read_id        = 0x0090,
+       .erase          = 0xD060,
+       .reset          = 0x00FF,
+       .lock           = 0x002A,
+       .unlock         = 0x2423,
+       .lock_status    = 0x007A,
+};
+
+static struct pxa3xx_nand_cmdset largepage_cmdset = {
+       .read1          = 0x3000,
+       .read2          = 0x0050,
+       .program        = 0x1080,
+       .read_status    = 0x0070,
+       .read_id        = 0x0090,
+       .erase          = 0xD060,
+       .reset          = 0x00FF,
+       .lock           = 0x002A,
+       .unlock         = 0x2423,
+       .lock_status    = 0x007A,
+};
+
+static struct pxa3xx_nand_timing samsung512MbX16_timing = {
+       .tCH    = 10,
+       .tCS    = 0,
+       .tWH    = 20,
+       .tWP    = 40,
+       .tRH    = 30,
+       .tRP    = 40,
+       .tR     = 11123,
+       .tWHR   = 110,
+       .tAR    = 10,
+};
+
+static struct pxa3xx_nand_flash samsung512MbX16 = {
+       .timing         = &samsung512MbX16_timing,
+       .cmdset         = &smallpage_cmdset,
+       .page_per_block = 32,
+       .page_size      = 512,
+       .flash_width    = 16,
+       .dfc_width      = 16,
+       .num_blocks     = 4096,
+       .chip_id        = 0x46ec,
+};
+
+static struct pxa3xx_nand_timing micron_timing = {
+       .tCH    = 10,
+       .tCS    = 25,
+       .tWH    = 15,
+       .tWP    = 25,
+       .tRH    = 15,
+       .tRP    = 25,
+       .tR     = 25000,
+       .tWHR   = 60,
+       .tAR    = 10,
+};
+
+static struct pxa3xx_nand_flash micron1GbX8 = {
+       .timing         = &micron_timing,
+       .cmdset         = &largepage_cmdset,
+       .page_per_block = 64,
+       .page_size      = 2048,
+       .flash_width    = 8,
+       .dfc_width      = 8,
+       .num_blocks     = 1024,
+       .chip_id        = 0xa12c,
+};
+
+static struct pxa3xx_nand_flash micron1GbX16 = {
+       .timing         = &micron_timing,
+       .cmdset         = &largepage_cmdset,
+       .page_per_block = 64,
+       .page_size      = 2048,
+       .flash_width    = 16,
+       .dfc_width      = 16,
+       .num_blocks     = 1024,
+       .chip_id        = 0xb12c,
+};
+
+static struct pxa3xx_nand_flash *builtin_flash_types[] = {
+       &samsung512MbX16,
+       &micron1GbX8,
+       &micron1GbX16,
+};
+
+#define NDTR0_tCH(c)   (min((c), 7) << 19)
+#define NDTR0_tCS(c)   (min((c), 7) << 16)
+#define NDTR0_tWH(c)   (min((c), 7) << 11)
+#define NDTR0_tWP(c)   (min((c), 7) << 8)
+#define NDTR0_tRH(c)   (min((c), 7) << 3)
+#define NDTR0_tRP(c)   (min((c), 7) << 0)
+
+#define NDTR1_tR(c)    (min((c), 65535) << 16)
+#define NDTR1_tWHR(c)  (min((c), 15) << 4)
+#define NDTR1_tAR(c)   (min((c), 15) << 0)
+
+/* convert nano-seconds to nand flash controller clock cycles */
+#define ns2cycle(ns, clk)      (int)(((ns) * (clk / 1000000) / 1000) + 1)
+
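/* Worked example (illustrative; not part of this file): assuming a 156 MHz
 * controller clock, ns2cycle(25, 156000000) evaluates to
 * (25 * (156000000 / 1000000) / 1000) + 1 = (25 * 156 / 1000) + 1 = 3 + 1 = 4,
 * i.e. a 25 ns timing parameter rounds up to 4 controller clock cycles.
 */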
+static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info,
+                                  struct pxa3xx_nand_timing *t)
+{
+       unsigned long nand_clk = clk_get_rate(info->clk);
+       uint32_t ndtr0, ndtr1;
+
+       ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
+               NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
+               NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
+               NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
+               NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
+               NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
+
+       ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
+               NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
+               NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
+
+       nand_writel(info, NDTR0CS0, ndtr0);
+       nand_writel(info, NDTR1CS0, ndtr1);
+}
+
+#define WAIT_EVENT_TIMEOUT     10
+
+static int wait_for_event(struct pxa3xx_nand_info *info, uint32_t event)
+{
+       int timeout = WAIT_EVENT_TIMEOUT;
+       uint32_t ndsr;
+
+       while (timeout--) {
+               ndsr = nand_readl(info, NDSR) & NDSR_MASK;
+               if (ndsr & event) {
+                       nand_writel(info, NDSR, ndsr);
+                       return 0;
+               }
+               udelay(10);
+       }
+
+       return -ETIMEDOUT;
+}
+
+static int prepare_read_prog_cmd(struct pxa3xx_nand_info *info,
+                       uint16_t cmd, int column, int page_addr)
+{
+       struct pxa3xx_nand_flash *f = info->flash_info;
+       struct pxa3xx_nand_cmdset *cmdset = f->cmdset;
+
+       /* calculate data size */
+       switch (f->page_size) {
+       case 2048:
+               info->data_size = (info->use_ecc) ? 2088 : 2112;
+               break;
+       case 512:
+               info->data_size = (info->use_ecc) ? 520 : 528;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       /* generate values for NDCBx registers */
+       info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
+       info->ndcb1 = 0;
+       info->ndcb2 = 0;
+       info->ndcb0 |= NDCB0_ADDR_CYC(f->row_addr_cycles + f->col_addr_cycles);
+
+       if (f->col_addr_cycles == 2) {
+               /* large block, 2 cycles for column address
+                * row address starts from 3rd cycle
+                */
+               info->ndcb1 |= (page_addr << 16) | (column & 0xffff);
+               if (f->row_addr_cycles == 3)
+                       info->ndcb2 = (page_addr >> 16) & 0xff;
+       } else
+               /* small block, 1 cycle for column address
+                * row address starts from 2nd cycle
+                */
+               info->ndcb1 = (page_addr << 8) | (column & 0xff);
+
+       if (cmd == cmdset->program)
+               info->ndcb0 |= NDCB0_CMD_TYPE(1) | NDCB0_AUTO_RS;
+
+       return 0;
+}
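/* Worked example (illustrative; not part of this file): for a large-page
 * part with col_addr_cycles == 2 and row_addr_cycles == 3, column 0 and
 * page_addr 0x12345 pack as
 *   ndcb1 = (0x12345 << 16) | 0x0000 = 0x23450000  (low 32 bits kept)
 *   ndcb2 = (0x12345 >> 16) & 0xff   = 0x01
 * so the third row-address cycle carries the page address's high bits.
 */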
+
+static int prepare_erase_cmd(struct pxa3xx_nand_info *info,
+                       uint16_t cmd, int page_addr)
+{
+       info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
+       info->ndcb0 |= NDCB0_CMD_TYPE(2) | NDCB0_AUTO_RS | NDCB0_ADDR_CYC(3);
+       info->ndcb1 = page_addr;
+       info->ndcb2 = 0;
+       return 0;
+}
+
+static int prepare_other_cmd(struct pxa3xx_nand_info *info, uint16_t cmd)
+{
+       struct pxa3xx_nand_cmdset *cmdset = info->flash_info->cmdset;
+
+       info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
+       info->ndcb1 = 0;
+       info->ndcb2 = 0;
+
+       if (cmd == cmdset->read_id) {
+               info->ndcb0 |= NDCB0_CMD_TYPE(3);
+               info->data_size = 8;
+       } else if (cmd == cmdset->read_status) {
+               info->ndcb0 |= NDCB0_CMD_TYPE(4);
+               info->data_size = 8;
+       } else if (cmd == cmdset->reset || cmd == cmdset->lock ||
+                  cmd == cmdset->unlock) {
+               info->ndcb0 |= NDCB0_CMD_TYPE(5);
+       } else
+               return -EINVAL;
+
+       return 0;
+}
+
+static void enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
+{
+       uint32_t ndcr;
+
+       ndcr = nand_readl(info, NDCR);
+       nand_writel(info, NDCR, ndcr & ~int_mask);
+}
+
+static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
+{
+       uint32_t ndcr;
+
+       ndcr = nand_readl(info, NDCR);
+       nand_writel(info, NDCR, ndcr | int_mask);
+}
+
+/* NOTE: ND_RUN must be set first, and only then the command buffer written;
+ * otherwise, it does not work
+ */
+static int write_cmd(struct pxa3xx_nand_info *info)
+{
+       uint32_t ndcr;
+
+       /* clear status bits and run */
+       nand_writel(info, NDSR, NDSR_MASK);
+
+       ndcr = info->reg_ndcr;
+
+       ndcr |= info->use_ecc ? NDCR_ECC_EN : 0;
+       ndcr |= info->use_dma ? NDCR_DMA_EN : 0;
+       ndcr |= NDCR_ND_RUN;
+
+       nand_writel(info, NDCR, ndcr);
+
+       if (wait_for_event(info, NDSR_WRCMDREQ)) {
+               printk(KERN_ERR "timed out writing command\n");
+               return -ETIMEDOUT;
+       }
+
+       nand_writel(info, NDCB0, info->ndcb0);
+       nand_writel(info, NDCB0, info->ndcb1);
+       nand_writel(info, NDCB0, info->ndcb2);
+       return 0;
+}
+
+static int handle_data_pio(struct pxa3xx_nand_info *info)
+{
+       int ret, timeout = CHIP_DELAY_TIMEOUT;
+
+       switch (info->state) {
+       case STATE_PIO_WRITING:
+               __raw_writesl(info->mmio_base + NDDB, info->data_buff,
+                               info->data_size << 2);
+
+               enable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
+
+               ret = wait_for_completion_timeout(&info->cmd_complete, timeout);
+               if (!ret) {
+                       printk(KERN_ERR "program command time out\n");
+                       return -1;
+               }
+               break;
+       case STATE_PIO_READING:
+               __raw_readsl(info->mmio_base + NDDB, info->data_buff,
+                               info->data_size << 2);
+               break;
+       default:
+               printk(KERN_ERR "%s: invalid state %d\n", __func__,
+                               info->state);
+               return -EINVAL;
+       }
+
+       info->state = STATE_READY;
+       return 0;
+}
+
+static void start_data_dma(struct pxa3xx_nand_info *info, int dir_out)
+{
+       struct pxa_dma_desc *desc = info->data_desc;
+       int dma_len = ALIGN(info->data_size, 32);
+
+       desc->ddadr = DDADR_STOP;
+       desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;
+
+       if (dir_out) {
+               desc->dsadr = info->data_buff_phys;
+               desc->dtadr = NDDB_DMA_ADDR;
+               desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG;
+       } else {
+               desc->dtadr = info->data_buff_phys;
+               desc->dsadr = NDDB_DMA_ADDR;
+               desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
+       }
+
+       DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch;
+       DDADR(info->data_dma_ch) = info->data_desc_addr;
+       DCSR(info->data_dma_ch) |= DCSR_RUN;
+}
+
+static void pxa3xx_nand_data_dma_irq(int channel, void *data)
+{
+       struct pxa3xx_nand_info *info = data;
+       uint32_t dcsr;
+
+       dcsr = DCSR(channel);
+       DCSR(channel) = dcsr;
+
+       if (dcsr & DCSR_BUSERR) {
+               info->retcode = ERR_DMABUSERR;
+               complete(&info->cmd_complete);
+       }
+
+       if (info->state == STATE_DMA_WRITING) {
+               info->state = STATE_DMA_DONE;
+               enable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
+       } else {
+               info->state = STATE_READY;
+               complete(&info->cmd_complete);
+       }
+}
+
+static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
+{
+       struct pxa3xx_nand_info *info = devid;
+       unsigned int status;
+
+       status = nand_readl(info, NDSR);
+
+       if (status & (NDSR_RDDREQ | NDSR_DBERR)) {
+               if (status & NDSR_DBERR)
+                       info->retcode = ERR_DBERR;
+
+               disable_int(info, NDSR_RDDREQ | NDSR_DBERR);
+
+               if (info->use_dma) {
+                       info->state = STATE_DMA_READING;
+                       start_data_dma(info, 0);
+               } else {
+                       info->state = STATE_PIO_READING;
+                       complete(&info->cmd_complete);
+               }
+       } else if (status & NDSR_WRDREQ) {
+               disable_int(info, NDSR_WRDREQ);
+               if (info->use_dma) {
+                       info->state = STATE_DMA_WRITING;
+                       start_data_dma(info, 1);
+               } else {
+                       info->state = STATE_PIO_WRITING;
+                       complete(&info->cmd_complete);
+               }
+       } else if (status & (NDSR_CS0_BBD | NDSR_CS0_CMDD)) {
+               if (status & NDSR_CS0_BBD)
+                       info->retcode = ERR_BBERR;
+
+               disable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
+               info->state = STATE_READY;
+               complete(&info->cmd_complete);
+       }
+       nand_writel(info, NDSR, status);
+       return IRQ_HANDLED;
+}
+
+static int pxa3xx_nand_do_cmd(struct pxa3xx_nand_info *info, uint32_t event)
+{
+       uint32_t ndcr;
+       int ret, timeout = CHIP_DELAY_TIMEOUT;
+
+       if (write_cmd(info)) {
+               info->retcode = ERR_SENDCMD;
+               goto fail_stop;
+       }
+
+       info->state = STATE_CMD_HANDLE;
+
+       enable_int(info, event);
+
+       ret = wait_for_completion_timeout(&info->cmd_complete, timeout);
+       if (!ret) {
+               printk(KERN_ERR "command execution timed out\n");
+               info->retcode = ERR_SENDCMD;
+               goto fail_stop;
+       }
+
+       if (info->use_dma == 0 && info->data_size > 0)
+               if (handle_data_pio(info))
+                       goto fail_stop;
+
+       return 0;
+
+fail_stop:
+       ndcr = nand_readl(info, NDCR);
+       nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN);
+       udelay(10);
+       return -ETIMEDOUT;
+}
+
+static int pxa3xx_nand_dev_ready(struct mtd_info *mtd)
+{
+       struct pxa3xx_nand_info *info = mtd->priv;
+       return (nand_readl(info, NDSR) & NDSR_RDY) ? 1 : 0;
+}
+
+static inline int is_buf_blank(uint8_t *buf, size_t len)
+{
+       for (; len > 0; len--)
+               if (*buf++ != 0xff)
+                       return 0;
+       return 1;
+}
+
+static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
+                               int column, int page_addr)
+{
+       struct pxa3xx_nand_info *info = mtd->priv;
+       struct pxa3xx_nand_flash *flash_info = info->flash_info;
+       struct pxa3xx_nand_cmdset *cmdset = flash_info->cmdset;
+       int ret;
+
+       info->use_dma = (use_dma) ? 1 : 0;
+       info->use_ecc = 0;
+       info->data_size = 0;
+       info->state = STATE_READY;
+
+       init_completion(&info->cmd_complete);
+
+       switch (command) {
+       case NAND_CMD_READOOB:
+               /* disable HW ECC to get all the OOB data */
+               info->buf_count = mtd->writesize + mtd->oobsize;
+               info->buf_start = mtd->writesize + column;
+
+               if (prepare_read_prog_cmd(info, cmdset->read1, column, page_addr))
+                       break;
+
+               pxa3xx_nand_do_cmd(info, NDSR_RDDREQ | NDSR_DBERR);
+
+               /* We only need the OOB data, so an error in the data area does not matter */
+               if (info->retcode == ERR_DBERR)
+                       info->retcode = ERR_NONE;
+               break;
+
+       case NAND_CMD_READ0:
+               info->use_ecc = 1;
+               info->retcode = ERR_NONE;
+               info->buf_start = column;
+               info->buf_count = mtd->writesize + mtd->oobsize;
+               memset(info->data_buff, 0xFF, info->buf_count);
+
+               if (prepare_read_prog_cmd(info, cmdset->read1, column, page_addr))
+                       break;
+
+               pxa3xx_nand_do_cmd(info, NDSR_RDDREQ | NDSR_DBERR);
+
+               if (info->retcode == ERR_DBERR) {
+                       /* for a blank page (all 0xff), the HW calculates its
+                        * ECC as 0, which differs from the ECC stored in the
+                        * OOB area; ignore such double-bit errors
+                        */
+                       if (is_buf_blank(info->data_buff, mtd->writesize))
+                               info->retcode = ERR_NONE;
+               }
+               break;
+       case NAND_CMD_SEQIN:
+               info->buf_start = column;
+               info->buf_count = mtd->writesize + mtd->oobsize;
+               memset(info->data_buff, 0xff, info->buf_count);
+
+               /* save column/page_addr for next CMD_PAGEPROG */
+               info->seqin_column = column;
+               info->seqin_page_addr = page_addr;
+               break;
+       case NAND_CMD_PAGEPROG:
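+               /* a program that starts in the OOB area is written without HW ECC */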
+               info->use_ecc = (info->seqin_column >= mtd->writesize) ? 0 : 1;
+
+               if (prepare_read_prog_cmd(info, cmdset->program,
+                               info->seqin_column, info->seqin_page_addr))
+                       break;
+
+               pxa3xx_nand_do_cmd(info, NDSR_WRDREQ);
+               break;
+       case NAND_CMD_ERASE1:
+               if (prepare_erase_cmd(info, cmdset->erase, page_addr))
+                       break;
+
+               pxa3xx_nand_do_cmd(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
+               break;
+       case NAND_CMD_ERASE2:
+               break;
+       case NAND_CMD_READID:
+       case NAND_CMD_STATUS:
+               info->use_dma = 0;      /* force PIO read */
+               info->buf_start = 0;
+               info->buf_count = (command == NAND_CMD_READID) ?
+                               flash_info->read_id_bytes : 1;
+
+               if (prepare_other_cmd(info, (command == NAND_CMD_READID) ?
+                               cmdset->read_id : cmdset->read_status))
+                       break;
+
+               pxa3xx_nand_do_cmd(info, NDSR_RDDREQ);
+               break;
+       case NAND_CMD_RESET:
+               if (prepare_other_cmd(info, cmdset->reset))
+                       break;
+
+               ret = pxa3xx_nand_do_cmd(info, NDSR_CS0_CMDD);
+               if (ret == 0) {
+                       int timeout = 2;
+                       uint32_t ndcr;
+
+                       while (timeout--) {
+                               if (nand_readl(info, NDSR) & NDSR_RDY)
+                                       break;
+                               msleep(10);
+                       }
+
+                       ndcr = nand_readl(info, NDCR);
+                       nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN);
+               }
+               break;
+       default:
+               printk(KERN_ERR "unsupported command\n");
+               break;
+       }
+
+       if (info->retcode == ERR_DBERR) {
+               printk(KERN_ERR "double bit error @ page %08x\n", page_addr);
+               info->retcode = ERR_NONE;
+       }
+}
+
+static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
+{
+       struct pxa3xx_nand_info *info = mtd->priv;
+       char retval = 0xFF;
+
+       if (info->buf_start < info->buf_count)
+               /* Has a new command just been sent? */
+               retval = info->data_buff[info->buf_start++];
+
+       return retval;
+}
+
+static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
+{
+       struct pxa3xx_nand_info *info = mtd->priv;
+       u16 retval = 0xFFFF;
+
+       if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
+               retval = *((u16 *)(info->data_buff+info->buf_start));
+               info->buf_start += 2;
+       }
+       return retval;
+}
+
+static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+       struct pxa3xx_nand_info *info = mtd->priv;
+       int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
+
+       memcpy(buf, info->data_buff + info->buf_start, real_len);
+       info->buf_start += real_len;
+}
+
+static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
+               const uint8_t *buf, int len)
+{
+       struct pxa3xx_nand_info *info = mtd->priv;
+       int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
+
+       memcpy(info->data_buff + info->buf_start, buf, real_len);
+       info->buf_start += real_len;
+}
+
+static int pxa3xx_nand_verify_buf(struct mtd_info *mtd,
+               const uint8_t *buf, int len)
+{
+       return 0;
+}
+
+static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
+{
+       return;
+}
+
+static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
+{
+       struct pxa3xx_nand_info *info = mtd->priv;
+
+       /* pxa3xx_nand_do_cmd() has already waited for the command to complete */
+       if (this->state == FL_WRITING || this->state == FL_ERASING) {
+               if (info->retcode == ERR_NONE)
+                       return 0;
+               else {
+                       /*
+                        * any error makes this return 0x01, which tells the
+                        * caller that the erase or write failed
+                        */
+                       return 0x01;
+               }
+       }
+
+       return 0;
+}
+
+static void pxa3xx_nand_ecc_hwctl(struct mtd_info *mtd, int mode)
+{
+       return;
+}
+
+static int pxa3xx_nand_ecc_calculate(struct mtd_info *mtd,
+               const uint8_t *dat, uint8_t *ecc_code)
+{
+       return 0;
+}
+
+static int pxa3xx_nand_ecc_correct(struct mtd_info *mtd,
+               uint8_t *dat, uint8_t *read_ecc, uint8_t *calc_ecc)
+{
+       struct pxa3xx_nand_info *info = mtd->priv;
+       /*
+        * Any error (ERR_SEND_CMD, ERR_DBERR, ERR_BUSERR, ...) is reported
+        * as an ECC error so that the caller treats the read as failed.
+        * We do distinguish the individual errors internally, but
+        * nand_read_ecc() only checks this function's return value.
+        */
+       if (info->retcode != ERR_NONE)
+               return -1;
+
+       return 0;
+}
+
+static int __readid(struct pxa3xx_nand_info *info, uint32_t *id)
+{
+       struct pxa3xx_nand_flash *f = info->flash_info;
+       struct pxa3xx_nand_cmdset *cmdset = f->cmdset;
+       uint32_t ndcr;
+       uint8_t  id_buff[8];
+
+       if (prepare_other_cmd(info, cmdset->read_id)) {
+               printk(KERN_ERR "failed to prepare command\n");
+               return -EINVAL;
+       }
+
+       /* Send command */
+       if (write_cmd(info))
+               goto fail_timeout;
+
+       /* Wait for the read data request (NDSR_RDDREQ) before fetching the ID */
+       if (wait_for_event(info, NDSR_RDDREQ))
+               goto fail_timeout;
+
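+       /* the controller returns the ID in 32-bit chunks; only the first
+        * two bytes (maker and device code) form the id handed back
+        */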
+       __raw_readsl(info->mmio_base + NDDB, id_buff, 2);
+       *id = id_buff[0] | (id_buff[1] << 8);
+       return 0;
+
+fail_timeout:
+       ndcr = nand_readl(info, NDCR);
+       nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN);
+       udelay(10);
+       return -ETIMEDOUT;
+}
+
+static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
+                                   struct pxa3xx_nand_flash *f)
+{
+       struct platform_device *pdev = info->pdev;
+       struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
+       uint32_t ndcr = 0x00000FFF; /* disable all interrupts */
+
+       if (f->page_size != 2048 && f->page_size != 512)
+               return -EINVAL;
+
+       if (f->flash_width != 16 && f->flash_width != 8)
+               return -EINVAL;
+
+       /* calculate flash information */
+       f->oob_size = (f->page_size == 2048) ? 64 : 16;
+       f->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
+
+       /* calculate addressing information */
+       f->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
+
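+       /* devices with more than 65536 pages need a third row address cycle */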
+       if (f->num_blocks * f->page_per_block > 65536)
+               f->row_addr_cycles = 3;
+       else
+               f->row_addr_cycles = 2;
+
+       ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
+       ndcr |= (f->col_addr_cycles == 2) ? NDCR_RA_START : 0;
+       ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
+       ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
+       ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
+       ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
+
+       ndcr |= NDCR_RD_ID_CNT(f->read_id_bytes);
+       ndcr |= NDCR_SPARE_EN; /* enable spare by default */
+
+       info->reg_ndcr = ndcr;
+
+       pxa3xx_nand_set_timing(info, f->timing);
+       info->flash_info = f;
+       return 0;
+}
+
+static int pxa3xx_nand_detect_flash(struct pxa3xx_nand_info *info)
+{
+       struct pxa3xx_nand_flash *f;
+       uint32_t id;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(builtin_flash_types); i++) {
+
+               f = builtin_flash_types[i];
+
+               if (pxa3xx_nand_config_flash(info, f))
+                       continue;
+
+               if (__readid(info, &id))
+                       continue;
+
+               if (id == f->chip_id)
+                       return 0;
+       }
+
+       return -ENODEV;
+}
+
+/* the maximum possible buffer size for a large page with OOB data
+ * is 2048 + 64 = 2112 bytes; allocate a whole page here for both
+ * the data buffer and the DMA descriptor
+ */
+#define MAX_BUFF_SIZE  PAGE_SIZE
+
+static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
+{
+       struct platform_device *pdev = info->pdev;
+       int data_desc_offset = MAX_BUFF_SIZE - sizeof(struct pxa_dma_desc);
+
+       if (use_dma == 0) {
+               info->data_buff = kmalloc(MAX_BUFF_SIZE, GFP_KERNEL);
+               if (info->data_buff == NULL)
+                       return -ENOMEM;
+               return 0;
+       }
+
+       info->data_buff = dma_alloc_coherent(&pdev->dev, MAX_BUFF_SIZE,
+                               &info->data_buff_phys, GFP_KERNEL);
+       if (info->data_buff == NULL) {
+               dev_err(&pdev->dev, "failed to allocate dma buffer\n");
+               return -ENOMEM;
+       }
+
+       info->data_buff_size = MAX_BUFF_SIZE;
+       info->data_desc = (void *)info->data_buff + data_desc_offset;
+       info->data_desc_addr = info->data_buff_phys + data_desc_offset;
+
+       info->data_dma_ch = pxa_request_dma("nand-data", DMA_PRIO_LOW,
+                               pxa3xx_nand_data_dma_irq, info);
+       if (info->data_dma_ch < 0) {
+               dev_err(&pdev->dev, "failed to request data dma\n");
+               dma_free_coherent(&pdev->dev, info->data_buff_size,
+                               info->data_buff, info->data_buff_phys);
+               return info->data_dma_ch;
+       }
+
+       return 0;
+}
+
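+/*
+ * HW ECC layouts: small pages carry 6 ECC bytes at OOB offsets 8-13,
+ * large pages carry their 24 ECC bytes in the last 24 bytes of the OOB.
+ */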
+static struct nand_ecclayout hw_smallpage_ecclayout = {
+       .eccbytes = 6,
+       .eccpos = {8, 9, 10, 11, 12, 13 },
+       .oobfree = { {2, 6} }
+};
+
+static struct nand_ecclayout hw_largepage_ecclayout = {
+       .eccbytes = 24,
+       .eccpos = {
+               40, 41, 42, 43, 44, 45, 46, 47,
+               48, 49, 50, 51, 52, 53, 54, 55,
+               56, 57, 58, 59, 60, 61, 62, 63},
+       .oobfree = { {2, 38} }
+};
+
+static void pxa3xx_nand_init_mtd(struct mtd_info *mtd,
+                                struct pxa3xx_nand_info *info)
+{
+       struct pxa3xx_nand_flash *f = info->flash_info;
+       struct nand_chip *this = &info->nand_chip;
+
+       this->options = (f->flash_width == 16) ? NAND_BUSWIDTH_16: 0;
+
+       this->waitfunc          = pxa3xx_nand_waitfunc;
+       this->select_chip       = pxa3xx_nand_select_chip;
+       this->dev_ready         = pxa3xx_nand_dev_ready;
+       this->cmdfunc           = pxa3xx_nand_cmdfunc;
+       this->read_word         = pxa3xx_nand_read_word;
+       this->read_byte         = pxa3xx_nand_read_byte;
+       this->read_buf          = pxa3xx_nand_read_buf;
+       this->write_buf         = pxa3xx_nand_write_buf;
+       this->verify_buf        = pxa3xx_nand_verify_buf;
+
+       this->ecc.mode          = NAND_ECC_HW;
+       this->ecc.hwctl         = pxa3xx_nand_ecc_hwctl;
+       this->ecc.calculate     = pxa3xx_nand_ecc_calculate;
+       this->ecc.correct       = pxa3xx_nand_ecc_correct;
+       this->ecc.size          = f->page_size;
+
+       if (f->page_size == 2048)
+               this->ecc.layout = &hw_largepage_ecclayout;
+       else
+               this->ecc.layout = &hw_smallpage_ecclayout;
+
+       this->chip_delay = 25;
+}
+
+static int pxa3xx_nand_probe(struct platform_device *pdev)
+{
+       struct pxa3xx_nand_platform_data *pdata;
+       struct pxa3xx_nand_info *info;
+       struct nand_chip *this;
+       struct mtd_info *mtd;
+       struct resource *r;
+       int ret = 0, irq;
+
+       pdata = pdev->dev.platform_data;
+
+       if (!pdata) {
+               dev_err(&pdev->dev, "no platform data defined\n");
+               return -ENODEV;
+       }
+
+       mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct pxa3xx_nand_info),
+                       GFP_KERNEL);
+       if (!mtd) {
+               dev_err(&pdev->dev, "failed to allocate memory\n");
+               return -ENOMEM;
+       }
+
+       info = (struct pxa3xx_nand_info *)(&mtd[1]);
+       info->pdev = pdev;
+
+       this = &info->nand_chip;
+       mtd->priv = info;
+
+       info->clk = clk_get(&pdev->dev, "NANDCLK");
+       if (IS_ERR(info->clk)) {
+               dev_err(&pdev->dev, "failed to get nand clock\n");
+               ret = PTR_ERR(info->clk);
+               goto fail_free_mtd;
+       }
+       clk_enable(info->clk);
+
+       r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+       if (r == NULL) {
+               dev_err(&pdev->dev, "no resource defined for data DMA\n");
+               ret = -ENXIO;
+               goto fail_put_clk;
+       }
+       info->drcmr_dat = r->start;
+
+       r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+       if (r == NULL) {
+               dev_err(&pdev->dev, "no resource defined for command DMA\n");
+               ret = -ENXIO;
+               goto fail_put_clk;
+       }
+       info->drcmr_cmd = r->start;
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               dev_err(&pdev->dev, "no IRQ resource defined\n");
+               ret = -ENXIO;
+               goto fail_put_clk;
+       }
+
+       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (r == NULL) {
+               dev_err(&pdev->dev, "no IO memory resource defined\n");
+               ret = -ENODEV;
+               goto fail_put_clk;
+       }
+
+       r = request_mem_region(r->start, r->end - r->start + 1, pdev->name);
+       if (r == NULL) {
+               dev_err(&pdev->dev, "failed to request memory resource\n");
+               ret = -EBUSY;
+               goto fail_put_clk;
+       }
+
+       info->mmio_base = ioremap(r->start, r->end - r->start + 1);
+       if (info->mmio_base == NULL) {
+               dev_err(&pdev->dev, "ioremap() failed\n");
+               ret = -ENODEV;
+               goto fail_free_res;
+       }
+
+       ret = pxa3xx_nand_init_buff(info);
+       if (ret)
+               goto fail_free_io;
+
+       ret = request_irq(IRQ_NAND, pxa3xx_nand_irq, IRQF_DISABLED,
+                               pdev->name, info);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "failed to request IRQ\n");
+               goto fail_free_buf;
+       }
+
+       ret = pxa3xx_nand_detect_flash(info);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to detect flash\n");
+               ret = -ENODEV;
+               goto fail_free_irq;
+       }
+
+       pxa3xx_nand_init_mtd(mtd, info);
+
+       platform_set_drvdata(pdev, mtd);
+
+       if (nand_scan(mtd, 1)) {
+               dev_err(&pdev->dev, "failed to scan nand\n");
+               ret = -ENXIO;
+               goto fail_free_irq;
+       }
+
+       return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts);
+
+fail_free_irq:
+       free_irq(IRQ_NAND, info);
+fail_free_buf:
+       if (use_dma) {
+               pxa_free_dma(info->data_dma_ch);
+               dma_free_coherent(&pdev->dev, info->data_buff_size,
+                       info->data_buff, info->data_buff_phys);
+       } else
+               kfree(info->data_buff);
+fail_free_io:
+       iounmap(info->mmio_base);
+fail_free_res:
+       release_mem_region(r->start, r->end - r->start + 1);
+fail_put_clk:
+       clk_disable(info->clk);
+       clk_put(info->clk);
+fail_free_mtd:
+       kfree(mtd);
+       return ret;
+}
+
+static int pxa3xx_nand_remove(struct platform_device *pdev)
+{
+       struct mtd_info *mtd = platform_get_drvdata(pdev);
+       struct pxa3xx_nand_info *info = mtd->priv;
+
+       platform_set_drvdata(pdev, NULL);
+
+       del_mtd_device(mtd);
+       del_mtd_partitions(mtd);
+       free_irq(IRQ_NAND, info);
+       if (use_dma) {
+               pxa_free_dma(info->data_dma_ch);
+               dma_free_coherent(&pdev->dev, info->data_buff_size,
+                               info->data_buff, info->data_buff_phys);
+       } else
+               kfree(info->data_buff);
+       kfree(mtd);
+       return 0;
+}
+
+#ifdef CONFIG_PM
+static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
+{
+       struct mtd_info *mtd = (struct mtd_info *)platform_get_drvdata(pdev);
+       struct pxa3xx_nand_info *info = mtd->priv;
+
+       if (info->state != STATE_READY) {
+               dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
+               return -EAGAIN;
+       }
+
+       return 0;
+}
+
+static int pxa3xx_nand_resume(struct platform_device *pdev)
+{
+       struct mtd_info *mtd = (struct mtd_info *)platform_get_drvdata(pdev);
+       struct pxa3xx_nand_info *info = mtd->priv;
+
+       clk_enable(info->clk);
+
+       return pxa3xx_nand_config_flash(info, info->flash_info);
+}
+#else
+#define pxa3xx_nand_suspend    NULL
+#define pxa3xx_nand_resume     NULL
+#endif
+
+static struct platform_driver pxa3xx_nand_driver = {
+       .driver = {
+               .name   = "pxa3xx-nand",
+       },
+       .probe          = pxa3xx_nand_probe,
+       .remove         = pxa3xx_nand_remove,
+       .suspend        = pxa3xx_nand_suspend,
+       .resume         = pxa3xx_nand_resume,
+};
+
+static int __init pxa3xx_nand_init(void)
+{
+       return platform_driver_register(&pxa3xx_nand_driver);
+}
+module_init(pxa3xx_nand_init);
+
+static void __exit pxa3xx_nand_exit(void)
+{
+       platform_driver_unregister(&pxa3xx_nand_driver);
+}
+module_exit(pxa3xx_nand_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PXA3xx NAND controller driver");
index 0f6ac250f434909708bb6a5206ceb1b5c6ae3fad..26f88215bc47d48c12ff9768da72829366d4ac76 100644 (file)
@@ -478,6 +478,7 @@ static int __init rtc_from4_init(void)
        struct nand_chip *this;
        unsigned short bcr1, bcr2, wcr2;
        int i;
+       int ret;
 
        /* Allocate memory for MTD device structure and private data */
        rtc_from4_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
@@ -537,6 +538,22 @@ static int __init rtc_from4_init(void)
        this->ecc.hwctl = rtc_from4_enable_hwecc;
        this->ecc.calculate = rtc_from4_calculate_ecc;
        this->ecc.correct = rtc_from4_correct_data;
+
+       /* We could create the decoder on demand, if memory is a concern.
+        * This way we have it handy, if an error happens
+        *
+        * Symbol size is 10 (bits)
+        * Primitive polynomial is x^10+x^3+1
+        * first consecutive root is 0
+        * primitive element to generate roots = 1
+        * generator polynomial degree = 6
+        */
+       rs_decoder = init_rs(10, 0x409, 0, 1, 6);
+       if (!rs_decoder) {
+               printk(KERN_ERR "Could not create a RS decoder\n");
+               ret = -ENOMEM;
+               goto err_1;
+       }
 #else
        printk(KERN_INFO "rtc_from4_init: using software ECC detection.\n");
 
@@ -549,8 +566,8 @@ static int __init rtc_from4_init(void)
 
        /* Scan to find existence of the device */
        if (nand_scan(rtc_from4_mtd, RTC_FROM4_MAX_CHIPS)) {
-               kfree(rtc_from4_mtd);
-               return -ENXIO;
+               ret = -ENXIO;
+               goto err_2;
        }
 
        /* Perform 'device recovery' for each chip in case there was a power loss. */
@@ -566,28 +583,19 @@ static int __init rtc_from4_init(void)
 #endif
 
        /* Register the partitions */
-       add_mtd_partitions(rtc_from4_mtd, partition_info, NUM_PARTITIONS);
+       ret = add_mtd_partitions(rtc_from4_mtd, partition_info, NUM_PARTITIONS);
+       if (ret)
+               goto err_3;
 
-#ifdef RTC_FROM4_HWECC
-       /* We could create the decoder on demand, if memory is a concern.
-        * This way we have it handy, if an error happens
-        *
-        * Symbolsize is 10 (bits)
-        * Primitve polynomial is x^10+x^3+1
-        * first consecutive root is 0
-        * primitve element to generate roots = 1
-        * generator polinomial degree = 6
-        */
-       rs_decoder = init_rs(10, 0x409, 0, 1, 6);
-       if (!rs_decoder) {
-               printk(KERN_ERR "Could not create a RS decoder\n");
-               nand_release(rtc_from4_mtd);
-               kfree(rtc_from4_mtd);
-               return -ENOMEM;
-       }
-#endif
        /* Return happy */
        return 0;
+err_3:
+       nand_release(rtc_from4_mtd);
+err_2:
+       free_rs(rs_decoder);
+err_1:
+       kfree(rtc_from4_mtd);
+       return ret;
 }
 
 module_init(rtc_from4_init);
index 9260ad947524f4f4a28959a3b843f48a8069c7f8..b34a460ab67915afe3812d6ced1410cfff6c16b7 100644 (file)
@@ -119,8 +119,7 @@ struct s3c2410_nand_info {
        void __iomem                    *sel_reg;
        int                             sel_bit;
        int                             mtd_count;
-
-       unsigned long                   save_nfconf;
+       unsigned long                   save_sel;
 
        enum s3c_cpu_type               cpu_type;
 };
@@ -358,6 +357,14 @@ static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat,
        if (diff0 == 0 && diff1 == 0 && diff2 == 0)
                return 0;               /* ECC is ok */
 
+       /* sometimes people do not think about using the ECC, so check
+        * to see if we have an 0xff,0xff,0xff read ECC and then ignore
+        * the error, on the assumption that this is an un-eccd page.
+        */
+       if (read_ecc[0] == 0xff && read_ecc[1] == 0xff && read_ecc[2] == 0xff
+           && info->platform->ignore_unset_ecc)
+               return 0;
+
        /* Can we correct this ECC (ie, one row and column change).
         * Note, this is similar to the 256 error code on smartmedia */
 
@@ -473,7 +480,7 @@ static int s3c2440_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u
        ecc_code[1] = ecc >> 8;
        ecc_code[2] = ecc >> 16;
 
-       pr_debug("%s: returning ecc %06lx\n", __func__, ecc);
+       pr_debug("%s: returning ecc %06lx\n", __func__, ecc & 0xffffff);
 
        return 0;
 }
@@ -644,9 +651,6 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
                chip->ecc.calculate = s3c2410_nand_calculate_ecc;
                chip->ecc.correct   = s3c2410_nand_correct_data;
                chip->ecc.mode      = NAND_ECC_HW;
-               chip->ecc.size      = 512;
-               chip->ecc.bytes     = 3;
-               chip->ecc.layout    = &nand_hw_eccoob;
 
                switch (info->cpu_type) {
                case TYPE_S3C2410:
@@ -668,6 +672,40 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
        } else {
                chip->ecc.mode      = NAND_ECC_SOFT;
        }
+
+       if (set->ecc_layout != NULL)
+               chip->ecc.layout = set->ecc_layout;
+
+       if (set->disable_ecc)
+               chip->ecc.mode  = NAND_ECC_NONE;
+}
+
+/* s3c2410_nand_update_chip
+ *
+ * post-probe chip update, to change any items, such as the
+ * layout for large page nand
+ */
+
+static void s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
+                                    struct s3c2410_nand_mtd *nmtd)
+{
+       struct nand_chip *chip = &nmtd->chip;
+
+       printk("%s: chip %p: %d\n", __func__, chip, chip->page_shift);
+
+       if (hardware_ecc) {
+               /* change the behaviour depending on whether we are using
+                * the large or small page nand device: large pages use
+                * 256 byte ECC chunks, small pages keep 512 byte chunks
+                * and the default OOB layout */
+
+               if (chip->page_shift > 10) {
+                       chip->ecc.size      = 256;
+                       chip->ecc.bytes     = 3;
+               } else {
+                       chip->ecc.size      = 512;
+                       chip->ecc.bytes     = 3;
+                       chip->ecc.layout    = &nand_hw_eccoob;
+               }
+       }
 }
 
 /* s3c2410_nand_probe
@@ -776,9 +814,12 @@ static int s3c24xx_nand_probe(struct platform_device *pdev,
 
                s3c2410_nand_init_chip(info, nmtd, sets);
 
-               nmtd->scan_res = nand_scan(&nmtd->mtd, (sets) ? sets->nr_chips : 1);
+               nmtd->scan_res = nand_scan_ident(&nmtd->mtd,
+                                                (sets) ? sets->nr_chips : 1);
 
                if (nmtd->scan_res == 0) {
+                       s3c2410_nand_update_chip(info, nmtd);
+                       nand_scan_tail(&nmtd->mtd);
                        s3c2410_nand_add_partition(info, nmtd, sets);
                }
 
@@ -810,15 +851,14 @@ static int s3c24xx_nand_suspend(struct platform_device *dev, pm_message_t pm)
        struct s3c2410_nand_info *info = platform_get_drvdata(dev);
 
        if (info) {
-               info->save_nfconf = readl(info->regs + S3C2410_NFCONF);
+               info->save_sel = readl(info->sel_reg);
 
                /* For the moment, we must ensure nFCE is high during
                 * the time we are suspended. This really should be
                 * handled by suspending the MTDs we are using, but
                 * that is currently not the case. */
 
-               writel(info->save_nfconf | info->sel_bit,
-                      info->regs + S3C2410_NFCONF);
+               writel(info->save_sel | info->sel_bit, info->sel_reg);
 
                if (!allow_clk_stop(info))
                        clk_disable(info->clk);
@@ -830,7 +870,7 @@ static int s3c24xx_nand_suspend(struct platform_device *dev, pm_message_t pm)
 static int s3c24xx_nand_resume(struct platform_device *dev)
 {
        struct s3c2410_nand_info *info = platform_get_drvdata(dev);
-       unsigned long nfconf;
+       unsigned long sel;
 
        if (info) {
                clk_enable(info->clk);
@@ -838,10 +878,10 @@ static int s3c24xx_nand_resume(struct platform_device *dev)
 
                /* Restore the state of the nFCE line. */
 
-               nfconf = readl(info->regs + S3C2410_NFCONF);
-               nfconf &= ~info->sel_bit;
-               nfconf |= info->save_nfconf & info->sel_bit;
-               writel(nfconf, info->regs + S3C2410_NFCONF);
+               sel = readl(info->sel_reg);
+               sel &= ~info->sel_bit;
+               sel |= info->save_sel & info->sel_bit;
+               writel(sel, info->sel_reg);
 
                if (allow_clk_stop(info))
                        clk_disable(info->clk);
@@ -927,3 +967,6 @@ module_exit(s3c2410_nand_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
 MODULE_DESCRIPTION("S3C24XX MTD NAND driver");
+MODULE_ALIAS("platform:s3c2410-nand");
+MODULE_ALIAS("platform:s3c2412-nand");
+MODULE_ALIAS("platform:s3c2440-nand");
index 0513cbc8834d66b78d7e0c85d1f6532447048044..345e6eff89ce5929c3548d1874b6363b102ae554 100644 (file)
 
 char nftlmountrev[]="$Revision: 1.41 $";
 
-extern int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
-                        size_t *retlen, uint8_t *buf);
-extern int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
-                         size_t *retlen, uint8_t *buf);
-
 /* find_boot_record: Find the NFTL Media Header and its Spare copy which contains the
  *     various device information of the NFTL partition and Bad Unit Table. Update
  *     the ReplUnitTable[] table accroding to the Bad Unit Table. ReplUnitTable[]
index f86e06934cd8d2cb37b64e5c5064ab5125a60978..4f80c2fd89af71a0a0c9e5bae77c013a663a4c7b 100644 (file)
@@ -72,3 +72,5 @@ int __devinit of_mtd_parse_partitions(struct device *dev,
        return nr_parts;
 }
 EXPORT_SYMBOL(of_mtd_parse_partitions);
+
+MODULE_LICENSE("GPL");
index 8d7d21be1541867aca5a494b3965d49c6316c0bd..5d7965f7e9ce6a3f8232a53cb2c0e2a243f13379 100644 (file)
@@ -329,6 +329,21 @@ static int onenand_wait(struct mtd_info *mtd, int state)
                printk(KERN_ERR "onenand_wait: controller error = 0x%04x\n", ctrl);
                if (ctrl & ONENAND_CTRL_LOCK)
                        printk(KERN_ERR "onenand_wait: it's locked error.\n");
+               if (state == FL_READING) {
+                       /*
+                        * A power loss while writing can result in a page
+                        * becoming unreadable.  When the device is mounted
+                        * again, reading that page gives controller errors.
+                        * Upper level software like JFFS2 treats -EIO as fatal,
+                        * refusing to mount at all.  That means it is necessary
+                        * to treat the error as an ECC error to allow recovery.
+                        * Note that typically in this case, the eraseblock can
+                        * still be erased and rewritten i.e. it has not become
+                        * a bad block.
+                        */
+                       mtd->ecc_stats.failed++;
+                       return -EBADMSG;
+               }
                return -EIO;
        }
 
@@ -1336,7 +1351,7 @@ static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
        }
 
        /* Reject writes, which are not page aligned */
-        if (unlikely(NOTALIGNED(to)) || unlikely(NOTALIGNED(len))) {
+        if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) {
                 printk(KERN_ERR "onenand_panic_write: Attempt to write not page aligned data\n");
                 return -EINVAL;
         }
@@ -1466,7 +1481,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
        }
 
        /* Reject writes, which are not page aligned */
-        if (unlikely(NOTALIGNED(to)) || unlikely(NOTALIGNED(len))) {
+        if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) {
                 printk(KERN_ERR "onenand_write_ops_nolock: Attempt to write not page aligned data\n");
                 return -EINVAL;
         }
@@ -2052,7 +2067,7 @@ static int onenand_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
  *
  * Check lock status
  */
-static void onenand_check_lock_status(struct onenand_chip *this)
+static int onenand_check_lock_status(struct onenand_chip *this)
 {
        unsigned int value, block, status;
        unsigned int end;
@@ -2070,9 +2085,13 @@ static void onenand_check_lock_status(struct onenand_chip *this)
 
                /* Check lock status */
                status = this->read_word(this->base + ONENAND_REG_WP_STATUS);
-               if (!(status & ONENAND_WP_US))
+               if (!(status & ONENAND_WP_US)) {
                        printk(KERN_ERR "block = %d, wp status = 0x%x\n", block, status);
+                       return 0;
+               }
        }
+
+       return 1;
 }
 
 /**
@@ -2081,9 +2100,11 @@ static void onenand_check_lock_status(struct onenand_chip *this)
  *
  * Unlock all blocks
  */
-static int onenand_unlock_all(struct mtd_info *mtd)
+static void onenand_unlock_all(struct mtd_info *mtd)
 {
        struct onenand_chip *this = mtd->priv;
+       loff_t ofs = 0;
+       size_t len = this->chipsize;
 
        if (this->options & ONENAND_HAS_UNLOCK_ALL) {
                /* Set start block address */
@@ -2099,23 +2120,19 @@ static int onenand_unlock_all(struct mtd_info *mtd)
                    & ONENAND_CTRL_ONGO)
                        continue;
 
+               /* Check lock status */
+               if (onenand_check_lock_status(this))
+                       return;
+
                /* Workaround for all block unlock in DDP */
                if (ONENAND_IS_DDP(this)) {
-                       /* 1st block on another chip */
-                       loff_t ofs = this->chipsize >> 1;
-                       size_t len = mtd->erasesize;
-
-                       onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK);
+                       /* All blocks on another chip */
+                       ofs = this->chipsize >> 1;
+                       len = this->chipsize >> 1;
                }
-
-               onenand_check_lock_status(this);
-
-               return 0;
        }
 
-       onenand_do_lock_cmd(mtd, 0x0, this->chipsize, ONENAND_CMD_UNLOCK);
-
-       return 0;
+       onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK);
 }
 
 #ifdef CONFIG_MTD_ONENAND_OTP
index aecdd50a1781354556c9948ed86b4a1f09e52c36..2f53b51c68054cb5a11aa17bf9646c56c7d975e3 100644 (file)
@@ -17,9 +17,6 @@
 #include <linux/mtd/onenand.h>
 #include <linux/mtd/compatmac.h>
 
-extern int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
-                               struct mtd_oob_ops *ops);
-
 /**
  * check_short_pattern - [GENERIC] check if a pattern is in the buffer
  * @param buf          the buffer to search
index 823fba4e6d2fa99c5c5ea2049edd820c9fda1c5c..c84e45465499dba5ad0117c1fc213b4104705171 100644 (file)
@@ -823,7 +823,7 @@ static void rfd_ftl_remove_dev(struct mtd_blktrans_dev *dev)
        kfree(part);
 }
 
-struct mtd_blktrans_ops rfd_ftl_tr = {
+static struct mtd_blktrans_ops rfd_ftl_tr = {
        .name           = "rfd",
        .major          = RFD_FTL_MAJOR,
        .part_bits      = PART_BITS,
index b9daf159a4a721a0228a59203309120930468ac1..3f063108e95fe3f987c9bdaa8e3a8f2c9255c402 100644 (file)
@@ -24,8 +24,13 @@ config MTD_UBI_WL_THRESHOLD
          erase counter value and the lowest erase counter value of eraseblocks
          of UBI devices. When this threshold is exceeded, UBI starts performing
          wear leveling by means of moving data from eraseblock with low erase
-         counter to eraseblocks with high erase counter. Leave the default
-         value if unsure.
+         counter to eraseblocks with high erase counter.
+
+         The default value should be OK for SLC NAND flashes, NOR flashes and
+         other flashes whose eraseblocks have a life-cycle of 100000 erase
+         cycles or more.  However, for MLC NAND flashes, whose eraseblocks
+         typically survive fewer than 10000 erase cycles, the threshold
+         should be lowered (e.g., to 128 or 256, although it does not have
+         to be a power of 2).
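+
+         For example, a board defconfig targeting MLC NAND might set
+         CONFIG_MTD_UBI_WL_THRESHOLD=256.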
 
 config MTD_UBI_BEB_RESERVE
        int "Percentage of reserved eraseblocks for bad eraseblocks handling"
index 2759604629703e6ea9d9c2e6735478a9cd5a9e1c..961416ac06167350bf689c631a85b0ece7f067ec 100644 (file)
@@ -606,8 +606,16 @@ static int io_init(struct ubi_device *ubi)
                ubi->ro_mode = 1;
        }
 
-       dbg_msg("leb_size         %d", ubi->leb_size);
-       dbg_msg("ro_mode          %d", ubi->ro_mode);
+       ubi_msg("physical eraseblock size:   %d bytes (%d KiB)",
+               ubi->peb_size, ubi->peb_size >> 10);
+       ubi_msg("logical eraseblock size:    %d bytes", ubi->leb_size);
+       ubi_msg("smallest flash I/O unit:    %d", ubi->min_io_size);
+       if (ubi->hdrs_min_io_size != ubi->min_io_size)
+               ubi_msg("sub-page size:              %d",
+                       ubi->hdrs_min_io_size);
+       ubi_msg("VID header offset:          %d (aligned %d)",
+               ubi->vid_hdr_offset, ubi->vid_hdr_aloffset);
+       ubi_msg("data offset:                %d", ubi->leb_start);
 
        /*
         * Note, ideally, we have to initialize ubi->bad_peb_count here. But
@@ -755,8 +763,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
        mutex_init(&ubi->volumes_mutex);
        spin_lock_init(&ubi->volumes_lock);
 
-       dbg_msg("attaching mtd%d to ubi%d: VID header offset %d",
-               mtd->index, ubi_num, vid_hdr_offset);
+       ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);
 
        err = io_init(ubi);
        if (err)
@@ -804,15 +811,8 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
        ubi_msg("attached mtd%d to ubi%d", mtd->index, ubi_num);
        ubi_msg("MTD device name:            \"%s\"", mtd->name);
        ubi_msg("MTD device size:            %llu MiB", ubi->flash_size >> 20);
-       ubi_msg("physical eraseblock size:   %d bytes (%d KiB)",
-               ubi->peb_size, ubi->peb_size >> 10);
-       ubi_msg("logical eraseblock size:    %d bytes", ubi->leb_size);
        ubi_msg("number of good PEBs:        %d", ubi->good_peb_count);
        ubi_msg("number of bad PEBs:         %d", ubi->bad_peb_count);
-       ubi_msg("smallest flash I/O unit:    %d", ubi->min_io_size);
-       ubi_msg("VID header offset:          %d (aligned %d)",
-               ubi->vid_hdr_offset, ubi->vid_hdr_aloffset);
-       ubi_msg("data offset:                %d", ubi->leb_start);
        ubi_msg("max. allowed volumes:       %d", ubi->vtbl_slots);
        ubi_msg("wear-leveling threshold:    %d", CONFIG_MTD_UBI_WL_THRESHOLD);
        ubi_msg("number of internal volumes: %d", UBI_INT_VOL_COUNT);
@@ -950,8 +950,7 @@ static int __init ubi_init(void)
        BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
 
        if (mtd_devs > UBI_MAX_DEVICES) {
-               printk(KERN_ERR "UBI error: too many MTD devices, "
-                      "maximum is %d\n", UBI_MAX_DEVICES);
+               ubi_err("too many MTD devices, maximum is %d", UBI_MAX_DEVICES);
                return -EINVAL;
        }
 
@@ -959,25 +958,25 @@ static int __init ubi_init(void)
        ubi_class = class_create(THIS_MODULE, UBI_NAME_STR);
        if (IS_ERR(ubi_class)) {
                err = PTR_ERR(ubi_class);
-               printk(KERN_ERR "UBI error: cannot create UBI class\n");
+               ubi_err("cannot create UBI class");
                goto out;
        }
 
        err = class_create_file(ubi_class, &ubi_version);
        if (err) {
-               printk(KERN_ERR "UBI error: cannot create sysfs file\n");
+               ubi_err("cannot create sysfs file");
                goto out_class;
        }
 
        err = misc_register(&ubi_ctrl_cdev);
        if (err) {
-               printk(KERN_ERR "UBI error: cannot register device\n");
+               ubi_err("cannot register device");
                goto out_version;
        }
 
        ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
-                                               sizeof(struct ubi_wl_entry),
-                                               0, 0, NULL);
+                                             sizeof(struct ubi_wl_entry),
+                                             0, 0, NULL);
        if (!ubi_wl_entry_slab)
                goto out_dev_unreg;
 
@@ -1000,8 +999,7 @@ static int __init ubi_init(void)
                mutex_unlock(&ubi_devices_mutex);
                if (err < 0) {
                        put_mtd_device(mtd);
-                       printk(KERN_ERR "UBI error: cannot attach mtd%d\n",
-                              mtd->index);
+                       ubi_err("cannot attach mtd%d", mtd->index);
                        goto out_detach;
                }
        }
@@ -1023,7 +1021,7 @@ out_version:
 out_class:
        class_destroy(ubi_class);
 out:
-       printk(KERN_ERR "UBI error: cannot initialize UBI, error %d\n", err);
+       ubi_err("cannot initialize UBI, error %d", err);
        return err;
 }
 module_init(ubi_init);
index 51c40b17f1ec5c967994666ad0f7f3ce4b689f39..8ea99d8c9e1f0459862b77bbd54918e3b68bf88a 100644 (file)
@@ -41,7 +41,7 @@
 /* Generic debugging message */
 #define dbg_msg(fmt, ...)                                    \
        printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \
-              current->pid, __FUNCTION__, ##__VA_ARGS__)
+              current->pid, __func__, ##__VA_ARGS__)
 
 #define ubi_dbg_dump_stack() dump_stack()
 
@@ -99,8 +99,10 @@ void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req);
 #ifdef CONFIG_MTD_UBI_DEBUG_MSG_BLD
 /* Initialization and build messages */
 #define dbg_bld(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
+#define UBI_IO_DEBUG 1
 #else
 #define dbg_bld(fmt, ...) ({})
+#define UBI_IO_DEBUG 0
 #endif
 
 #ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_BITFLIPS
index d397219238d3b8ede00b4075b1468c1922963d79..e909b390069a263e63d5d80a9a8bdf239644fb9f 100644 (file)
@@ -291,11 +291,12 @@ int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol)
        /*
         * In case of dynamic volume, MTD device size is just volume size. In
         * case of a static volume the size is equivalent to the amount of data
-        * bytes, which is zero at this moment and will be changed after volume
-        * update.
+        * bytes.
         */
        if (vol->vol_type == UBI_DYNAMIC_VOLUME)
                mtd->size = vol->usable_leb_size * vol->reserved_pebs;
+       else
+               mtd->size = vol->used_bytes;
 
        if (add_mtd_device(mtd)) {
                ubi_err("cannot not add MTD device\n");
index db3efdef2433884eccc04ad03a9de9d63f4d9af2..4ac11df7b048d3a24684744c4f399f13b81653ea 100644 (file)
@@ -631,6 +631,8 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
 
        dbg_io("read EC header from PEB %d", pnum);
        ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+       if (UBI_IO_DEBUG)
+               verbose = 1;
 
        err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
        if (err) {
@@ -904,6 +906,8 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
 
        dbg_io("read VID header from PEB %d", pnum);
        ubi_assert(pnum >= 0 &&  pnum < ubi->peb_count);
+       if (UBI_IO_DEBUG)
+               verbose = 1;
 
        p = (char *)vid_hdr - ubi->vid_hdr_shift;
        err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
index 05aa3e7daba1edefb88a3b97befb6d4f717fc25c..96d410e106ab1cb72cf9f6bda211e756eaa8c98d 100644 (file)
@@ -42,6 +42,7 @@
 
 #include <linux/err.h>
 #include <linux/crc32.h>
+#include <asm/div64.h>
 #include "ubi.h"
 
 #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
@@ -91,27 +92,6 @@ static int add_to_list(struct ubi_scan_info *si, int pnum, int ec,
        return 0;
 }
 
-/**
- * commit_to_mean_value - commit intermediate results to the final mean erase
- * counter value.
- * @si: scanning information
- *
- * This is a helper function which calculates partial mean erase counter mean
- * value and adds it to the resulting mean value. As we can work only in
- * integer arithmetic and we want to calculate the mean value of erase counter
- * accurately, we first sum erase counter values in @si->ec_sum variable and
- * count these components in @si->ec_count. If this temporary @si->ec_sum is
- * going to overflow, we calculate the partial mean value
- * (@si->ec_sum/@si->ec_count) and add it to @si->mean_ec.
- */
-static void commit_to_mean_value(struct ubi_scan_info *si)
-{
-       si->ec_sum /= si->ec_count;
-       if (si->ec_sum % si->ec_count >= si->ec_count / 2)
-               si->mean_ec += 1;
-       si->mean_ec += si->ec_sum;
-}
-
 /**
  * validate_vid_hdr - check that volume identifier header is correct and
  * consistent.
@@ -901,15 +881,8 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum
 
 adjust_mean_ec:
        if (!ec_corr) {
-               if (si->ec_sum + ec < ec) {
-                       commit_to_mean_value(si);
-                       si->ec_sum = 0;
-                       si->ec_count = 0;
-               } else {
-                       si->ec_sum += ec;
-                       si->ec_count += 1;
-               }
-
+               si->ec_sum += ec;
+               si->ec_count += 1;
                if (ec > si->max_ec)
                        si->max_ec = ec;
                if (ec < si->min_ec)
@@ -965,9 +938,11 @@ struct ubi_scan_info *ubi_scan(struct ubi_device *ubi)
 
        dbg_msg("scanning is finished");
 
-       /* Finish mean erase counter calculations */
-       if (si->ec_count)
-               commit_to_mean_value(si);
+       /* Calculate mean erase counter */
+       if (si->ec_count) {
+               do_div(si->ec_sum, si->ec_count);
+               si->mean_ec = si->ec_sum;
+       }
 
        if (si->is_empty)
                ubi_msg("empty MTD device detected");
index 46d444af471a24a094fc3c0aa54d76e91dda94bd..966b9b682a423819921f69ca02e626ad0c89772b 100644 (file)
@@ -124,7 +124,7 @@ struct ubi_scan_info {
        int max_ec;
        unsigned long long max_sqnum;
        int mean_ec;
-       int ec_sum;
+       uint64_t ec_sum;
        int ec_count;
 };
 
similarity index 99%
rename from include/mtd/ubi-header.h
rename to drivers/mtd/ubi/ubi-media.h
index 292f916ea5642178947204ba04a1d0f2d5b26c64..c3185d9fd048836dfd09c52e266e1875e9571337 100644 (file)
 
 /*
  * This file defines the layout of UBI headers and all the other UBI on-flash
- * data structures. May be included by user-space.
+ * data structures.
  */
 
-#ifndef __UBI_HEADER_H__
-#define __UBI_HEADER_H__
+#ifndef __UBI_MEDIA_H__
+#define __UBI_MEDIA_H__
 
 #include <asm/byteorder.h>
 
@@ -369,4 +369,4 @@ struct ubi_vtbl_record {
        __be32  crc;
 } __attribute__ ((packed));
 
-#endif /* !__UBI_HEADER_H__ */
+#endif /* !__UBI_MEDIA_H__ */
index a548c1d28fa818a5485aa5524a2527eff269edf0..67dcbd11c15c27c10cfd4349ede90c96ec11ea59 100644 (file)
 #include <linux/string.h>
 #include <linux/vmalloc.h>
 #include <linux/mtd/mtd.h>
-
-#include <mtd/ubi-header.h>
 #include <linux/mtd/ubi.h>
 
+#include "ubi-media.h"
 #include "scan.h"
 #include "debug.h"
 
 #define ubi_msg(fmt, ...) printk(KERN_NOTICE "UBI: " fmt "\n", ##__VA_ARGS__)
 /* UBI warning messages */
 #define ubi_warn(fmt, ...) printk(KERN_WARNING "UBI warning: %s: " fmt "\n", \
-                                 __FUNCTION__, ##__VA_ARGS__)
+                                 __func__, ##__VA_ARGS__)
 /* UBI error messages */
 #define ubi_err(fmt, ...) printk(KERN_ERR "UBI error: %s: " fmt "\n", \
-                                __FUNCTION__, ##__VA_ARGS__)
+                                __func__, ##__VA_ARGS__)
 
 /* Lowest number PEBs reserved for bad PEB handling */
 #define MIN_RESEVED_PEBS 2
index 978e20a1791bda8239cbc213b0e0afebab4461b4..1e39e78f1778c5228d7e6345005e900a347ccbff 100644 (file)
@@ -1248,3 +1248,4 @@ module_exit(at91ether_exit)
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("AT91RM9200 EMAC Ethernet driver");
 MODULE_AUTHOR("Andrew Victor");
+MODULE_ALIAS("platform:" DRV_NAME);
index 91a6590d107b181177a74e0bcd71003e9c4a906c..ecd8fc6146e9faa710351eb8765c8538807bcc51 100644 (file)
@@ -897,6 +897,7 @@ static struct platform_driver ep93xx_eth_driver = {
        .remove         = ep93xx_eth_remove,
        .driver         = {
                .name   = "ep93xx-eth",
+               .owner  = THIS_MODULE,
        },
 };
 
@@ -914,3 +915,4 @@ static void __exit ep93xx_eth_cleanup_module(void)
 module_init(ep93xx_eth_init_module);
 module_exit(ep93xx_eth_cleanup_module);
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ep93xx-eth");
index 5586fc62468880a0df991cd72d461bf6e5ab77a3..0afe522b8f7b901e2a078ff33739a0a9866299c3 100644 (file)
 /* Temporary hack for merging atl1 and atl2 */
 #include "atlx.c"
 
+/*
+ * This is the only thing that needs to be changed to adjust the
+ * maximum number of ports that the driver can manage.
+ */
+#define ATL1_MAX_NIC 4
+
+#define OPTION_UNSET    -1
+#define OPTION_DISABLED 0
+#define OPTION_ENABLED  1
+
+#define ATL1_PARAM_INIT { [0 ... ATL1_MAX_NIC] = OPTION_UNSET }
+
+/*
+ * Interrupt Moderate Timer in units of 2 us
+ *
+ * Valid Range: 10-65535
+ *
+ * Default Value: 100 (200us)
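+ *
+ * Example (illustrative): "modprobe atl1 int_mod_timer=200" would set a
+ * 400us moderation interval on the first adapter.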
+ */
+static int __devinitdata int_mod_timer[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT;
+static int num_int_mod_timer;
+module_param_array_named(int_mod_timer, int_mod_timer, int,
+       &num_int_mod_timer, 0);
+MODULE_PARM_DESC(int_mod_timer, "Interrupt moderator timer");
+
+#define DEFAULT_INT_MOD_CNT    100     /* 200us */
+#define MAX_INT_MOD_CNT                65000
+#define MIN_INT_MOD_CNT                50
+
+struct atl1_option {
+       enum { enable_option, range_option, list_option } type;
+       char *name;
+       char *err;
+       int def;
+       union {
+               struct {        /* range_option info */
+                       int min;
+                       int max;
+               } r;
+               struct {        /* list_option info */
+                       int nr;
+                       struct atl1_opt_list {
+                               int i;
+                               char *str;
+                       } *p;
+               } l;
+       } arg;
+};
+
+static int __devinit atl1_validate_option(int *value, struct atl1_option *opt,
+       struct pci_dev *pdev)
+{
+       if (*value == OPTION_UNSET) {
+               *value = opt->def;
+               return 0;
+       }
+
+       switch (opt->type) {
+       case enable_option:
+               switch (*value) {
+               case OPTION_ENABLED:
+                       dev_info(&pdev->dev, "%s enabled\n", opt->name);
+                       return 0;
+               case OPTION_DISABLED:
+                       dev_info(&pdev->dev, "%s disabled\n", opt->name);
+                       return 0;
+               }
+               break;
+       case range_option:
+               if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
+                       dev_info(&pdev->dev, "%s set to %i\n", opt->name,
+                               *value);
+                       return 0;
+               }
+               break;
+       case list_option:{
+                       int i;
+                       struct atl1_opt_list *ent;
+
+                       for (i = 0; i < opt->arg.l.nr; i++) {
+                               ent = &opt->arg.l.p[i];
+                               if (*value == ent->i) {
+                                       if (ent->str[0] != '\0')
+                                               dev_info(&pdev->dev, "%s\n",
+                                                       ent->str);
+                                       return 0;
+                               }
+                       }
+               }
+               break;
+
+       default:
+               break;
+       }
+
+       dev_info(&pdev->dev, "invalid %s specified (%i) %s\n",
+               opt->name, *value, opt->err);
+       *value = opt->def;
+       return -1;
+}
+
+/*
+ * atl1_check_options - Range Checking for Command Line Parameters
+ * @adapter: board private structure
+ *
+ * This routine checks all command line parameters for valid user
+ * input.  If an invalid value is given, or if no user specified
+ * value exists, a default value is used.  The final value is stored
+ * in a variable in the adapter structure.
+ */
+void __devinit atl1_check_options(struct atl1_adapter *adapter)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       int bd = adapter->bd_number;
+       if (bd >= ATL1_MAX_NIC) {
+               dev_notice(&pdev->dev, "no configuration for board#%i\n", bd);
+               dev_notice(&pdev->dev, "using defaults for all values\n");
+       }
+       {                       /* Interrupt Moderate Timer */
+               struct atl1_option opt = {
+                       .type = range_option,
+                       .name = "Interrupt Moderator Timer",
+                       .err = "using default of "
+                               __MODULE_STRING(DEFAULT_INT_MOD_CNT),
+                       .def = DEFAULT_INT_MOD_CNT,
+                       .arg = {.r = {.min = MIN_INT_MOD_CNT,
+                                       .max = MAX_INT_MOD_CNT} }
+               };
+               int val;
+               if (num_int_mod_timer > bd) {
+                       val = int_mod_timer[bd];
+                       atl1_validate_option(&val, &opt, pdev);
+                       adapter->imt = (u16) val;
+               } else
+                       adapter->imt = (u16) (opt.def);
+       }
+}
+
 /*
  * atl1_pci_tbl - PCI Device ID Table
  */
index 4186326d1b9426d5c4f00c5324daf2919ceffeed..f06b854e2501b88ed7f3799bc53438357876add4 100644 (file)
@@ -253,181 +253,4 @@ static void atlx_restore_vlan(struct atlx_adapter *adapter)
        atlx_vlan_rx_register(adapter->netdev, adapter->vlgrp);
 }
 
-/*
- * This is the only thing that needs to be changed to adjust the
- * maximum number of ports that the driver can manage.
- */
-#define ATL1_MAX_NIC 4
-
-#define OPTION_UNSET    -1
-#define OPTION_DISABLED 0
-#define OPTION_ENABLED  1
-
-#define ATL1_PARAM_INIT { [0 ... ATL1_MAX_NIC] = OPTION_UNSET }
-
-/*
- * Interrupt Moderate Timer in units of 2 us
- *
- * Valid Range: 10-65535
- *
- * Default Value: 100 (200us)
- */
-static int __devinitdata int_mod_timer[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT;
-static int num_int_mod_timer;
-module_param_array_named(int_mod_timer, int_mod_timer, int,
-       &num_int_mod_timer, 0);
-MODULE_PARM_DESC(int_mod_timer, "Interrupt moderator timer");
-
-/*
- * flash_vendor
- *
- * Valid Range: 0-2
- *
- * 0 - Atmel
- * 1 - SST
- * 2 - ST
- *
- * Default Value: 0
- */
-static int __devinitdata flash_vendor[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT;
-static int num_flash_vendor;
-module_param_array_named(flash_vendor, flash_vendor, int, &num_flash_vendor, 0);
-MODULE_PARM_DESC(flash_vendor, "SPI flash vendor");
-
-#define DEFAULT_INT_MOD_CNT    100     /* 200us */
-#define MAX_INT_MOD_CNT                65000
-#define MIN_INT_MOD_CNT                50
-
-#define FLASH_VENDOR_DEFAULT   0
-#define FLASH_VENDOR_MIN       0
-#define FLASH_VENDOR_MAX       2
-
-struct atl1_option {
-       enum { enable_option, range_option, list_option } type;
-       char *name;
-       char *err;
-       int def;
-       union {
-               struct {        /* range_option info */
-                       int min;
-                       int max;
-               } r;
-               struct {        /* list_option info */
-                       int nr;
-                       struct atl1_opt_list {
-                               int i;
-                               char *str;
-                       } *p;
-               } l;
-       } arg;
-};
-
-static int __devinit atl1_validate_option(int *value, struct atl1_option *opt,
-       struct pci_dev *pdev)
-{
-       if (*value == OPTION_UNSET) {
-               *value = opt->def;
-               return 0;
-       }
-
-       switch (opt->type) {
-       case enable_option:
-               switch (*value) {
-               case OPTION_ENABLED:
-                       dev_info(&pdev->dev, "%s enabled\n", opt->name);
-                       return 0;
-               case OPTION_DISABLED:
-                       dev_info(&pdev->dev, "%s disabled\n", opt->name);
-                       return 0;
-               }
-               break;
-       case range_option:
-               if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
-                       dev_info(&pdev->dev, "%s set to %i\n", opt->name,
-                               *value);
-                       return 0;
-               }
-               break;
-       case list_option:{
-                       int i;
-                       struct atl1_opt_list *ent;
-
-                       for (i = 0; i < opt->arg.l.nr; i++) {
-                               ent = &opt->arg.l.p[i];
-                               if (*value == ent->i) {
-                                       if (ent->str[0] != '\0')
-                                               dev_info(&pdev->dev, "%s\n",
-                                                       ent->str);
-                                       return 0;
-                               }
-                       }
-               }
-               break;
-
-       default:
-               break;
-       }
-
-       dev_info(&pdev->dev, "invalid %s specified (%i) %s\n",
-               opt->name, *value, opt->err);
-       *value = opt->def;
-       return -1;
-}
-
-/*
- * atl1_check_options - Range Checking for Command Line Parameters
- * @adapter: board private structure
- *
- * This routine checks all command line parameters for valid user
- * input.  If an invalid value is given, or if no user specified
- * value exists, a default value is used.  The final value is stored
- * in a variable in the adapter structure.
- */
-void __devinit atl1_check_options(struct atl1_adapter *adapter)
-{
-       struct pci_dev *pdev = adapter->pdev;
-       int bd = adapter->bd_number;
-       if (bd >= ATL1_MAX_NIC) {
-               dev_notice(&pdev->dev, "no configuration for board#%i\n", bd);
-               dev_notice(&pdev->dev, "using defaults for all values\n");
-       }
-       {                       /* Interrupt Moderate Timer */
-               struct atl1_option opt = {
-                       .type = range_option,
-                       .name = "Interrupt Moderator Timer",
-                       .err = "using default of "
-                               __MODULE_STRING(DEFAULT_INT_MOD_CNT),
-                       .def = DEFAULT_INT_MOD_CNT,
-                       .arg = {.r = {.min = MIN_INT_MOD_CNT,
-                                       .max = MAX_INT_MOD_CNT} }
-               };
-               int val;
-               if (num_int_mod_timer > bd) {
-                       val = int_mod_timer[bd];
-                       atl1_validate_option(&val, &opt, pdev);
-                       adapter->imt = (u16) val;
-               } else
-                       adapter->imt = (u16) (opt.def);
-       }
-
-       {                       /* Flash Vendor */
-               struct atl1_option opt = {
-                       .type = range_option,
-                       .name = "SPI Flash Vendor",
-                       .err = "using default of "
-                               __MODULE_STRING(FLASH_VENDOR_DEFAULT),
-                       .def = DEFAULT_INT_MOD_CNT,
-                       .arg = {.r = {.min = FLASH_VENDOR_MIN,
-                                       .max = FLASH_VENDOR_MAX} }
-               };
-               int val;
-               if (num_flash_vendor > bd) {
-                       val = flash_vendor[bd];
-                       atl1_validate_option(&val, &opt, pdev);
-                       adapter->hw.flash_vendor = (u8) val;
-               } else
-                       adapter->hw.flash_vendor = (u8) (opt.def);
-       }
-}
-
 #endif /* ATLX_C */
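The code removed above (and re-added in the atl1-specific file) validates per-board parameters against a min/max range and falls back to a default when the value is unset or out of range. A minimal userspace sketch of the same range-check idiom, with invented names (check_range, struct range_opt) standing in for atl1_validate_option:

#include <stdio.h>

#define OPT_UNSET (-1)

struct range_opt {
	const char *name;
	int def, min, max;
};

/* Clamp an unset or out-of-range value back to the option default. */
static int check_range(int *value, const struct range_opt *opt)
{
	if (*value == OPT_UNSET) {
		*value = opt->def;
		return 0;
	}
	if (*value >= opt->min && *value <= opt->max) {
		printf("%s set to %d\n", opt->name, *value);
		return 0;
	}
	printf("invalid %s (%d), using default %d\n",
	       opt->name, *value, opt->def);
	*value = opt->def;
	return -1;
}

int main(void)
{
	struct range_opt imt = { "interrupt moderator timer", 100, 50, 65000 };
	int val = 70000;	/* out of range -> falls back to 100 */

	check_range(&val, &imt);
	printf("final value: %d\n", val);
	return 0;
}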
index 194949afacd05142655f7d1a42e00b8b894f6697..0b4adf4a0f7da9ab772ca84bc1d4738ca034c263 100644 (file)
@@ -1005,3 +1005,4 @@ module_exit(axdrv_exit);
 MODULE_DESCRIPTION("AX88796 10/100 Ethernet platform driver");
 MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
 MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:ax88796");
index 717dcc1aa1e97acb8bfddf2e44e97cc51cb8a8f7..4fec8581bfd7bb80e5c7a49ae7c548532f263cf7 100644 (file)
@@ -47,6 +47,7 @@
 MODULE_AUTHOR(DRV_AUTHOR);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION(DRV_DESC);
+MODULE_ALIAS("platform:bfin_mac");
 
 #if defined(CONFIG_BFIN_MAC_USE_L1)
 # define bfin_mac_alloc(dma_handle, size)  l1_data_sram_zalloc(size)
@@ -1089,8 +1090,9 @@ static struct platform_driver bfin_mac_driver = {
        .resume = bfin_mac_resume,
        .suspend = bfin_mac_suspend,
        .driver = {
-                  .name = DRV_NAME,
-                  },
+               .name = DRV_NAME,
+               .owner  = THIS_MODULE,
+       },
 };
 
 static int __init bfin_mac_init(void)
@@ -1106,3 +1108,4 @@ static void __exit bfin_mac_cleanup(void)
 }
 
 module_exit(bfin_mac_cleanup);
+
index 9da7ff437031fec11854152851c344608ec183ff..2b5740b3d1825dee5b91f9b3862060e71c266f8b 100644 (file)
@@ -42,6 +42,7 @@
 MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>");
 MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:cpmac");
 
 static int debug_level = 8;
 static int dumb_switch;
@@ -1103,6 +1104,7 @@ static int __devexit cpmac_remove(struct platform_device *pdev)
 
 static struct platform_driver cpmac_driver = {
        .driver.name = "cpmac",
+       .driver.owner = THIS_MODULE,
        .probe = cpmac_probe,
        .remove = __devexit_p(cpmac_remove),
 };
index d63cc93f055dfd2be2c8a493a514b20a743fd9fa..e6fe2614ea6dc3a9f2db6928cce6393b75498475 100644 (file)
@@ -1418,3 +1418,4 @@ module_exit(dm9000_cleanup);
 MODULE_AUTHOR("Sascha Hauer, Ben Dooks");
 MODULE_DESCRIPTION("Davicom DM9000 network driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:dm9000");
index 01c88664bad30934e11d3c7ad9d4164d8e73bf7e..462351ca2c81ac991a088b971a0bc46b1f51c5a7 100644 (file)
@@ -1326,12 +1326,10 @@ struct e1000_info e1000_82571_info = {
        .mac                    = e1000_82571,
        .flags                  = FLAG_HAS_HW_VLAN_FILTER
                                  | FLAG_HAS_JUMBO_FRAMES
-                                 | FLAG_HAS_STATS_PTC_PRC
                                  | FLAG_HAS_WOL
                                  | FLAG_APME_IN_CTRL3
                                  | FLAG_RX_CSUM_ENABLED
                                  | FLAG_HAS_CTRLEXT_ON_LOAD
-                                 | FLAG_HAS_STATS_ICR_ICT
                                  | FLAG_HAS_SMART_POWER_DOWN
                                  | FLAG_RESET_OVERWRITES_LAA /* errata */
                                  | FLAG_TARC_SPEED_MODE_BIT /* errata */
@@ -1347,12 +1345,10 @@ struct e1000_info e1000_82572_info = {
        .mac                    = e1000_82572,
        .flags                  = FLAG_HAS_HW_VLAN_FILTER
                                  | FLAG_HAS_JUMBO_FRAMES
-                                 | FLAG_HAS_STATS_PTC_PRC
                                  | FLAG_HAS_WOL
                                  | FLAG_APME_IN_CTRL3
                                  | FLAG_RX_CSUM_ENABLED
                                  | FLAG_HAS_CTRLEXT_ON_LOAD
-                                 | FLAG_HAS_STATS_ICR_ICT
                                  | FLAG_TARC_SPEED_MODE_BIT, /* errata */
        .pba                    = 38,
        .get_variants           = e1000_get_variants_82571,
@@ -1365,11 +1361,9 @@ struct e1000_info e1000_82573_info = {
        .mac                    = e1000_82573,
        .flags                  = FLAG_HAS_HW_VLAN_FILTER
                                  | FLAG_HAS_JUMBO_FRAMES
-                                 | FLAG_HAS_STATS_PTC_PRC
                                  | FLAG_HAS_WOL
                                  | FLAG_APME_IN_CTRL3
                                  | FLAG_RX_CSUM_ENABLED
-                                 | FLAG_HAS_STATS_ICR_ICT
                                  | FLAG_HAS_SMART_POWER_DOWN
                                  | FLAG_HAS_AMT
                                  | FLAG_HAS_ERT
index 572cfd44397adcf6ff506b742aebc201b1c325b6..2a53875cddbf5601ab6da33b89f36caf3e6f8650 100644 (file)
 #define E1000_SWFW_EEP_SM   0x1
 #define E1000_SWFW_PHY0_SM  0x2
 #define E1000_SWFW_PHY1_SM  0x4
+#define E1000_SWFW_CSR_SM   0x8
 
 /* Device Control */
 #define E1000_CTRL_FD       0x00000001  /* Full duplex.0=half; 1=full */
 #define PHY_ID2          0x03 /* Phy Id Reg (word 2) */
 #define PHY_AUTONEG_ADV  0x04 /* Autoneg Advertisement */
 #define PHY_LP_ABILITY   0x05 /* Link Partner Ability (Base Page) */
+#define PHY_AUTONEG_EXP  0x06 /* Autoneg Expansion Reg */
 #define PHY_1000T_CTRL   0x09 /* 1000Base-T Control Reg */
 #define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
+#define PHY_EXT_STATUS   0x0F /* Extended Status Reg */
 
 /* NVM Control */
 #define E1000_EECD_SK        0x00000001 /* NVM Clock */
index 5a89dff522641c5ea28351cb06e99c19e1092a65..38bfd0d261fee3f272f80415da323311eb8ddf43 100644 (file)
@@ -64,11 +64,14 @@ struct e1000_info;
 /* Tx/Rx descriptor defines */
 #define E1000_DEFAULT_TXD              256
 #define E1000_MAX_TXD                  4096
-#define E1000_MIN_TXD                  80
+#define E1000_MIN_TXD                  64
 
 #define E1000_DEFAULT_RXD              256
 #define E1000_MAX_RXD                  4096
-#define E1000_MIN_RXD                  80
+#define E1000_MIN_RXD                  64
+
+#define E1000_MIN_ITR_USECS            10 /* 100000 irq/sec */
+#define E1000_MAX_ITR_USECS            10000 /* 100    irq/sec */
 
 /* Early Receive defines */
 #define E1000_ERT_2048                 0x100
@@ -147,6 +150,18 @@ struct e1000_ring {
        struct e1000_queue_stats stats;
 };
 
+/* PHY register snapshot values */
+struct e1000_phy_regs {
+       u16 bmcr;               /* basic mode control register    */
+       u16 bmsr;               /* basic mode status register     */
+       u16 advertise;          /* auto-negotiation advertisement */
+       u16 lpa;                /* link partner ability register  */
+       u16 expansion;          /* auto-negotiation expansion reg */
+       u16 ctrl1000;           /* 1000BASE-T control register    */
+       u16 stat1000;           /* 1000BASE-T status register     */
+       u16 estatus;            /* extended status register       */
+};
+
 /* board specific private data structure */
 struct e1000_adapter {
        struct timer_list watchdog_timer;
@@ -202,8 +217,8 @@ struct e1000_adapter {
        /* Tx stats */
        u64 tpt_old;
        u64 colc_old;
-       u64 gotcl_old;
-       u32 gotcl;
+       u32 gotc;
+       u64 gotc_old;
        u32 tx_timeout_count;
        u32 tx_fifo_head;
        u32 tx_head_addr;
@@ -227,8 +242,8 @@ struct e1000_adapter {
        u64 hw_csum_err;
        u64 hw_csum_good;
        u64 rx_hdr_split;
-       u64 gorcl_old;
-       u32 gorcl;
+       u32 gorc;
+       u64 gorc_old;
        u32 alloc_rx_buff_failed;
        u32 rx_dma_failed;
 
@@ -250,6 +265,9 @@ struct e1000_adapter {
        struct e1000_phy_info phy_info;
        struct e1000_phy_stats phy_stats;
 
+       /* Snapshot of PHY registers */
+       struct e1000_phy_regs phy_regs;
+
        struct e1000_ring test_tx_ring;
        struct e1000_ring test_rx_ring;
        u32 test_icr;
@@ -286,8 +304,6 @@ struct e1000_info {
 #define FLAG_HAS_CTRLEXT_ON_LOAD          (1 << 5)
 #define FLAG_HAS_SWSM_ON_LOAD             (1 << 6)
 #define FLAG_HAS_JUMBO_FRAMES             (1 << 7)
-#define FLAG_HAS_STATS_ICR_ICT            (1 << 9)
-#define FLAG_HAS_STATS_PTC_PRC            (1 << 10)
 #define FLAG_HAS_SMART_POWER_DOWN         (1 << 11)
 #define FLAG_IS_QUAD_PORT_A               (1 << 12)
 #define FLAG_IS_QUAD_PORT                 (1 << 13)
@@ -433,6 +449,8 @@ extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
 extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
                               u32 usec_interval, bool *success);
 extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw);
+extern s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
 extern s32 e1000e_check_downshift(struct e1000_hw *hw);
 
 static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
index d59a99ae44bee83b6afa7aff45315ae828f9c9b7..dc552d7d6fac5d083671f66c17b882fe505798ae 100644 (file)
@@ -41,6 +41,7 @@
 #define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL      0x00
 #define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL       0x02
 #define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL        0x10
+#define E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE         0x1F
 
 #define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS   0x0008
 #define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS   0x0800
@@ -48,6 +49,7 @@
 
 #define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004
 #define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT  0x0000
+#define E1000_KMRNCTRLSTA_OPMODE_E_IDLE                 0x2000
 
 #define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */
 #define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN       0x00010000
@@ -85,6 +87,9 @@
 /* Kumeran Mode Control Register (Page 193, Register 16) */
 #define GG82563_KMCR_PASS_FALSE_CARRIER                 0x0800
 
+/* Max number of times Kumeran read/write should be validated */
+#define GG82563_MAX_KMRN_RETRY  0x5
+
 /* Power Management Control Register (Page 193, Register 20) */
 #define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE     0x0001
                                           /* 1=Enable SERDES Electrical Idle */
@@ -270,6 +275,7 @@ static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw)
        u16 mask;
 
        mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
+       mask |= E1000_SWFW_CSR_SM;
 
        return e1000_acquire_swfw_sync_80003es2lan(hw, mask);
 }
@@ -286,6 +292,8 @@ static void e1000_release_phy_80003es2lan(struct e1000_hw *hw)
        u16 mask;
 
        mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
+       mask |= E1000_SWFW_CSR_SM;
+
        e1000_release_swfw_sync_80003es2lan(hw, mask);
 }
 
@@ -410,20 +418,27 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
        u32 page_select;
        u16 temp;
 
+       ret_val = e1000_acquire_phy_80003es2lan(hw);
+       if (ret_val)
+               return ret_val;
+
        /* Select Configuration Page */
-       if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG)
+       if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
                page_select = GG82563_PHY_PAGE_SELECT;
-       else
+       } else {
                /*
                 * Use Alternative Page Select register to access
                 * registers 30 and 31
                 */
                page_select = GG82563_PHY_PAGE_SELECT_ALT;
+       }
 
        temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT);
-       ret_val = e1000e_write_phy_reg_m88(hw, page_select, temp);
-       if (ret_val)
+       ret_val = e1000e_write_phy_reg_mdic(hw, page_select, temp);
+       if (ret_val) {
+               e1000_release_phy_80003es2lan(hw);
                return ret_val;
+       }
 
        /*
         * The "ready" bit in the MDIC register may be incorrectly set
@@ -433,20 +448,21 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
        udelay(200);
 
        /* ...and verify the command was successful. */
-       ret_val = e1000e_read_phy_reg_m88(hw, page_select, &temp);
+       ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
 
        if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
                ret_val = -E1000_ERR_PHY;
+               e1000_release_phy_80003es2lan(hw);
                return ret_val;
        }
 
        udelay(200);
 
-       ret_val = e1000e_read_phy_reg_m88(hw,
-                                        MAX_PHY_REG_ADDRESS & offset,
-                                        data);
+       ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+                                          data);
 
        udelay(200);
+       e1000_release_phy_80003es2lan(hw);
 
        return ret_val;
 }
@@ -467,20 +483,27 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
        u32 page_select;
        u16 temp;
 
+       ret_val = e1000_acquire_phy_80003es2lan(hw);
+       if (ret_val)
+               return ret_val;
+
        /* Select Configuration Page */
-       if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG)
+       if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
                page_select = GG82563_PHY_PAGE_SELECT;
-       else
+       } else {
                /*
                 * Use Alternative Page Select register to access
                 * registers 30 and 31
                 */
                page_select = GG82563_PHY_PAGE_SELECT_ALT;
+       }
 
        temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT);
-       ret_val = e1000e_write_phy_reg_m88(hw, page_select, temp);
-       if (ret_val)
+       ret_val = e1000e_write_phy_reg_mdic(hw, page_select, temp);
+       if (ret_val) {
+               e1000_release_phy_80003es2lan(hw);
                return ret_val;
+       }
 
 
        /*
@@ -491,18 +514,20 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
        udelay(200);
 
        /* ...and verify the command was successful. */
-       ret_val = e1000e_read_phy_reg_m88(hw, page_select, &temp);
+       ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
 
-       if (((u16)offset >> GG82563_PAGE_SHIFT) != temp)
+       if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
+               e1000_release_phy_80003es2lan(hw);
                return -E1000_ERR_PHY;
+       }
 
        udelay(200);
 
-       ret_val = e1000e_write_phy_reg_m88(hw,
-                                         MAX_PHY_REG_ADDRESS & offset,
-                                         data);
+       ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+                                           data);
 
        udelay(200);
+       e1000_release_phy_80003es2lan(hw);
 
        return ret_val;
 }
@@ -882,10 +907,10 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
        struct e1000_phy_info *phy = &hw->phy;
        s32 ret_val;
        u32 ctrl_ext;
-       u16 data;
+       u32 i = 0;
+       u16 data, data2;
 
-       ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL,
-                                    &data);
+       ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &data);
        if (ret_val)
                return ret_val;
 
@@ -893,8 +918,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
        /* Use 25MHz for both link down and 1000Base-T for Tx clock. */
        data |= GG82563_MSCR_TX_CLK_1000MBPS_25;
 
-       ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL,
-                                     data);
+       ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, data);
        if (ret_val)
                return ret_val;
 
@@ -954,6 +978,18 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
        if (ret_val)
                return ret_val;
 
+       ret_val = e1000e_read_kmrn_reg(hw,
+                                      E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE,
+                                      &data);
+       if (ret_val)
+               return ret_val;
+       data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE;
+       ret_val = e1000e_write_kmrn_reg(hw,
+                                       E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE,
+                                       data);
+       if (ret_val)
+               return ret_val;
+
        ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL_2, &data);
        if (ret_val)
                return ret_val;
@@ -983,9 +1019,18 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
                if (ret_val)
                        return ret_val;
 
-               ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &data);
-               if (ret_val)
-                       return ret_val;
+               do {
+                       ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL,
+                                          &data);
+                       if (ret_val)
+                               return ret_val;
+
+                       ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL,
+                                          &data2);
+                       if (ret_val)
+                               return ret_val;
+                       i++;
+               } while ((data != data2) && (i < GG82563_MAX_KMRN_RETRY));
 
                data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
                ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, data);
@@ -1074,7 +1119,8 @@ static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
 {
        s32 ret_val;
        u32 tipg;
-       u16 reg_data;
+       u32 i = 0;
+       u16 reg_data, reg_data2;
 
        reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT;
        ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
@@ -1088,9 +1134,16 @@ static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
        tipg |= DEFAULT_TIPG_IPGT_10_100_80003ES2LAN;
        ew32(TIPG, tipg);
 
-       ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
-       if (ret_val)
-               return ret_val;
+       do {
+               ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
+               if (ret_val)
+                       return ret_val;
+
+               ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data2);
+               if (ret_val)
+                       return ret_val;
+               i++;
+       } while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));
 
        if (duplex == HALF_DUPLEX)
                reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER;
@@ -1112,8 +1165,9 @@ static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
 static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
 {
        s32 ret_val;
-       u16 reg_data;
+       u16 reg_data, reg_data2;
        u32 tipg;
+       u32 i = 0;
 
        reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT;
        ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
@@ -1127,9 +1181,16 @@ static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
        tipg |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN;
        ew32(TIPG, tipg);
 
-       ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
-       if (ret_val)
-               return ret_val;
+       do {
+               ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
+               if (ret_val)
+                       return ret_val;
+
+               ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data2);
+               if (ret_val)
+                       return ret_val;
+               i++;
+       } while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));
 
        reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
        ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
@@ -1231,12 +1292,10 @@ struct e1000_info e1000_es2_info = {
        .mac                    = e1000_80003es2lan,
        .flags                  = FLAG_HAS_HW_VLAN_FILTER
                                  | FLAG_HAS_JUMBO_FRAMES
-                                 | FLAG_HAS_STATS_PTC_PRC
                                  | FLAG_HAS_WOL
                                  | FLAG_APME_IN_CTRL3
                                  | FLAG_RX_CSUM_ENABLED
                                  | FLAG_HAS_CTRLEXT_ON_LOAD
-                                 | FLAG_HAS_STATS_ICR_ICT
                                  | FLAG_RX_NEEDS_RESTART /* errata */
                                  | FLAG_TARC_SET_BIT_ZERO /* errata */
                                  | FLAG_APME_CHECK_PORT_B
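The GG82563 hunks above replace single reads of the KMRN mode-control register with a loop that re-reads until two consecutive values agree or GG82563_MAX_KMRN_RETRY attempts pass, guarding against transient bad reads. A standalone sketch of that read-until-stable idiom, with a stubbed read_reg() standing in for e1e_rphy() and an arbitrary fake "glitch" on the first read:

#include <stdint.h>
#include <stdio.h>

#define MAX_RETRY 5

/* Stub register read: pretend the very first read glitches. */
static int read_reg(uint16_t *val)
{
	static int calls;

	*val = (calls++ == 0) ? 0xFFFF : 0x1234;
	return 0;
}

/* Re-read until two consecutive samples match, up to MAX_RETRY attempts. */
static int read_stable(uint16_t *out)
{
	uint16_t a, b;
	int i = 0;

	do {
		if (read_reg(&a))
			return -1;
		if (read_reg(&b))
			return -1;
		i++;
	} while (a != b && i < MAX_RETRY);

	*out = a;
	return (a == b) ? 0 : -1;
}

int main(void)
{
	uint16_t v;

	if (read_stable(&v) == 0)
		printf("stable value: 0x%04x\n", v);
	return 0;
}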
index 6d1b257bbda6b37d3ea38b1034ef02f20b72f1a3..ce045acce63e99b79685e291a8993e3acd940508 100644 (file)
@@ -46,8 +46,8 @@ struct e1000_stats {
 static const struct e1000_stats e1000_gstrings_stats[] = {
        { "rx_packets", E1000_STAT(stats.gprc) },
        { "tx_packets", E1000_STAT(stats.gptc) },
-       { "rx_bytes", E1000_STAT(stats.gorcl) },
-       { "tx_bytes", E1000_STAT(stats.gotcl) },
+       { "rx_bytes", E1000_STAT(stats.gorc) },
+       { "tx_bytes", E1000_STAT(stats.gotc) },
        { "rx_broadcast", E1000_STAT(stats.bprc) },
        { "tx_broadcast", E1000_STAT(stats.bptc) },
        { "rx_multicast", E1000_STAT(stats.mprc) },
@@ -83,7 +83,7 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
        { "rx_flow_control_xoff", E1000_STAT(stats.xoffrxc) },
        { "tx_flow_control_xon", E1000_STAT(stats.xontxc) },
        { "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) },
-       { "rx_long_byte_count", E1000_STAT(stats.gorcl) },
+       { "rx_long_byte_count", E1000_STAT(stats.gorc) },
        { "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
        { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
        { "rx_header_split", E1000_STAT(rx_hdr_split) },
@@ -1770,6 +1770,47 @@ static int e1000_phys_id(struct net_device *netdev, u32 data)
        return 0;
 }
 
+static int e1000_get_coalesce(struct net_device *netdev,
+                             struct ethtool_coalesce *ec)
+{
+       struct e1000_adapter *adapter = netdev_priv(netdev);
+
+       if (adapter->itr_setting <= 3)
+               ec->rx_coalesce_usecs = adapter->itr_setting;
+       else
+               ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting;
+
+       return 0;
+}
+
+static int e1000_set_coalesce(struct net_device *netdev,
+                             struct ethtool_coalesce *ec)
+{
+       struct e1000_adapter *adapter = netdev_priv(netdev);
+       struct e1000_hw *hw = &adapter->hw;
+
+       if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) ||
+           ((ec->rx_coalesce_usecs > 3) &&
+            (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) ||
+           (ec->rx_coalesce_usecs == 2))
+               return -EINVAL;
+
+       if (ec->rx_coalesce_usecs <= 3) {
+               adapter->itr = 20000;
+               adapter->itr_setting = ec->rx_coalesce_usecs;
+       } else {
+               adapter->itr = (1000000 / ec->rx_coalesce_usecs);
+               adapter->itr_setting = adapter->itr & ~3;
+       }
+
+       if (adapter->itr_setting != 0)
+               ew32(ITR, 1000000000 / (adapter->itr * 256));
+       else
+               ew32(ITR, 0);
+
+       return 0;
+}
+
 static int e1000_nway_reset(struct net_device *netdev)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1845,6 +1886,8 @@ static const struct ethtool_ops e1000_ethtool_ops = {
        .phys_id                = e1000_phys_id,
        .get_ethtool_stats      = e1000_get_ethtool_stats,
        .get_sset_count         = e1000e_get_sset_count,
+       .get_coalesce           = e1000_get_coalesce,
+       .set_coalesce           = e1000_set_coalesce,
 };
 
 void e1000e_set_ethtool_ops(struct net_device *netdev)
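The new get/set coalesce hooks convert between microseconds of interrupt delay and the adapter's interrupt throttle rate: the stored itr is interrupts per second (1000000 / usecs), and the ITR register is programmed in 256 ns units (1000000000 / (itr * 256)). A small sketch of just that arithmetic, outside the driver and purely for illustration:

#include <stdio.h>

/*
 * Convert a requested rx-usecs coalescing value to the ITR register
 * encoding used above: itr is interrupts/second, the register counts
 * 256 ns intervals between interrupts.
 */
static unsigned int usecs_to_itr_reg(unsigned int usecs)
{
	unsigned int itr = 1000000 / usecs;	/* interrupts per second */

	return 1000000000 / (itr * 256);	/* register value, 256 ns units */
}

int main(void)
{
	unsigned int usecs;

	for (usecs = 10; usecs <= 10000; usecs *= 10)
		printf("rx-usecs %5u -> itr %6u irq/s -> ITR reg %u\n",
		       usecs, 1000000 / usecs, usecs_to_itr_reg(usecs));
	return 0;
}

For the bounds added in e1000.h, 10 us maps to roughly 100000 interrupts/s and 10000 us to roughly 100 interrupts/s, matching the E1000_MIN_ITR_USECS and E1000_MAX_ITR_USECS comments.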
index 53f1ac6327fabcd92312ca3d4733c7bb5983f773..a930e6d9cf02bdd34d8beb88d67f84c47d8fb6d1 100644 (file)
@@ -592,10 +592,8 @@ struct e1000_hw_stats {
        u64 bprc;
        u64 mprc;
        u64 gptc;
-       u64 gorcl;
-       u64 gorch;
-       u64 gotcl;
-       u64 gotch;
+       u64 gorc;
+       u64 gotc;
        u64 rnbc;
        u64 ruc;
        u64 rfc;
@@ -604,10 +602,8 @@ struct e1000_hw_stats {
        u64 mgprc;
        u64 mgpdc;
        u64 mgptc;
-       u64 torl;
-       u64 torh;
-       u64 totl;
-       u64 toth;
+       u64 tor;
+       u64 tot;
        u64 tpr;
        u64 tpt;
        u64 ptc64;
index c8dc47fd132a47e74401a0c103efb23a18b97934..8991ab8911e2f506e1d2be85fbad7a7f4a8ad4fb 100644 (file)
@@ -46,7 +46,7 @@
 
 #include "e1000.h"
 
-#define DRV_VERSION "0.2.0"
+#define DRV_VERSION "0.2.1"
 char e1000e_driver_name[] = "e1000e";
 const char e1000e_driver_version[] = DRV_VERSION;
 
@@ -466,10 +466,10 @@ next_desc:
        if (cleaned_count)
                adapter->alloc_rx_buf(adapter, cleaned_count);
 
-       adapter->total_rx_packets += total_rx_packets;
        adapter->total_rx_bytes += total_rx_bytes;
-       adapter->net_stats.rx_packets += total_rx_packets;
+       adapter->total_rx_packets += total_rx_packets;
        adapter->net_stats.rx_bytes += total_rx_bytes;
+       adapter->net_stats.rx_packets += total_rx_packets;
        return cleaned;
 }
 
@@ -606,8 +606,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
        }
        adapter->total_tx_bytes += total_tx_bytes;
        adapter->total_tx_packets += total_tx_packets;
-       adapter->net_stats.tx_packets += total_tx_packets;
        adapter->net_stats.tx_bytes += total_tx_bytes;
+       adapter->net_stats.tx_packets += total_tx_packets;
        return cleaned;
 }
 
@@ -775,10 +775,10 @@ next_desc:
        if (cleaned_count)
                adapter->alloc_rx_buf(adapter, cleaned_count);
 
-       adapter->total_rx_packets += total_rx_packets;
        adapter->total_rx_bytes += total_rx_bytes;
-       adapter->net_stats.rx_packets += total_rx_packets;
+       adapter->total_rx_packets += total_rx_packets;
        adapter->net_stats.rx_bytes += total_rx_bytes;
+       adapter->net_stats.rx_packets += total_rx_packets;
        return cleaned;
 }
 
@@ -2506,56 +2506,27 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
 
        adapter->stats.crcerrs += er32(CRCERRS);
        adapter->stats.gprc += er32(GPRC);
-       adapter->stats.gorcl += er32(GORCL);
-       adapter->stats.gorch += er32(GORCH);
+       adapter->stats.gorc += er32(GORCL);
+       er32(GORCH); /* Clear gorc */
        adapter->stats.bprc += er32(BPRC);
        adapter->stats.mprc += er32(MPRC);
        adapter->stats.roc += er32(ROC);
 
-       if (adapter->flags & FLAG_HAS_STATS_PTC_PRC) {
-               adapter->stats.prc64 += er32(PRC64);
-               adapter->stats.prc127 += er32(PRC127);
-               adapter->stats.prc255 += er32(PRC255);
-               adapter->stats.prc511 += er32(PRC511);
-               adapter->stats.prc1023 += er32(PRC1023);
-               adapter->stats.prc1522 += er32(PRC1522);
-               adapter->stats.symerrs += er32(SYMERRS);
-               adapter->stats.sec += er32(SEC);
-       }
-
        adapter->stats.mpc += er32(MPC);
        adapter->stats.scc += er32(SCC);
        adapter->stats.ecol += er32(ECOL);
        adapter->stats.mcc += er32(MCC);
        adapter->stats.latecol += er32(LATECOL);
        adapter->stats.dc += er32(DC);
-       adapter->stats.rlec += er32(RLEC);
        adapter->stats.xonrxc += er32(XONRXC);
        adapter->stats.xontxc += er32(XONTXC);
        adapter->stats.xoffrxc += er32(XOFFRXC);
        adapter->stats.xofftxc += er32(XOFFTXC);
-       adapter->stats.fcruc += er32(FCRUC);
        adapter->stats.gptc += er32(GPTC);
-       adapter->stats.gotcl += er32(GOTCL);
-       adapter->stats.gotch += er32(GOTCH);
+       adapter->stats.gotc += er32(GOTCL);
+       er32(GOTCH); /* Clear gotc */
        adapter->stats.rnbc += er32(RNBC);
        adapter->stats.ruc += er32(RUC);
-       adapter->stats.rfc += er32(RFC);
-       adapter->stats.rjc += er32(RJC);
-       adapter->stats.torl += er32(TORL);
-       adapter->stats.torh += er32(TORH);
-       adapter->stats.totl += er32(TOTL);
-       adapter->stats.toth += er32(TOTH);
-       adapter->stats.tpr += er32(TPR);
-
-       if (adapter->flags & FLAG_HAS_STATS_PTC_PRC) {
-               adapter->stats.ptc64 += er32(PTC64);
-               adapter->stats.ptc127 += er32(PTC127);
-               adapter->stats.ptc255 += er32(PTC255);
-               adapter->stats.ptc511 += er32(PTC511);
-               adapter->stats.ptc1023 += er32(PTC1023);
-               adapter->stats.ptc1522 += er32(PTC1522);
-       }
 
        adapter->stats.mptc += er32(MPTC);
        adapter->stats.bptc += er32(BPTC);
@@ -2574,19 +2545,6 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
        adapter->stats.tsctc += er32(TSCTC);
        adapter->stats.tsctfc += er32(TSCTFC);
 
-       adapter->stats.iac += er32(IAC);
-
-       if (adapter->flags & FLAG_HAS_STATS_ICR_ICT) {
-               adapter->stats.icrxoc += er32(ICRXOC);
-               adapter->stats.icrxptc += er32(ICRXPTC);
-               adapter->stats.icrxatc += er32(ICRXATC);
-               adapter->stats.ictxptc += er32(ICTXPTC);
-               adapter->stats.ictxatc += er32(ICTXATC);
-               adapter->stats.ictxqec += er32(ICTXQEC);
-               adapter->stats.ictxqmtc += er32(ICTXQMTC);
-               adapter->stats.icrxdmtc += er32(ICRXDMTC);
-       }
-
        /* Fill out the OS statistics structure */
        adapter->net_stats.multicast = adapter->stats.mprc;
        adapter->net_stats.collisions = adapter->stats.colc;
@@ -2633,6 +2591,54 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
        spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
 }
 
+/**
+ * e1000_phy_read_status - Update the PHY register status snapshot
+ * @adapter: board private structure
+ **/
+static void e1000_phy_read_status(struct e1000_adapter *adapter)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       struct e1000_phy_regs *phy = &adapter->phy_regs;
+       int ret_val;
+       unsigned long irq_flags;
+
+
+       spin_lock_irqsave(&adapter->stats_lock, irq_flags);
+
+       if ((er32(STATUS) & E1000_STATUS_LU) &&
+           (adapter->hw.phy.media_type == e1000_media_type_copper)) {
+               ret_val  = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
+               ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
+               ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
+               ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa);
+               ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, &phy->expansion);
+               ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000);
+               ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000);
+               ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus);
+               if (ret_val)
+                       ndev_warn(adapter->netdev,
+                                 "Error reading PHY register\n");
+       } else {
+               /*
+                * Do not read PHY registers if link is not up
+                * Set values to typical power-on defaults
+                */
+               phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
+               phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL |
+                            BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE |
+                            BMSR_ERCAP);
+               phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP |
+                                 ADVERTISE_ALL | ADVERTISE_CSMA);
+               phy->lpa = 0;
+               phy->expansion = EXPANSION_ENABLENPAGE;
+               phy->ctrl1000 = ADVERTISE_1000FULL;
+               phy->stat1000 = 0;
+               phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
+       }
+
+       spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
+}
+
 static void e1000_print_link_info(struct e1000_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
@@ -2745,6 +2751,7 @@ static void e1000_watchdog_task(struct work_struct *work)
                if (!netif_carrier_ok(netdev)) {
                        bool txb2b = 1;
                        /* update snapshot of PHY registers on LSC */
+                       e1000_phy_read_status(adapter);
                        mac->ops.get_link_up_info(&adapter->hw,
                                                   &adapter->link_speed,
                                                   &adapter->link_duplex);
@@ -2842,10 +2849,10 @@ link_up:
        mac->collision_delta = adapter->stats.colc - adapter->colc_old;
        adapter->colc_old = adapter->stats.colc;
 
-       adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
-       adapter->gorcl_old = adapter->stats.gorcl;
-       adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
-       adapter->gotcl_old = adapter->stats.gotcl;
+       adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
+       adapter->gorc_old = adapter->stats.gorc;
+       adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
+       adapter->gotc_old = adapter->stats.gotc;
 
        e1000e_update_adaptive(&adapter->hw);
 
@@ -3500,7 +3507,6 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct mii_ioctl_data *data = if_mii(ifr);
-       unsigned long irq_flags;
 
        if (adapter->hw.phy.media_type != e1000_media_type_copper)
                return -EOPNOTSUPP;
@@ -3512,13 +3518,40 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
        case SIOCGMIIREG:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
-               spin_lock_irqsave(&adapter->stats_lock, irq_flags);
-               if (e1e_rphy(&adapter->hw, data->reg_num & 0x1F,
-                                  &data->val_out)) {
-                       spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
+               switch (data->reg_num & 0x1F) {
+               case MII_BMCR:
+                       data->val_out = adapter->phy_regs.bmcr;
+                       break;
+               case MII_BMSR:
+                       data->val_out = adapter->phy_regs.bmsr;
+                       break;
+               case MII_PHYSID1:
+                       data->val_out = (adapter->hw.phy.id >> 16);
+                       break;
+               case MII_PHYSID2:
+                       data->val_out = (adapter->hw.phy.id & 0xFFFF);
+                       break;
+               case MII_ADVERTISE:
+                       data->val_out = adapter->phy_regs.advertise;
+                       break;
+               case MII_LPA:
+                       data->val_out = adapter->phy_regs.lpa;
+                       break;
+               case MII_EXPANSION:
+                       data->val_out = adapter->phy_regs.expansion;
+                       break;
+               case MII_CTRL1000:
+                       data->val_out = adapter->phy_regs.ctrl1000;
+                       break;
+               case MII_STAT1000:
+                       data->val_out = adapter->phy_regs.stat1000;
+                       break;
+               case MII_ESTATUS:
+                       data->val_out = adapter->phy_regs.estatus;
+                       break;
+               default:
                        return -EIO;
                }
-               spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
                break;
        case SIOCSMIIREG:
        default:
@@ -3774,6 +3807,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
                return PCI_ERS_RESULT_DISCONNECT;
        }
        pci_set_master(pdev);
+       pci_restore_state(pdev);
 
        pci_enable_wake(pdev, PCI_D3hot, 0);
        pci_enable_wake(pdev, PCI_D3cold, 0);
@@ -3900,6 +3934,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
                goto err_pci_reg;
 
        pci_set_master(pdev);
+       pci_save_state(pdev);
 
        err = -ENOMEM;
        netdev = alloc_etherdev(sizeof(struct e1000_adapter));
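The SIOCGMIIREG path above no longer touches the PHY from ioctl context; it answers from the phy_regs snapshot that the watchdog refreshes on link-state changes. A reduced sketch of that serve-from-snapshot pattern follows; the register numbers, struct layout, and preset values are simplified placeholders rather than the driver's actual state.

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

/* Snapshot of the PHY registers we are willing to report. */
struct phy_snapshot {
	uint16_t bmcr;
	uint16_t bmsr;
	uint16_t lpa;
};

/* Refreshed from hardware only on link-state changes (watchdog context). */
static struct phy_snapshot cached = {
	.bmcr = 0x1140,		/* autoneg enable, 1000 Mb/s, full duplex */
	.bmsr = 0x796d,		/* typical link-up status */
	.lpa  = 0xc1e1,		/* partner advertises 10/100 + pause */
};

/* ioctl-style lookup: answer from the cache, never touch the MDIO bus. */
static int mii_read_cached(unsigned int reg, uint16_t *val)
{
	switch (reg) {
	case 0x00: *val = cached.bmcr; return 0;	/* BMCR */
	case 0x01: *val = cached.bmsr; return 0;	/* BMSR */
	case 0x05: *val = cached.lpa;  return 0;	/* LPA  */
	default:   return -EIO;				/* not snapshotted */
	}
}

int main(void)
{
	uint16_t v;

	if (mii_read_cached(0x01, &v) == 0)
		printf("BMSR = 0x%04x\n", v);
	return 0;
}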
index 3a4574caa75bb1fdcd824d5b3853141773fda532..e102332a6beed4e49588f2c37063a0429fef85f8 100644 (file)
@@ -116,7 +116,7 @@ s32 e1000e_phy_reset_dsp(struct e1000_hw *hw)
 }
 
 /**
- *  e1000_read_phy_reg_mdic - Read MDI control register
+ *  e1000e_read_phy_reg_mdic - Read MDI control register
  *  @hw: pointer to the HW structure
  *  @offset: register offset to be read
  *  @data: pointer to the read data
@@ -124,7 +124,7 @@ s32 e1000e_phy_reset_dsp(struct e1000_hw *hw)
  *  Reads the MDI control register in the PHY at offset and stores the
  *  information read to data.
  **/
-static s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
+s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
 {
        struct e1000_phy_info *phy = &hw->phy;
        u32 i, mdic = 0;
@@ -150,7 +150,7 @@ static s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
         * Increasing the time out as testing showed failures with
         * the lower time out
         */
-       for (i = 0; i < 64; i++) {
+       for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
                udelay(50);
                mdic = er32(MDIC);
                if (mdic & E1000_MDIC_READY)
@@ -170,14 +170,14 @@ static s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
 }
 
 /**
- *  e1000_write_phy_reg_mdic - Write MDI control register
+ *  e1000e_write_phy_reg_mdic - Write MDI control register
  *  @hw: pointer to the HW structure
  *  @offset: register offset to write to
  *  @data: data to write to register at offset
  *
  *  Writes data to MDI control register in the PHY at offset.
  **/
-static s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
+s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
 {
        struct e1000_phy_info *phy = &hw->phy;
        u32 i, mdic = 0;
@@ -199,9 +199,13 @@ static s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
 
        ew32(MDIC, mdic);
 
-       /* Poll the ready bit to see if the MDI read completed */
-       for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
-               udelay(5);
+       /*
+        * Poll the ready bit to see if the MDI read completed
+        * Increasing the time out as testing showed failures with
+        * the lower time out
+        */
+       for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+               udelay(50);
                mdic = er32(MDIC);
                if (mdic & E1000_MDIC_READY)
                        break;
@@ -210,6 +214,10 @@ static s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
                hw_dbg(hw, "MDI Write did not complete\n");
                return -E1000_ERR_PHY;
        }
+       if (mdic & E1000_MDIC_ERROR) {
+               hw_dbg(hw, "MDI Error\n");
+               return -E1000_ERR_PHY;
+       }
 
        return 0;
 }
@@ -232,9 +240,8 @@ s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data)
        if (ret_val)
                return ret_val;
 
-       ret_val = e1000_read_phy_reg_mdic(hw,
-                                         MAX_PHY_REG_ADDRESS & offset,
-                                         data);
+       ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+                                          data);
 
        hw->phy.ops.release_phy(hw);
 
@@ -258,9 +265,8 @@ s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data)
        if (ret_val)
                return ret_val;
 
-       ret_val = e1000_write_phy_reg_mdic(hw,
-                                          MAX_PHY_REG_ADDRESS & offset,
-                                          data);
+       ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+                                           data);
 
        hw->phy.ops.release_phy(hw);
 
@@ -286,18 +292,17 @@ s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
                return ret_val;
 
        if (offset > MAX_PHY_MULTI_PAGE_REG) {
-               ret_val = e1000_write_phy_reg_mdic(hw,
-                                                  IGP01E1000_PHY_PAGE_SELECT,
-                                                  (u16)offset);
+               ret_val = e1000e_write_phy_reg_mdic(hw,
+                                                   IGP01E1000_PHY_PAGE_SELECT,
+                                                   (u16)offset);
                if (ret_val) {
                        hw->phy.ops.release_phy(hw);
                        return ret_val;
                }
        }
 
-       ret_val = e1000_read_phy_reg_mdic(hw,
-                                         MAX_PHY_REG_ADDRESS & offset,
-                                         data);
+       ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+                                          data);
 
        hw->phy.ops.release_phy(hw);
 
@@ -322,18 +327,17 @@ s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
                return ret_val;
 
        if (offset > MAX_PHY_MULTI_PAGE_REG) {
-               ret_val = e1000_write_phy_reg_mdic(hw,
-                                                  IGP01E1000_PHY_PAGE_SELECT,
-                                                  (u16)offset);
+               ret_val = e1000e_write_phy_reg_mdic(hw,
+                                                   IGP01E1000_PHY_PAGE_SELECT,
+                                                   (u16)offset);
                if (ret_val) {
                        hw->phy.ops.release_phy(hw);
                        return ret_val;
                }
        }
 
-       ret_val = e1000_write_phy_reg_mdic(hw,
-                                          MAX_PHY_REG_ADDRESS & offset,
-                                          data);
+       ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+                                           data);
 
        hw->phy.ops.release_phy(hw);
 
@@ -420,7 +424,9 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
        if (ret_val)
                return ret_val;
 
-       phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+       /* For newer PHYs this bit is downshift enable */
+       if (phy->type == e1000_phy_m88)
+               phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
 
        /*
         * Options:
@@ -463,7 +469,7 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
        if (ret_val)
                return ret_val;
 
-       if (phy->revision < 4) {
+       if ((phy->type == e1000_phy_m88) && (phy->revision < 4)) {
                /*
                 * Force TX_CLK in the Extended PHY Specific Control Register
                 * to 25MHz clock.
@@ -518,8 +524,11 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw)
                return ret_val;
        }
 
-       /* Wait 15ms for MAC to configure PHY from NVM settings. */
-       msleep(15);
+       /*
+        * Wait 100ms for MAC to configure PHY from NVM settings, to avoid
+        * timeout issues when LFS is enabled.
+        */
+       msleep(100);
 
        /* disable lplu d0 during driver init */
        ret_val = e1000_set_d0_lplu_state(hw, 0);
@@ -1152,9 +1161,7 @@ s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active)
 
        if (!active) {
                data &= ~IGP02E1000_PM_D3_LPLU;
-               ret_val = e1e_wphy(hw,
-                                            IGP02E1000_PHY_POWER_MGMT,
-                                            data);
+               ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
                if (ret_val)
                        return ret_val;
                /*
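The MDIC hunks above lengthen the ready-bit poll (more iterations, a longer delay per iteration) and add an explicit error-bit check once the access completes. A generic poll-until-ready-or-timeout sketch is shown below; the bit positions, the POLL_LIMIT bound, and the stubbed register read are arbitrary values for the sketch, since the real E1000_GEN_POLL_TIMEOUT value is not part of this diff.

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>

#define READY_BIT  (1u << 28)
#define ERROR_BIT  (1u << 30)
#define POLL_LIMIT 192		/* arbitrary bound for the sketch */

/* Stub for a memory-mapped control-register read. */
static uint32_t read_mdic(void)
{
	static int ticks;

	return (++ticks >= 4) ? READY_BIT : 0;	/* "completes" on 4th poll */
}

static int wait_mdic_ready(void)
{
	uint32_t mdic = 0;
	int i;

	for (i = 0; i < POLL_LIMIT; i++) {
		usleep(50);		/* like udelay(50) in the driver loop */
		mdic = read_mdic();
		if (mdic & READY_BIT)
			break;
	}
	if (!(mdic & READY_BIT)) {
		fprintf(stderr, "MDI access did not complete\n");
		return -1;
	}
	if (mdic & ERROR_BIT) {
		fprintf(stderr, "MDI error\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	return wait_mdic_ready() ? 1 : 0;
}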
index 9ff7538b75952e94ef27d9660dd31fa00f75b605..f9bc21c74b59222a7c8775dfaba80a91d9abb626 100644 (file)
@@ -2611,7 +2611,7 @@ static int ehea_stop(struct net_device *dev)
        return ret;
 }
 
-void ehea_purge_sq(struct ehea_qp *orig_qp)
+static void ehea_purge_sq(struct ehea_qp *orig_qp)
 {
        struct ehea_qp qp = *orig_qp;
        struct ehea_qp_init_attr *init_attr = &qp.init_attr;
@@ -2625,7 +2625,7 @@ void ehea_purge_sq(struct ehea_qp *orig_qp)
        }
 }
 
-void ehea_flush_sq(struct ehea_port *port)
+static void ehea_flush_sq(struct ehea_port *port)
 {
        int i;
 
index 8c4214b0ee1f2ff7d6bc7f04bd497e07ef613f35..35f66d4a4595d5148f4affbc167897c60ca49792 100644 (file)
@@ -96,6 +96,7 @@
 #define DEV_HAS_PAUSEFRAME_TX_V2   0x10000  /* device supports tx pause frames version 2 */
 #define DEV_HAS_PAUSEFRAME_TX_V3   0x20000  /* device supports tx pause frames version 3 */
 #define DEV_NEED_TX_LIMIT          0x40000  /* device needs to limit tx */
+#define DEV_HAS_GEAR_MODE          0x80000  /* device supports gear mode */
 
 enum {
        NvRegIrqStatus = 0x000,
@@ -174,11 +175,13 @@ enum {
        NvRegReceiverStatus = 0x98,
 #define NVREG_RCVSTAT_BUSY     0x01
 
-       NvRegRandomSeed = 0x9c,
-#define NVREG_RNDSEED_MASK     0x00ff
-#define NVREG_RNDSEED_FORCE    0x7f00
-#define NVREG_RNDSEED_FORCE2   0x2d00
-#define NVREG_RNDSEED_FORCE3   0x7400
+       NvRegSlotTime = 0x9c,
+#define NVREG_SLOTTIME_LEGBF_ENABLED   0x80000000
+#define NVREG_SLOTTIME_10_100_FULL     0x00007f00
+#define NVREG_SLOTTIME_1000_FULL       0x0003ff00
+#define NVREG_SLOTTIME_HALF            0x0000ff00
+#define NVREG_SLOTTIME_DEFAULT         0x00007f00
+#define NVREG_SLOTTIME_MASK            0x000000ff
 
        NvRegTxDeferral = 0xA0,
 #define NVREG_TX_DEFERRAL_DEFAULT              0x15050f
@@ -201,6 +204,11 @@ enum {
 
        NvRegPhyInterface = 0xC0,
 #define PHY_RGMII              0x10000000
+       NvRegBackOffControl = 0xC4,
+#define NVREG_BKOFFCTRL_DEFAULT                        0x70000000
+#define NVREG_BKOFFCTRL_SEED_MASK              0x000003ff
+#define NVREG_BKOFFCTRL_SELECT                 24
+#define NVREG_BKOFFCTRL_GEAR                   12
 
        NvRegTxRingPhysAddr = 0x100,
        NvRegRxRingPhysAddr = 0x104,
@@ -352,6 +360,7 @@ union ring_type {
 
 #define NV_TX_LASTPACKET       (1<<16)
 #define NV_TX_RETRYERROR       (1<<19)
+#define NV_TX_RETRYCOUNT_MASK  (0xF<<20)
 #define NV_TX_FORCED_INTERRUPT (1<<24)
 #define NV_TX_DEFERRED         (1<<26)
 #define NV_TX_CARRIERLOST      (1<<27)
@@ -362,6 +371,7 @@ union ring_type {
 
 #define NV_TX2_LASTPACKET      (1<<29)
 #define NV_TX2_RETRYERROR      (1<<18)
+#define NV_TX2_RETRYCOUNT_MASK (0xF<<19)
 #define NV_TX2_FORCED_INTERRUPT        (1<<30)
 #define NV_TX2_DEFERRED                (1<<25)
 #define NV_TX2_CARRIERLOST     (1<<26)
@@ -473,16 +483,22 @@ union ring_type {
 #define DESC_VER_3     3
 
 /* PHY defines */
-#define PHY_OUI_MARVELL        0x5043
-#define PHY_OUI_CICADA 0x03f1
-#define PHY_OUI_VITESSE        0x01c1
-#define PHY_OUI_REALTEK        0x0732
+#define PHY_OUI_MARVELL                0x5043
+#define PHY_OUI_CICADA         0x03f1
+#define PHY_OUI_VITESSE                0x01c1
+#define PHY_OUI_REALTEK                0x0732
+#define PHY_OUI_REALTEK2       0x0020
 #define PHYID1_OUI_MASK        0x03ff
 #define PHYID1_OUI_SHFT        6
 #define PHYID2_OUI_MASK        0xfc00
 #define PHYID2_OUI_SHFT        10
 #define PHYID2_MODEL_MASK              0x03f0
-#define PHY_MODEL_MARVELL_E3016                0x220
+#define PHY_MODEL_REALTEK_8211         0x0110
+#define PHY_REV_MASK                   0x0001
+#define PHY_REV_REALTEK_8211B          0x0000
+#define PHY_REV_REALTEK_8211C          0x0001
+#define PHY_MODEL_REALTEK_8201         0x0200
+#define PHY_MODEL_MARVELL_E3016                0x0220
 #define PHY_MARVELL_E3016_INITMASK     0x0300
 #define PHY_CICADA_INIT1       0x0f000
 #define PHY_CICADA_INIT2       0x0e00
@@ -509,10 +525,18 @@ union ring_type {
 #define PHY_REALTEK_INIT_REG1  0x1f
 #define PHY_REALTEK_INIT_REG2  0x19
 #define PHY_REALTEK_INIT_REG3  0x13
+#define PHY_REALTEK_INIT_REG4  0x14
+#define PHY_REALTEK_INIT_REG5  0x18
+#define PHY_REALTEK_INIT_REG6  0x11
 #define PHY_REALTEK_INIT1      0x0000
 #define PHY_REALTEK_INIT2      0x8e00
 #define PHY_REALTEK_INIT3      0x0001
 #define PHY_REALTEK_INIT4      0xad17
+#define PHY_REALTEK_INIT5      0xfb54
+#define PHY_REALTEK_INIT6      0xf5c7
+#define PHY_REALTEK_INIT7      0x1000
+#define PHY_REALTEK_INIT8      0x0003
+#define PHY_REALTEK_INIT_MSK1  0x0003
 
 #define PHY_GIGABIT    0x0100
 
@@ -691,6 +715,7 @@ struct fe_priv {
        int wolenabled;
        unsigned int phy_oui;
        unsigned int phy_model;
+       unsigned int phy_rev;
        u16 gigabit;
        int intr_test;
        int recover_error;
@@ -704,6 +729,7 @@ struct fe_priv {
        u32 txrxctl_bits;
        u32 vlanctl_bits;
        u32 driver_data;
+       u32 device_id;
        u32 register_size;
        int rx_csum;
        u32 mac_in_use;
@@ -814,6 +840,16 @@ enum {
 };
 static int dma_64bit = NV_DMA_64BIT_ENABLED;
 
+/*
+ * Crossover Detection
+ * Realtek 8201 phy + some OEM boards do not work properly.
+ */
+enum {
+       NV_CROSSOVER_DETECTION_DISABLED,
+       NV_CROSSOVER_DETECTION_ENABLED
+};
+static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;
+
 static inline struct fe_priv *get_nvpriv(struct net_device *dev)
 {
        return netdev_priv(dev);
@@ -1078,25 +1114,53 @@ static int phy_init(struct net_device *dev)
                }
        }
        if (np->phy_oui == PHY_OUI_REALTEK) {
-               if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
-                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-                       return PHY_ERROR;
-               }
-               if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
-                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-                       return PHY_ERROR;
-               }
-               if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
-                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-                       return PHY_ERROR;
-               }
-               if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
-                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-                       return PHY_ERROR;
+               if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
+                   np->phy_rev == PHY_REV_REALTEK_8211B) {
+                       if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
+                               printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+                               return PHY_ERROR;
+                       }
+                       if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
+                               printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+                               return PHY_ERROR;
+                       }
+                       if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
+                               printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+                               return PHY_ERROR;
+                       }
+                       if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
+                               printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+                               return PHY_ERROR;
+                       }
+                       if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) {
+                               printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+                               return PHY_ERROR;
+                       }
+                       if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) {
+                               printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+                               return PHY_ERROR;
+                       }
+                       if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
+                               printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+                               return PHY_ERROR;
+                       }
                }
-               if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
-                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-                       return PHY_ERROR;
+               if (np->phy_model == PHY_MODEL_REALTEK_8201) {
+                       if (np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_32 ||
+                           np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
+                           np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_34 ||
+                           np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_35 ||
+                           np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_36 ||
+                           np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_37 ||
+                           np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_38 ||
+                           np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_39) {
+                               phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
+                               phy_reserved |= PHY_REALTEK_INIT7;
+                               if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
+                                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+                                       return PHY_ERROR;
+                               }
+                       }
                }
        }
 
@@ -1236,26 +1300,71 @@ static int phy_init(struct net_device *dev)
                }
        }
        if (np->phy_oui == PHY_OUI_REALTEK) {
-               /* reset could have cleared these out, set them back */
-               if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
-                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-                       return PHY_ERROR;
-               }
-               if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
-                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-                       return PHY_ERROR;
-               }
-               if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
-                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-                       return PHY_ERROR;
-               }
-               if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
-                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-                       return PHY_ERROR;
+               if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
+                   np->phy_rev == PHY_REV_REALTEK_8211B) {
+                       /* reset could have cleared these out, set them back */
+                       if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
+                               printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+                               return PHY_ERROR;
+                       }
+                       if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
+                               printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+                               return PHY_ERROR;
+                       }
+                       if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
+                               printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+                               return PHY_ERROR;
+                       }
+                       if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
+                               printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+                               return PHY_ERROR;
+                       }
+                       if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) {
+                               printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+                               return PHY_ERROR;
+                       }
+                       if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) {
+                               printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+                               return PHY_ERROR;
+                       }
+                       if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
+                               printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+                               return PHY_ERROR;
+                       }
                }
-               if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
-                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-                       return PHY_ERROR;
+               if (np->phy_model == PHY_MODEL_REALTEK_8201) {
+                       if (np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_32 ||
+                           np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
+                           np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_34 ||
+                           np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_35 ||
+                           np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_36 ||
+                           np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_37 ||
+                           np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_38 ||
+                           np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_39) {
+                               phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
+                               phy_reserved |= PHY_REALTEK_INIT7;
+                               if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
+                                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+                                       return PHY_ERROR;
+                               }
+                       }
+                       if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
+                               if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
+                                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+                                       return PHY_ERROR;
+                               }
+                               phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
+                               phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
+                               phy_reserved |= PHY_REALTEK_INIT3;
+                               if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved)) {
+                                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+                                       return PHY_ERROR;
+                               }
+                               if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
+                                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+                                       return PHY_ERROR;
+                               }
+                       }
                }
        }
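
The Realtek 8211B and 8201 sequences above repeat the same write-then-check step for every register. Purely as an illustrative sketch (this helper is not part of forcedeth, and it assumes mii_rw() returns non-zero on a failed write, as the checks above imply), one step could be factored out like this:

static int nv_phy_init_step(struct net_device *dev, struct fe_priv *np,
                            int reg, int val)
{
        /* mirror the pattern used above: write, log on failure, bail out */
        if (mii_rw(dev, np->phyaddr, reg, val)) {
                printk(KERN_INFO "%s: phy init failed.\n",
                       pci_name(np->pci_dev));
                return PHY_ERROR;
        }
        return 0;
}

A caller would then chain steps with, e.g., if (nv_phy_init_step(dev, np, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) return PHY_ERROR;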
 
@@ -1769,6 +1878,115 @@ static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
        return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
 }
 
+static void nv_legacybackoff_reseed(struct net_device *dev)
+{
+       u8 __iomem *base = get_hwbase(dev);
+       u32 reg;
+       u32 low;
+       int tx_status = 0;
+
+       reg = readl(base + NvRegSlotTime) & ~NVREG_SLOTTIME_MASK;
+       get_random_bytes(&low, sizeof(low));
+       reg |= low & NVREG_SLOTTIME_MASK;
+
+       /* Need to stop tx before change takes effect.
+        * Caller has already gained np->lock.
+        */
+       tx_status = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START;
+       if (tx_status)
+               nv_stop_tx(dev);
+       nv_stop_rx(dev);
+       writel(reg, base + NvRegSlotTime);
+       if (tx_status)
+               nv_start_tx(dev);
+       nv_start_rx(dev);
+}
+
+/* Gear Backoff Seeds */
+#define BACKOFF_SEEDSET_ROWS   8
+#define BACKOFF_SEEDSET_LFSRS  15
+
+/* Known Good seed sets */
+static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
+    {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
+    {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
+    {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
+    {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
+    {266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
+    {266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
+    {366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800,  84},
+    {466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184}};
+
+static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
+    {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
+    {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
+    {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
+    {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
+    {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
+    {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
+    {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
+    {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}};
+
+static void nv_gear_backoff_reseed(struct net_device *dev)
+{
+       u8 __iomem *base = get_hwbase(dev);
+       u32 miniseed1, miniseed2, miniseed2_reversed, miniseed3, miniseed3_reversed;
+       u32 temp, seedset, combinedSeed;
+       int i;
+
+       /* Setup seed for free running LFSR */
+       /* We gather three random 12-bit miniseeds
+          and swizzle bits around to increase randomness */
+       get_random_bytes(&miniseed1, sizeof(miniseed1));
+       miniseed1 &= 0x0fff;
+       if (miniseed1 == 0)
+               miniseed1 = 0xabc;
+
+       get_random_bytes(&miniseed2, sizeof(miniseed2));
+       miniseed2 &= 0x0fff;
+       if (miniseed2 == 0)
+               miniseed2 = 0xabc;
+       miniseed2_reversed =
+               ((miniseed2 & 0xF00) >> 8) |
+                (miniseed2 & 0x0F0) |
+                ((miniseed2 & 0x00F) << 8);
+
+       get_random_bytes(&miniseed3, sizeof(miniseed3));
+       miniseed3 &= 0x0fff;
+       if (miniseed3 == 0)
+               miniseed3 = 0xabc;
+       miniseed3_reversed =
+               ((miniseed3 & 0xF00) >> 8) |
+                (miniseed3 & 0x0F0) |
+                ((miniseed3 & 0x00F) << 8);
+
+       combinedSeed = ((miniseed1 ^ miniseed2_reversed) << 12) |
+                      (miniseed2 ^ miniseed3_reversed);
+
+       /* Seeds can not be zero */
+       if ((combinedSeed & NVREG_BKOFFCTRL_SEED_MASK) == 0)
+               combinedSeed |= 0x08;
+       if ((combinedSeed & (NVREG_BKOFFCTRL_SEED_MASK << NVREG_BKOFFCTRL_GEAR)) == 0)
+               combinedSeed |= 0x8000;
+
+       /* No need to disable tx here */
+       temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT);
+       temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK;
+       temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR;
+       writel(temp,base + NvRegBackOffControl);
+
+       /* Setup seeds for all gear LFSRs. */
+       get_random_bytes(&seedset, sizeof(seedset));
+       seedset = seedset % BACKOFF_SEEDSET_ROWS;
+       for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++)
+       {
+               temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT);
+               temp |= main_seedset[seedset][i-1] & 0x3ff;
+               temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
+               writel(temp, base + NvRegBackOffControl);
+       }
+}
+
 /*
  * nv_start_xmit: dev->hard_start_xmit function
  * Called with netif_tx_lock held.
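
nv_gear_backoff_reseed() above builds a 24-bit seed from three random 12-bit miniseeds, swapping the outer nibbles of two of them before combining. A stand-alone, user-space illustration of that swizzle (the miniseed values are made up):

#include <stdio.h>

/* Swap the high and low nibbles of a 12-bit value, as in the reseed above. */
static unsigned int reverse_nibbles12(unsigned int v)
{
        return ((v & 0xF00) >> 8) |     /* bits 11..8 -> 3..0  */
                (v & 0x0F0)       |     /* bits  7..4 stay     */
               ((v & 0x00F) << 8);      /* bits  3..0 -> 11..8 */
}

int main(void)
{
        unsigned int m1 = 0x123, m2 = 0x456, m3 = 0x789;  /* example miniseeds */
        unsigned int combined = ((m1 ^ reverse_nibbles12(m2)) << 12) |
                                 (m2 ^ reverse_nibbles12(m3));

        printf("combined seed: 0x%06x\n", combined);
        return 0;
}
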
@@ -2088,6 +2306,8 @@ static void nv_tx_done(struct net_device *dev)
                                                dev->stats.tx_fifo_errors++;
                                        if (flags & NV_TX_CARRIERLOST)
                                                dev->stats.tx_carrier_errors++;
+                                       if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK))
+                                               nv_legacybackoff_reseed(dev);
                                        dev->stats.tx_errors++;
                                } else {
                                        dev->stats.tx_packets++;
@@ -2103,6 +2323,8 @@ static void nv_tx_done(struct net_device *dev)
                                                dev->stats.tx_fifo_errors++;
                                        if (flags & NV_TX2_CARRIERLOST)
                                                dev->stats.tx_carrier_errors++;
+                                       if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK))
+                                               nv_legacybackoff_reseed(dev);
                                        dev->stats.tx_errors++;
                                } else {
                                        dev->stats.tx_packets++;
@@ -2144,6 +2366,15 @@ static void nv_tx_done_optimized(struct net_device *dev, int limit)
                if (flags & NV_TX2_LASTPACKET) {
                        if (!(flags & NV_TX2_ERROR))
                                dev->stats.tx_packets++;
+                       else {
+                               if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
+                                       if (np->driver_data & DEV_HAS_GEAR_MODE)
+                                               nv_gear_backoff_reseed(dev);
+                                       else
+                                               nv_legacybackoff_reseed(dev);
+                               }
+                       }
+
                        dev_kfree_skb_any(np->get_tx_ctx->skb);
                        np->get_tx_ctx->skb = NULL;
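
In each completion path above, a reseed is only triggered when the descriptor reports a retry error and its retry-count field is already zero, i.e. the NIC exhausted its backoff attempts. As a hypothetical sketch (not code from the driver), the optimized-path decision amounts to:

/* Hypothetical helper; flags is the descriptor flag word checked above. */
static void nv_reseed_on_retry_error(struct net_device *dev, u32 flags)
{
        struct fe_priv *np = netdev_priv(dev);

        if (!(flags & NV_TX2_RETRYERROR) || (flags & NV_TX2_RETRYCOUNT_MASK))
                return;

        if (np->driver_data & DEV_HAS_GEAR_MODE)
                nv_gear_backoff_reseed(dev);    /* newer NICs: gear LFSRs */
        else
                nv_legacybackoff_reseed(dev);   /* older NICs: slot time  */
}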
 
@@ -2905,15 +3136,14 @@ set_speed:
        }
 
        if (np->gigabit == PHY_GIGABIT) {
-               phyreg = readl(base + NvRegRandomSeed);
+               phyreg = readl(base + NvRegSlotTime);
                phyreg &= ~(0x3FF00);
-               if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10)
-                       phyreg |= NVREG_RNDSEED_FORCE3;
-               else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
-                       phyreg |= NVREG_RNDSEED_FORCE2;
+               if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) ||
+                   ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100))
+                       phyreg |= NVREG_SLOTTIME_10_100_FULL;
                else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
-                       phyreg |= NVREG_RNDSEED_FORCE;
-               writel(phyreg, base + NvRegRandomSeed);
+                       phyreg |= NVREG_SLOTTIME_1000_FULL;
+               writel(phyreg, base + NvRegSlotTime);
        }
 
        phyreg = readl(base + NvRegPhyInterface);
@@ -4843,6 +5073,7 @@ static int nv_open(struct net_device *dev)
        u8 __iomem *base = get_hwbase(dev);
        int ret = 1;
        int oom, i;
+       u32 low;
 
        dprintk(KERN_DEBUG "nv_open: begin\n");
 
@@ -4902,8 +5133,20 @@ static int nv_open(struct net_device *dev)
        writel(np->rx_buf_sz, base + NvRegOffloadConfig);
 
        writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
-       get_random_bytes(&i, sizeof(i));
-       writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed);
+
+       get_random_bytes(&low, sizeof(low));
+       low &= NVREG_SLOTTIME_MASK;
+       if (np->desc_ver == DESC_VER_1) {
+               writel(low|NVREG_SLOTTIME_DEFAULT, base + NvRegSlotTime);
+       } else {
+               if (!(np->driver_data & DEV_HAS_GEAR_MODE)) {
+                       /* setup legacy backoff */
+                       writel(NVREG_SLOTTIME_LEGBF_ENABLED|NVREG_SLOTTIME_10_100_FULL|low, base + NvRegSlotTime);
+               } else {
+                       writel(NVREG_SLOTTIME_10_100_FULL, base + NvRegSlotTime);
+                       nv_gear_backoff_reseed(dev);
+               }
+       }
        writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
        writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
        if (poll_interval == -1) {
@@ -5110,6 +5353,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 
        /* copy of driver data */
        np->driver_data = id->driver_data;
+       /* copy of device id */
+       np->device_id = id->device;
 
        /* handle different descriptor versions */
        if (id->driver_data & DEV_HAS_HIGH_DMA) {
@@ -5399,6 +5644,14 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
                        pci_name(pci_dev), id1, id2, phyaddr);
                np->phyaddr = phyaddr;
                np->phy_oui = id1 | id2;
+
+               /* Realtek hardcoded phy id1 to all zero's on certain phys */
+               if (np->phy_oui == PHY_OUI_REALTEK2)
+                       np->phy_oui = PHY_OUI_REALTEK;
+               /* Setup phy revision for Realtek */
+               if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211)
+                       np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK;
+
                break;
        }
        if (i == 33) {
@@ -5477,6 +5730,28 @@ out:
        return err;
 }
 
+static void nv_restore_phy(struct net_device *dev)
+{
+       struct fe_priv *np = netdev_priv(dev);
+       u16 phy_reserved, mii_control;
+
+       if (np->phy_oui == PHY_OUI_REALTEK &&
+           np->phy_model == PHY_MODEL_REALTEK_8201 &&
+           phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
+               mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3);
+               phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
+               phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
+               phy_reserved |= PHY_REALTEK_INIT8;
+               mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved);
+               mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1);
+
+               /* restart auto negotiation */
+               mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+               mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
+               mii_rw(dev, np->phyaddr, MII_BMCR, mii_control);
+       }
+}
+
 static void __devexit nv_remove(struct pci_dev *pci_dev)
 {
        struct net_device *dev = pci_get_drvdata(pci_dev);
@@ -5493,6 +5768,9 @@ static void __devexit nv_remove(struct pci_dev *pci_dev)
        writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV,
               base + NvRegTransmitPoll);
 
+       /* restore any phy related changes */
+       nv_restore_phy(dev);
+
        /* free all structures */
        free_rings(dev);
        iounmap(get_hwbase(dev));
@@ -5632,83 +5910,83 @@ static struct pci_device_id pci_tbl[] = {
        },
        {       /* MCP65 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_NEED_TX_LIMIT,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
        },
        {       /* MCP65 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
        },
        {       /* MCP65 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
        },
        {       /* MCP65 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
        },
        {       /* MCP67 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
        },
        {       /* MCP67 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
        },
        {       /* MCP67 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
        },
        {       /* MCP67 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
        },
        {       /* MCP73 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
        },
        {       /* MCP73 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
        },
        {       /* MCP73 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
        },
        {       /* MCP73 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
        },
        {       /* MCP77 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
        },
        {       /* MCP77 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
        },
        {       /* MCP77 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
        },
        {       /* MCP77 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
        },
        {       /* MCP79 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
        },
        {       /* MCP79 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
        },
        {       /* MCP79 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
        },
        {       /* MCP79 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
        },
        {0,},
 };
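
Every entry above carries the full driver_data flag string, and entries for one chipset family differ only in the last few bits. A hedged, illustrative alternative (this is not how forcedeth is written) would group a family's common flags in a macro, here shown for the MCP79 entries:

/* Illustrative only: shared flag set for the MCP79 family as listed above. */
#define DEV_MCP79_FLAGS (DEV_NEED_TIMERIRQ | DEV_NEED_LINKTIMER | \
                         DEV_HAS_CHECKSUM | DEV_HAS_HIGH_DMA | DEV_HAS_MSI | \
                         DEV_HAS_POWER_CNTRL | DEV_HAS_PAUSEFRAME_TX_V3 | \
                         DEV_HAS_STATISTICS_V2 | DEV_HAS_TEST_EXTENDED | \
                         DEV_HAS_MGMT_UNIT | DEV_HAS_CORRECT_MACADDR | \
                         DEV_HAS_COLLISION_FIX | DEV_NEED_TX_LIMIT | \
                         DEV_HAS_GEAR_MODE)

        {       /* MCP79 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
                .driver_data = DEV_MCP79_FLAGS,
        },
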
@@ -5744,6 +6022,8 @@ module_param(msix, int, 0);
 MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
 module_param(dma_64bit, int, 0);
 MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
+module_param(phy_cross, int, 0);
+MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
 
 MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
 MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
index c8c3df737d73582ebe15484c898ff0f2a9ac5666..99a4b990939f7720780e02da49d822dbdf2b9abd 100644 (file)
@@ -98,7 +98,6 @@
 #include "gianfar_mii.h"
 
 #define TX_TIMEOUT      (1*HZ)
-#define SKB_ALLOC_TIMEOUT 1000000
 #undef BRIEF_GFAR_ERRORS
 #undef VERBOSE_GFAR_ERRORS
 
@@ -115,7 +114,9 @@ static int gfar_enet_open(struct net_device *dev);
 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
 static void gfar_timeout(struct net_device *dev);
 static int gfar_close(struct net_device *dev);
-struct sk_buff *gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp);
+struct sk_buff *gfar_new_skb(struct net_device *dev);
+static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
+               struct sk_buff *skb);
 static int gfar_set_mac_address(struct net_device *dev);
 static int gfar_change_mtu(struct net_device *dev, int new_mtu);
 static irqreturn_t gfar_error(int irq, void *dev_id);
@@ -783,14 +784,21 @@ int startup_gfar(struct net_device *dev)
 
        rxbdp = priv->rx_bd_base;
        for (i = 0; i < priv->rx_ring_size; i++) {
-               struct sk_buff *skb = NULL;
+               struct sk_buff *skb;
 
-               rxbdp->status = 0;
+               skb = gfar_new_skb(dev);
 
-               skb = gfar_new_skb(dev, rxbdp);
+               if (!skb) {
+                       printk(KERN_ERR "%s: Can't allocate RX buffers\n",
+                                       dev->name);
+
+                       goto err_rxalloc_fail;
+               }
 
                priv->rx_skbuff[i] = skb;
 
+               gfar_new_rxbdp(dev, rxbdp, skb);
+
                rxbdp++;
        }
 
@@ -916,6 +924,7 @@ rx_irq_fail:
 tx_irq_fail:
        free_irq(priv->interruptError, dev);
 err_irq_fail:
+err_rxalloc_fail:      
 rx_skb_fail:
        free_skb_resources(priv);
 tx_skb_fail:
@@ -1328,18 +1337,37 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
+static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
+               struct sk_buff *skb)
+{
+       struct gfar_private *priv = netdev_priv(dev);
+       u32 * status_len = (u32 *)bdp;
+       u16 flags;
+
+       bdp->bufPtr = dma_map_single(&dev->dev, skb->data,
+                       priv->rx_buffer_size, DMA_FROM_DEVICE);
+
+       flags = RXBD_EMPTY | RXBD_INTERRUPT;
+
+       if (bdp == priv->rx_bd_base + priv->rx_ring_size - 1)
+               flags |= RXBD_WRAP;
+
+       eieio();
+
+       *status_len = (u32)flags << 16;
+}
+
+
+struct sk_buff * gfar_new_skb(struct net_device *dev)
 {
        unsigned int alignamount;
        struct gfar_private *priv = netdev_priv(dev);
        struct sk_buff *skb = NULL;
-       unsigned int timeout = SKB_ALLOC_TIMEOUT;
 
        /* We have to allocate the skb, so keep trying till we succeed */
-       while ((!skb) && timeout--)
-               skb = dev_alloc_skb(priv->rx_buffer_size + RXBUF_ALIGNMENT);
+       skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
 
-       if (NULL == skb)
+       if (!skb)
                return NULL;
 
        alignamount = RXBUF_ALIGNMENT -
@@ -1350,15 +1378,6 @@ struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
         */
        skb_reserve(skb, alignamount);
 
-       bdp->bufPtr = dma_map_single(&dev->dev, skb->data,
-                       priv->rx_buffer_size, DMA_FROM_DEVICE);
-
-       bdp->length = 0;
-
-       /* Mark the buffer empty */
-       eieio();
-       bdp->status |= (RXBD_EMPTY | RXBD_INTERRUPT);
-
        return skb;
 }
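
gfar_new_skb() still over-allocates by RXBUF_ALIGNMENT and reserves just enough headroom for skb->data to land on an aligned boundary; only the descriptor setup moved into gfar_new_rxbdp(). The alignment arithmetic in isolation (user-space illustration, the RXBUF_ALIGNMENT value is an example):

#include <stdio.h>
#include <stdint.h>

#define RXBUF_ALIGNMENT 64      /* example value; the real one is in gianfar.h */

int main(void)
{
        uintptr_t data = 0x1004;        /* pretend skb->data address */
        unsigned int alignamount =
                RXBUF_ALIGNMENT - (data & (RXBUF_ALIGNMENT - 1));

        /* after reserving 'alignamount' bytes, the pointer is aligned */
        printf("reserve %u bytes -> 0x%lx\n", alignamount,
               (unsigned long)(data + alignamount));
        return 0;
}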
 
@@ -1544,10 +1563,31 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
        bdp = priv->cur_rx;
 
        while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
+               struct sk_buff *newskb;
                rmb();
+
+               /* Add another skb for the future */
+               newskb = gfar_new_skb(dev);
+
                skb = priv->rx_skbuff[priv->skb_currx];
 
-               if ((bdp->status & RXBD_LAST) && !(bdp->status & RXBD_ERR)) {
+               /* We drop the frame if we failed to allocate a new buffer */
+               if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
+                                bdp->status & RXBD_ERR)) {
+                       count_errors(bdp->status, dev);
+
+                       if (unlikely(!newskb))
+                               newskb = skb;
+
+                       if (skb) {
+                               dma_unmap_single(&priv->dev->dev,
+                                               bdp->bufPtr,
+                                               priv->rx_buffer_size,
+                                               DMA_FROM_DEVICE);
+
+                               dev_kfree_skb_any(skb);
+                       }
+               } else {
                        /* Increment the number of packets */
                        dev->stats.rx_packets++;
                        howmany++;
@@ -1558,23 +1598,14 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
                        gfar_process_frame(dev, skb, pkt_len);
 
                        dev->stats.rx_bytes += pkt_len;
-               } else {
-                       count_errors(bdp->status, dev);
-
-                       if (skb)
-                               dev_kfree_skb_any(skb);
-
-                       priv->rx_skbuff[priv->skb_currx] = NULL;
                }
 
                dev->last_rx = jiffies;
 
-               /* Clear the status flags for this buffer */
-               bdp->status &= ~RXBD_STATS;
+               priv->rx_skbuff[priv->skb_currx] = newskb;
 
-               /* Add another skb for the future */
-               skb = gfar_new_skb(dev, bdp);
-               priv->rx_skbuff[priv->skb_currx] = skb;
+               /* Setup the new bdp */
+               gfar_new_rxbdp(dev, bdp, newskb);
 
                /* Update to the next pointer */
                if (bdp->status & RXBD_WRAP)
@@ -1584,9 +1615,8 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
 
                /* update to point at the next skb */
                priv->skb_currx =
-                   (priv->skb_currx +
-                    1) & RX_RING_MOD_MASK(priv->rx_ring_size);
-
+                   (priv->skb_currx + 1) &
+                   RX_RING_MOD_MASK(priv->rx_ring_size);
        }
 
        /* Update the current rxbd pointer to be the next one */
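
The reworked receive loop allocates the replacement buffer before the received one is consumed; if allocation fails, the old buffer is put back in the ring and the frame is dropped, so the ring can never run out of buffers. A condensed outline of that pattern (DMA unmapping and the RXBD_LAST test are omitted here, and the old buffer is only freed when it is not being recycled):

/* Condensed outline of the loop above, not a drop-in replacement. */
newskb = gfar_new_skb(dev);

if (unlikely(!newskb || (bdp->status & RXBD_ERR))) {
        count_errors(bdp->status, dev);
        if (!newskb)
                newskb = skb;                   /* recycle, drop the frame */
        else if (skb)
                dev_kfree_skb_any(skb);         /* bad frame, discard it   */
} else {
        gfar_process_frame(dev, skb, pkt_len);  /* good frame goes up      */
}

priv->rx_skbuff[priv->skb_currx] = newskb;
gfar_new_rxbdp(dev, bdp, newskb);               /* slot is always refilled */
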
@@ -2001,12 +2031,16 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:fsl-gianfar");
+
 /* Structure for a device driver */
 static struct platform_driver gfar_driver = {
        .probe = gfar_probe,
        .remove = gfar_remove,
        .driver = {
                .name = "fsl-gianfar",
+               .owner = THIS_MODULE,
        },
 };
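
Several drivers in this merge (gianfar here, and ali-ircc, pxa2xx-ir, sa11x0-ir, jazzsonic and macb further down) gain a .driver.owner field and a MODULE_ALIAS("platform:<name>") line so udev can autoload the module from the platform device's MODALIAS uevent. A minimal sketch with placeholder names:

#include <linux/module.h>
#include <linux/platform_device.h>

/* foo_probe/foo_remove and the "foo" name are placeholders. */
static struct platform_driver foo_driver = {
        .probe  = foo_probe,
        .remove = foo_remove,
        .driver = {
                .name   = "foo",
                .owner  = THIS_MODULE,  /* ties the driver to this module */
        },
};

/* lets "MODALIAS=platform:foo" uevents load this module automatically */
MODULE_ALIAS("platform:foo");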
 
index 378a239634955163fdf6e991725179746ea651a1..5d2108c5ac7c872d32409de44edc22f07bf5620e 100644 (file)
@@ -43,6 +43,8 @@
 #include <asm/io.h>
 #include <asm/dma.h>
 #include <asm/uaccess.h>
+#include <asm/dcr.h>
+#include <asm/dcr-regs.h>
 
 #include "core.h"
 
@@ -127,10 +129,35 @@ static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
 static inline void emac_report_timeout_error(struct emac_instance *dev,
                                             const char *error)
 {
-       if (net_ratelimit())
+       if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
+                                 EMAC_FTR_440EP_PHY_CLK_FIX))
+               DBG(dev, "%s" NL, error);
+       else if (net_ratelimit())
                printk(KERN_ERR "%s: %s\n", dev->ndev->name, error);
 }
 
+/* EMAC PHY clock workaround:
+ * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX,
+ * which allows controlling each EMAC clock
+ */
+static inline void emac_rx_clk_tx(struct emac_instance *dev)
+{
+#ifdef CONFIG_PPC_DCR_NATIVE
+       if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
+               dcri_clrset(SDR0, SDR0_MFR,
+                           0, SDR0_MFR_ECS >> dev->cell_index);
+#endif
+}
+
+static inline void emac_rx_clk_default(struct emac_instance *dev)
+{
+#ifdef CONFIG_PPC_DCR_NATIVE
+       if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
+               dcri_clrset(SDR0, SDR0_MFR,
+                           SDR0_MFR_ECS >> dev->cell_index, 0);
+#endif
+}
+
 /* PHY polling intervals */
 #define PHY_POLL_LINK_ON       HZ
 #define PHY_POLL_LINK_OFF      (HZ / 5)
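
Both helpers rely on dcri_clrset(), which appears to do an indirect DCR read-modify-write: clear the bits given in the third argument, then set those in the fourth. The per-EMAC clock-select bit is SDR0_MFR_ECS shifted by the EMAC's cell index. A user-space model of that step (the SDR0_MFR_ECS value and the meaning of the bit are assumptions taken from the helper names above):

#include <stdio.h>

#define SDR0_MFR_ECS 0x10000000u        /* example bit position, assumption */

/* model of a clear-then-set read-modify-write, as dcri_clrset() is used above */
static unsigned int clrset(unsigned int reg, unsigned int clr, unsigned int set)
{
        return (reg & ~clr) | set;
}

int main(void)
{
        unsigned int sdr0_mfr = 0;
        unsigned int cell_index = 1;    /* second EMAC on the chip */

        /* emac_rx_clk_tx(): switch the RX clock source while the link is down */
        sdr0_mfr = clrset(sdr0_mfr, 0, SDR0_MFR_ECS >> cell_index);
        printf("after clk_tx:      0x%08x\n", sdr0_mfr);

        /* emac_rx_clk_default(): back to the normal clock once the link is up */
        sdr0_mfr = clrset(sdr0_mfr, SDR0_MFR_ECS >> cell_index, 0);
        printf("after clk_default: 0x%08x\n", sdr0_mfr);
        return 0;
}
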
@@ -524,7 +551,10 @@ static int emac_configure(struct emac_instance *dev)
                rx_size = dev->rx_fifo_size_gige;
 
                if (dev->ndev->mtu > ETH_DATA_LEN) {
-                       mr1 |= EMAC_MR1_JPSM;
+                       if (emac_has_feature(dev, EMAC_FTR_EMAC4))
+                               mr1 |= EMAC4_MR1_JPSM;
+                       else
+                               mr1 |= EMAC_MR1_JPSM;
                        dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
                } else
                        dev->stop_timeout = STOP_TIMEOUT_1000;
@@ -708,7 +738,7 @@ static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
                rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
 
        /* Wait for management interface to become idle */
-       n = 10;
+       n = 20;
        while (!emac_phy_done(dev, in_be32(&p->stacr))) {
                udelay(1);
                if (!--n) {
@@ -733,7 +763,7 @@ static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
        out_be32(&p->stacr, r);
 
        /* Wait for read to complete */
-       n = 100;
+       n = 200;
        while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
                udelay(1);
                if (!--n) {
@@ -780,7 +810,7 @@ static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
                rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
 
        /* Wait for management interface to be idle */
-       n = 10;
+       n = 20;
        while (!emac_phy_done(dev, in_be32(&p->stacr))) {
                udelay(1);
                if (!--n) {
@@ -806,7 +836,7 @@ static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
        out_be32(&p->stacr, r);
 
        /* Wait for write to complete */
-       n = 100;
+       n = 200;
        while (!emac_phy_done(dev, in_be32(&p->stacr))) {
                udelay(1);
                if (!--n) {
@@ -1094,9 +1124,11 @@ static int emac_open(struct net_device *ndev)
                int link_poll_interval;
                if (dev->phy.def->ops->poll_link(&dev->phy)) {
                        dev->phy.def->ops->read_link(&dev->phy);
+                       emac_rx_clk_default(dev);
                        netif_carrier_on(dev->ndev);
                        link_poll_interval = PHY_POLL_LINK_ON;
                } else {
+                       emac_rx_clk_tx(dev);
                        netif_carrier_off(dev->ndev);
                        link_poll_interval = PHY_POLL_LINK_OFF;
                }
@@ -1174,6 +1206,7 @@ static void emac_link_timer(struct work_struct *work)
 
        if (dev->phy.def->ops->poll_link(&dev->phy)) {
                if (!netif_carrier_ok(dev->ndev)) {
+                       emac_rx_clk_default(dev);
                        /* Get new link parameters */
                        dev->phy.def->ops->read_link(&dev->phy);
 
@@ -1186,6 +1219,7 @@ static void emac_link_timer(struct work_struct *work)
                link_poll_interval = PHY_POLL_LINK_ON;
        } else {
                if (netif_carrier_ok(dev->ndev)) {
+                       emac_rx_clk_tx(dev);
                        netif_carrier_off(dev->ndev);
                        netif_tx_disable(dev->ndev);
                        emac_reinitialize(dev);
@@ -2237,7 +2271,7 @@ static int __devinit emac_of_bus_notify(struct notifier_block *nb,
        return 0;
 }
 
-static struct notifier_block emac_of_bus_notifier = {
+static struct notifier_block emac_of_bus_notifier __devinitdata = {
        .notifier_call = emac_of_bus_notify
 };
 
@@ -2330,6 +2364,19 @@ static int __devinit emac_init_phy(struct emac_instance *dev)
        dev->phy.mdio_read = emac_mdio_read;
        dev->phy.mdio_write = emac_mdio_write;
 
+       /* Enable internal clock source */
+#ifdef CONFIG_PPC_DCR_NATIVE
+       if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
+               dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
+#endif
+       /* PHY clock workaround */
+       emac_rx_clk_tx(dev);
+
+       /* Enable internal clock source on 440GX*/
+#ifdef CONFIG_PPC_DCR_NATIVE
+       if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
+               dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
+#endif
        /* Configure EMAC with defaults so we can at least use MDIO
         * This is needed mostly for 440GX
         */
@@ -2362,6 +2409,12 @@ static int __devinit emac_init_phy(struct emac_instance *dev)
                        if (!emac_mii_phy_probe(&dev->phy, i))
                                break;
                }
+
+       /* Enable external clock source */
+#ifdef CONFIG_PPC_DCR_NATIVE
+       if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
+               dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
+#endif
        mutex_unlock(&emac_phy_map_lock);
        if (i == 0x20) {
                printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
@@ -2487,8 +2540,15 @@ static int __devinit emac_init_config(struct emac_instance *dev)
        }
 
        /* Check EMAC version */
-       if (of_device_is_compatible(np, "ibm,emac4"))
+       if (of_device_is_compatible(np, "ibm,emac4")) {
                dev->features |= EMAC_FTR_EMAC4;
+               if (of_device_is_compatible(np, "ibm,emac-440gx"))
+                       dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
+       } else {
+               if (of_device_is_compatible(np, "ibm,emac-440ep") ||
+                   of_device_is_compatible(np, "ibm,emac-440gr"))
+                       dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
+       }
 
        /* Fixup some feature bits based on the device tree */
        if (of_get_property(np, "has-inverted-stacr-oc", NULL))
@@ -2559,8 +2619,11 @@ static int __devinit emac_probe(struct of_device *ofdev,
        struct device_node **blist = NULL;
        int err, i;
 
-       /* Skip unused/unwired EMACS */
-       if (of_get_property(np, "unused", NULL))
+       /* Skip unused/unwired EMACS.  We leave the check for an unused
+        * property here for now, but new flat device trees should set a
+        * status property to "disabled" instead.
+        */
+       if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
                return -ENODEV;
 
        /* Find ourselves in the bootlist if we are there */
index 4e74d8287c65b91d25a6a95ff36de8a0d2d16a6b..1683db9870a467fdba8d591bb086c447791598d5 100644 (file)
@@ -301,6 +301,14 @@ struct emac_instance {
  * Set if we have new type STACR with STAOPC
  */
 #define EMAC_FTR_HAS_NEW_STACR         0x00000040
+/*
+ * Set if we need phy clock workaround for 440gx
+ */
+#define EMAC_FTR_440GX_PHY_CLK_FIX     0x00000080
+/*
+ * Set if we need phy clock workaround for 440ep or 440gr
+ */
+#define EMAC_FTR_440EP_PHY_CLK_FIX     0x00000100
 
 
 /* Right now, we don't quite handle the always/possible masks on the
@@ -312,8 +320,8 @@ enum {
 
        EMAC_FTRS_POSSIBLE      =
 #ifdef CONFIG_IBM_NEW_EMAC_EMAC4
-           EMAC_FTR_EMAC4      | EMAC_FTR_HAS_NEW_STACR        |
-           EMAC_FTR_STACR_OC_INVERT    |
+           EMAC_FTR_EMAC4 | EMAC_FTR_HAS_NEW_STACR |
+           EMAC_FTR_STACR_OC_INVERT | EMAC_FTR_440GX_PHY_CLK_FIX |
 #endif
 #ifdef CONFIG_IBM_NEW_EMAC_TAH
            EMAC_FTR_HAS_TAH    |
@@ -324,7 +332,7 @@ enum {
 #ifdef CONFIG_IBM_NEW_EMAC_RGMII
            EMAC_FTR_HAS_RGMII  |
 #endif
-           0,
+       EMAC_FTR_440EP_PHY_CLK_FIX,
 };
 
 static inline int emac_has_feature(struct emac_instance *dev,
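
EMAC_FTRS_POSSIBLE is assembled at compile time, so a feature bit can only test true when its subsystem is configured in and the compiler can drop dead branches. The emac_has_feature() body is outside this hunk; assuming an EMAC_FTRS_ALWAYS mask exists alongside the possible mask, as the comment above hints, it plausibly looks like:

/* Assumed shape of the test; the actual body is not visible in this diff. */
static inline int emac_has_feature(struct emac_instance *dev,
                                   unsigned long feature)
{
        return (EMAC_FTRS_ALWAYS & feature) ||
               (EMAC_FTRS_POSSIBLE & dev->features & feature);
}
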
index 6869f08c9dcba0188b920524e68c747d1445fb9f..10c267b2b9610b1e446a4fe32e4deba9c5700976 100644 (file)
@@ -61,8 +61,8 @@ int __devinit mal_register_commac(struct mal_instance *mal,
        return 0;
 }
 
-void __devexit mal_unregister_commac(struct mal_instance       *mal,
-                                    struct mal_commac          *commac)
+void mal_unregister_commac(struct mal_instance *mal,
+               struct mal_commac *commac)
 {
        unsigned long flags;
 
@@ -136,6 +136,14 @@ void mal_enable_rx_channel(struct mal_instance *mal, int channel)
 {
        unsigned long flags;
 
+       /*
+        * On some 4xx PPC's (e.g. 460EX/GT), the rx channel is a multiple
+        * of 8, but enabling in MAL_RXCASR needs the divided by 8 value
+        * for the bitmask
+        */
+       if (!(channel % 8))
+               channel >>= 3;
+
        spin_lock_irqsave(&mal->lock, flags);
 
        MAL_DBG(mal, "enable_rx(%d)" NL, channel);
@@ -148,6 +156,14 @@ void mal_enable_rx_channel(struct mal_instance *mal, int channel)
 
 void mal_disable_rx_channel(struct mal_instance *mal, int channel)
 {
+       /*
+        * On some 4xx PPC's (e.g. 460EX/GT), the rx channel is a multiple
+        * of 8, but disabling in MAL_RXCARR needs the divided by 8 value
+        * for the bitmask
+        */
+       if (!(channel % 8))
+               channel >>= 3;
+
        set_mal_dcrn(mal, MAL_RXCARR, MAL_CHAN_MASK(channel));
 
        MAL_DBG(mal, "disable_rx(%d)" NL, channel);
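
Each MAL channel owns one bit in the RX channel active-set/reset registers; on 460EX/GT the RX channels are numbered in multiples of 8, so the new check divides by 8 to recover the bit index before the mask is built. A small illustration (the MAL_CHAN_MASK() definition shown is an assumption, the real macro is outside this hunk):

#include <stdio.h>

/* Assumption: one bit per channel, numbered from the MSB as on MAL */
#define MAL_CHAN_MASK(n)        (0x80000000u >> (n))

int main(void)
{
        int channel = 16;               /* e.g. an RX channel on 460EX/GT */

        if (!(channel % 8))             /* multiples of 8 -> divide by 8 */
                channel >>= 3;

        printf("channel %d -> mask 0x%08x\n", channel, MAL_CHAN_MASK(channel));
        return 0;
}
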
index 5757788227be8d887537b21fbaf97dd7853f55c0..e32da3de2695de79564bfcfa5707d48b507c0ebd 100644 (file)
@@ -179,7 +179,7 @@ void rgmii_put_mdio(struct of_device *ofdev, int input)
        mutex_unlock(&dev->lock);
 }
 
-void __devexit rgmii_detach(struct of_device *ofdev, int input)
+void rgmii_detach(struct of_device *ofdev, int input)
 {
        struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
        struct rgmii_regs __iomem *p = dev->base;
index b023d10d7e1c2d14ecac3f61e3af4d6501392ff1..30173a9fb557d930f543d0c237f3fff0a5cb777b 100644 (file)
@@ -35,7 +35,7 @@ int __devinit tah_attach(struct of_device *ofdev, int channel)
        return 0;
 }
 
-void __devexit tah_detach(struct of_device *ofdev, int channel)
+void tah_detach(struct of_device *ofdev, int channel)
 {
        struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
 
index 2ea472aeab06ac8c03deb714853024b8cce265f1..17b154124943e5baf89e6640f26643ade28be72c 100644 (file)
@@ -189,7 +189,7 @@ void zmii_set_speed(struct of_device *ofdev, int input, int speed)
        mutex_unlock(&dev->lock);
 }
 
-void __devexit zmii_detach(struct of_device *ofdev, int input)
+void zmii_detach(struct of_device *ofdev, int input)
 {
        struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
 
index aaee02e9e3f03c4bc0da116942964996fe061fda..ae398f04c7b44d8ef8865ed4f902dba1d12a0172 100644 (file)
@@ -871,6 +871,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
                goto err_pci_reg;
 
        pci_set_master(pdev);
+       pci_save_state(pdev);
 
        err = -ENOMEM;
        netdev = alloc_etherdev(sizeof(struct igb_adapter));
@@ -4079,6 +4080,7 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
                return PCI_ERS_RESULT_DISCONNECT;
        }
        pci_set_master(pdev);
+       pci_restore_state(pdev);
 
        pci_enable_wake(pdev, PCI_D3hot, 0);
        pci_enable_wake(pdev, PCI_D3cold, 0);
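
igb here and ixgbe below pair pci_save_state() in probe with pci_restore_state() in the io_slot_reset() error handler: after a PCIe slot reset the configuration space (BARs, MSI setup, etc.) is back at defaults, so the snapshot taken at probe time has to be written back before the device can be used. The pairing in isolation (the foo_* names are placeholders, not the real handlers):

#include <linux/pci.h>

static int foo_probe_cfg(struct pci_dev *pdev)
{
        int err = pci_enable_device(pdev);
        if (err)
                return err;

        pci_set_master(pdev);
        pci_save_state(pdev);           /* snapshot config space once it's sane */
        return 0;
}

static pci_ers_result_t foo_io_slot_reset(struct pci_dev *pdev)
{
        if (pci_enable_device(pdev))
                return PCI_ERS_RESULT_DISCONNECT;

        pci_set_master(pdev);
        pci_restore_state(pdev);        /* reprogram BARs/MSI after the reset */

        return PCI_ERS_RESULT_RECOVERED;
}
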
index 9f584521304a8a84ce5e8230f43df2fa72def8b3..083b0dd70fef1f1431e284b0aa7aa864047711b8 100644 (file)
@@ -60,6 +60,7 @@ static struct platform_driver ali_ircc_driver = {
        .resume         = ali_ircc_resume,
        .driver         = {
                .name   = ALI_IRCC_DRIVER_NAME,
+               .owner  = THIS_MODULE,
        },
 };
 
@@ -2256,6 +2257,7 @@ static void FIR2SIR(int iobase)
 MODULE_AUTHOR("Benjamin Kong <benjamin_kong@ali.com.tw>");
 MODULE_DESCRIPTION("ALi FIR Controller Driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" ALI_IRCC_DRIVER_NAME);
 
 
 module_param_array(io, int, NULL, 0);
index 8db71ab20456b38d14ce1f8e5cd4e1e8e4c87ecf..d5c2d27f3ea4d12c8b29a7aac0f1426303f8dc42 100644 (file)
@@ -908,6 +908,7 @@ static int pxa_irda_remove(struct platform_device *_dev)
 static struct platform_driver pxa_ir_driver = {
        .driver         = {
                .name   = "pxa2xx-ir",
+               .owner  = THIS_MODULE,
        },
        .probe          = pxa_irda_probe,
        .remove         = pxa_irda_remove,
@@ -929,3 +930,4 @@ module_init(pxa_irda_init);
 module_exit(pxa_irda_exit);
 
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:pxa2xx-ir");
index 056639f72becf049657601585f82e83c0c20a6a1..1bc8518f9197d9a00293766bc953990341b5b26f 100644 (file)
@@ -1008,6 +1008,7 @@ static struct platform_driver sa1100ir_driver = {
        .resume         = sa1100_irda_resume,
        .driver         = {
                .name   = "sa11x0-ir",
+               .owner  = THIS_MODULE,
        },
 };
 
@@ -1041,3 +1042,4 @@ MODULE_LICENSE("GPL");
 MODULE_PARM_DESC(power_level, "IrDA power level, 1 (low) to 3 (high)");
 MODULE_PARM_DESC(tx_lpm, "Enable transmitter low power (1.6us) mode");
 MODULE_PARM_DESC(max_rate, "Maximum baud rate (4000000, 115200, 57600, 38400, 19200, 9600)");
+MODULE_ALIAS("platform:sa11x0-ir");
index cb371a8c24a72244092b4353757a1e886dd4d91a..7b859220c255b3b81017f156548df2dcca2f32e7 100644 (file)
@@ -3431,6 +3431,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        }
 
        pci_set_master(pdev);
+       pci_save_state(pdev);
 
 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
        netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), MAX_TX_QUEUES);
@@ -3721,6 +3722,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
                return PCI_ERS_RESULT_DISCONNECT;
        }
        pci_set_master(pdev);
+       pci_restore_state(pdev);
 
        pci_enable_wake(pdev, PCI_D3hot, 0);
        pci_enable_wake(pdev, PCI_D3cold, 0);
index 5c154fe1385956d2797a8946000397b4558631a0..07944820f74582b5230c4a362acac0b81eccbec6 100644 (file)
@@ -249,6 +249,7 @@ out:
 MODULE_DESCRIPTION("Jazz SONIC ethernet driver");
 module_param(sonic_debug, int, 0);
 MODULE_PARM_DESC(sonic_debug, "jazzsonic debug level (1-4)");
+MODULE_ALIAS("platform:jazzsonic");
 
 #include "sonic.c"
 
@@ -271,6 +272,7 @@ static struct platform_driver jazz_sonic_driver = {
        .remove = __devexit_p(jazz_sonic_device_remove),
        .driver = {
                .name   = jazz_sonic_string,
+               .owner  = THIS_MODULE,
        },
 };
 
index 1d24a73a0e1a1d268c1d319bcd3625a63d5d47da..e18576316bda93050cdd5849da80ef67cd85333a 100644 (file)
@@ -883,7 +883,7 @@ static int korina_init(struct net_device *dev)
 static int korina_restart(struct net_device *dev)
 {
        struct korina_private *lp = netdev_priv(dev);
-       int ret = 0;
+       int ret;
 
        /*
         * Disable interrupts
@@ -987,7 +987,7 @@ static void korina_poll_controller(struct net_device *dev)
 static int korina_open(struct net_device *dev)
 {
        struct korina_private *lp = netdev_priv(dev);
-       int ret = 0;
+       int ret;
 
        /* Initialize */
        ret = korina_init(dev);
@@ -1031,6 +1031,8 @@ static int korina_open(struct net_device *dev)
                    dev->name, lp->und_irq);
                goto err_free_ovr_irq;
        }
+out:
+       return ret;
 
 err_free_ovr_irq:
        free_irq(lp->ovr_irq, dev);
@@ -1041,8 +1043,6 @@ err_free_rx_irq:
 err_release:
        korina_free_ring(dev);
        goto out;
-out:
-       return ret;
 }
 
 static int korina_close(struct net_device *dev)
@@ -1082,7 +1082,7 @@ static int korina_probe(struct platform_device *pdev)
        struct korina_private *lp;
        struct net_device *dev;
        struct resource *r;
-       int retval, err;
+       int rc;
 
        dev = alloc_etherdev(sizeof(struct korina_private));
        if (!dev) {
@@ -1106,7 +1106,7 @@ static int korina_probe(struct platform_device *pdev)
        lp->eth_regs = ioremap_nocache(r->start, r->end - r->start);
        if (!lp->eth_regs) {
                printk(KERN_ERR DRV_NAME "cannot remap registers\n");
-               retval = -ENXIO;
+               rc = -ENXIO;
                goto probe_err_out;
        }
 
@@ -1114,7 +1114,7 @@ static int korina_probe(struct platform_device *pdev)
        lp->rx_dma_regs = ioremap_nocache(r->start, r->end - r->start);
        if (!lp->rx_dma_regs) {
                printk(KERN_ERR DRV_NAME "cannot remap Rx DMA registers\n");
-               retval = -ENXIO;
+               rc = -ENXIO;
                goto probe_err_dma_rx;
        }
 
@@ -1122,14 +1122,14 @@ static int korina_probe(struct platform_device *pdev)
        lp->tx_dma_regs = ioremap_nocache(r->start, r->end - r->start);
        if (!lp->tx_dma_regs) {
                printk(KERN_ERR DRV_NAME "cannot remap Tx DMA registers\n");
-               retval = -ENXIO;
+               rc = -ENXIO;
                goto probe_err_dma_tx;
        }
 
        lp->td_ring = kmalloc(TD_RING_SIZE + RD_RING_SIZE, GFP_KERNEL);
        if (!lp->td_ring) {
                printk(KERN_ERR DRV_NAME "cannot allocate descriptors\n");
-               retval = -ENOMEM;
+               rc = -ENOMEM;
                goto probe_err_td_ring;
        }
 
@@ -1166,14 +1166,14 @@ static int korina_probe(struct platform_device *pdev)
        lp->mii_if.phy_id_mask = 0x1f;
        lp->mii_if.reg_num_mask = 0x1f;
 
-       err = register_netdev(dev);
-       if (err) {
+       rc = register_netdev(dev);
+       if (rc < 0) {
                printk(KERN_ERR DRV_NAME
-                       ": cannot register net device %d\n", err);
-               retval = -EINVAL;
+                       ": cannot register net device %d\n", rc);
                goto probe_err_register;
        }
-       return 0;
+out:
+       return rc;
 
 probe_err_register:
        kfree(lp->td_ring);
@@ -1185,7 +1185,7 @@ probe_err_dma_rx:
        iounmap(lp->eth_regs);
 probe_err_out:
        free_netdev(dev);
-       return retval;
+       goto out;
 }
 
 static int korina_remove(struct platform_device *pdev)
@@ -1193,12 +1193,9 @@ static int korina_remove(struct platform_device *pdev)
        struct korina_device *bif = platform_get_drvdata(pdev);
        struct korina_private *lp = netdev_priv(bif->dev);
 
-       if (lp->eth_regs)
-               iounmap(lp->eth_regs);
-       if (lp->rx_dma_regs)
-               iounmap(lp->rx_dma_regs);
-       if (lp->tx_dma_regs)
-               iounmap(lp->tx_dma_regs);
+       iounmap(lp->eth_regs);
+       iounmap(lp->rx_dma_regs);
+       iounmap(lp->tx_dma_regs);
 
        platform_set_drvdata(pdev, NULL);
        unregister_netdev(bif->dev);
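The korina rework above keeps the conventional stack of unwind labels but routes both the success path and the unwound failure paths through a single "out: return rc;" exit. A standalone sketch of that shape, with hypothetical acquire/release helpers standing in for the driver's ioremaps and IRQ requests:

/* Hypothetical resources standing in for korina's ioremaps/IRQs. */
static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }
static void release_a(void) { }

static int example_setup(void)
{
	int rc;

	rc = acquire_a();
	if (rc < 0)
		goto out;

	rc = acquire_b();
	if (rc < 0)
		goto err_release_a;

	rc = 0;			/* success */
out:
	return rc;		/* single exit for success and failure */

err_release_a:
	release_a();
	goto out;
}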
index d513bb8a490210c3dbaac6bfcdcd2d69b77b09de..92dccd43bdcab39fbd0bcd374d3447a33961443c 100644 (file)
@@ -1281,6 +1281,7 @@ static struct platform_driver macb_driver = {
        .remove         = __exit_p(macb_remove),
        .driver         = {
                .name           = "macb",
+               .owner  = THIS_MODULE,
        },
 };
 
@@ -1300,3 +1301,4 @@ module_exit(macb_exit);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Atmel MACB Ethernet driver");
 MODULE_AUTHOR("Haavard Skinnemoen <hskinnemoen@atmel.com>");
+MODULE_ALIAS("platform:macb");
index cdaa8fc218091a947996280d27c63caa16f56f84..0b32648a213600fcecae9eda9496a63c996b1768 100644 (file)
@@ -830,6 +830,7 @@ static struct platform_driver meth_driver = {
        .remove = __devexit_p(meth_remove),
        .driver = {
                .name   = "meth",
+               .owner  = THIS_MODULE,
        }
 };
 
@@ -855,3 +856,4 @@ module_exit(meth_exit_module);
 MODULE_AUTHOR("Ilya Volynets <ilya@theIlya.com>");
 MODULE_DESCRIPTION("SGI O2 Builtin Fast Ethernet driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:meth");
index 601ffd69ebc8f5b260024c1197f2fdb754ed530b..381b36e5f64c2fe11f559d182b5abab91f0de388 100644 (file)
@@ -2030,6 +2030,7 @@ static struct platform_driver mv643xx_eth_driver = {
        .shutdown = mv643xx_eth_shutdown,
        .driver = {
                .name = MV643XX_ETH_NAME,
+               .owner  = THIS_MODULE,
        },
 };
 
@@ -2038,6 +2039,7 @@ static struct platform_driver mv643xx_eth_shared_driver = {
        .remove = mv643xx_eth_shared_remove,
        .driver = {
                .name = MV643XX_ETH_SHARED_NAME,
+               .owner  = THIS_MODULE,
        },
 };
 
@@ -2085,7 +2087,8 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR( "Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, Manish Lachwani"
                " and Dale Farnsworth");
 MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
-MODULE_ALIAS("platform:mv643xx_eth");
+MODULE_ALIAS("platform:" MV643XX_ETH_NAME);
+MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);
 
 /*
  * The second part is the low level driver of the gigE ethernet ports.
index 78d34af13a1ca8c59817a3c203b77e715ae6399d..dc442e370850bfb8d6fc73b7db960638ec00661c 100644 (file)
@@ -502,4 +502,4 @@ module_exit(netx_eth_cleanup);
 
 MODULE_AUTHOR("Sascha Hauer, Pengutronix");
 MODULE_LICENSE("GPL");
-
+MODULE_ALIAS("platform:" CARDNAME);
index 05748ca6f216831543574dda76d0778883365865..af7356468251ec4fc5aedb23a263a2cc7f70a1e5 100644 (file)
@@ -1132,8 +1132,8 @@ void netxen_nic_flash_print(struct netxen_adapter *adapter)
        u32 fw_minor = 0;
        u32 fw_build = 0;
        char brd_name[NETXEN_MAX_SHORT_NAME];
-       struct netxen_new_user_info user_info;
-       int i, addr = NETXEN_USER_START;
+       char serial_num[32];
+       int i, addr;
        __le32 *ptr32;
 
        struct netxen_board_info *board_info = &(adapter->ahw.boardcfg);
@@ -1150,10 +1150,10 @@ void netxen_nic_flash_print(struct netxen_adapter *adapter)
                valid = 0;
        }
        if (valid) {
-               ptr32 = (u32 *) & user_info;
-               for (i = 0;
-                    i < sizeof(struct netxen_new_user_info) / sizeof(u32);
-                    i++) {
+               ptr32 = (u32 *)&serial_num;
+               addr = NETXEN_USER_START +
+                      offsetof(struct netxen_new_user_info, serial_num);
+               for (i = 0; i < 8; i++) {
                        if (netxen_rom_fast_read(adapter, addr, ptr32) == -1) {
                                printk("%s: ERROR reading %s board userarea.\n",
                                       netxen_nic_driver_name,
@@ -1163,10 +1163,11 @@ void netxen_nic_flash_print(struct netxen_adapter *adapter)
                        ptr32++;
                        addr += sizeof(u32);
                }
+
                get_brd_name_by_type(board_info->board_type, brd_name);
 
                printk("NetXen %s Board S/N %s  Chip id 0x%x\n",
-                      brd_name, user_info.serial_num, board_info->chip_id);
+                      brd_name, serial_num, board_info->chip_id);
 
                printk("NetXen %s Board #%d, Chip id 0x%x\n",
                       board_info->board_type == 0x0b ? "XGB" : "GBE",
index 7565c2d7f30e9ce956d3e0a5c6009e1da9fb2038..4009c4ce96b4fd609425e8fe3b95d30f38d705a7 100644 (file)
@@ -33,8 +33,8 @@
 
 #define DRV_MODULE_NAME                "niu"
 #define PFX DRV_MODULE_NAME    ": "
-#define DRV_MODULE_VERSION     "0.7"
-#define DRV_MODULE_RELDATE     "February 18, 2008"
+#define DRV_MODULE_VERSION     "0.8"
+#define DRV_MODULE_RELDATE     "April 24, 2008"
 
 static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -673,11 +673,16 @@ static int serdes_init_10g(struct niu *np)
        }
 
        if ((sig & mask) != val) {
+               if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
+                       np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
+                       return 0;
+               }
                dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
                        "[%08x]\n", np->port, (int) (sig & mask), (int) val);
                return -ENODEV;
        }
-
+       if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
+               np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
        return 0;
 }
 
@@ -998,6 +1003,28 @@ static int bcm8704_user_dev3_readback(struct niu *np, int reg)
        return 0;
 }
 
+static int bcm8706_init_user_dev3(struct niu *np)
+{
+       int err;
+
+
+       err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
+                       BCM8704_USER_OPT_DIGITAL_CTRL);
+       if (err < 0)
+               return err;
+       err &= ~USER_ODIG_CTRL_GPIOS;
+       err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
+       err |=  USER_ODIG_CTRL_RESV2;
+       err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
+                        BCM8704_USER_OPT_DIGITAL_CTRL, err);
+       if (err)
+               return err;
+
+       mdelay(1000);
+
+       return 0;
+}
+
 static int bcm8704_init_user_dev3(struct niu *np)
 {
        int err;
@@ -1127,33 +1154,11 @@ static int xcvr_init_10g_mrvl88x2011(struct niu *np)
                          MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX);
 }
 
-static int xcvr_init_10g_bcm8704(struct niu *np)
+
+static int xcvr_diag_bcm870x(struct niu *np)
 {
-       struct niu_link_config *lp = &np->link_config;
        u16 analog_stat0, tx_alarm_status;
-       int err;
-
-       err = bcm8704_reset(np);
-       if (err)
-               return err;
-
-       err = bcm8704_init_user_dev3(np);
-       if (err)
-               return err;
-
-       err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
-                       MII_BMCR);
-       if (err < 0)
-               return err;
-       err &= ~BMCR_LOOPBACK;
-
-       if (lp->loopback_mode == LOOPBACK_MAC)
-               err |= BMCR_LOOPBACK;
-
-       err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
-                        MII_BMCR, err);
-       if (err)
-               return err;
+       int err = 0;
 
 #if 1
        err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
@@ -1211,6 +1216,89 @@ static int xcvr_init_10g_bcm8704(struct niu *np)
        return 0;
 }
 
+static int xcvr_10g_set_lb_bcm870x(struct niu *np)
+{
+       struct niu_link_config *lp = &np->link_config;
+       int err;
+
+       err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
+                       MII_BMCR);
+       if (err < 0)
+               return err;
+
+       err &= ~BMCR_LOOPBACK;
+
+       if (lp->loopback_mode == LOOPBACK_MAC)
+               err |= BMCR_LOOPBACK;
+
+       err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
+                        MII_BMCR, err);
+       if (err)
+               return err;
+
+       return 0;
+}
+
+static int xcvr_init_10g_bcm8706(struct niu *np)
+{
+       int err = 0;
+       u64 val;
+
+       if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) &&
+           (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0)
+               return err;
+
+       val = nr64_mac(XMAC_CONFIG);
+       val &= ~XMAC_CONFIG_LED_POLARITY;
+       val |= XMAC_CONFIG_FORCE_LED_ON;
+       nw64_mac(XMAC_CONFIG, val);
+
+       val = nr64(MIF_CONFIG);
+       val |= MIF_CONFIG_INDIRECT_MODE;
+       nw64(MIF_CONFIG, val);
+
+       err = bcm8704_reset(np);
+       if (err)
+               return err;
+
+       err = xcvr_10g_set_lb_bcm870x(np);
+       if (err)
+               return err;
+
+       err = bcm8706_init_user_dev3(np);
+       if (err)
+               return err;
+
+       err = xcvr_diag_bcm870x(np);
+       if (err)
+               return err;
+
+       return 0;
+}
+
+static int xcvr_init_10g_bcm8704(struct niu *np)
+{
+       int err;
+
+       err = bcm8704_reset(np);
+       if (err)
+               return err;
+
+       err = bcm8704_init_user_dev3(np);
+       if (err)
+               return err;
+
+       err = xcvr_10g_set_lb_bcm870x(np);
+       if (err)
+               return err;
+
+       err =  xcvr_diag_bcm870x(np);
+       if (err)
+               return err;
+
+       return 0;
+}
+
 static int xcvr_init_10g(struct niu *np)
 {
        int phy_id, err;
@@ -1548,6 +1636,59 @@ out:
        return err;
 }
 
+static int link_status_10g_bcm8706(struct niu *np, int *link_up_p)
+{
+       int err, link_up;
+       link_up = 0;
+
+       err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
+                       BCM8704_PMD_RCV_SIGDET);
+       if (err < 0)
+               goto out;
+       if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
+               err = 0;
+               goto out;
+       }
+
+       err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
+                       BCM8704_PCS_10G_R_STATUS);
+       if (err < 0)
+               goto out;
+
+       if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
+               err = 0;
+               goto out;
+       }
+
+       err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
+                       BCM8704_PHYXS_XGXS_LANE_STAT);
+       if (err < 0)
+               goto out;
+       if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
+                   PHYXS_XGXS_LANE_STAT_MAGIC |
+                   PHYXS_XGXS_LANE_STAT_PATTEST |
+                   PHYXS_XGXS_LANE_STAT_LANE3 |
+                   PHYXS_XGXS_LANE_STAT_LANE2 |
+                   PHYXS_XGXS_LANE_STAT_LANE1 |
+                   PHYXS_XGXS_LANE_STAT_LANE0)) {
+               err = 0;
+               np->link_config.active_speed = SPEED_INVALID;
+               np->link_config.active_duplex = DUPLEX_INVALID;
+               goto out;
+       }
+
+       link_up = 1;
+       np->link_config.active_speed = SPEED_10000;
+       np->link_config.active_duplex = DUPLEX_FULL;
+       err = 0;
+
+out:
+       *link_up_p = link_up;
+       if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
+               err = 0;
+       return err;
+}
+
 static int link_status_10g_bcom(struct niu *np, int *link_up_p)
 {
        int err, link_up;
@@ -1627,6 +1768,82 @@ static int link_status_10g(struct niu *np, int *link_up_p)
        return err;
 }
 
+static int niu_10g_phy_present(struct niu *np)
+{
+       u64 sig, mask, val;
+
+       sig = nr64(ESR_INT_SIGNALS);
+       switch (np->port) {
+       case 0:
+               mask = ESR_INT_SIGNALS_P0_BITS;
+               val = (ESR_INT_SRDY0_P0 |
+                      ESR_INT_DET0_P0 |
+                      ESR_INT_XSRDY_P0 |
+                      ESR_INT_XDP_P0_CH3 |
+                      ESR_INT_XDP_P0_CH2 |
+                      ESR_INT_XDP_P0_CH1 |
+                      ESR_INT_XDP_P0_CH0);
+               break;
+
+       case 1:
+               mask = ESR_INT_SIGNALS_P1_BITS;
+               val = (ESR_INT_SRDY0_P1 |
+                      ESR_INT_DET0_P1 |
+                      ESR_INT_XSRDY_P1 |
+                      ESR_INT_XDP_P1_CH3 |
+                      ESR_INT_XDP_P1_CH2 |
+                      ESR_INT_XDP_P1_CH1 |
+                      ESR_INT_XDP_P1_CH0);
+               break;
+
+       default:
+               return 0;
+       }
+
+       if ((sig & mask) != val)
+               return 0;
+       return 1;
+}
+
+static int link_status_10g_hotplug(struct niu *np, int *link_up_p)
+{
+       unsigned long flags;
+       int err = 0;
+       int phy_present;
+       int phy_present_prev;
+
+       spin_lock_irqsave(&np->lock, flags);
+
+       if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
+               phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ?
+                       1 : 0;
+               phy_present = niu_10g_phy_present(np);
+               if (phy_present != phy_present_prev) {
+                       /* state change */
+                       if (phy_present) {
+                               np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
+                               if (np->phy_ops->xcvr_init)
+                                       err = np->phy_ops->xcvr_init(np);
+                               if (err) {
+                                       /* debounce */
+                                       np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
+                               }
+                       } else {
+                               np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
+                               *link_up_p = 0;
+                               niuwarn(LINK, "%s: Hotplug PHY Removed\n",
+                                       np->dev->name);
+                       }
+               }
+               if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT)
+                       err = link_status_10g_bcm8706(np, link_up_p);
+       }
+
+       spin_unlock_irqrestore(&np->lock, flags);
+
+       return err;
+}
+
 static int link_status_1g(struct niu *np, int *link_up_p)
 {
        struct niu_link_config *lp = &np->link_config;
@@ -1761,6 +1978,12 @@ static const struct niu_phy_ops phy_ops_10g_fiber = {
        .link_status            = link_status_10g,
 };
 
+static const struct niu_phy_ops phy_ops_10g_fiber_hotplug = {
+       .serdes_init            = serdes_init_10g,
+       .xcvr_init              = xcvr_init_10g_bcm8706,
+       .link_status            = link_status_10g_hotplug,
+};
+
 static const struct niu_phy_ops phy_ops_10g_copper = {
        .serdes_init            = serdes_init_10g,
        .link_status            = link_status_10g, /* XXX */
@@ -1792,6 +2015,11 @@ static const struct niu_phy_template phy_template_10g_fiber = {
        .phy_addr_base  = 8,
 };
 
+static const struct niu_phy_template phy_template_10g_fiber_hotplug = {
+       .ops            = &phy_ops_10g_fiber_hotplug,
+       .phy_addr_base  = 8,
+};
+
 static const struct niu_phy_template phy_template_10g_copper = {
        .ops            = &phy_ops_10g_copper,
        .phy_addr_base  = 10,
@@ -1996,6 +2224,13 @@ static int niu_determine_phy_disposition(struct niu *np)
                            plat_type == PLAT_TYPE_VF_P1)
                                phy_addr_off = 8;
                        phy_addr_off += np->port;
+                       if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
+                               tp = &phy_template_10g_fiber_hotplug;
+                               if (np->port == 0)
+                                       phy_addr_off = 8;
+                               if (np->port == 1)
+                                       phy_addr_off = 12;
+                       }
                        break;
 
                case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
@@ -6773,6 +7008,37 @@ static int __devinit niu_phy_type_prop_decode(struct niu *np,
        return 0;
 }
 
+/* niu board models have a trailing dash version incremented
+ * with HW rev change. Need to ignore the dash version while
+ * checking for a match.
+ *
+ * For example, for the 10G card the current vpd.board_model
+ * is 501-5283-04, of which -04 is the dash version and has
+ * to be ignored.
+ */
+static int niu_board_model_match(struct niu *np, const char *model)
+{
+       return !strncmp(np->vpd.board_model, model, strlen(model));
+}
+
+static int niu_pci_vpd_get_nports(struct niu *np)
+{
+       int ports = 0;
+
+       if ((niu_board_model_match(np, NIU_QGC_LP_BM_STR)) ||
+           (niu_board_model_match(np, NIU_QGC_PEM_BM_STR)) ||
+           (niu_board_model_match(np, NIU_ALONSO_BM_STR))) {
+               ports = 4;
+       } else if ((niu_board_model_match(np, NIU_2XGF_LP_BM_STR)) ||
+                  (niu_board_model_match(np, NIU_2XGF_PEM_BM_STR)) ||
+                  (niu_board_model_match(np, NIU_FOXXY_BM_STR)) ||
+                  (niu_board_model_match(np, NIU_2XGF_MRVL_BM_STR))) {
+               ports = 2;
+       }
+
+       return ports;
+}
+
 static void __devinit niu_pci_vpd_validate(struct niu *np)
 {
        struct net_device *dev = np->dev;
@@ -6799,6 +7065,9 @@ static void __devinit niu_pci_vpd_validate(struct niu *np)
                }
                if (np->flags & NIU_FLAGS_10G)
                         np->mac_xcvr = MAC_XCVR_XPCS;
+       } else if (niu_board_model_match(np, NIU_FOXXY_BM_STR)) {
+               np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
+                             NIU_FLAGS_HOTPLUG_PHY);
        } else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
                dev_err(np->device, PFX "Illegal phy string [%s].\n",
                        np->vpd.phy_type);
@@ -6987,11 +7256,17 @@ static int __devinit niu_get_and_validate_port(struct niu *np)
                if (parent->plat_type == PLAT_TYPE_NIU) {
                        parent->num_ports = 2;
                } else {
-                       parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) &
-                               ESPC_NUM_PORTS_MACS_VAL;
-
-                       if (!parent->num_ports)
-                               parent->num_ports = 4;
+                       parent->num_ports = niu_pci_vpd_get_nports(np);
+                       if (!parent->num_ports) {
+                               /* Fall back to SPROM as a last resort.
+                                * This will fail on most cards.
+                                */
+                               parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) &
+                                       ESPC_NUM_PORTS_MACS_VAL;
+
+                               if (!parent->num_ports)
+                                       return -ENODEV;
+                       }
                }
        }
 
@@ -7015,7 +7290,8 @@ static int __devinit phy_record(struct niu_parent *parent,
                return 0;
        if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) {
                if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) &&
-                   ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011))
+                   ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011) &&
+                   ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8706))
                        return 0;
        } else {
                if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R)
@@ -7262,7 +7538,6 @@ static int __devinit walk_phys(struct niu *np, struct niu_parent *parent)
        u32 val;
        int err;
 
-
        if (!strcmp(np->vpd.model, "SUNW,CP3220") ||
            !strcmp(np->vpd.model, "SUNW,CP3260")) {
                num_10g = 0;
@@ -7273,6 +7548,12 @@ static int __devinit walk_phys(struct niu *np, struct niu_parent *parent)
                       phy_encode(PORT_TYPE_1G, 1) |
                       phy_encode(PORT_TYPE_1G, 2) |
                       phy_encode(PORT_TYPE_1G, 3));
+       } else if (niu_board_model_match(np, NIU_FOXXY_BM_STR)) {
+               num_10g = 2;
+               num_1g = 0;
+               parent->num_ports = 2;
+               val = (phy_encode(PORT_TYPE_10G, 0) |
+                      phy_encode(PORT_TYPE_10G, 1));
        } else {
                err = fill_phy_probe_info(np, parent, info);
                if (err)
@@ -7733,15 +8014,16 @@ static int __devinit niu_get_invariants(struct niu *np)
 
        have_props = !err;
 
-       err = niu_get_and_validate_port(np);
-       if (err)
-               return err;
-
        err = niu_init_mac_ipp_pcs_base(np);
        if (err)
                return err;
 
-       if (!have_props) {
+       if (have_props) {
+               err = niu_get_and_validate_port(np);
+               if (err)
+                       return err;
+
+       } else  {
                if (np->parent->plat_type == PLAT_TYPE_NIU)
                        return -EINVAL;
 
@@ -7753,10 +8035,17 @@ static int __devinit niu_get_invariants(struct niu *np)
                        niu_pci_vpd_fetch(np, offset);
                nw64(ESPC_PIO_EN, 0);
 
-               if (np->flags & NIU_FLAGS_VPD_VALID)
+               if (np->flags & NIU_FLAGS_VPD_VALID) {
                        niu_pci_vpd_validate(np);
+                       err = niu_get_and_validate_port(np);
+                       if (err)
+                               return err;
+               }
 
                if (!(np->flags & NIU_FLAGS_VPD_VALID)) {
+                       err = niu_get_and_validate_port(np);
+                       if (err)
+                               return err;
                        err = niu_pci_probe_sprom(np);
                        if (err)
                                return err;
index 336aed08b27507964394f3cca929c1c2b4112dd4..97ffbe137bcbaa20fbea8ae4eeedf54c9b9ac7a8 100644 (file)
@@ -2537,6 +2537,7 @@ struct fcram_hash_ipv6 {
 
 #define NIU_PHY_ID_MASK                        0xfffff0f0
 #define NIU_PHY_ID_BCM8704             0x00206030
+#define NIU_PHY_ID_BCM8706             0x00206035
 #define NIU_PHY_ID_BCM5464R            0x002060b0
 #define NIU_PHY_ID_MRVL88X2011         0x01410020
 
@@ -2937,6 +2938,15 @@ struct rx_ring_info {
 
 #define NIU_MAX_MTU            9216
 
+/* VPD strings */
+#define        NIU_QGC_LP_BM_STR       "501-7606"
+#define        NIU_2XGF_LP_BM_STR      "501-7283"
+#define        NIU_QGC_PEM_BM_STR      "501-7765"
+#define        NIU_2XGF_PEM_BM_STR     "501-7626"
+#define        NIU_ALONSO_BM_STR       "373-0202"
+#define        NIU_FOXXY_BM_STR        "501-7961"
+#define        NIU_2XGF_MRVL_BM_STR    "SK-6E82"
+
 #define NIU_VPD_MIN_MAJOR      3
 #define NIU_VPD_MIN_MINOR      4
 
@@ -3199,6 +3209,8 @@ struct niu {
        struct niu_parent               *parent;
 
        u32                             flags;
+#define NIU_FLAGS_HOTPLUG_PHY_PRESENT  0x02000000 /* Removable PHY detected */
+#define NIU_FLAGS_HOTPLUG_PHY          0x01000000 /* Removable PHY */
 #define NIU_FLAGS_VPD_VALID            0x00800000 /* VPD has valid version */
 #define NIU_FLAGS_MSIX                 0x00400000 /* MSI-X in use */
 #define NIU_FLAGS_MCAST                        0x00200000 /* multicast filter enabled */
index 963630c65ca940d9dfc59ed57370c54dbfed7847..94e0b7ed76f16ff8c2c2a7f43aaf341c4cace12d 100644 (file)
@@ -89,6 +89,9 @@ int mdiobus_register(struct mii_bus *bus)
 
                        phydev->bus = bus;
 
+                       /* Run all of the fixups for this PHY */
+                       phy_scan_fixups(phydev);
+
                        err = device_register(&phydev->dev);
 
                        if (err) {
index 12fccb1c76dc0c147e3291aaad803c8d5dbc9caf..3c18bb594957a3e2e0320274ff88897189d8b594 100644 (file)
@@ -406,8 +406,10 @@ int phy_mii_ioctl(struct phy_device *phydev,
                
                if (mii_data->reg_num == MII_BMCR 
                                && val & BMCR_RESET
-                               && phydev->drv->config_init)
+                               && phydev->drv->config_init) {
+                       phy_scan_fixups(phydev);
                        phydev->drv->config_init(phydev);
+               }
                break;
 
        default:
index 8b1121b02f9806d9323cfea84fb157bc83c5ae08..ddf8d51832a679dcd7b2767bd8c9793d5b2a8a23 100644 (file)
@@ -53,6 +53,96 @@ static void phy_device_release(struct device *dev)
        phy_device_free(to_phy_device(dev));
 }
 
+static LIST_HEAD(phy_fixup_list);
+static DEFINE_MUTEX(phy_fixup_lock);
+
+/*
+ * Creates a new phy_fixup and adds it to the list
+ * @bus_id: A string which matches phydev->dev.bus_id (or PHY_ANY_ID)
+ * @phy_uid: Used to match against phydev->phy_id (the UID of the PHY)
+ *     It can also be PHY_ANY_UID
+ * @phy_uid_mask: Applied to phydev->phy_id and fixup->phy_uid before
+ *     comparison
+ * @run: The actual code to be run when a matching PHY is found
+ */
+int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
+               int (*run)(struct phy_device *))
+{
+       struct phy_fixup *fixup;
+
+       fixup = kzalloc(sizeof(struct phy_fixup), GFP_KERNEL);
+       if (!fixup)
+               return -ENOMEM;
+
+       strncpy(fixup->bus_id, bus_id, BUS_ID_SIZE);
+       fixup->phy_uid = phy_uid;
+       fixup->phy_uid_mask = phy_uid_mask;
+       fixup->run = run;
+
+       mutex_lock(&phy_fixup_lock);
+       list_add_tail(&fixup->list, &phy_fixup_list);
+       mutex_unlock(&phy_fixup_lock);
+
+       return 0;
+}
+EXPORT_SYMBOL(phy_register_fixup);
+
+/* Registers a fixup to be run on any PHY with the UID in phy_uid */
+int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask,
+               int (*run)(struct phy_device *))
+{
+       return phy_register_fixup(PHY_ANY_ID, phy_uid, phy_uid_mask, run);
+}
+EXPORT_SYMBOL(phy_register_fixup_for_uid);
+
+/* Registers a fixup to be run on the PHY with id string bus_id */
+int phy_register_fixup_for_id(const char *bus_id,
+               int (*run)(struct phy_device *))
+{
+       return phy_register_fixup(bus_id, PHY_ANY_UID, 0xffffffff, run);
+}
+EXPORT_SYMBOL(phy_register_fixup_for_id);
+
+/*
+ * Returns 1 if fixup matches phydev in bus_id and phy_uid.
+ * Fixups can be set to match any in one or more fields.
+ */
+static int phy_needs_fixup(struct phy_device *phydev, struct phy_fixup *fixup)
+{
+       if (strcmp(fixup->bus_id, phydev->dev.bus_id) != 0)
+               if (strcmp(fixup->bus_id, PHY_ANY_ID) != 0)
+                       return 0;
+
+       if ((fixup->phy_uid & fixup->phy_uid_mask) !=
+                       (phydev->phy_id & fixup->phy_uid_mask))
+               if (fixup->phy_uid != PHY_ANY_UID)
+                       return 0;
+
+       return 1;
+}
+
+/* Runs any matching fixups for this phydev */
+int phy_scan_fixups(struct phy_device *phydev)
+{
+       struct phy_fixup *fixup;
+
+       mutex_lock(&phy_fixup_lock);
+       list_for_each_entry(fixup, &phy_fixup_list, list) {
+               if (phy_needs_fixup(phydev, fixup)) {
+                       int err;
+
+                       err = fixup->run(phydev);
+
+                       if (err < 0) {
+                               mutex_unlock(&phy_fixup_lock);
+                               return err;
+                       }
+               }
+       }
+       mutex_unlock(&phy_fixup_lock);
+
+       return 0;
+}
+EXPORT_SYMBOL(phy_scan_fixups);
+
 struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
 {
        struct phy_device *dev;
@@ -179,13 +269,13 @@ void phy_prepare_link(struct phy_device *phydev,
  *   choose to call only the subset of functions which provide
  *   the desired functionality.
  */
-struct phy_device * phy_connect(struct net_device *dev, const char *phy_id,
+struct phy_device * phy_connect(struct net_device *dev, const char *bus_id,
                void (*handler)(struct net_device *), u32 flags,
                phy_interface_t interface)
 {
        struct phy_device *phydev;
 
-       phydev = phy_attach(dev, phy_id, flags, interface);
+       phydev = phy_attach(dev, bus_id, flags, interface);
 
        if (IS_ERR(phydev))
                return phydev;
@@ -226,7 +316,7 @@ static int phy_compare_id(struct device *dev, void *data)
 /**
  * phy_attach - attach a network device to a particular PHY device
  * @dev: network device to attach
- * @phy_id: PHY device to attach
+ * @bus_id: PHY device to attach
  * @flags: PHY device's dev_flags
  * @interface: PHY device's interface
  *
@@ -238,7 +328,7 @@ static int phy_compare_id(struct device *dev, void *data)
  *     change.  The phy_device is returned to the attaching driver.
  */
 struct phy_device *phy_attach(struct net_device *dev,
-               const char *phy_id, u32 flags, phy_interface_t interface)
+               const char *bus_id, u32 flags, phy_interface_t interface)
 {
        struct bus_type *bus = &mdio_bus_type;
        struct phy_device *phydev;
@@ -246,12 +336,12 @@ struct phy_device *phy_attach(struct net_device *dev,
 
        /* Search the list of PHY devices on the mdio bus for the
         * PHY with the requested name */
-       d = bus_find_device(bus, NULL, (void *)phy_id, phy_compare_id);
+       d = bus_find_device(bus, NULL, (void *)bus_id, phy_compare_id);
 
        if (d) {
                phydev = to_phy_device(d);
        } else {
-               printk(KERN_ERR "%s not found\n", phy_id);
+               printk(KERN_ERR "%s not found\n", bus_id);
                return ERR_PTR(-ENODEV);
        }
 
@@ -271,7 +361,7 @@ struct phy_device *phy_attach(struct net_device *dev,
 
        if (phydev->attached_dev) {
                printk(KERN_ERR "%s: %s already attached\n",
-                               dev->name, phy_id);
+                               dev->name, bus_id);
                return ERR_PTR(-EBUSY);
        }
 
@@ -287,6 +377,11 @@ struct phy_device *phy_attach(struct net_device *dev,
        if (phydev->drv->config_init) {
                int err;
 
+               err = phy_scan_fixups(phydev);
+
+               if (err < 0)
+                       return ERR_PTR(err);
+
                err = phydev->drv->config_init(phydev);
 
                if (err < 0)
@@ -395,6 +490,7 @@ EXPORT_SYMBOL(genphy_config_advert);
  */
 int genphy_setup_forced(struct phy_device *phydev)
 {
+       int err;
        int ctl = 0;
 
        phydev->pause = phydev->asym_pause = 0;
@@ -407,17 +503,26 @@ int genphy_setup_forced(struct phy_device *phydev)
        if (DUPLEX_FULL == phydev->duplex)
                ctl |= BMCR_FULLDPLX;
        
-       ctl = phy_write(phydev, MII_BMCR, ctl);
+       err = phy_write(phydev, MII_BMCR, ctl);
 
-       if (ctl < 0)
-               return ctl;
+       if (err < 0)
+               return err;
+
+       /*
+        * Run the fixups on this PHY, just in case the
+        * board code needs to change something after a reset
+        */
+       err = phy_scan_fixups(phydev);
+
+       if (err < 0)
+               return err;
 
        /* We just reset the device, so we'd better configure any
         * settings the PHY requires to operate */
        if (phydev->drv->config_init)
-               ctl = phydev->drv->config_init(phydev);
+               err = phydev->drv->config_init(phydev);
 
-       return ctl;
+       return err;
 }
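The fixup machinery added above is invoked from mdiobus_register(), phy_attach(), phy_mii_ioctl() and genphy_setup_forced(), so a fixup registered by board code runs every time the PHY is (re)initialised. A hedged sketch of how platform code might register one with the UID variant; the PHY id/mask, the register write and the example_* names are invented purely for illustration:

#include <linux/init.h>
#include <linux/phy.h>

/* Hypothetical erratum workaround applied after every PHY reset. */
static int example_phy_fixup(struct phy_device *phydev)
{
	/* illustrative vendor register and value */
	return phy_write(phydev, 0x1d, 0x0001);
}

static int __init example_board_phy_fixups(void)
{
	/* run for any PHY whose (phy_id & mask) matches the UID */
	return phy_register_fixup_for_uid(0x00112200, 0xffffff00,
					  example_phy_fixup);
}
arch_initcall(example_board_phy_fixups);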
 
 
index dcbe01b0ca0de05d3c21d75f5661858d88157232..157fd932e95140ce54936e3d0a79bdb30cd25048 100644 (file)
@@ -86,7 +86,7 @@
 #include "s2io.h"
 #include "s2io-regs.h"
 
-#define DRV_VERSION "2.0.26.20"
+#define DRV_VERSION "2.0.26.22"
 
 /* S2io Driver name & version. */
 static char s2io_driver_name[] = "Neterion";
@@ -117,20 +117,6 @@ static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
 
 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
                                      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
-#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
-#define PANIC  1
-#define LOW    2
-static inline int rx_buffer_level(struct s2io_nic * sp, int rxb_size, int ring)
-{
-       struct mac_info *mac_control;
-
-       mac_control = &sp->mac_control;
-       if (rxb_size <= rxd_count[sp->rxd_mode])
-               return PANIC;
-       else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
-               return  LOW;
-       return 0;
-}
 
 static inline int is_s2io_card_up(const struct s2io_nic * sp)
 {
@@ -2458,7 +2444,7 @@ static void free_tx_buffers(struct s2io_nic *nic)
        for (i = 0; i < config->tx_fifo_num; i++) {
                unsigned long flags;
                spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags);
-               for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
+               for (j = 0; j < config->tx_cfg[i].fifo_len; j++) {
                        txdp = (struct TxD *) \
                        mac_control->fifos[i].list_info[j].list_virt_addr;
                        skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
@@ -2544,7 +2530,6 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
        struct config_param *config;
        u64 tmp;
        struct buffAdd *ba;
-       unsigned long flags;
        struct RxD_t *first_rxdp = NULL;
        u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
        struct RxD1 *rxdp1;
@@ -2592,15 +2577,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
                        DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
                                  dev->name, rxdp);
                }
-               if(!napi) {
-                       spin_lock_irqsave(&nic->put_lock, flags);
-                       mac_control->rings[ring_no].put_pos =
-                       (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
-                       spin_unlock_irqrestore(&nic->put_lock, flags);
-               } else {
-                       mac_control->rings[ring_no].put_pos =
-                       (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
-               }
+
                if ((rxdp->Control_1 & RXD_OWN_XENA) &&
                        ((nic->rxd_mode == RXD_MODE_3B) &&
                                (rxdp->Control_2 & s2BIT(0)))) {
@@ -2978,7 +2955,7 @@ static void rx_intr_handler(struct ring_info *ring_data)
 {
        struct s2io_nic *nic = ring_data->nic;
        struct net_device *dev = (struct net_device *) nic->dev;
-       int get_block, put_block, put_offset;
+       int get_block, put_block;
        struct rx_curr_get_info get_info, put_info;
        struct RxD_t *rxdp;
        struct sk_buff *skb;
@@ -2987,19 +2964,11 @@ static void rx_intr_handler(struct ring_info *ring_data)
        struct RxD1* rxdp1;
        struct RxD3* rxdp3;
 
-       spin_lock(&nic->rx_lock);
-
        get_info = ring_data->rx_curr_get_info;
        get_block = get_info.block_index;
        memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
        put_block = put_info.block_index;
        rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
-       if (!napi) {
-               spin_lock(&nic->put_lock);
-               put_offset = ring_data->put_pos;
-               spin_unlock(&nic->put_lock);
-       } else
-               put_offset = ring_data->put_pos;
 
        while (RXD_IS_UP2DT(rxdp)) {
                /*
@@ -3016,7 +2985,6 @@ static void rx_intr_handler(struct ring_info *ring_data)
                        DBG_PRINT(ERR_DBG, "%s: The skb is ",
                                  dev->name);
                        DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
-                       spin_unlock(&nic->rx_lock);
                        return;
                }
                if (nic->rxd_mode == RXD_MODE_1) {
@@ -3072,8 +3040,6 @@ static void rx_intr_handler(struct ring_info *ring_data)
                        }
                }
        }
-
-       spin_unlock(&nic->rx_lock);
 }
 
 /**
@@ -4105,7 +4071,6 @@ static int s2io_close(struct net_device *dev)
                        do_s2io_delete_unicast_mc(sp, tmp64);
        }
 
-       /* Reset card, kill tasklet and free Tx and Rx buffers. */
        s2io_card_down(sp);
 
        return 0;
@@ -4370,29 +4335,9 @@ s2io_alarm_handle(unsigned long data)
 
 static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
 {
-       int rxb_size, level;
-
-       if (!sp->lro) {
-               rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
-               level = rx_buffer_level(sp, rxb_size, rng_n);
-
-               if ((level == PANIC) && (!TASKLET_IN_USE)) {
-                       int ret;
-                       DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
-                       DBG_PRINT(INTR_DBG, "PANIC levels\n");
-                       if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
-                               DBG_PRINT(INFO_DBG, "Out of memory in %s",
-                                         __FUNCTION__);
-                               clear_bit(0, (&sp->tasklet_status));
-                               return -1;
-                       }
-                       clear_bit(0, (&sp->tasklet_status));
-               } else if (level == LOW)
-                       tasklet_schedule(&sp->task);
-
-       } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
-                       DBG_PRINT(INFO_DBG, "%s:Out of memory", sp->dev->name);
-                       DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
+       if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
+               DBG_PRINT(INFO_DBG, "%s:Out of memory", sp->dev->name);
+               DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
        }
        return 0;
 }
@@ -6769,49 +6714,6 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu)
        return ret;
 }
 
-/**
- *  s2io_tasklet - Bottom half of the ISR.
- *  @dev_adr : address of the device structure in dma_addr_t format.
- *  Description:
- *  This is the tasklet or the bottom half of the ISR. This is
- *  an extension of the ISR which is scheduled by the scheduler to be run
- *  when the load on the CPU is low. All low priority tasks of the ISR can
- *  be pushed into the tasklet. For now the tasklet is used only to
- *  replenish the Rx buffers in the Rx buffer descriptors.
- *  Return value:
- *  void.
- */
-
-static void s2io_tasklet(unsigned long dev_addr)
-{
-       struct net_device *dev = (struct net_device *) dev_addr;
-       struct s2io_nic *sp = dev->priv;
-       int i, ret;
-       struct mac_info *mac_control;
-       struct config_param *config;
-
-       mac_control = &sp->mac_control;
-       config = &sp->config;
-
-       if (!TASKLET_IN_USE) {
-               for (i = 0; i < config->rx_ring_num; i++) {
-                       ret = fill_rx_buffers(sp, i);
-                       if (ret == -ENOMEM) {
-                               DBG_PRINT(INFO_DBG, "%s: Out of ",
-                                         dev->name);
-                               DBG_PRINT(INFO_DBG, "memory in tasklet\n");
-                               break;
-                       } else if (ret == -EFILL) {
-                               DBG_PRINT(INFO_DBG,
-                                         "%s: Rx Ring %d is full\n",
-                                         dev->name, i);
-                               break;
-                       }
-               }
-               clear_bit(0, (&sp->tasklet_status));
-       }
-}
-
 /**
  * s2io_set_link - Set the Link status
  * @data: long pointer to device private structure
@@ -7161,7 +7063,6 @@ static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
 {
        int cnt = 0;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
-       unsigned long flags;
        register u64 val64 = 0;
        struct config_param *config;
        config = &sp->config;
@@ -7186,9 +7087,6 @@ static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
 
        s2io_rem_isr(sp);
 
-       /* Kill tasklet. */
-       tasklet_kill(&sp->task);
-
        /* Check if the device is Quiescent and then Reset the NIC */
        while(do_io) {
                /* As per the HW requirement we need to replenish the
@@ -7223,9 +7121,7 @@ static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
        free_tx_buffers(sp);
 
        /* Free all Rx buffers */
-       spin_lock_irqsave(&sp->rx_lock, flags);
        free_rx_buffers(sp);
-       spin_unlock_irqrestore(&sp->rx_lock, flags);
 
        clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
 }
@@ -7314,9 +7210,6 @@ static int s2io_card_up(struct s2io_nic * sp)
 
        S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
 
-       /* Enable tasklet for the device */
-       tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
-
        /*  Enable select interrupts */
        en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
        if (sp->config.intr_type != INTA)
@@ -8119,20 +8012,15 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
        s2io_reset(sp);
 
        /*
-        * Initialize the tasklet status and link state flags
+        * Initialize link state flags
         * and the card state parameter
         */
-       sp->tasklet_status = 0;
        sp->state = 0;
 
        /* Initialize spinlocks */
        for (i = 0; i < sp->config.tx_fifo_num; i++)
                spin_lock_init(&mac_control->fifos[i].tx_lock);
 
-       if (!napi)
-               spin_lock_init(&sp->put_lock);
-       spin_lock_init(&sp->rx_lock);
-
        /*
         * SXE-002: Configure link and activity LED to init state
         * on driver load.
index e68fdf7e42600558ff7114772b58c8ed65c3ece7..ce53a02105f25ab99aee4e017c74b4c04a2e2600 100644 (file)
@@ -703,9 +703,6 @@ struct ring_info {
         */
        struct rx_curr_get_info rx_curr_get_info;
 
-       /* Index to the absolute position of the put pointer of Rx ring */
-       int put_pos;
-
        /* Buffer Address store. */
        struct buffAdd **ba;
        struct s2io_nic *nic;
@@ -868,8 +865,6 @@ struct s2io_nic {
        int device_enabled_once;
 
        char name[60];
-       struct tasklet_struct task;
-       volatile unsigned long tasklet_status;
 
        /* Timer that handles I/O errors/exceptions */
        struct timer_list alarm_timer;
@@ -879,8 +874,6 @@ struct s2io_nic {
 
        atomic_t rx_bufs_left[MAX_RX_RINGS];
 
-       spinlock_t put_lock;
-
 #define PROMISC     1
 #define ALL_MULTI   2
 
@@ -964,7 +957,6 @@ struct s2io_nic {
        u8              lro;
        u16             lro_max_aggr_per_sess;
        volatile unsigned long state;
-       spinlock_t      rx_lock;
        u64             general_int_mask;
 #define VPD_STRING_LEN 80
        u8  product_name[VPD_STRING_LEN];
@@ -1094,7 +1086,6 @@ static void s2io_handle_errors(void * dev_id);
 static int s2io_starter(void);
 static void s2io_closer(void);
 static void s2io_tx_watchdog(struct net_device *dev);
-static void s2io_tasklet(unsigned long dev_addr);
 static void s2io_set_multicast(struct net_device *dev);
 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp);
 static void s2io_link(struct s2io_nic * sp, int link);
index 78994ede0cb0666945dc3d35e1061b1cc3cc8aee..6261201403cdd143ca592a880ca5052c167522d9 100644 (file)
@@ -825,7 +825,8 @@ static struct platform_driver sgiseeq_driver = {
        .probe  = sgiseeq_probe,
        .remove = __devexit_p(sgiseeq_remove),
        .driver = {
-               .name   = "sgiseeq"
+               .name   = "sgiseeq",
+               .owner  = THIS_MODULE,
        }
 };
 
@@ -850,3 +851,4 @@ module_exit(sgiseeq_module_exit);
 MODULE_DESCRIPTION("SGI Seeq 8003 driver");
 MODULE_AUTHOR("Linux/MIPS Mailing List <linux-mips@linux-mips.org>");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:sgiseeq");
index 76cc1d3adf715fc635a58db2c2b9df40789f7a56..4e28002051899c21f0a6050b5b32f14b6f2c6dec 100644 (file)
@@ -92,6 +92,7 @@ module_param(tx_fifo_kb, int, 0400);
 MODULE_PARM_DESC(tx_fifo_kb,"transmit FIFO size in KB (1<x<15)(default=8)");
 
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:smc911x");
 
 /*
  * The internal workings of the driver.  If you are changing anything
@@ -243,7 +244,7 @@ static void smc911x_reset(struct net_device *dev)
                do {
                        udelay(10);
                        reg = SMC_GET_PMT_CTRL() & PMT_CTRL_READY_;
-               } while ( timeout-- && !reg);
+               } while (--timeout && !reg);
                if (timeout == 0) {
                        PRINTK("%s: smc911x_reset timeout waiting for PM restore\n", dev->name);
                        return;
@@ -267,7 +268,7 @@ static void smc911x_reset(struct net_device *dev)
                                resets++;
                                break;
                        }
-               } while ( timeout-- && (reg & HW_CFG_SRST_));
+               } while (--timeout && (reg & HW_CFG_SRST_));
        }
        if (timeout == 0) {
                PRINTK("%s: smc911x_reset timeout waiting for reset\n", dev->name);
@@ -413,7 +414,7 @@ static inline void smc911x_drop_pkt(struct net_device *dev)
                do {
                        udelay(10);
                        reg = SMC_GET_RX_DP_CTRL() & RX_DP_CTRL_FFWD_BUSY_;
-               } while ( timeout-- && reg);
+               } while (--timeout && reg);
                if (timeout == 0) {
                        PRINTK("%s: timeout waiting for RX fast forward\n", dev->name);
                }
@@ -2262,6 +2263,7 @@ static struct platform_driver smc911x_driver = {
        .resume  = smc911x_drv_resume,
        .driver  = {
                .name    = CARDNAME,
+               .owner  = THIS_MODULE,
        },
 };
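The timeout loops changed above from "timeout--" to "--timeout" because the postdecrement form leaves timeout at -1 (not 0) when the hardware never becomes ready, so the "if (timeout == 0)" check after the loop could never report the expiry. A tiny standalone illustration of the difference (plain user-space C, not driver code):

#include <stdio.h>

int main(void)
{
	int timeout, reg = 0;		/* reg never becomes ready */

	timeout = 3;
	while (--timeout && !reg)
		;			/* exits with timeout == 0 */
	printf("predecrement:  timeout = %d\n", timeout);

	timeout = 3;
	while (timeout-- && !reg)
		;			/* exits with timeout == -1 */
	printf("postdecrement: timeout = %d\n", timeout);

	return 0;
}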
 
index 600b92af33349c1d5a2ed782f0aba89f0fa4a229..a188e33484e631326f366edd68b33f635fce94e6 100644 (file)
@@ -132,6 +132,7 @@ module_param(watchdog, int, 0400);
 MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
 
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:smc91x");
 
 /*
  * The internal workings of the driver.  If you are changing anything
@@ -2308,6 +2309,7 @@ static struct platform_driver smc_driver = {
        .resume         = smc_drv_resume,
        .driver         = {
                .name   = CARDNAME,
+               .owner  = THIS_MODULE,
        },
 };
 
index 2cf6794acb4f9368a8f99ca94952c3766395fc97..854ccf2b4105efd1958a46eb942bb96928d511d2 100644 (file)
@@ -44,6 +44,7 @@ static const char sni_82596_string[] = "snirm_82596";
 MODULE_AUTHOR("Thomas Bogendoerfer");
 MODULE_DESCRIPTION("i82596 driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:snirm_82596");
 module_param(i596_debug, int, 0);
 MODULE_PARM_DESC(i596_debug, "82596 debug mask");
 
@@ -166,6 +167,7 @@ static struct platform_driver sni_82596_driver = {
        .remove = __devexit_p(sni_82596_driver_remove),
        .driver = {
                .name   = sni_82596_string,
+               .owner  = THIS_MODULE,
        },
 };
 
index 17585e5eed5379c2299f85f92a3956768850161e..e83b166aa6b9a53aeeb00a70224d10e46660f638 100644 (file)
@@ -625,6 +625,12 @@ static void __init bdx_firmware_endianess(void)
                s_firmLoad[i] = CPU_CHIP_SWAP32(s_firmLoad[i]);
 }
 
+static int bdx_range_check(struct bdx_priv *priv, u32 offset)
+{
+       return (offset > (u32) (BDX_REGS_SIZE / priv->nic->port_num)) ?
+               -EINVAL : 0;
+}
+
 static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
 {
        struct bdx_priv *priv = ndev->priv;
@@ -643,9 +649,15 @@ static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
                DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]);
        }
 
+       if (!capable(CAP_NET_ADMIN))
+               return -EPERM;
+
        switch (data[0]) {
 
        case BDX_OP_READ:
+               error = bdx_range_check(priv, data[1]);
+               if (error < 0)
+                       return error;
                data[2] = READ_REG(priv, data[1]);
                DBG("read_reg(0x%x)=0x%x (dec %d)\n", data[1], data[2],
                    data[2]);
@@ -655,6 +667,9 @@ static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
                break;
 
        case BDX_OP_WRITE:
+               error = bdx_range_check(priv, data[1]);
+               if (error < 0)
+                       return error;
                WRITE_REG(priv, data[1], data[2]);
                DBG("write_reg(0x%x, 0x%x)\n", data[1], data[2]);
                break;
index bc4c62b8e81a4d7be3441bdc578daf3689646ab2..e3f74c9f78bd848f1e61aa6647f7109e7282fd7c 100644 (file)
@@ -4017,6 +4017,8 @@ static int tg3_halt(struct tg3 *, int, int);
  * Invoked with tp->lock held.
  */
 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
+       __releases(tp->lock)
+       __acquires(tp->lock)
 {
        int err;
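The __releases()/__acquires() lines added to tg3_restart_hw() are sparse annotations: they document that the function drops and retakes tp->lock internally, so a sparse build (make C=1) does not warn about an unbalanced lock context. A minimal sketch of the same annotation on an invented function; the struct, lock and helper names are illustrative only:

static int example_restart(struct example_dev *ed)
	__releases(ed->lock)
	__acquires(ed->lock)
{
	/* caller holds ed->lock; drop it around the slow, sleeping part */
	spin_unlock_bh(&ed->lock);
	example_slow_reset(ed);		/* hypothetical helper that may sleep */
	spin_lock_bh(&ed->lock);
	return 0;
}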
 
index 6f33f84d37b0631652d04825b58a554cc7477dc7..6017d5267d08f4c80939834385cc7d3c591ca8be 100644 (file)
@@ -162,6 +162,7 @@ static struct platform_driver tsi_eth_driver = {
        .remove = tsi108_ether_remove,
        .driver = {
                .name = "tsi-ethernet",
+               .owner = THIS_MODULE,
        },
 };
 
@@ -1729,3 +1730,4 @@ module_exit(tsi108_ether_exit);
 MODULE_AUTHOR("Tundra Semiconductor Corporation");
 MODULE_DESCRIPTION("Tsi108 Gigabit Ethernet driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:tsi-ethernet");
index 333961bb7873973df7c75e5870aaee1f458ec47f..c0dd25ba7a18dcd9cc7969e85f1e02ce954af797 100644 (file)
@@ -2183,7 +2183,6 @@ typhoon_resume(struct pci_dev *pdev)
        }
 
        netif_device_attach(dev);
-       netif_start_queue(dev);
        return 0;
 
 reset:
index 2f11254bcc077fa22f506a1481221150429d3bee..281ce3d395324787c8b986f0dae5648e793aa853 100644 (file)
@@ -3932,7 +3932,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
        ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
        fixed_link = of_get_property(np, "fixed-link", NULL);
        if (fixed_link) {
-               ug_info->mdio_bus = 0;
+               snprintf(ug_info->mdio_bus, MII_BUS_ID_SIZE, "0");
                ug_info->phy_address = fixed_link[0];
                phy = NULL;
        } else {
index ed1afaf683a4bb96481325165929542bf04e6db8..6b8d882d197b4c450e25c302e238d462718226c8 100644 (file)
@@ -605,7 +605,6 @@ static void __devinit velocity_get_options(struct velocity_opt *opts, int index,
 static void velocity_init_cam_filter(struct velocity_info *vptr)
 {
        struct mac_regs __iomem * regs = vptr->mac_regs;
-       unsigned short vid;
 
        /* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */
        WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG);
@@ -617,29 +616,33 @@ static void velocity_init_cam_filter(struct velocity_info *vptr)
        mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
        mac_set_cam_mask(regs, vptr->mCAMmask);
 
-       /* Enable first VCAM */
+       /* Enable VCAMs */
        if (vptr->vlgrp) {
-               for (vid = 0; vid < VLAN_VID_MASK; vid++) {
-                       if (vlan_group_get_device(vptr->vlgrp, vid)) {
-                               /* If Tagging option is enabled and
-                                  VLAN ID is not zero, then
-                                  turn on MCFG_RTGOPT also */
-                               if (vid != 0)
-                                       WORD_REG_BITS_ON(MCFG_RTGOPT, &regs->MCFG);
+               unsigned int vid, i = 0;
+
+               if (!vlan_group_get_device(vptr->vlgrp, 0))
+                       WORD_REG_BITS_ON(MCFG_RTGOPT, &regs->MCFG);
 
-                               mac_set_vlan_cam(regs, 0, (u8 *) &vid);
+               for (vid = 1; (vid < VLAN_VID_MASK); vid++) {
+                       if (vlan_group_get_device(vptr->vlgrp, vid)) {
+                               mac_set_vlan_cam(regs, i, (u8 *) &vid);
+                               vptr->vCAMmask[i / 8] |= 0x1 << (i % 8);
+                               if (++i >= VCAM_SIZE)
+                                       break;
                        }
                }
-               vptr->vCAMmask[0] |= 1;
                mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
-       } else {
-               u16 temp = 0;
-               mac_set_vlan_cam(regs, 0, (u8 *) &temp);
-               temp = 1;
-               mac_set_vlan_cam_mask(regs, (u8 *) &temp);
        }
 }
 
+static void velocity_vlan_rx_register(struct net_device *dev,
+                                     struct vlan_group *grp)
+{
+       struct velocity_info *vptr = netdev_priv(dev);
+
+       vptr->vlgrp = grp;
+}
+
 static void velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
        struct velocity_info *vptr = netdev_priv(dev);
@@ -959,11 +962,13 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
 
        dev->vlan_rx_add_vid = velocity_vlan_rx_add_vid;
        dev->vlan_rx_kill_vid = velocity_vlan_rx_kill_vid;
+       dev->vlan_rx_register = velocity_vlan_rx_register;
 
 #ifdef  VELOCITY_ZERO_COPY_SUPPORT
        dev->features |= NETIF_F_SG;
 #endif
-       dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER;
+       dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
+               NETIF_F_HW_VLAN_RX;
 
        if (vptr->flags & VELOCITY_FLAGS_TX_CSUM)
                dev->features |= NETIF_F_IP_CSUM;
@@ -1597,8 +1602,13 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
        skb_put(skb, pkt_len - 4);
        skb->protocol = eth_type_trans(skb, vptr->dev);
 
+       if (vptr->vlgrp && (rd->rdesc0.RSR & RSR_DETAG)) {
+               vlan_hwaccel_rx(skb, vptr->vlgrp,
+                               swab16(le16_to_cpu(rd->rdesc1.PQTAG)));
+       } else
+               netif_rx(skb);
+
        stats->rx_bytes += pkt_len;
-       netif_rx(skb);
 
        return 0;
 }
index c4c8eab8574f79bb36fc3352648d8df09ab579fb..c2cc42f723d5fdd517ca209183b0b147645cff4b 100644 (file)
@@ -402,7 +402,7 @@ static int __init c101_init(void)
 #ifdef MODULE
                printk(KERN_INFO "c101: no card initialized\n");
 #endif
-               return -ENOSYS; /* no parameters specified, abort */
+               return -EINVAL; /* no parameters specified, abort */
        }
 
        printk(KERN_INFO "%s\n", version);
@@ -420,11 +420,11 @@ static int __init c101_init(void)
                        c101_run(irq, ram);
 
                if (*hw == '\x0')
-                       return first_card ? 0 : -ENOSYS;
+                       return first_card ? 0 : -EINVAL;
        }while(*hw++ == ':');
 
        printk(KERN_ERR "c101: invalid hardware parameters\n");
-       return first_card ? 0 : -ENOSYS;
+       return first_card ? 0 : -EINVAL;
 }
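
Both hunks above replace -ENOSYS ("function not implemented") with -EINVAL ("invalid argument"), the conventional error for missing or malformed module parameters. A minimal user-space illustration of the convention (hypothetical helper, not the driver itself):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Mimics an init routine that rejects an empty parameter string. */
static int init_with_params(const char *hw)
{
	if (hw == NULL || *hw == '\0')
		return -EINVAL;         /* no parameters specified, abort */
	return 0;
}

int main(void)
{
	int ret = init_with_params("");

	printf("ret=%d (%s)\n", ret, strerror(-ret));   /* Invalid argument */
	return 0;
}
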
 
 
index c4ab0326f91103b03dcd01dd7b1a153101ef4ea9..520bb0b1a9a2f6f1c6a74d00d9200631fda23bf5 100644 (file)
@@ -1090,10 +1090,6 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
        pvc_device *pvc = NULL;
        struct net_device *dev;
        int result, used;
-       char * prefix = "pvc%d";
-
-       if (type == ARPHRD_ETHER)
-               prefix = "pvceth%d";
 
        if ((pvc = add_pvc(frad, dlci)) == NULL) {
                printk(KERN_WARNING "%s: Memory squeeze on fr_add_pvc()\n",
index 7483d45bc5bccffefd18b4fa94122eb78d40460f..e62018a36133604bf5fdcae1a95268bb0c8531b6 100644 (file)
@@ -1809,3 +1809,5 @@ module_exit(netif_exit);
 
 MODULE_DESCRIPTION("Xen virtual network device frontend");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("xen:vif");
+MODULE_ALIAS("xennet");
index 1bd5fb30237d97a8037e5cc795ec41f1fef03a9d..e3dc8f8d0c3ec082530c846d99cce79d8a6f1892 100644 (file)
@@ -1930,6 +1930,20 @@ config FB_VIRTUAL
 
          If unsure, say N.
 
+config XEN_FBDEV_FRONTEND
+       tristate "Xen virtual frame buffer support"
+       depends on FB && XEN
+       select FB_SYS_FILLRECT
+       select FB_SYS_COPYAREA
+       select FB_SYS_IMAGEBLIT
+       select FB_SYS_FOPS
+       select FB_DEFERRED_IO
+       default y
+       help
+         This driver implements the front-end of the Xen virtual
+         frame buffer driver.  It communicates with a back-end
+         in another domain.
+
 source "drivers/video/omap/Kconfig"
 
 source "drivers/video/backlight/Kconfig"
index 11c0e5e05f219a36e5eda7ed3f0336d4ed54b32a..f172b9b73314ebc21845439919f6c18e63fd79cd 100644 (file)
@@ -114,6 +114,7 @@ obj-$(CONFIG_FB_PS3)                  += ps3fb.o
 obj-$(CONFIG_FB_SM501)            += sm501fb.o
 obj-$(CONFIG_FB_XILINX)           += xilinxfb.o
 obj-$(CONFIG_FB_OMAP)             += omap/
+obj-$(CONFIG_XEN_FBDEV_FRONTEND)  += xen-fbfront.o
 
 # Platform or fallback drivers go here
 obj-$(CONFIG_FB_UVESA)            += uvesafb.o
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
new file mode 100644 (file)
index 0000000..619a6f8
--- /dev/null
@@ -0,0 +1,550 @@
+/*
+ * Xen para-virtual frame buffer device
+ *
+ * Copyright (C) 2005-2006 Anthony Liguori <aliguori@us.ibm.com>
+ * Copyright (C) 2006-2008 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
+ *
+ *  Based on linux/drivers/video/q40fb.c
+ *
+ *  This file is subject to the terms and conditions of the GNU General Public
+ *  License. See the file COPYING in the main directory of this archive for
+ *  more details.
+ */
+
+/*
+ * TODO:
+ *
+ * Switch to grant tables when they become capable of dealing with the
+ * frame buffer.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/fb.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <asm/xen/hypervisor.h>
+#include <xen/events.h>
+#include <xen/page.h>
+#include <xen/interface/io/fbif.h>
+#include <xen/interface/io/protocols.h>
+#include <xen/xenbus.h>
+
+struct xenfb_info {
+       unsigned char           *fb;
+       struct fb_info          *fb_info;
+       int                     x1, y1, x2, y2; /* dirty rectangle,
+                                                  protected by dirty_lock */
+       spinlock_t              dirty_lock;
+       int                     nr_pages;
+       int                     irq;
+       struct xenfb_page       *page;
+       unsigned long           *mfns;
+       int                     update_wanted; /* XENFB_TYPE_UPDATE wanted */
+
+       struct xenbus_device    *xbdev;
+};
+
+static u32 xenfb_mem_len = XENFB_WIDTH * XENFB_HEIGHT * XENFB_DEPTH / 8;
+
+static int xenfb_remove(struct xenbus_device *);
+static void xenfb_init_shared_page(struct xenfb_info *);
+static int xenfb_connect_backend(struct xenbus_device *, struct xenfb_info *);
+static void xenfb_disconnect_backend(struct xenfb_info *);
+
+static void xenfb_do_update(struct xenfb_info *info,
+                           int x, int y, int w, int h)
+{
+       union xenfb_out_event event;
+       u32 prod;
+
+       event.type = XENFB_TYPE_UPDATE;
+       event.update.x = x;
+       event.update.y = y;
+       event.update.width = w;
+       event.update.height = h;
+
+       prod = info->page->out_prod;
+       /* caller ensures !xenfb_queue_full() */
+       mb();                   /* ensure ring space available */
+       XENFB_OUT_RING_REF(info->page, prod) = event;
+       wmb();                  /* ensure ring contents visible */
+       info->page->out_prod = prod + 1;
+
+       notify_remote_via_irq(info->irq);
+}
+
+static int xenfb_queue_full(struct xenfb_info *info)
+{
+       u32 cons, prod;
+
+       prod = info->page->out_prod;
+       cons = info->page->out_cons;
+       return prod - cons == XENFB_OUT_RING_LEN;
+}
+
+static void xenfb_refresh(struct xenfb_info *info,
+                         int x1, int y1, int w, int h)
+{
+       unsigned long flags;
+       int y2 = y1 + h - 1;
+       int x2 = x1 + w - 1;
+
+       if (!info->update_wanted)
+               return;
+
+       spin_lock_irqsave(&info->dirty_lock, flags);
+
+       /* Combine with dirty rectangle: */
+       if (info->y1 < y1)
+               y1 = info->y1;
+       if (info->y2 > y2)
+               y2 = info->y2;
+       if (info->x1 < x1)
+               x1 = info->x1;
+       if (info->x2 > x2)
+               x2 = info->x2;
+
+       if (xenfb_queue_full(info)) {
+               /* Can't send right now, stash it in the dirty rectangle */
+               info->x1 = x1;
+               info->x2 = x2;
+               info->y1 = y1;
+               info->y2 = y2;
+               spin_unlock_irqrestore(&info->dirty_lock, flags);
+               return;
+       }
+
+       /* Clear dirty rectangle: */
+       info->x1 = info->y1 = INT_MAX;
+       info->x2 = info->y2 = 0;
+
+       spin_unlock_irqrestore(&info->dirty_lock, flags);
+
+       if (x1 <= x2 && y1 <= y2)
+               xenfb_do_update(info, x1, y1, x2 - x1 + 1, y2 - y1 + 1);
+}
+
+static void xenfb_deferred_io(struct fb_info *fb_info,
+                             struct list_head *pagelist)
+{
+       struct xenfb_info *info = fb_info->par;
+       struct page *page;
+       unsigned long beg, end;
+       int y1, y2, miny, maxy;
+
+       miny = INT_MAX;
+       maxy = 0;
+       list_for_each_entry(page, pagelist, lru) {
+               beg = page->index << PAGE_SHIFT;
+               end = beg + PAGE_SIZE - 1;
+               y1 = beg / fb_info->fix.line_length;
+               y2 = end / fb_info->fix.line_length;
+               if (y2 >= fb_info->var.yres)
+                       y2 = fb_info->var.yres - 1;
+               if (miny > y1)
+                       miny = y1;
+               if (maxy < y2)
+                       maxy = y2;
+       }
+       xenfb_refresh(info, 0, miny, fb_info->var.xres, maxy - miny + 1);
+}
+
+static struct fb_deferred_io xenfb_defio = {
+       .delay          = HZ / 20,
+       .deferred_io    = xenfb_deferred_io,
+};
+
+static int xenfb_setcolreg(unsigned regno, unsigned red, unsigned green,
+                          unsigned blue, unsigned transp,
+                          struct fb_info *info)
+{
+       u32 v;
+
+       if (regno > info->cmap.len)
+               return 1;
+
+#define CNVT_TOHW(val, width) ((((val)<<(width))+0x7FFF-(val))>>16)
+       red = CNVT_TOHW(red, info->var.red.length);
+       green = CNVT_TOHW(green, info->var.green.length);
+       blue = CNVT_TOHW(blue, info->var.blue.length);
+       transp = CNVT_TOHW(transp, info->var.transp.length);
+#undef CNVT_TOHW
+
+       v = (red << info->var.red.offset) |
+           (green << info->var.green.offset) |
+           (blue << info->var.blue.offset);
+
+       switch (info->var.bits_per_pixel) {
+       case 16:
+       case 24:
+       case 32:
+               ((u32 *)info->pseudo_palette)[regno] = v;
+               break;
+       }
+
+       return 0;
+}
+
+static void xenfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
+{
+       struct xenfb_info *info = p->par;
+
+       sys_fillrect(p, rect);
+       xenfb_refresh(info, rect->dx, rect->dy, rect->width, rect->height);
+}
+
+static void xenfb_imageblit(struct fb_info *p, const struct fb_image *image)
+{
+       struct xenfb_info *info = p->par;
+
+       sys_imageblit(p, image);
+       xenfb_refresh(info, image->dx, image->dy, image->width, image->height);
+}
+
+static void xenfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
+{
+       struct xenfb_info *info = p->par;
+
+       sys_copyarea(p, area);
+       xenfb_refresh(info, area->dx, area->dy, area->width, area->height);
+}
+
+static ssize_t xenfb_write(struct fb_info *p, const char __user *buf,
+                       size_t count, loff_t *ppos)
+{
+       struct xenfb_info *info = p->par;
+       ssize_t res;
+
+       res = fb_sys_write(p, buf, count, ppos);
+       xenfb_refresh(info, 0, 0, info->page->width, info->page->height);
+       return res;
+}
+
+static struct fb_ops xenfb_fb_ops = {
+       .owner          = THIS_MODULE,
+       .fb_read        = fb_sys_read,
+       .fb_write       = xenfb_write,
+       .fb_setcolreg   = xenfb_setcolreg,
+       .fb_fillrect    = xenfb_fillrect,
+       .fb_copyarea    = xenfb_copyarea,
+       .fb_imageblit   = xenfb_imageblit,
+};
+
+static irqreturn_t xenfb_event_handler(int rq, void *dev_id)
+{
+       /*
+        * No incoming events are recognized, so simply ignore them all.
+        * If you need to recognize some, see xen-kbdfront's
+        * input_handler() for how to do that.
+        */
+       struct xenfb_info *info = dev_id;
+       struct xenfb_page *page = info->page;
+
+       if (page->in_cons != page->in_prod) {
+               info->page->in_cons = info->page->in_prod;
+               notify_remote_via_irq(info->irq);
+       }
+
+       /* Flush dirty rectangle: */
+       xenfb_refresh(info, INT_MAX, INT_MAX, -INT_MAX, -INT_MAX);
+
+       return IRQ_HANDLED;
+}
+
+static int __devinit xenfb_probe(struct xenbus_device *dev,
+                                const struct xenbus_device_id *id)
+{
+       struct xenfb_info *info;
+       struct fb_info *fb_info;
+       int ret;
+
+       info = kzalloc(sizeof(*info), GFP_KERNEL);
+       if (info == NULL) {
+               xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
+               return -ENOMEM;
+       }
+       dev->dev.driver_data = info;
+       info->xbdev = dev;
+       info->irq = -1;
+       info->x1 = info->y1 = INT_MAX;
+       spin_lock_init(&info->dirty_lock);
+
+       info->fb = vmalloc(xenfb_mem_len);
+       if (info->fb == NULL)
+               goto error_nomem;
+       memset(info->fb, 0, xenfb_mem_len);
+
+       info->nr_pages = (xenfb_mem_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+       info->mfns = vmalloc(sizeof(unsigned long) * info->nr_pages);
+       if (!info->mfns)
+               goto error_nomem;
+
+       /* set up shared page */
+       info->page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+       if (!info->page)
+               goto error_nomem;
+
+       xenfb_init_shared_page(info);
+
+       /* abusing framebuffer_alloc() to allocate pseudo_palette */
+       fb_info = framebuffer_alloc(sizeof(u32) * 256, NULL);
+       if (fb_info == NULL)
+               goto error_nomem;
+
+       /* complete the abuse: */
+       fb_info->pseudo_palette = fb_info->par;
+       fb_info->par = info;
+
+       fb_info->screen_base = info->fb;
+
+       fb_info->fbops = &xenfb_fb_ops;
+       fb_info->var.xres_virtual = fb_info->var.xres = info->page->width;
+       fb_info->var.yres_virtual = fb_info->var.yres = info->page->height;
+       fb_info->var.bits_per_pixel = info->page->depth;
+
+       fb_info->var.red = (struct fb_bitfield){16, 8, 0};
+       fb_info->var.green = (struct fb_bitfield){8, 8, 0};
+       fb_info->var.blue = (struct fb_bitfield){0, 8, 0};
+
+       fb_info->var.activate = FB_ACTIVATE_NOW;
+       fb_info->var.height = -1;
+       fb_info->var.width = -1;
+       fb_info->var.vmode = FB_VMODE_NONINTERLACED;
+
+       fb_info->fix.visual = FB_VISUAL_TRUECOLOR;
+       fb_info->fix.line_length = info->page->line_length;
+       fb_info->fix.smem_start = 0;
+       fb_info->fix.smem_len = xenfb_mem_len;
+       strcpy(fb_info->fix.id, "xen");
+       fb_info->fix.type = FB_TYPE_PACKED_PIXELS;
+       fb_info->fix.accel = FB_ACCEL_NONE;
+
+       fb_info->flags = FBINFO_FLAG_DEFAULT;
+
+       ret = fb_alloc_cmap(&fb_info->cmap, 256, 0);
+       if (ret < 0) {
+               framebuffer_release(fb_info);
+               xenbus_dev_fatal(dev, ret, "fb_alloc_cmap");
+               goto error;
+       }
+
+       fb_info->fbdefio = &xenfb_defio;
+       fb_deferred_io_init(fb_info);
+
+       ret = register_framebuffer(fb_info);
+       if (ret) {
+               fb_deferred_io_cleanup(fb_info);
+               fb_dealloc_cmap(&fb_info->cmap);
+               framebuffer_release(fb_info);
+               xenbus_dev_fatal(dev, ret, "register_framebuffer");
+               goto error;
+       }
+       info->fb_info = fb_info;
+
+       ret = xenfb_connect_backend(dev, info);
+       if (ret < 0)
+               goto error;
+
+       return 0;
+
+ error_nomem:
+       ret = -ENOMEM;
+       xenbus_dev_fatal(dev, ret, "allocating device memory");
+ error:
+       xenfb_remove(dev);
+       return ret;
+}
+
+static int xenfb_resume(struct xenbus_device *dev)
+{
+       struct xenfb_info *info = dev->dev.driver_data;
+
+       xenfb_disconnect_backend(info);
+       xenfb_init_shared_page(info);
+       return xenfb_connect_backend(dev, info);
+}
+
+static int xenfb_remove(struct xenbus_device *dev)
+{
+       struct xenfb_info *info = dev->dev.driver_data;
+
+       xenfb_disconnect_backend(info);
+       if (info->fb_info) {
+               fb_deferred_io_cleanup(info->fb_info);
+               unregister_framebuffer(info->fb_info);
+               fb_dealloc_cmap(&info->fb_info->cmap);
+               framebuffer_release(info->fb_info);
+       }
+       free_page((unsigned long)info->page);
+       vfree(info->mfns);
+       vfree(info->fb);
+       kfree(info);
+
+       return 0;
+}
+
+static unsigned long vmalloc_to_mfn(void *address)
+{
+       return pfn_to_mfn(vmalloc_to_pfn(address));
+}
+
+static void xenfb_init_shared_page(struct xenfb_info *info)
+{
+       int i;
+
+       for (i = 0; i < info->nr_pages; i++)
+               info->mfns[i] = vmalloc_to_mfn(info->fb + i * PAGE_SIZE);
+
+       info->page->pd[0] = vmalloc_to_mfn(info->mfns);
+       info->page->pd[1] = 0;
+       info->page->width = XENFB_WIDTH;
+       info->page->height = XENFB_HEIGHT;
+       info->page->depth = XENFB_DEPTH;
+       info->page->line_length = (info->page->depth / 8) * info->page->width;
+       info->page->mem_length = xenfb_mem_len;
+       info->page->in_cons = info->page->in_prod = 0;
+       info->page->out_cons = info->page->out_prod = 0;
+}
+
+static int xenfb_connect_backend(struct xenbus_device *dev,
+                                struct xenfb_info *info)
+{
+       int ret, evtchn;
+       struct xenbus_transaction xbt;
+
+       ret = xenbus_alloc_evtchn(dev, &evtchn);
+       if (ret)
+               return ret;
+       ret = bind_evtchn_to_irqhandler(evtchn, xenfb_event_handler,
+                                       0, dev->devicetype, info);
+       if (ret < 0) {
+               xenbus_free_evtchn(dev, evtchn);
+               xenbus_dev_fatal(dev, ret, "bind_evtchn_to_irqhandler");
+               return ret;
+       }
+       info->irq = ret;
+
+ again:
+       ret = xenbus_transaction_start(&xbt);
+       if (ret) {
+               xenbus_dev_fatal(dev, ret, "starting transaction");
+               return ret;
+       }
+       ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
+                           virt_to_mfn(info->page));
+       if (ret)
+               goto error_xenbus;
+       ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
+                           evtchn);
+       if (ret)
+               goto error_xenbus;
+       ret = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
+                           XEN_IO_PROTO_ABI_NATIVE);
+       if (ret)
+               goto error_xenbus;
+       ret = xenbus_printf(xbt, dev->nodename, "feature-update", "1");
+       if (ret)
+               goto error_xenbus;
+       ret = xenbus_transaction_end(xbt, 0);
+       if (ret) {
+               if (ret == -EAGAIN)
+                       goto again;
+               xenbus_dev_fatal(dev, ret, "completing transaction");
+               return ret;
+       }
+
+       xenbus_switch_state(dev, XenbusStateInitialised);
+       return 0;
+
+ error_xenbus:
+       xenbus_transaction_end(xbt, 1);
+       xenbus_dev_fatal(dev, ret, "writing xenstore");
+       return ret;
+}
+
+static void xenfb_disconnect_backend(struct xenfb_info *info)
+{
+       if (info->irq >= 0)
+               unbind_from_irqhandler(info->irq, info);
+       info->irq = -1;
+}
+
+static void xenfb_backend_changed(struct xenbus_device *dev,
+                                 enum xenbus_state backend_state)
+{
+       struct xenfb_info *info = dev->dev.driver_data;
+       int val;
+
+       switch (backend_state) {
+       case XenbusStateInitialising:
+       case XenbusStateInitialised:
+       case XenbusStateUnknown:
+       case XenbusStateClosed:
+               break;
+
+       case XenbusStateInitWait:
+InitWait:
+               xenbus_switch_state(dev, XenbusStateConnected);
+               break;
+
+       case XenbusStateConnected:
+               /*
+                * Work around xenbus race condition: If backend goes
+                * through InitWait to Connected fast enough, we can
+                * get Connected twice here.
+                */
+               if (dev->state != XenbusStateConnected)
+                       goto InitWait; /* no InitWait seen yet, fudge it */
+
+               if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+                                "request-update", "%d", &val) < 0)
+                       val = 0;
+               if (val)
+                       info->update_wanted = 1;
+               break;
+
+       case XenbusStateClosing:
+               xenbus_frontend_closed(dev);
+               break;
+       }
+}
+
+static struct xenbus_device_id xenfb_ids[] = {
+       { "vfb" },
+       { "" }
+};
+
+static struct xenbus_driver xenfb = {
+       .name = "vfb",
+       .owner = THIS_MODULE,
+       .ids = xenfb_ids,
+       .probe = xenfb_probe,
+       .remove = xenfb_remove,
+       .resume = xenfb_resume,
+       .otherend_changed = xenfb_backend_changed,
+};
+
+static int __init xenfb_init(void)
+{
+       if (!is_running_on_xen())
+               return -ENODEV;
+
+       /* Nothing to do if running in dom0. */
+       if (is_initial_xendomain())
+               return -ENODEV;
+
+       return xenbus_register_frontend(&xenfb);
+}
+
+static void __exit xenfb_cleanup(void)
+{
+       xenbus_unregister_driver(&xenfb);
+}
+
+module_init(xenfb_init);
+module_exit(xenfb_cleanup);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
new file mode 100644 (file)
index 0000000..4b75a16
--- /dev/null
@@ -0,0 +1,19 @@
+config XEN_BALLOON
+       bool "Xen memory balloon driver"
+       depends on XEN
+       default y
+       help
+         The balloon driver allows the Xen domain to request more memory from
+         the system to expand the domain's memory allocation, or alternatively
+         return unneeded memory to the system.
+
+config XEN_SCRUB_PAGES
+       bool "Scrub pages before returning them to system"
+       depends on XEN_BALLOON
+       default y
+       help
+         Scrub pages before returning them to the system for reuse by
+         other domains.  This makes sure that any confidential data
+         is not accidentally visible to other domains.  It is more
+         secure, but slightly less efficient.
+         If in doubt, say yes.
index 56592f0d6cefff998f305bcb4a0125f9bed28e0b..37af04f1ffd90d98a6ed33d5f29166d9d3747487 100644 (file)
@@ -1,2 +1,4 @@
-obj-y  += grant-table.o
+obj-y  += grant-table.o features.o events.o
 obj-y  += xenbus/
+obj-$(CONFIG_XEN_XENCOMM)      += xencomm.o
+obj-$(CONFIG_XEN_BALLOON)      += balloon.o
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
new file mode 100644 (file)
index 0000000..ab25ba6
--- /dev/null
@@ -0,0 +1,712 @@
+/******************************************************************************
+ * balloon.c
+ *
+ * Xen balloon driver - enables returning/claiming memory to/from Xen.
+ *
+ * Copyright (c) 2003, B Dragovic
+ * Copyright (c) 2003-2004, M Williamson, K Fraser
+ * Copyright (c) 2005 Dan M. Smith, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/bootmem.h>
+#include <linux/pagemap.h>
+#include <linux/highmem.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/sysdev.h>
+
+#include <asm/xen/hypervisor.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+#include <asm/tlb.h>
+
+#include <xen/interface/memory.h>
+#include <xen/balloon.h>
+#include <xen/xenbus.h>
+#include <xen/features.h>
+#include <xen/page.h>
+
+#define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
+
+#define BALLOON_CLASS_NAME "memory"
+
+struct balloon_stats {
+       /* We aim for 'current allocation' == 'target allocation'. */
+       unsigned long current_pages;
+       unsigned long target_pages;
+       /* We may hit the hard limit in Xen. If we do then we remember it. */
+       unsigned long hard_limit;
+       /*
+        * Drivers may alter the memory reservation independently, but they
+        * must inform the balloon driver so we avoid hitting the hard limit.
+        */
+       unsigned long driver_pages;
+       /* Number of pages in high- and low-memory balloons. */
+       unsigned long balloon_low;
+       unsigned long balloon_high;
+};
+
+static DEFINE_MUTEX(balloon_mutex);
+
+static struct sys_device balloon_sysdev;
+
+static int register_balloon(struct sys_device *sysdev);
+
+/*
+ * Protects atomic reservation decrease/increase against concurrent increases.
+ * Also protects non-atomic updates of current_pages and driver_pages, and
+ * balloon lists.
+ */
+static DEFINE_SPINLOCK(balloon_lock);
+
+static struct balloon_stats balloon_stats;
+
+/* We increase/decrease in batches which fit in a page */
+static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)];
+
+/* VM /proc information for memory */
+extern unsigned long totalram_pages;
+
+#ifdef CONFIG_HIGHMEM
+extern unsigned long totalhigh_pages;
+#define inc_totalhigh_pages() (totalhigh_pages++)
+#define dec_totalhigh_pages() (totalhigh_pages--)
+#else
+#define inc_totalhigh_pages() do {} while(0)
+#define dec_totalhigh_pages() do {} while(0)
+#endif
+
+/* List of ballooned pages, threaded through the mem_map array. */
+static LIST_HEAD(ballooned_pages);
+
+/* Main work function, always executed in process context. */
+static void balloon_process(struct work_struct *work);
+static DECLARE_WORK(balloon_worker, balloon_process);
+static struct timer_list balloon_timer;
+
+/* When ballooning out (allocating memory to return to Xen) we don't really
+   want the kernel to try too hard since that can trigger the oom killer. */
+#define GFP_BALLOON \
+       (GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)
+
+static void scrub_page(struct page *page)
+{
+#ifdef CONFIG_XEN_SCRUB_PAGES
+       if (PageHighMem(page)) {
+               void *v = kmap(page);
+               clear_page(v);
+               kunmap(v);
+       } else {
+               void *v = page_address(page);
+               clear_page(v);
+       }
+#endif
+}
+
+/* balloon_append: add the given page to the balloon. */
+static void balloon_append(struct page *page)
+{
+       /* Lowmem is re-populated first, so highmem pages go at list tail. */
+       if (PageHighMem(page)) {
+               list_add_tail(&page->lru, &ballooned_pages);
+               balloon_stats.balloon_high++;
+               dec_totalhigh_pages();
+       } else {
+               list_add(&page->lru, &ballooned_pages);
+               balloon_stats.balloon_low++;
+       }
+}
+
+/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
+static struct page *balloon_retrieve(void)
+{
+       struct page *page;
+
+       if (list_empty(&ballooned_pages))
+               return NULL;
+
+       page = list_entry(ballooned_pages.next, struct page, lru);
+       list_del(&page->lru);
+
+       if (PageHighMem(page)) {
+               balloon_stats.balloon_high--;
+               inc_totalhigh_pages();
+       }
+       else
+               balloon_stats.balloon_low--;
+
+       return page;
+}
+
+static struct page *balloon_first_page(void)
+{
+       if (list_empty(&ballooned_pages))
+               return NULL;
+       return list_entry(ballooned_pages.next, struct page, lru);
+}
+
+static struct page *balloon_next_page(struct page *page)
+{
+       struct list_head *next = page->lru.next;
+       if (next == &ballooned_pages)
+               return NULL;
+       return list_entry(next, struct page, lru);
+}
+
+static void balloon_alarm(unsigned long unused)
+{
+       schedule_work(&balloon_worker);
+}
+
+static unsigned long current_target(void)
+{
+       unsigned long target = min(balloon_stats.target_pages, balloon_stats.hard_limit);
+
+       target = min(target,
+                    balloon_stats.current_pages +
+                    balloon_stats.balloon_low +
+                    balloon_stats.balloon_high);
+
+       return target;
+}
+
+static int increase_reservation(unsigned long nr_pages)
+{
+       unsigned long  pfn, i, flags;
+       struct page   *page;
+       long           rc;
+       struct xen_memory_reservation reservation = {
+               .address_bits = 0,
+               .extent_order = 0,
+               .domid        = DOMID_SELF
+       };
+
+       if (nr_pages > ARRAY_SIZE(frame_list))
+               nr_pages = ARRAY_SIZE(frame_list);
+
+       spin_lock_irqsave(&balloon_lock, flags);
+
+       page = balloon_first_page();
+       for (i = 0; i < nr_pages; i++) {
+               BUG_ON(page == NULL);
+               frame_list[i] = page_to_pfn(page);
+               page = balloon_next_page(page);
+       }
+
+       reservation.extent_start = (unsigned long)frame_list;
+       reservation.nr_extents   = nr_pages;
+       rc = HYPERVISOR_memory_op(
+               XENMEM_populate_physmap, &reservation);
+       if (rc < nr_pages) {
+               if (rc > 0) {
+                       int ret;
+
+                       /* We hit the Xen hard limit: reprobe. */
+                       reservation.nr_extents = rc;
+                       ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
+                                       &reservation);
+                       BUG_ON(ret != rc);
+               }
+               if (rc >= 0)
+                       balloon_stats.hard_limit = (balloon_stats.current_pages + rc -
+                                                   balloon_stats.driver_pages);
+               goto out;
+       }
+
+       for (i = 0; i < nr_pages; i++) {
+               page = balloon_retrieve();
+               BUG_ON(page == NULL);
+
+               pfn = page_to_pfn(page);
+               BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) &&
+                      phys_to_machine_mapping_valid(pfn));
+
+               set_phys_to_machine(pfn, frame_list[i]);
+
+               /* Link back into the page tables if not highmem. */
+               if (pfn < max_low_pfn) {
+                       int ret;
+                       ret = HYPERVISOR_update_va_mapping(
+                               (unsigned long)__va(pfn << PAGE_SHIFT),
+                               mfn_pte(frame_list[i], PAGE_KERNEL),
+                               0);
+                       BUG_ON(ret);
+               }
+
+               /* Relinquish the page back to the allocator. */
+               ClearPageReserved(page);
+               init_page_count(page);
+               __free_page(page);
+       }
+
+       balloon_stats.current_pages += nr_pages;
+       totalram_pages = balloon_stats.current_pages;
+
+ out:
+       spin_unlock_irqrestore(&balloon_lock, flags);
+
+       return 0;
+}
+
+static int decrease_reservation(unsigned long nr_pages)
+{
+       unsigned long  pfn, i, flags;
+       struct page   *page;
+       int            need_sleep = 0;
+       int ret;
+       struct xen_memory_reservation reservation = {
+               .address_bits = 0,
+               .extent_order = 0,
+               .domid        = DOMID_SELF
+       };
+
+       if (nr_pages > ARRAY_SIZE(frame_list))
+               nr_pages = ARRAY_SIZE(frame_list);
+
+       for (i = 0; i < nr_pages; i++) {
+               if ((page = alloc_page(GFP_BALLOON)) == NULL) {
+                       nr_pages = i;
+                       need_sleep = 1;
+                       break;
+               }
+
+               pfn = page_to_pfn(page);
+               frame_list[i] = pfn_to_mfn(pfn);
+
+               scrub_page(page);
+       }
+
+       /* Ensure that ballooned highmem pages don't have kmaps. */
+       kmap_flush_unused();
+       flush_tlb_all();
+
+       spin_lock_irqsave(&balloon_lock, flags);
+
+       /* No more mappings: invalidate P2M and add to balloon. */
+       for (i = 0; i < nr_pages; i++) {
+               pfn = mfn_to_pfn(frame_list[i]);
+               set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+               balloon_append(pfn_to_page(pfn));
+       }
+
+       reservation.extent_start = (unsigned long)frame_list;
+       reservation.nr_extents   = nr_pages;
+       ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
+       BUG_ON(ret != nr_pages);
+
+       balloon_stats.current_pages -= nr_pages;
+       totalram_pages = balloon_stats.current_pages;
+
+       spin_unlock_irqrestore(&balloon_lock, flags);
+
+       return need_sleep;
+}
+
+/*
+ * We avoid multiple worker processes conflicting via the balloon mutex.
+ * We may of course race updates of the target counts (which are protected
+ * by the balloon lock), or with changes to the Xen hard limit, but we will
+ * recover from these in time.
+ */
+static void balloon_process(struct work_struct *work)
+{
+       int need_sleep = 0;
+       long credit;
+
+       mutex_lock(&balloon_mutex);
+
+       do {
+               credit = current_target() - balloon_stats.current_pages;
+               if (credit > 0)
+                       need_sleep = (increase_reservation(credit) != 0);
+               if (credit < 0)
+                       need_sleep = (decrease_reservation(-credit) != 0);
+
+#ifndef CONFIG_PREEMPT
+               if (need_resched())
+                       schedule();
+#endif
+       } while ((credit != 0) && !need_sleep);
+
+       /* Schedule more work if there is some still to be done. */
+       if (current_target() != balloon_stats.current_pages)
+               mod_timer(&balloon_timer, jiffies + HZ);
+
+       mutex_unlock(&balloon_mutex);
+}
+
+/* Resets the Xen limit, sets new target, and kicks off processing. */
+void balloon_set_new_target(unsigned long target)
+{
+       /* No lock needed: these are not read-modify-write updates. */
+       balloon_stats.hard_limit   = ~0UL;
+       balloon_stats.target_pages = target;
+       schedule_work(&balloon_worker);
+}
+
+static struct xenbus_watch target_watch =
+{
+       .node = "memory/target"
+};
+
+/* React to a change in the target key */
+static void watch_target(struct xenbus_watch *watch,
+                        const char **vec, unsigned int len)
+{
+       unsigned long long new_target;
+       int err;
+
+       err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target);
+       if (err != 1) {
+               /* This is ok (for domain0 at least) - so just return */
+               return;
+       }
+
+       /* The given memory/target value is in KiB, so it needs converting to
+        * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10.
+        */
+       balloon_set_new_target(new_target >> (PAGE_SHIFT - 10));
+}
+
+static int balloon_init_watcher(struct notifier_block *notifier,
+                               unsigned long event,
+                               void *data)
+{
+       int err;
+
+       err = register_xenbus_watch(&target_watch);
+       if (err)
+               printk(KERN_ERR "Failed to set balloon watcher\n");
+
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block xenstore_notifier;
+
+static int __init balloon_init(void)
+{
+       unsigned long pfn;
+       struct page *page;
+
+       if (!is_running_on_xen())
+               return -ENODEV;
+
+       pr_info("xen_balloon: Initialising balloon driver.\n");
+
+       balloon_stats.current_pages = min(xen_start_info->nr_pages, max_pfn);
+       totalram_pages   = balloon_stats.current_pages;
+       balloon_stats.target_pages  = balloon_stats.current_pages;
+       balloon_stats.balloon_low   = 0;
+       balloon_stats.balloon_high  = 0;
+       balloon_stats.driver_pages  = 0UL;
+       balloon_stats.hard_limit    = ~0UL;
+
+       init_timer(&balloon_timer);
+       balloon_timer.data = 0;
+       balloon_timer.function = balloon_alarm;
+
+       register_balloon(&balloon_sysdev);
+
+       /* Initialise the balloon with excess memory space. */
+       for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) {
+               page = pfn_to_page(pfn);
+               if (!PageReserved(page))
+                       balloon_append(page);
+       }
+
+       target_watch.callback = watch_target;
+       xenstore_notifier.notifier_call = balloon_init_watcher;
+
+       register_xenstore_notifier(&xenstore_notifier);
+
+       return 0;
+}
+
+subsys_initcall(balloon_init);
+
+static void balloon_exit(void)
+{
+       /* XXX - release balloon here */
+       return;
+}
+
+module_exit(balloon_exit);
+
+static void balloon_update_driver_allowance(long delta)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&balloon_lock, flags);
+       balloon_stats.driver_pages += delta;
+       spin_unlock_irqrestore(&balloon_lock, flags);
+}
+
+static int dealloc_pte_fn(
+       pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
+{
+       unsigned long mfn = pte_mfn(*pte);
+       int ret;
+       struct xen_memory_reservation reservation = {
+               .nr_extents   = 1,
+               .extent_order = 0,
+               .domid        = DOMID_SELF
+       };
+       reservation.extent_start = (unsigned long)&mfn;
+       set_pte_at(&init_mm, addr, pte, __pte_ma(0ull));
+       set_phys_to_machine(__pa(addr) >> PAGE_SHIFT, INVALID_P2M_ENTRY);
+       ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
+       BUG_ON(ret != 1);
+       return 0;
+}
+
+static struct page **alloc_empty_pages_and_pagevec(int nr_pages)
+{
+       unsigned long vaddr, flags;
+       struct page *page, **pagevec;
+       int i, ret;
+
+       pagevec = kmalloc(sizeof(page) * nr_pages, GFP_KERNEL);
+       if (pagevec == NULL)
+               return NULL;
+
+       for (i = 0; i < nr_pages; i++) {
+               page = pagevec[i] = alloc_page(GFP_KERNEL);
+               if (page == NULL)
+                       goto err;
+
+               vaddr = (unsigned long)page_address(page);
+
+               scrub_page(page);
+
+               spin_lock_irqsave(&balloon_lock, flags);
+
+               if (xen_feature(XENFEAT_auto_translated_physmap)) {
+                       unsigned long gmfn = page_to_pfn(page);
+                       struct xen_memory_reservation reservation = {
+                               .nr_extents   = 1,
+                               .extent_order = 0,
+                               .domid        = DOMID_SELF
+                       };
+                       reservation.extent_start = (unsigned long)&gmfn;
+                       ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
+                                                  &reservation);
+                       if (ret == 1)
+                               ret = 0; /* success */
+               } else {
+                       ret = apply_to_page_range(&init_mm, vaddr, PAGE_SIZE,
+                                                 dealloc_pte_fn, NULL);
+               }
+
+               if (ret != 0) {
+                       spin_unlock_irqrestore(&balloon_lock, flags);
+                       __free_page(page);
+                       goto err;
+               }
+
+               totalram_pages = --balloon_stats.current_pages;
+
+               spin_unlock_irqrestore(&balloon_lock, flags);
+       }
+
+ out:
+       schedule_work(&balloon_worker);
+       flush_tlb_all();
+       return pagevec;
+
+ err:
+       spin_lock_irqsave(&balloon_lock, flags);
+       while (--i >= 0)
+               balloon_append(pagevec[i]);
+       spin_unlock_irqrestore(&balloon_lock, flags);
+       kfree(pagevec);
+       pagevec = NULL;
+       goto out;
+}
+
+static void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages)
+{
+       unsigned long flags;
+       int i;
+
+       if (pagevec == NULL)
+               return;
+
+       spin_lock_irqsave(&balloon_lock, flags);
+       for (i = 0; i < nr_pages; i++) {
+               BUG_ON(page_count(pagevec[i]) != 1);
+               balloon_append(pagevec[i]);
+       }
+       spin_unlock_irqrestore(&balloon_lock, flags);
+
+       kfree(pagevec);
+
+       schedule_work(&balloon_worker);
+}
+
+static void balloon_release_driver_page(struct page *page)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&balloon_lock, flags);
+       balloon_append(page);
+       balloon_stats.driver_pages--;
+       spin_unlock_irqrestore(&balloon_lock, flags);
+
+       schedule_work(&balloon_worker);
+}
+
+
+#define BALLOON_SHOW(name, format, args...)                    \
+       static ssize_t show_##name(struct sys_device *dev,      \
+                                  char *buf)                   \
+       {                                                       \
+               return sprintf(buf, format, ##args);            \
+       }                                                       \
+       static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL)
+
+BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(balloon_stats.current_pages));
+BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_low));
+BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_high));
+BALLOON_SHOW(hard_limit_kb,
+            (balloon_stats.hard_limit!=~0UL) ? "%lu\n" : "???\n",
+            (balloon_stats.hard_limit!=~0UL) ? PAGES2KB(balloon_stats.hard_limit) : 0);
+BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(balloon_stats.driver_pages));
+
+static ssize_t show_target_kb(struct sys_device *dev, char *buf)
+{
+       return sprintf(buf, "%lu\n", PAGES2KB(balloon_stats.target_pages));
+}
+
+static ssize_t store_target_kb(struct sys_device *dev,
+                              const char *buf,
+                              size_t count)
+{
+       char memstring[64], *endchar;
+       unsigned long long target_bytes;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       if (count <= 1)
+               return -EBADMSG; /* runt */
+       if (count > sizeof(memstring))
+               return -EFBIG;   /* too long */
+       strcpy(memstring, buf);
+
+       target_bytes = memparse(memstring, &endchar);
+       balloon_set_new_target(target_bytes >> PAGE_SHIFT);
+
+       return count;
+}
+
+static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR,
+                  show_target_kb, store_target_kb);
+
+static struct sysdev_attribute *balloon_attrs[] = {
+       &attr_target_kb,
+};
+
+static struct attribute *balloon_info_attrs[] = {
+       &attr_current_kb.attr,
+       &attr_low_kb.attr,
+       &attr_high_kb.attr,
+       &attr_hard_limit_kb.attr,
+       &attr_driver_kb.attr,
+       NULL
+};
+
+static struct attribute_group balloon_info_group = {
+       .name = "info",
+       .attrs = balloon_info_attrs,
+};
+
+static struct sysdev_class balloon_sysdev_class = {
+       .name = BALLOON_CLASS_NAME,
+};
+
+static int register_balloon(struct sys_device *sysdev)
+{
+       int i, error;
+
+       error = sysdev_class_register(&balloon_sysdev_class);
+       if (error)
+               return error;
+
+       sysdev->id = 0;
+       sysdev->cls = &balloon_sysdev_class;
+
+       error = sysdev_register(sysdev);
+       if (error) {
+               sysdev_class_unregister(&balloon_sysdev_class);
+               return error;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) {
+               error = sysdev_create_file(sysdev, balloon_attrs[i]);
+               if (error)
+                       goto fail;
+       }
+
+       error = sysfs_create_group(&sysdev->kobj, &balloon_info_group);
+       if (error)
+               goto fail;
+
+       return 0;
+
+ fail:
+       while (--i >= 0)
+               sysdev_remove_file(sysdev, balloon_attrs[i]);
+       sysdev_unregister(sysdev);
+       sysdev_class_unregister(&balloon_sysdev_class);
+       return error;
+}
+
+static void unregister_balloon(struct sys_device *sysdev)
+{
+       int i;
+
+       sysfs_remove_group(&sysdev->kobj, &balloon_info_group);
+       for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++)
+               sysdev_remove_file(sysdev, balloon_attrs[i]);
+       sysdev_unregister(sysdev);
+       sysdev_class_unregister(&balloon_sysdev_class);
+}
+
+static void balloon_sysfs_exit(void)
+{
+       unregister_balloon(&balloon_sysdev);
+}
+
+MODULE_LICENSE("GPL");
similarity index 82%
rename from arch/x86/xen/events.c
rename to drivers/xen/events.c
index dcf613e17581448926794286ac9c1fad05011857..4f0f22b020ea4aa192dbab56cba76edd6076a837 100644 (file)
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
 
+#include <xen/xen-ops.h>
 #include <xen/events.h>
 #include <xen/interface/xen.h>
 #include <xen/interface/event_channel.h>
 
-#include "xen-ops.h"
-
 /*
  * This lock protects updates to the following mapping and reference-count
  * arrays. The lock does not need to be acquired to read the mapping tables.
@@ -455,6 +454,53 @@ void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
        notify_remote_via_irq(irq);
 }
 
+irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
+{
+       struct shared_info *sh = HYPERVISOR_shared_info;
+       int cpu = smp_processor_id();
+       int i;
+       unsigned long flags;
+       static DEFINE_SPINLOCK(debug_lock);
+
+       spin_lock_irqsave(&debug_lock, flags);
+
+       printk("vcpu %d\n  ", cpu);
+
+       for_each_online_cpu(i) {
+               struct vcpu_info *v = per_cpu(xen_vcpu, i);
+               printk("%d: masked=%d pending=%d event_sel %08lx\n  ", i,
+                       (get_irq_regs() && i == cpu) ? xen_irqs_disabled(get_irq_regs()) : v->evtchn_upcall_mask,
+                       v->evtchn_upcall_pending,
+                       v->evtchn_pending_sel);
+       }
+       printk("pending:\n   ");
+       for(i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
+               printk("%08lx%s", sh->evtchn_pending[i],
+                       i % 8 == 0 ? "\n   " : " ");
+       printk("\nmasks:\n   ");
+       for(i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
+               printk("%08lx%s", sh->evtchn_mask[i],
+                       i % 8 == 0 ? "\n   " : " ");
+
+       printk("\nunmasked:\n   ");
+       for(i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
+               printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
+                       i % 8 == 0 ? "\n   " : " ");
+
+       printk("\npending list:\n");
+       for(i = 0; i < NR_EVENT_CHANNELS; i++) {
+               if (sync_test_bit(i, sh->evtchn_pending)) {
+                       printk("  %d: event %d -> irq %d\n",
+                               cpu_evtchn[i], i,
+                               evtchn_to_irq[i]);
+               }
+       }
+
+       spin_unlock_irqrestore(&debug_lock, flags);
+
+       return IRQ_HANDLED;
+}
+
 
 /*
  * Search the CPUs pending events bitmasks.  For each one found, map
@@ -470,29 +516,44 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
        int cpu = get_cpu();
        struct shared_info *s = HYPERVISOR_shared_info;
        struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
-       unsigned long pending_words;
+       static DEFINE_PER_CPU(unsigned, nesting_count);
+       unsigned count;
 
-       vcpu_info->evtchn_upcall_pending = 0;
+       do {
+               unsigned long pending_words;
 
-       /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
-       pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
-       while (pending_words != 0) {
-               unsigned long pending_bits;
-               int word_idx = __ffs(pending_words);
-               pending_words &= ~(1UL << word_idx);
+               vcpu_info->evtchn_upcall_pending = 0;
 
-               while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
-                       int bit_idx = __ffs(pending_bits);
-                       int port = (word_idx * BITS_PER_LONG) + bit_idx;
-                       int irq = evtchn_to_irq[port];
+               if (__get_cpu_var(nesting_count)++)
+                       goto out;
 
-                       if (irq != -1) {
-                               regs->orig_ax = ~irq;
-                               do_IRQ(regs);
+#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
+               /* Clear master flag /before/ clearing selector flag. */
+               rmb();
+#endif
+               pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
+               while (pending_words != 0) {
+                       unsigned long pending_bits;
+                       int word_idx = __ffs(pending_words);
+                       pending_words &= ~(1UL << word_idx);
+
+                       while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
+                               int bit_idx = __ffs(pending_bits);
+                               int port = (word_idx * BITS_PER_LONG) + bit_idx;
+                               int irq = evtchn_to_irq[port];
+
+                               if (irq != -1)
+                                       xen_do_IRQ(irq, regs);
                        }
                }
-       }
 
+               BUG_ON(!irqs_disabled());
+
+               count = __get_cpu_var(nesting_count);
+               __get_cpu_var(nesting_count) = 0;
+       } while(count != 1);
+
+out:
        put_cpu();
 }
 
@@ -525,6 +586,22 @@ static void set_affinity_irq(unsigned irq, cpumask_t dest)
        rebind_irq_to_cpu(irq, tcpu);
 }
 
+int resend_irq_on_evtchn(unsigned int irq)
+{
+       int masked, evtchn = evtchn_from_irq(irq);
+       struct shared_info *s = HYPERVISOR_shared_info;
+
+       if (!VALID_EVTCHN(evtchn))
+               return 1;
+
+       masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
+       sync_set_bit(evtchn, s->evtchn_pending);
+       if (!masked)
+               unmask_evtchn(evtchn);
+
+       return 1;
+}
+
 static void enable_dynirq(unsigned int irq)
 {
        int evtchn = evtchn_from_irq(irq);
@@ -554,10 +631,16 @@ static void ack_dynirq(unsigned int irq)
 static int retrigger_dynirq(unsigned int irq)
 {
        int evtchn = evtchn_from_irq(irq);
+       struct shared_info *sh = HYPERVISOR_shared_info;
        int ret = 0;
 
        if (VALID_EVTCHN(evtchn)) {
-               set_evtchn(evtchn);
+               int masked;
+
+               masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
+               sync_set_bit(evtchn, sh->evtchn_pending);
+               if (!masked)
+                       unmask_evtchn(evtchn);
                ret = 1;
        }
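
The reworked xen_evtchn_do_upcall() above uses a per-CPU nesting counter so that an upcall arriving while one is already being handled only records the fact and returns; the outer invocation then loops until no nested upcall happened. A simplified single-threaded sketch of that pattern (hypothetical names, user-space C):

#include <stdio.h>

static unsigned int nesting_count;      /* per-CPU in the real driver */

static void process_pending_events(void)
{
	printf("processing pending events\n");
}

static void upcall(void)
{
	unsigned int count;

	do {
		if (nesting_count++)    /* nested entry: note it and leave */
			return;

		process_pending_events();

		count = nesting_count;  /* greater than 1 if a nested upcall arrived */
		nesting_count = 0;
	} while (count != 1);           /* rescan if more work was flagged */
}

int main(void)
{
	upcall();
	return 0;
}
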
 
index d85dc6d41c2aee930f79f08cb31ebecb406e948c..52b6b41b909de5a142fc5eee2c96255e900c6039 100644 (file)
@@ -439,24 +439,6 @@ static inline unsigned int max_nr_grant_frames(void)
        return xen_max;
 }
 
-static int map_pte_fn(pte_t *pte, struct page *pmd_page,
-                     unsigned long addr, void *data)
-{
-       unsigned long **frames = (unsigned long **)data;
-
-       set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL));
-       (*frames)++;
-       return 0;
-}
-
-static int unmap_pte_fn(pte_t *pte, struct page *pmd_page,
-                       unsigned long addr, void *data)
-{
-
-       set_pte_at(&init_mm, addr, pte, __pte(0));
-       return 0;
-}
-
 static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
 {
        struct gnttab_setup_table setup;
@@ -470,7 +452,7 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
 
        setup.dom        = DOMID_SELF;
        setup.nr_frames  = nr_gframes;
-       setup.frame_list = frames;
+       set_xen_guest_handle(setup.frame_list, frames);
 
        rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
        if (rc == -ENOSYS) {
@@ -480,17 +462,9 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
 
        BUG_ON(rc || setup.status);
 
-       if (shared == NULL) {
-               struct vm_struct *area;
-               area = alloc_vm_area(PAGE_SIZE * max_nr_grant_frames());
-               BUG_ON(area == NULL);
-               shared = area->addr;
-       }
-       rc = apply_to_page_range(&init_mm, (unsigned long)shared,
-                                PAGE_SIZE * nr_gframes,
-                                map_pte_fn, &frames);
+       rc = arch_gnttab_map_shared(frames, nr_gframes, max_nr_grant_frames(),
+                                   &shared);
        BUG_ON(rc);
-       frames -= nr_gframes; /* adjust after map_pte_fn() */
 
        kfree(frames);
 
@@ -506,10 +480,7 @@ static int gnttab_resume(void)
 
 static int gnttab_suspend(void)
 {
-       apply_to_page_range(&init_mm, (unsigned long)shared,
-                           PAGE_SIZE * nr_grant_frames,
-                           unmap_pte_fn, NULL);
-
+       arch_gnttab_unmap_shared(shared, nr_grant_frames);
        return 0;
 }
 
index 9fd2f70ab46d873af33fdc1e6f14f0e9f2fd9826..0f86b0ff78796517838399d956e90577924f56e5 100644 (file)
@@ -399,7 +399,7 @@ int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
 
        *vaddr = NULL;
 
-       area = alloc_vm_area(PAGE_SIZE);
+       area = xen_alloc_vm_area(PAGE_SIZE);
        if (!area)
                return -ENOMEM;
 
@@ -409,7 +409,7 @@ int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
                BUG();
 
        if (op.status != GNTST_okay) {
-               free_vm_area(area);
+               xen_free_vm_area(area);
                xenbus_dev_fatal(dev, op.status,
                                 "mapping in shared page %d from domain %d",
                                 gnt_ref, dev->otherend_id);
@@ -508,7 +508,7 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
                BUG();
 
        if (op.status == GNTST_okay)
-               free_vm_area(area);
+               xen_free_vm_area(area);
        else
                xenbus_dev_error(dev, op.status,
                                 "unmapping page at handle %d error %d",
index 4750de316ad36fbde50524d81beac8f10cfd424f..57ceb5346b749338e153ccdbeaf4bf5ceb09c19f 100644 (file)
@@ -88,6 +88,16 @@ int xenbus_match(struct device *_dev, struct device_driver *_drv)
        return match_device(drv->ids, to_xenbus_device(_dev)) != NULL;
 }
 
+static int xenbus_uevent(struct device *_dev, struct kobj_uevent_env *env)
+{
+       struct xenbus_device *dev = to_xenbus_device(_dev);
+
+       if (add_uevent_var(env, "MODALIAS=xen:%s", dev->devicetype))
+               return -ENOMEM;
+
+       return 0;
+}
+
 /* device/<type>/<id> => <type>-<id> */
 static int frontend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename)
 {
@@ -166,6 +176,7 @@ static struct xen_bus_type xenbus_frontend = {
        .bus = {
                .name     = "xen",
                .match    = xenbus_match,
+               .uevent   = xenbus_uevent,
                .probe    = xenbus_dev_probe,
                .remove   = xenbus_dev_remove,
                .shutdown = xenbus_dev_shutdown,
@@ -438,6 +449,12 @@ static ssize_t xendev_show_devtype(struct device *dev,
 }
 DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL);
 
+static ssize_t xendev_show_modalias(struct device *dev,
+                                   struct device_attribute *attr, char *buf)
+{
+       return sprintf(buf, "xen:%s\n", to_xenbus_device(dev)->devicetype);
+}
+DEVICE_ATTR(modalias, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_modalias, NULL);
 
 int xenbus_probe_node(struct xen_bus_type *bus,
                      const char *type,
@@ -492,10 +509,16 @@ int xenbus_probe_node(struct xen_bus_type *bus,
 
        err = device_create_file(&xendev->dev, &dev_attr_devtype);
        if (err)
-               goto fail_remove_file;
+               goto fail_remove_nodename;
+
+       err = device_create_file(&xendev->dev, &dev_attr_modalias);
+       if (err)
+               goto fail_remove_devtype;
 
        return 0;
-fail_remove_file:
+fail_remove_devtype:
+       device_remove_file(&xendev->dev, &dev_attr_devtype);
+fail_remove_nodename:
        device_remove_file(&xendev->dev, &dev_attr_nodename);
 fail_unregister:
        device_unregister(&xendev->dev);
@@ -846,6 +869,7 @@ static int is_disconnected_device(struct device *dev, void *data)
 {
        struct xenbus_device *xendev = to_xenbus_device(dev);
        struct device_driver *drv = data;
+       struct xenbus_driver *xendrv;
 
        /*
         * A device with no driver will never connect. We care only about
@@ -858,7 +882,9 @@ static int is_disconnected_device(struct device *dev, void *data)
        if (drv && (dev->driver != drv))
                return 0;
 
-       return (xendev->state != XenbusStateConnected);
+       xendrv = to_xenbus_driver(dev->driver);
+       return (xendev->state != XenbusStateConnected ||
+               (xendrv->is_ready && !xendrv->is_ready(xendev)));
 }
 
 static int exists_disconnected_device(struct device_driver *drv)
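
The uevent and modalias hooks above expose every xenbus device as the string "xen:<devicetype>", which modprobe can match against a driver's MODULE_ALIAS (the diff adds "xen:vif" to xen-netfront further up, for instance). A toy user-space sketch of that string match, with hypothetical helper names:

#include <stdio.h>
#include <string.h>

/* Build the MODALIAS string the bus would report for a device type. */
static void make_modalias(char *buf, size_t len, const char *devicetype)
{
	snprintf(buf, len, "xen:%s", devicetype);
}

int main(void)
{
	const char *driver_alias = "xen:vif";   /* as in MODULE_ALIAS("xen:vif") */
	char modalias[32];

	make_modalias(modalias, sizeof(modalias), "vif");
	printf("match=%d\n", strcmp(modalias, driver_alias) == 0);      /* match=1 */
	return 0;
}
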
diff --git a/drivers/xen/xencomm.c b/drivers/xen/xencomm.c
new file mode 100644 (file)
index 0000000..797cb4e
--- /dev/null
@@ -0,0 +1,232 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ *
+ * Copyright (C) IBM Corp. 2006
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#include <linux/gfp.h>
+#include <linux/mm.h>
+#include <asm/page.h>
+#include <xen/xencomm.h>
+#include <xen/interface/xen.h>
+#ifdef __ia64__
+#include <asm/xen/xencomm.h>   /* for is_kern_addr() */
+#endif
+
+#ifdef HAVE_XEN_PLATFORM_COMPAT_H
+#include <xen/platform-compat.h>
+#endif
+
+static int xencomm_init(struct xencomm_desc *desc,
+                       void *buffer, unsigned long bytes)
+{
+       unsigned long recorded = 0;
+       int i = 0;
+
+       while ((recorded < bytes) && (i < desc->nr_addrs)) {
+               unsigned long vaddr = (unsigned long)buffer + recorded;
+               unsigned long paddr;
+               int offset;
+               int chunksz;
+
+               offset = vaddr % PAGE_SIZE; /* handle partial pages */
+               chunksz = min(PAGE_SIZE - offset, bytes - recorded);
+
+               paddr = xencomm_vtop(vaddr);
+               if (paddr == ~0UL) {
+                       printk(KERN_DEBUG "%s: couldn't translate vaddr %lx\n",
+                              __func__, vaddr);
+                       return -EINVAL;
+               }
+
+               desc->address[i++] = paddr;
+               recorded += chunksz;
+       }
+
+       if (recorded < bytes) {
+               printk(KERN_DEBUG
+                      "%s: could only translate %ld of %ld bytes\n",
+                      __func__, recorded, bytes);
+               return -ENOSPC;
+       }
+
+       /* mark remaining addresses invalid (just for safety) */
+       while (i < desc->nr_addrs)
+               desc->address[i++] = XENCOMM_INVALID;
+
+       desc->magic = XENCOMM_MAGIC;
+
+       return 0;
+}
+
+static struct xencomm_desc *xencomm_alloc(gfp_t gfp_mask,
+                                         void *buffer, unsigned long bytes)
+{
+       struct xencomm_desc *desc;
+       unsigned long buffer_ulong = (unsigned long)buffer;
+       unsigned long start = buffer_ulong & PAGE_MASK;
+       unsigned long end = (buffer_ulong + bytes) | ~PAGE_MASK;
+       unsigned long nr_addrs = (end - start + 1) >> PAGE_SHIFT;
+       unsigned long size = sizeof(*desc) +
+               sizeof(desc->address[0]) * nr_addrs;
+
+       /*
+        * slab allocator returns at least sizeof(void*) aligned pointer.
+        * When sizeof(*desc) > sizeof(void*), struct xencomm_desc might
+        * cross page boundary.
+        */
+       if (sizeof(*desc) > sizeof(void *)) {
+               unsigned long order = get_order(size);
+               desc = (struct xencomm_desc *)__get_free_pages(gfp_mask,
+                                                              order);
+               if (desc == NULL)
+                       return NULL;
+
+               desc->nr_addrs =
+                       ((PAGE_SIZE << order) - sizeof(struct xencomm_desc)) /
+                       sizeof(*desc->address);
+       } else {
+               desc = kmalloc(size, gfp_mask);
+               if (desc == NULL)
+                       return NULL;
+
+               desc->nr_addrs = nr_addrs;
+       }
+       return desc;
+}
+
+void xencomm_free(struct xencomm_handle *desc)
+{
+       if (desc && !((ulong)desc & XENCOMM_INLINE_FLAG)) {
+               struct xencomm_desc *desc__ = (struct xencomm_desc *)desc;
+               if (sizeof(*desc__) > sizeof(void *)) {
+                       unsigned long size = sizeof(*desc__) +
+                               sizeof(desc__->address[0]) * desc__->nr_addrs;
+                       unsigned long order = get_order(size);
+                       free_pages((unsigned long)__va(desc), order);
+               } else
+                       kfree(__va(desc));
+       }
+}
+
+static int xencomm_create(void *buffer, unsigned long bytes,
+                         struct xencomm_desc **ret, gfp_t gfp_mask)
+{
+       struct xencomm_desc *desc;
+       int rc;
+
+       pr_debug("%s: %p[%ld]\n", __func__, buffer, bytes);
+
+       if (bytes == 0) {
+               /* don't create a descriptor; Xen recognizes NULL. */
+               BUG_ON(buffer != NULL);
+               *ret = NULL;
+               return 0;
+       }
+
+       BUG_ON(buffer == NULL); /* 'bytes' is non-zero */
+
+       desc = xencomm_alloc(gfp_mask, buffer, bytes);
+       if (!desc) {
+               printk(KERN_DEBUG "%s failure\n", "xencomm_alloc");
+               return -ENOMEM;
+       }
+
+       rc = xencomm_init(desc, buffer, bytes);
+       if (rc) {
+               printk(KERN_DEBUG "%s failure: %d\n", "xencomm_init", rc);
+               xencomm_free((struct xencomm_handle *)__pa(desc));
+               return rc;
+       }
+
+       *ret = desc;
+       return 0;
+}
+
+/* check if memory address is within VMALLOC region  */
+static int is_phys_contiguous(unsigned long addr)
+{
+       if (!is_kernel_addr(addr))
+               return 0;
+
+       return (addr < VMALLOC_START) || (addr >= VMALLOC_END);
+}
+
+static struct xencomm_handle *xencomm_create_inline(void *ptr)
+{
+       unsigned long paddr;
+
+       BUG_ON(!is_phys_contiguous((unsigned long)ptr));
+
+       paddr = (unsigned long)xencomm_pa(ptr);
+       BUG_ON(paddr & XENCOMM_INLINE_FLAG);
+       return (struct xencomm_handle *)(paddr | XENCOMM_INLINE_FLAG);
+}
+
+/* "mini" routine, for stack-based communications: */
+static int xencomm_create_mini(void *buffer,
+       unsigned long bytes, struct xencomm_mini *xc_desc,
+       struct xencomm_desc **ret)
+{
+       int rc = 0;
+       struct xencomm_desc *desc;
+       BUG_ON(((unsigned long)xc_desc) % sizeof(*xc_desc) != 0);
+
+       desc = (void *)xc_desc;
+
+       desc->nr_addrs = XENCOMM_MINI_ADDRS;
+
+       rc = xencomm_init(desc, buffer, bytes);
+       if (!rc)
+               *ret = desc;
+
+       return rc;
+}
+
+struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes)
+{
+       int rc;
+       struct xencomm_desc *desc;
+
+       if (is_phys_contiguous((unsigned long)ptr))
+               return xencomm_create_inline(ptr);
+
+       rc = xencomm_create(ptr, bytes, &desc, GFP_KERNEL);
+
+       if (rc || desc == NULL)
+               return NULL;
+
+       return xencomm_pa(desc);
+}
+
+struct xencomm_handle *__xencomm_map_no_alloc(void *ptr, unsigned long bytes,
+                       struct xencomm_mini *xc_desc)
+{
+       int rc;
+       struct xencomm_desc *desc = NULL;
+
+       if (is_phys_contiguous((unsigned long)ptr))
+               return xencomm_create_inline(ptr);
+
+       rc = xencomm_create_mini(ptr, bytes, xc_desc,
+                               &desc);
+
+       if (rc)
+               return NULL;
+
+       return xencomm_pa(desc);
+}
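A caller typically wraps a kernel buffer with xencomm_map() (or the stack-based __xencomm_map_no_alloc() variant) only for the duration of a hypercall, then releases the descriptor. A usage sketch, assuming my_hypercall() stands in for an arch-specific hypercall that takes a xencomm handle:

    static int xencomm_map_example(void *buf, unsigned long len)
    {
            struct xencomm_handle *desc;
            int rc;

            desc = xencomm_map(buf, len);
            if (buf && !desc)               /* NULL is only valid for a NULL buffer */
                    return -ENOMEM;

            rc = my_hypercall(desc, len);   /* placeholder hypercall */

            xencomm_free(desc);             /* safe for NULL and inline handles */
            return rc;
    }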
index d14d5a4dc5ac6f9a956fc1653e04de94079470e7..3ea36554107fc7740558ac81daa0328c308b2903 100644 (file)
@@ -14,7 +14,7 @@ be fairly close.
        alloc_sem
        ---------
 
-The alloc_sem is a per-filesystem semaphore, used primarily to ensure
+The alloc_sem is a per-filesystem mutex, used primarily to ensure
 contiguous allocation of space on the medium. It is automatically
 obtained during space allocations (jffs2_reserve_space()) and freed
 upon write completion (jffs2_complete_reservation()). Note that
@@ -41,10 +41,10 @@ if the wbuf is currently holding any data is permitted, though.
 Ordering constraints: See f->sem.
 
 
-       File Semaphore f->sem
+       File Mutex f->sem
        ---------------------
 
-This is the JFFS2-internal equivalent of the inode semaphore i->i_sem.
+This is the JFFS2-internal equivalent of the inode mutex i->i_sem.
 It protects the contents of the jffs2_inode_info private inode data,
 including the linked list of node fragments (but see the notes below on
 erase_completion_lock), etc.
@@ -60,14 +60,14 @@ lead to deadlock, unless we played games with unlocking the i_sem
 before calling the space allocation functions.
 
 Instead of playing such games, we just have an extra internal
-semaphore, which is obtained by the garbage collection code and also
+mutex, which is obtained by the garbage collection code and also
 by the normal file system code _after_ allocation of space.
 
 Ordering constraints: 
 
        1. Never attempt to allocate space or lock alloc_sem with 
           any f->sem held.
-       2. Never attempt to lock two file semaphores in one thread.
+       2. Never attempt to lock two file mutexes in one thread.
           No ordering rules have been made for doing so.
 
 
@@ -86,8 +86,8 @@ a simple spin_lock() rather than spin_lock_bh().
 
 Note that the per-inode list of physical nodes (f->nodes) is a special
 case. Any changes to _valid_ nodes (i.e. ->flash_offset & 1 == 0) in
-the list are protected by the file semaphore f->sem. But the erase
-code may remove _obsolete_ nodes from the list while holding only the
+the list are protected by the file mutex f->sem. But the erase code
+may remove _obsolete_ nodes from the list while holding only the
 erase_completion_lock. So you can walk the list only while holding the
 erase_completion_lock, and can drop the lock temporarily mid-walk as
 long as the pointer you're holding is to a _valid_ node, not an
@@ -124,10 +124,10 @@ Ordering constraints:
        erase_free_sem
        --------------
 
-This semaphore is only used by the erase code which frees obsolete
-node references and the jffs2_garbage_collect_deletion_dirent()
-function. The latter function on NAND flash must read _obsolete_ nodes
-to determine whether the 'deletion dirent' under consideration can be
+This mutex is only used by the erase code which frees obsolete node
+references and the jffs2_garbage_collect_deletion_dirent() function.
+The latter function on NAND flash must read _obsolete_ nodes to
+determine whether the 'deletion dirent' under consideration can be
 discarded or whether it is still required to show that an inode has
 been unlinked. Because reading from the flash may sleep, the
 erase_completion_lock cannot be held, so an alternative, more
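The ordering constraints spelled out above boil down to: take the filesystem-wide alloc_sem before any per-inode f->sem, and never hold two f->sem at once. A minimal sketch of the legal order (the function itself is illustrative):

    static void jffs2_lock_order_sketch(struct jffs2_sb_info *c,
                                        struct jffs2_inode_info *f)
    {
            mutex_lock(&c->alloc_sem);      /* filesystem-wide, taken first */
            mutex_lock(&f->sem);            /* per-inode, taken second */

            /* ... work that needs both locks ... */

            mutex_unlock(&f->sem);
            mutex_unlock(&c->alloc_sem);
    }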
index 722a6b682951b8bcdecac8107171c2d06f5d60be..d58f845ccb85984666d2e6ef0d1d6d9339c10d67 100644 (file)
@@ -345,6 +345,7 @@ int jffs2_do_mount_fs(struct jffs2_sb_info *c)
        INIT_LIST_HEAD(&c->dirty_list);
        INIT_LIST_HEAD(&c->erasable_list);
        INIT_LIST_HEAD(&c->erasing_list);
+       INIT_LIST_HEAD(&c->erase_checking_list);
        INIT_LIST_HEAD(&c->erase_pending_list);
        INIT_LIST_HEAD(&c->erasable_pending_wbuf_list);
        INIT_LIST_HEAD(&c->erase_complete_list);
index 3a32c64ed4975260079e7b9f3c435eecef18b534..5544d31c066be22fb25cc857df3691de8e420ea6 100644 (file)
@@ -62,9 +62,9 @@ __jffs2_dbg_acct_sanity_check(struct jffs2_sb_info *c,
 void
 __jffs2_dbg_fragtree_paranoia_check(struct jffs2_inode_info *f)
 {
-       down(&f->sem);
+       mutex_lock(&f->sem);
        __jffs2_dbg_fragtree_paranoia_check_nolock(f);
-       up(&f->sem);
+       mutex_unlock(&f->sem);
 }
 
 void
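From here on the patch mechanically converts the JFFS2 semaphores that were only ever used as mutexes into real mutexes: down() becomes mutex_lock(), up() becomes mutex_unlock(), and down_interruptible() becomes mutex_lock_interruptible(). A self-contained sketch of the replacement API (the lock and function names are illustrative):

    #include <linux/mutex.h>

    static DEFINE_MUTEX(example_lock);      /* or mutex_init(&lock) at runtime */

    static void example(void)
    {
            if (mutex_lock_interruptible(&example_lock))
                    return;                 /* signal arrived, like down_interruptible() */
            /* ... critical section ... */
            mutex_unlock(&example_lock);
    }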
@@ -153,6 +153,139 @@ __jffs2_dbg_prewrite_paranoia_check(struct jffs2_sb_info *c,
        kfree(buf);
 }
 
+void __jffs2_dbg_superblock_counts(struct jffs2_sb_info *c)
+{
+       struct jffs2_eraseblock *jeb;
+       uint32_t free = 0, dirty = 0, used = 0, wasted = 0,
+               erasing = 0, bad = 0, unchecked = 0;
+       int nr_counted = 0;
+       int dump = 0;
+
+       if (c->gcblock) {
+               nr_counted++;
+               free += c->gcblock->free_size;
+               dirty += c->gcblock->dirty_size;
+               used += c->gcblock->used_size;
+               wasted += c->gcblock->wasted_size;
+               unchecked += c->gcblock->unchecked_size;
+       }
+       if (c->nextblock) {
+               nr_counted++;
+               free += c->nextblock->free_size;
+               dirty += c->nextblock->dirty_size;
+               used += c->nextblock->used_size;
+               wasted += c->nextblock->wasted_size;
+               unchecked += c->nextblock->unchecked_size;
+       }
+       list_for_each_entry(jeb, &c->clean_list, list) {
+               nr_counted++;
+               free += jeb->free_size;
+               dirty += jeb->dirty_size;
+               used += jeb->used_size;
+               wasted += jeb->wasted_size;
+               unchecked += jeb->unchecked_size;
+       }
+       list_for_each_entry(jeb, &c->very_dirty_list, list) {
+               nr_counted++;
+               free += jeb->free_size;
+               dirty += jeb->dirty_size;
+               used += jeb->used_size;
+               wasted += jeb->wasted_size;
+               unchecked += jeb->unchecked_size;
+       }
+       list_for_each_entry(jeb, &c->dirty_list, list) {
+               nr_counted++;
+               free += jeb->free_size;
+               dirty += jeb->dirty_size;
+               used += jeb->used_size;
+               wasted += jeb->wasted_size;
+               unchecked += jeb->unchecked_size;
+       }
+       list_for_each_entry(jeb, &c->erasable_list, list) {
+               nr_counted++;
+               free += jeb->free_size;
+               dirty += jeb->dirty_size;
+               used += jeb->used_size;
+               wasted += jeb->wasted_size;
+               unchecked += jeb->unchecked_size;
+       }
+       list_for_each_entry(jeb, &c->erasable_pending_wbuf_list, list) {
+               nr_counted++;
+               free += jeb->free_size;
+               dirty += jeb->dirty_size;
+               used += jeb->used_size;
+               wasted += jeb->wasted_size;
+               unchecked += jeb->unchecked_size;
+       }
+       list_for_each_entry(jeb, &c->erase_pending_list, list) {
+               nr_counted++;
+               free += jeb->free_size;
+               dirty += jeb->dirty_size;
+               used += jeb->used_size;
+               wasted += jeb->wasted_size;
+               unchecked += jeb->unchecked_size;
+       }
+       list_for_each_entry(jeb, &c->free_list, list) {
+               nr_counted++;
+               free += jeb->free_size;
+               dirty += jeb->dirty_size;
+               used += jeb->used_size;
+               wasted += jeb->wasted_size;
+               unchecked += jeb->unchecked_size;
+       }
+       list_for_each_entry(jeb, &c->bad_used_list, list) {
+               nr_counted++;
+               free += jeb->free_size;
+               dirty += jeb->dirty_size;
+               used += jeb->used_size;
+               wasted += jeb->wasted_size;
+               unchecked += jeb->unchecked_size;
+       }
+
+       list_for_each_entry(jeb, &c->erasing_list, list) {
+               nr_counted++;
+               erasing += c->sector_size;
+       }
+       list_for_each_entry(jeb, &c->erase_checking_list, list) {
+               nr_counted++;
+               erasing += c->sector_size;
+       }
+       list_for_each_entry(jeb, &c->erase_complete_list, list) {
+               nr_counted++;
+               erasing += c->sector_size;
+       }
+       list_for_each_entry(jeb, &c->bad_list, list) {
+               nr_counted++;
+               bad += c->sector_size;
+       }
+
+#define check(sz) \
+       if (sz != c->sz##_size) {                       \
+               printk(KERN_WARNING #sz "_size mismatch counted 0x%x, c->" #sz "_size 0x%x\n", \
+                      sz, c->sz##_size);               \
+               dump = 1;                               \
+       }
+       check(free);
+       check(dirty);
+       check(used);
+       check(wasted);
+       check(unchecked);
+       check(bad);
+       check(erasing);
+#undef check
+
+       if (nr_counted != c->nr_blocks) {
+               printk(KERN_WARNING "%s counted only 0x%x blocks of 0x%x. Where are the others?\n",
+                      __func__, nr_counted, c->nr_blocks);
+               dump = 1;
+       }
+
+       if (dump) {
+               __jffs2_dbg_dump_block_lists_nolock(c);
+               BUG();
+       }
+}
+
 /*
  * Check the space accounting and node_ref list correctness for the JFFS2 erasable block 'jeb'.
  */
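The check() macro above uses stringizing and token pasting so that each invocation compares a local counter against the matching superblock field. check(free), for example, expands to roughly:

    if (free != c->free_size) {
            printk(KERN_WARNING "free_size mismatch counted 0x%x, c->free_size 0x%x\n",
                   free, c->free_size);
            dump = 1;
    }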
@@ -229,6 +362,9 @@ __jffs2_dbg_acct_paranoia_check_nolock(struct jffs2_sb_info *c,
        }
 #endif
 
+       if (!(c->flags & (JFFS2_SB_FLAG_BUILDING|JFFS2_SB_FLAG_SCANNING)))
+               __jffs2_dbg_superblock_counts(c);
+
        return;
 
 error:
@@ -268,7 +404,10 @@ __jffs2_dbg_dump_node_refs_nolock(struct jffs2_sb_info *c,
 
        printk(JFFS2_DBG);
        for (ref = jeb->first_node; ; ref = ref_next(ref)) {
-               printk("%#08x(%#x)", ref_offset(ref), ref->__totlen);
+               printk("%#08x", ref_offset(ref));
+#ifdef TEST_TOTLEN
+               printk("(%x)", ref->__totlen);
+#endif
                if (ref_next(ref))
                        printk("->");
                else
@@ -447,6 +586,21 @@ __jffs2_dbg_dump_block_lists_nolock(struct jffs2_sb_info *c)
                        }
                }
        }
+       if (list_empty(&c->erase_checking_list)) {
+               printk(JFFS2_DBG "erase_checking_list: empty\n");
+       } else {
+               struct list_head *this;
+
+               list_for_each(this, &c->erase_checking_list) {
+                       struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+
+                       if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) {
+                               printk(JFFS2_DBG "erase_checking_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n",
+                                       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size,
+                                       jeb->unchecked_size, jeb->free_size);
+                       }
+               }
+       }
 
        if (list_empty(&c->erase_pending_list)) {
                printk(JFFS2_DBG "erase_pending_list: empty\n");
@@ -532,9 +686,9 @@ __jffs2_dbg_dump_block_lists_nolock(struct jffs2_sb_info *c)
 void
 __jffs2_dbg_dump_fragtree(struct jffs2_inode_info *f)
 {
-       down(&f->sem);
+       mutex_lock(&f->sem);
        jffs2_dbg_dump_fragtree_nolock(f);
-       up(&f->sem);
+       mutex_unlock(&f->sem);
 }
 
 void
index 4130adabd76e6d9a9d496d1e6d176a43cb905783..9645275023e68c5b93f582a10d31d541a4a79884 100644 (file)
@@ -38,6 +38,7 @@
 
 #if CONFIG_JFFS2_FS_DEBUG > 1
 #define JFFS2_DBG_FRAGTREE2_MESSAGES
+#define JFFS2_DBG_READINODE2_MESSAGES
 #define JFFS2_DBG_MEMALLOC_MESSAGES
 #endif
 
 #else
 #define dbg_readinode(fmt, ...)
 #endif
+#ifdef JFFS2_DBG_READINODE2_MESSAGES
+#define dbg_readinode2(fmt, ...)       JFFS2_DEBUG(fmt, ##__VA_ARGS__)
+#else
+#define dbg_readinode2(fmt, ...)
+#endif
 
 /* Fragtree build debugging messages */
 #ifdef JFFS2_DBG_FRAGTREE_MESSAGES
index f948f7e6ec8202ab901d83b7fd5c62fd6ec41b27..c63e7a96af0dd025996c80b707b710213d7bdb1d 100644 (file)
@@ -86,7 +86,7 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target,
        dir_f = JFFS2_INODE_INFO(dir_i);
        c = JFFS2_SB_INFO(dir_i->i_sb);
 
-       down(&dir_f->sem);
+       mutex_lock(&dir_f->sem);
 
        /* NB: The 2.2 backport will need to explicitly check for '.' and '..' here */
        for (fd_list = dir_f->dents; fd_list && fd_list->nhash <= target->d_name.hash; fd_list = fd_list->next) {
@@ -99,7 +99,7 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target,
        }
        if (fd)
                ino = fd->ino;
-       up(&dir_f->sem);
+       mutex_unlock(&dir_f->sem);
        if (ino) {
                inode = jffs2_iget(dir_i->i_sb, ino);
                if (IS_ERR(inode)) {
@@ -146,7 +146,7 @@ static int jffs2_readdir(struct file *filp, void *dirent, filldir_t filldir)
        }
 
        curofs=1;
-       down(&f->sem);
+       mutex_lock(&f->sem);
        for (fd = f->dents; fd; fd = fd->next) {
 
                curofs++;
@@ -166,7 +166,7 @@ static int jffs2_readdir(struct file *filp, void *dirent, filldir_t filldir)
                        break;
                offset++;
        }
-       up(&f->sem);
+       mutex_unlock(&f->sem);
  out:
        filp->f_pos = offset;
        return 0;
@@ -275,9 +275,9 @@ static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct de
        ret = jffs2_do_link(c, dir_f, f->inocache->ino, type, dentry->d_name.name, dentry->d_name.len, now);
 
        if (!ret) {
-               down(&f->sem);
+               mutex_lock(&f->sem);
                old_dentry->d_inode->i_nlink = ++f->inocache->nlink;
-               up(&f->sem);
+               mutex_unlock(&f->sem);
                d_instantiate(dentry, old_dentry->d_inode);
                dir_i->i_mtime = dir_i->i_ctime = ITIME(now);
                atomic_inc(&old_dentry->d_inode->i_count);
@@ -351,7 +351,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
 
        if (IS_ERR(fn)) {
                /* Eeek. Wave bye bye */
-               up(&f->sem);
+               mutex_unlock(&f->sem);
                jffs2_complete_reservation(c);
                jffs2_clear_inode(inode);
                return PTR_ERR(fn);
@@ -361,7 +361,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
        f->target = kmalloc(targetlen + 1, GFP_KERNEL);
        if (!f->target) {
                printk(KERN_WARNING "Can't allocate %d bytes of memory\n", targetlen + 1);
-               up(&f->sem);
+               mutex_unlock(&f->sem);
                jffs2_complete_reservation(c);
                jffs2_clear_inode(inode);
                return -ENOMEM;
@@ -374,7 +374,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
           obsoleted by the first data write
        */
        f->metadata = fn;
-       up(&f->sem);
+       mutex_unlock(&f->sem);
 
        jffs2_complete_reservation(c);
 
@@ -406,7 +406,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
        }
 
        dir_f = JFFS2_INODE_INFO(dir_i);
-       down(&dir_f->sem);
+       mutex_lock(&dir_f->sem);
 
        rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
        rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
@@ -429,7 +429,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
                   as if it were the final unlink() */
                jffs2_complete_reservation(c);
                jffs2_free_raw_dirent(rd);
-               up(&dir_f->sem);
+               mutex_unlock(&dir_f->sem);
                jffs2_clear_inode(inode);
                return PTR_ERR(fd);
        }
@@ -442,7 +442,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
           one if necessary. */
        jffs2_add_fd_to_list(c, fd, &dir_f->dents);
 
-       up(&dir_f->sem);
+       mutex_unlock(&dir_f->sem);
        jffs2_complete_reservation(c);
 
        d_instantiate(dentry, inode);
@@ -507,7 +507,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
 
        if (IS_ERR(fn)) {
                /* Eeek. Wave bye bye */
-               up(&f->sem);
+               mutex_unlock(&f->sem);
                jffs2_complete_reservation(c);
                jffs2_clear_inode(inode);
                return PTR_ERR(fn);
@@ -516,7 +516,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
           obsoleted by the first data write
        */
        f->metadata = fn;
-       up(&f->sem);
+       mutex_unlock(&f->sem);
 
        jffs2_complete_reservation(c);
 
@@ -548,7 +548,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
        }
 
        dir_f = JFFS2_INODE_INFO(dir_i);
-       down(&dir_f->sem);
+       mutex_lock(&dir_f->sem);
 
        rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
        rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
@@ -571,7 +571,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
                   as if it were the final unlink() */
                jffs2_complete_reservation(c);
                jffs2_free_raw_dirent(rd);
-               up(&dir_f->sem);
+               mutex_unlock(&dir_f->sem);
                jffs2_clear_inode(inode);
                return PTR_ERR(fd);
        }
@@ -585,7 +585,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
           one if necessary. */
        jffs2_add_fd_to_list(c, fd, &dir_f->dents);
 
-       up(&dir_f->sem);
+       mutex_unlock(&dir_f->sem);
        jffs2_complete_reservation(c);
 
        d_instantiate(dentry, inode);
@@ -673,7 +673,7 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de
 
        if (IS_ERR(fn)) {
                /* Eeek. Wave bye bye */
-               up(&f->sem);
+               mutex_unlock(&f->sem);
                jffs2_complete_reservation(c);
                jffs2_clear_inode(inode);
                return PTR_ERR(fn);
@@ -682,7 +682,7 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de
           obsoleted by the first data write
        */
        f->metadata = fn;
-       up(&f->sem);
+       mutex_unlock(&f->sem);
 
        jffs2_complete_reservation(c);
 
@@ -714,7 +714,7 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de
        }
 
        dir_f = JFFS2_INODE_INFO(dir_i);
-       down(&dir_f->sem);
+       mutex_lock(&dir_f->sem);
 
        rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
        rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
@@ -740,7 +740,7 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de
                   as if it were the final unlink() */
                jffs2_complete_reservation(c);
                jffs2_free_raw_dirent(rd);
-               up(&dir_f->sem);
+               mutex_unlock(&dir_f->sem);
                jffs2_clear_inode(inode);
                return PTR_ERR(fd);
        }
@@ -753,7 +753,7 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de
           one if necessary. */
        jffs2_add_fd_to_list(c, fd, &dir_f->dents);
 
-       up(&dir_f->sem);
+       mutex_unlock(&dir_f->sem);
        jffs2_complete_reservation(c);
 
        d_instantiate(dentry, inode);
@@ -780,14 +780,14 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
                if (S_ISDIR(new_dentry->d_inode->i_mode)) {
                        struct jffs2_full_dirent *fd;
 
-                       down(&victim_f->sem);
+                       mutex_lock(&victim_f->sem);
                        for (fd = victim_f->dents; fd; fd = fd->next) {
                                if (fd->ino) {
-                                       up(&victim_f->sem);
+                                       mutex_unlock(&victim_f->sem);
                                        return -ENOTEMPTY;
                                }
                        }
-                       up(&victim_f->sem);
+                       mutex_unlock(&victim_f->sem);
                }
        }
 
@@ -816,9 +816,9 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
                /* Don't oops if the victim was a dirent pointing to an
                   inode which didn't exist. */
                if (victim_f->inocache) {
-                       down(&victim_f->sem);
+                       mutex_lock(&victim_f->sem);
                        victim_f->inocache->nlink--;
-                       up(&victim_f->sem);
+                       mutex_unlock(&victim_f->sem);
                }
        }
 
@@ -836,11 +836,11 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
        if (ret) {
                /* Oh shit. We really ought to make a single node which can do both atomically */
                struct jffs2_inode_info *f = JFFS2_INODE_INFO(old_dentry->d_inode);
-               down(&f->sem);
+               mutex_lock(&f->sem);
                inc_nlink(old_dentry->d_inode);
                if (f->inocache)
                        f->inocache->nlink++;
-               up(&f->sem);
+               mutex_unlock(&f->sem);
 
                printk(KERN_NOTICE "jffs2_rename(): Link succeeded, unlink failed (err %d). You now have a hard link\n", ret);
                /* Might as well let the VFS know */
index a1db9180633fcb3476757af80f22b3e6621f06d2..25a640e566d3d6d47ae9310b12b3e222f7bbed52 100644 (file)
@@ -50,14 +50,14 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
        instr = kmalloc(sizeof(struct erase_info) + sizeof(struct erase_priv_struct), GFP_KERNEL);
        if (!instr) {
                printk(KERN_WARNING "kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
-               down(&c->erase_free_sem);
+               mutex_lock(&c->erase_free_sem);
                spin_lock(&c->erase_completion_lock);
                list_move(&jeb->list, &c->erase_pending_list);
                c->erasing_size -= c->sector_size;
                c->dirty_size += c->sector_size;
                jeb->dirty_size = c->sector_size;
                spin_unlock(&c->erase_completion_lock);
-               up(&c->erase_free_sem);
+               mutex_unlock(&c->erase_free_sem);
                return;
        }
 
@@ -84,14 +84,14 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
        if (ret == -ENOMEM || ret == -EAGAIN) {
                /* Erase failed immediately. Refile it on the list */
                D1(printk(KERN_DEBUG "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n", jeb->offset, ret));
-               down(&c->erase_free_sem);
+               mutex_lock(&c->erase_free_sem);
                spin_lock(&c->erase_completion_lock);
                list_move(&jeb->list, &c->erase_pending_list);
                c->erasing_size -= c->sector_size;
                c->dirty_size += c->sector_size;
                jeb->dirty_size = c->sector_size;
                spin_unlock(&c->erase_completion_lock);
-               up(&c->erase_free_sem);
+               mutex_unlock(&c->erase_free_sem);
                return;
        }
 
@@ -107,7 +107,7 @@ void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
 {
        struct jffs2_eraseblock *jeb;
 
-       down(&c->erase_free_sem);
+       mutex_lock(&c->erase_free_sem);
 
        spin_lock(&c->erase_completion_lock);
 
@@ -116,9 +116,9 @@ void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
 
                if (!list_empty(&c->erase_complete_list)) {
                        jeb = list_entry(c->erase_complete_list.next, struct jffs2_eraseblock, list);
-                       list_del(&jeb->list);
+                       list_move(&jeb->list, &c->erase_checking_list);
                        spin_unlock(&c->erase_completion_lock);
-                       up(&c->erase_free_sem);
+                       mutex_unlock(&c->erase_free_sem);
                        jffs2_mark_erased_block(c, jeb);
 
                        if (!--count) {
@@ -139,7 +139,7 @@ void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
                        jffs2_free_jeb_node_refs(c, jeb);
                        list_add(&jeb->list, &c->erasing_list);
                        spin_unlock(&c->erase_completion_lock);
-                       up(&c->erase_free_sem);
+                       mutex_unlock(&c->erase_free_sem);
 
                        jffs2_erase_block(c, jeb);
 
@@ -149,12 +149,12 @@ void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
 
                /* Be nice */
                yield();
-               down(&c->erase_free_sem);
+               mutex_lock(&c->erase_free_sem);
                spin_lock(&c->erase_completion_lock);
        }
 
        spin_unlock(&c->erase_completion_lock);
-       up(&c->erase_free_sem);
+       mutex_unlock(&c->erase_free_sem);
  done:
        D1(printk(KERN_DEBUG "jffs2_erase_pending_blocks completed\n"));
 }
@@ -162,11 +162,11 @@ void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
 static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
 {
        D1(printk(KERN_DEBUG "Erase completed successfully at 0x%08x\n", jeb->offset));
-       down(&c->erase_free_sem);
+       mutex_lock(&c->erase_free_sem);
        spin_lock(&c->erase_completion_lock);
        list_move_tail(&jeb->list, &c->erase_complete_list);
        spin_unlock(&c->erase_completion_lock);
-       up(&c->erase_free_sem);
+       mutex_unlock(&c->erase_free_sem);
        /* Ensure that kupdated calls us again to mark them clean */
        jffs2_erase_pending_trigger(c);
 }
@@ -180,26 +180,26 @@ static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock
                   failed too many times. */
                if (!jffs2_write_nand_badblock(c, jeb, bad_offset)) {
                        /* We'd like to give this block another try. */
-                       down(&c->erase_free_sem);
+                       mutex_lock(&c->erase_free_sem);
                        spin_lock(&c->erase_completion_lock);
                        list_move(&jeb->list, &c->erase_pending_list);
                        c->erasing_size -= c->sector_size;
                        c->dirty_size += c->sector_size;
                        jeb->dirty_size = c->sector_size;
                        spin_unlock(&c->erase_completion_lock);
-                       up(&c->erase_free_sem);
+                       mutex_unlock(&c->erase_free_sem);
                        return;
                }
        }
 
-       down(&c->erase_free_sem);
+       mutex_lock(&c->erase_free_sem);
        spin_lock(&c->erase_completion_lock);
        c->erasing_size -= c->sector_size;
        c->bad_size += c->sector_size;
        list_move(&jeb->list, &c->bad_list);
        c->nr_erasing_blocks--;
        spin_unlock(&c->erase_completion_lock);
-       up(&c->erase_free_sem);
+       mutex_unlock(&c->erase_free_sem);
        wake_up(&c->erase_wait);
 }
 
@@ -350,9 +350,11 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl
                           break;
                } while(--retlen);
                c->mtd->unpoint(c->mtd, ebuf, jeb->offset, c->sector_size);
-               if (retlen)
+               if (retlen) {
                        printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08tx\n",
                               *wordebuf, jeb->offset + c->sector_size-retlen*sizeof(*wordebuf));
+                       return -EIO;
+               }
                return 0;
        }
  do_flash_read:
@@ -373,10 +375,12 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl
                ret = c->mtd->read(c->mtd, ofs, readlen, &retlen, ebuf);
                if (ret) {
                        printk(KERN_WARNING "Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n", ofs, ret);
+                       ret = -EIO;
                        goto fail;
                }
                if (retlen != readlen) {
                        printk(KERN_WARNING "Short read from newly-erased block at 0x%08x. Wanted %d, got %zd\n", ofs, readlen, retlen);
+                       ret = -EIO;
                        goto fail;
                }
                for (i=0; i<readlen; i += sizeof(unsigned long)) {
@@ -385,6 +389,7 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl
                        if (*datum + 1) {
                                *bad_offset += i;
                                printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08x\n", *datum, *bad_offset);
+                               ret = -EIO;
                                goto fail;
                        }
                }
@@ -419,9 +424,6 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
                        if (jffs2_write_nand_cleanmarker(c, jeb))
                                goto filebad;
                }
-
-               /* Everything else got zeroed before the erase */
-               jeb->free_size = c->sector_size;
        } else {
 
                struct kvec vecs[1];
@@ -449,48 +451,50 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
 
                        goto filebad;
                }
-
-               /* Everything else got zeroed before the erase */
-               jeb->free_size = c->sector_size;
-               /* FIXME Special case for cleanmarker in empty block */
-               jffs2_link_node_ref(c, jeb, jeb->offset | REF_NORMAL, c->cleanmarker_size, NULL);
        }
+       /* Everything else got zeroed before the erase */
+       jeb->free_size = c->sector_size;
 
-       down(&c->erase_free_sem);
+       mutex_lock(&c->erase_free_sem);
        spin_lock(&c->erase_completion_lock);
+
        c->erasing_size -= c->sector_size;
-       c->free_size += jeb->free_size;
-       c->used_size += jeb->used_size;
+       c->free_size += c->sector_size;
 
-       jffs2_dbg_acct_sanity_check_nolock(c,jeb);
-       jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
+       /* Account for cleanmarker now, if it's in-band */
+       if (c->cleanmarker_size && !jffs2_cleanmarker_oob(c))
+               jffs2_link_node_ref(c, jeb, jeb->offset | REF_NORMAL, c->cleanmarker_size, NULL);
 
-       list_add_tail(&jeb->list, &c->free_list);
+       list_move_tail(&jeb->list, &c->free_list);
        c->nr_erasing_blocks--;
        c->nr_free_blocks++;
+
+       jffs2_dbg_acct_sanity_check_nolock(c, jeb);
+       jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
+
        spin_unlock(&c->erase_completion_lock);
-       up(&c->erase_free_sem);
+       mutex_unlock(&c->erase_free_sem);
        wake_up(&c->erase_wait);
        return;
 
 filebad:
-       down(&c->erase_free_sem);
+       mutex_lock(&c->erase_free_sem);
        spin_lock(&c->erase_completion_lock);
        /* Stick it on a list (any list) so erase_failed can take it
           right off again.  Silly, but shouldn't happen often. */
-       list_add(&jeb->list, &c->erasing_list);
+       list_move(&jeb->list, &c->erasing_list);
        spin_unlock(&c->erase_completion_lock);
-       up(&c->erase_free_sem);
+       mutex_unlock(&c->erase_free_sem);
        jffs2_erase_failed(c, jeb, bad_offset);
        return;
 
 refile:
        /* Stick it back on the list from whence it came and come back later */
        jffs2_erase_pending_trigger(c);
-       down(&c->erase_free_sem);
+       mutex_lock(&c->erase_free_sem);
        spin_lock(&c->erase_completion_lock);
-       list_add(&jeb->list, &c->erase_complete_list);
+       list_move(&jeb->list, &c->erase_complete_list);
        spin_unlock(&c->erase_completion_lock);
-       up(&c->erase_free_sem);
+       mutex_unlock(&c->erase_free_sem);
        return;
 }
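The list_del()/list_add() pairs above become list_move() so that an eraseblock is always on exactly one of the per-state lists, including the new erase_checking_list, while jffs2_mark_erased_block() runs without the locks held. list_move() is simply the unlink-and-relink combination:

    /* list_move(&jeb->list, &c->erase_checking_list) is equivalent to: */
    list_del(&jeb->list);
    list_add(&jeb->list, &c->erase_checking_list);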
index dcc2734e0b5d65479a34ba53a9bf7ea2f08ecbbf..5e920343b2c598595f2c068455e17d7d360c5a4f 100644 (file)
@@ -115,9 +115,9 @@ static int jffs2_readpage (struct file *filp, struct page *pg)
        struct jffs2_inode_info *f = JFFS2_INODE_INFO(pg->mapping->host);
        int ret;
 
-       down(&f->sem);
+       mutex_lock(&f->sem);
        ret = jffs2_do_readpage_unlock(pg->mapping->host, pg);
-       up(&f->sem);
+       mutex_unlock(&f->sem);
        return ret;
 }
 
@@ -154,7 +154,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
                if (ret)
                        goto out_page;
 
-               down(&f->sem);
+               mutex_lock(&f->sem);
                memset(&ri, 0, sizeof(ri));
 
                ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
@@ -181,7 +181,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
                if (IS_ERR(fn)) {
                        ret = PTR_ERR(fn);
                        jffs2_complete_reservation(c);
-                       up(&f->sem);
+                       mutex_unlock(&f->sem);
                        goto out_page;
                }
                ret = jffs2_add_full_dnode_to_inode(c, f, fn);
@@ -195,12 +195,12 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
                        jffs2_mark_node_obsolete(c, fn->raw);
                        jffs2_free_full_dnode(fn);
                        jffs2_complete_reservation(c);
-                       up(&f->sem);
+                       mutex_unlock(&f->sem);
                        goto out_page;
                }
                jffs2_complete_reservation(c);
                inode->i_size = pageofs;
-               up(&f->sem);
+               mutex_unlock(&f->sem);
        }
 
        /*
@@ -209,9 +209,9 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
         * case of a short-copy.
         */
        if (!PageUptodate(pg)) {
-               down(&f->sem);
+               mutex_lock(&f->sem);
                ret = jffs2_do_readpage_nolock(inode, pg);
-               up(&f->sem);
+               mutex_unlock(&f->sem);
                if (ret)
                        goto out_page;
        }
index e26ea78c7892f7a4f157819a06e052add03057fd..3eb1c84b0a33289b62dfef1dd21d774647302d64 100644 (file)
@@ -36,6 +36,7 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
        unsigned int ivalid;
        uint32_t alloclen;
        int ret;
+       int alloc_type = ALLOC_NORMAL;
 
        D1(printk(KERN_DEBUG "jffs2_setattr(): ino #%lu\n", inode->i_ino));
 
@@ -50,20 +51,20 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
                mdata = (char *)&dev;
                D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of kdev_t\n", mdatalen));
        } else if (S_ISLNK(inode->i_mode)) {
-               down(&f->sem);
+               mutex_lock(&f->sem);
                mdatalen = f->metadata->size;
                mdata = kmalloc(f->metadata->size, GFP_USER);
                if (!mdata) {
-                       up(&f->sem);
+                       mutex_unlock(&f->sem);
                        return -ENOMEM;
                }
                ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen);
                if (ret) {
-                       up(&f->sem);
+                       mutex_unlock(&f->sem);
                        kfree(mdata);
                        return ret;
                }
-               up(&f->sem);
+               mutex_unlock(&f->sem);
                D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of symlink target\n", mdatalen));
        }
 
@@ -82,7 +83,7 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
                         kfree(mdata);
                return ret;
        }
-       down(&f->sem);
+       mutex_lock(&f->sem);
        ivalid = iattr->ia_valid;
 
        ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
@@ -115,6 +116,10 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
                ri->compr = JFFS2_COMPR_ZERO;
                ri->dsize = cpu_to_je32(iattr->ia_size - inode->i_size);
                ri->offset = cpu_to_je32(inode->i_size);
+       } else if (ivalid & ATTR_SIZE && !iattr->ia_size) {
+               /* For truncate-to-zero, treat it as deletion because
+                  it'll always be obsoleting all previous nodes */
+               alloc_type = ALLOC_DELETION;
        }
        ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
        if (mdatalen)
@@ -122,14 +127,14 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
        else
                ri->data_crc = cpu_to_je32(0);
 
-       new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, ALLOC_NORMAL);
+       new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, alloc_type);
        if (S_ISLNK(inode->i_mode))
                kfree(mdata);
 
        if (IS_ERR(new_metadata)) {
                jffs2_complete_reservation(c);
                jffs2_free_raw_inode(ri);
-               up(&f->sem);
+               mutex_unlock(&f->sem);
                return PTR_ERR(new_metadata);
        }
        /* It worked. Update the inode */
@@ -149,6 +154,7 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
        if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
                jffs2_add_full_dnode_to_inode(c, f, new_metadata);
                inode->i_size = iattr->ia_size;
+               inode->i_blocks = (inode->i_size + 511) >> 9;
                f->metadata = NULL;
        } else {
                f->metadata = new_metadata;
@@ -159,7 +165,7 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
        }
        jffs2_free_raw_inode(ri);
 
-       up(&f->sem);
+       mutex_unlock(&f->sem);
        jffs2_complete_reservation(c);
 
        /* We have to do the vmtruncate() without f->sem held, since
@@ -167,8 +173,10 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
           We are protected from a simultaneous write() extending i_size
           back past iattr->ia_size, because do_truncate() holds the
           generic inode semaphore. */
-       if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size)
-               vmtruncate(inode, iattr->ia_size);
+       if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) {
+               vmtruncate(inode, iattr->ia_size);      
+               inode->i_blocks = (inode->i_size + 511) >> 9;
+       }       
 
        return 0;
 }
@@ -248,12 +256,12 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
        c = JFFS2_SB_INFO(inode->i_sb);
 
        jffs2_init_inode_info(f);
-       down(&f->sem);
+       mutex_lock(&f->sem);
 
        ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node);
 
        if (ret) {
-               up(&f->sem);
+               mutex_unlock(&f->sem);
                iget_failed(inode);
                return ERR_PTR(ret);
        }
@@ -330,7 +338,7 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
                printk(KERN_WARNING "jffs2_read_inode(): Bogus imode %o for ino %lu\n", inode->i_mode, (unsigned long)inode->i_ino);
        }
 
-       up(&f->sem);
+       mutex_unlock(&f->sem);
 
        D1(printk(KERN_DEBUG "jffs2_read_inode() returning\n"));
        unlock_new_inode(inode);
@@ -339,7 +347,7 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
 error_io:
        ret = -EIO;
 error:
-       up(&f->sem);
+       mutex_unlock(&f->sem);
        jffs2_do_clear_inode(c, f);
        iget_failed(inode);
        return ERR_PTR(ret);
@@ -380,9 +388,9 @@ int jffs2_remount_fs (struct super_block *sb, int *flags, char *data)
           Flush the writebuffer, if neccecary, else we loose it */
        if (!(sb->s_flags & MS_RDONLY)) {
                jffs2_stop_garbage_collect_thread(c);
-               down(&c->alloc_sem);
+               mutex_lock(&c->alloc_sem);
                jffs2_flush_wbuf_pad(c);
-               up(&c->alloc_sem);
+               mutex_unlock(&c->alloc_sem);
        }
 
        if (!(*flags & MS_RDONLY))
@@ -429,7 +437,7 @@ struct inode *jffs2_new_inode (struct inode *dir_i, int mode, struct jffs2_raw_i
 
        f = JFFS2_INODE_INFO(inode);
        jffs2_init_inode_info(f);
-       down(&f->sem);
+       mutex_lock(&f->sem);
 
        memset(ri, 0, sizeof(*ri));
        /* Set OS-specific defaults for new inodes */
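The added i_blocks updates keep the block count in step with i_size when a setattr grows or truncates the file; i_blocks counts 512-byte sectors, rounded up:

    inode->i_blocks = (inode->i_size + 511) >> 9;
    /* e.g. i_size = 1000 bytes  ->  (1000 + 511) >> 9 = 2 sectors */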
index 32ff0373aa04d0c9f709ec22c64ad9086c343448..bad005664e308b1fd2f30e49ccad91d7afd57b91 100644 (file)
@@ -126,7 +126,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
        int ret = 0, inum, nlink;
        int xattr = 0;
 
-       if (down_interruptible(&c->alloc_sem))
+       if (mutex_lock_interruptible(&c->alloc_sem))
                return -EINTR;
 
        for (;;) {
@@ -143,7 +143,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
                               c->unchecked_size);
                        jffs2_dbg_dump_block_lists_nolock(c);
                        spin_unlock(&c->erase_completion_lock);
-                       up(&c->alloc_sem);
+                       mutex_unlock(&c->alloc_sem);
                        return -ENOSPC;
                }
 
@@ -190,7 +190,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
                         made no progress in this case, but that should be OK */
                        c->checked_ino--;
 
-                       up(&c->alloc_sem);
+                       mutex_unlock(&c->alloc_sem);
                        sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
                        return 0;
 
@@ -210,7 +210,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
                        printk(KERN_WARNING "Returned error for crccheck of ino #%u. Expect badness...\n", ic->ino);
 
                jffs2_set_inocache_state(c, ic, INO_STATE_CHECKEDABSENT);
-               up(&c->alloc_sem);
+               mutex_unlock(&c->alloc_sem);
                return ret;
        }
 
@@ -221,9 +221,15 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
                jeb = jffs2_find_gc_block(c);
 
        if (!jeb) {
-               D1 (printk(KERN_NOTICE "jffs2: Couldn't find erase block to garbage collect!\n"));
+               /* Couldn't find a free block. But maybe we can just erase one and make 'progress'? */
+               if (!list_empty(&c->erase_pending_list)) {
+                       spin_unlock(&c->erase_completion_lock);
+                       mutex_unlock(&c->alloc_sem);
+                       return -EAGAIN;
+               }
+               D1(printk(KERN_NOTICE "jffs2: Couldn't find erase block to garbage collect!\n"));
                spin_unlock(&c->erase_completion_lock);
-               up(&c->alloc_sem);
+               mutex_unlock(&c->alloc_sem);
                return -EIO;
        }
 
@@ -232,7 +238,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
           printk(KERN_DEBUG "Nextblock at  %08x, used_size %08x, dirty_size %08x, wasted_size %08x, free_size %08x\n", c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->free_size));
 
        if (!jeb->used_size) {
-               up(&c->alloc_sem);
+               mutex_unlock(&c->alloc_sem);
                goto eraseit;
        }
 
@@ -248,7 +254,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
                               jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size);
                        jeb->gc_node = raw;
                        spin_unlock(&c->erase_completion_lock);
-                       up(&c->alloc_sem);
+                       mutex_unlock(&c->alloc_sem);
                        BUG();
                }
        }
@@ -266,7 +272,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
                        /* Just mark it obsolete */
                        jffs2_mark_node_obsolete(c, raw);
                }
-               up(&c->alloc_sem);
+               mutex_unlock(&c->alloc_sem);
                goto eraseit_lock;
        }
 
@@ -334,7 +340,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
                */
                printk(KERN_CRIT "Inode #%u already in state %d in jffs2_garbage_collect_pass()!\n",
                       ic->ino, ic->state);
-               up(&c->alloc_sem);
+               mutex_unlock(&c->alloc_sem);
                spin_unlock(&c->inocache_lock);
                BUG();
 
@@ -345,7 +351,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
                   the alloc_sem() (for marking nodes invalid) so we must
                   drop the alloc_sem before sleeping. */
 
-               up(&c->alloc_sem);
+               mutex_unlock(&c->alloc_sem);
                D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() waiting for ino #%u in state %d\n",
                          ic->ino, ic->state));
                sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
@@ -416,7 +422,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
                ret = -ENOSPC;
        }
  release_sem:
-       up(&c->alloc_sem);
+       mutex_unlock(&c->alloc_sem);
 
  eraseit_lock:
        /* If we've finished this block, start it erasing */
@@ -445,7 +451,7 @@ static int jffs2_garbage_collect_live(struct jffs2_sb_info *c,  struct jffs2_era
        uint32_t start = 0, end = 0, nrfrags = 0;
        int ret = 0;
 
-       down(&f->sem);
+       mutex_lock(&f->sem);
 
        /* Now we have the lock for this inode. Check that it's still the one at the head
           of the list. */
@@ -525,7 +531,7 @@ static int jffs2_garbage_collect_live(struct jffs2_sb_info *c,  struct jffs2_era
                }
        }
  upnout:
-       up(&f->sem);
+       mutex_unlock(&f->sem);
 
        return ret;
 }
@@ -846,7 +852,7 @@ static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct
                /* Prevent the erase code from nicking the obsolete node refs while
                   we're looking at them. I really don't like this extra lock but
                   can't see any alternative. Suggestions on a postcard to... */
-               down(&c->erase_free_sem);
+               mutex_lock(&c->erase_free_sem);
 
                for (raw = f->inocache->nodes; raw != (void *)f->inocache; raw = raw->next_in_ino) {
 
@@ -899,7 +905,7 @@ static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct
                        /* OK. The name really does match. There really is still an older node on
                           the flash which our deletion dirent obsoletes. So we have to write out
                           a new deletion dirent to replace it */
-                       up(&c->erase_free_sem);
+                       mutex_unlock(&c->erase_free_sem);
 
                        D1(printk(KERN_DEBUG "Deletion dirent at %08x still obsoletes real dirent \"%s\" at %08x for ino #%u\n",
                                  ref_offset(fd->raw), fd->name, ref_offset(raw), je32_to_cpu(rd->ino)));
@@ -908,7 +914,7 @@ static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct
                        return jffs2_garbage_collect_dirent(c, jeb, f, fd);
                }
 
-               up(&c->erase_free_sem);
+               mutex_unlock(&c->erase_free_sem);
                kfree(rd);
        }
 
@@ -1081,7 +1087,7 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras
        return 0;
 }
 
-static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *orig_jeb,
                                       struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
                                       uint32_t start, uint32_t end)
 {
index f4d525b0ea5372d950494cae2e34a1553d445d5d..e2177210f62153bec393482eb6f2e2b58b48f0eb 100644 (file)
@@ -10,6 +10,7 @@
  */
 
 #include <linux/fs.h>
+#include "nodelist.h"
 
 int jffs2_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
                unsigned long arg)
index a841f4973a74824667b94e16f0cf7fe343ac82fa..31559f45fdde31eb511fc89bc564aa180fc1099b 100644 (file)
@@ -15,7 +15,7 @@
 #include <linux/version.h>
 #include <linux/rbtree.h>
 #include <linux/posix_acl.h>
-#include <linux/semaphore.h>
+#include <linux/mutex.h>
 
 struct jffs2_inode_info {
        /* We need an internal mutex similar to inode->i_mutex.
@@ -24,7 +24,7 @@ struct jffs2_inode_info {
           before letting GC proceed. Or we'd have to put ugliness
           into the GC code so it didn't attempt to obtain the i_mutex
           for the inode(s) which are already locked */
-       struct semaphore sem;
+       struct mutex sem;
 
        /* The highest (datanode) version number used for this ino */
        uint32_t highest_version;
index 18fca2b9e53192355fe26ca16f71c76b7af35d93..85ef6dbb1be7adb3dbbd3553f008a32fe1c35dfc 100644 (file)
@@ -16,7 +16,7 @@
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
 #include <linux/completion.h>
-#include <linux/semaphore.h>
+#include <linux/mutex.h>
 #include <linux/timer.h>
 #include <linux/wait.h>
 #include <linux/list.h>
@@ -44,7 +44,7 @@ struct jffs2_sb_info {
        struct completion gc_thread_start; /* GC thread start completion */
        struct completion gc_thread_exit; /* GC thread exit completion port */
 
-       struct semaphore alloc_sem;     /* Used to protect all the following
+       struct mutex alloc_sem;         /* Used to protect all the following
                                           fields, and also to protect against
                                           out-of-order writing of nodes. And GC. */
        uint32_t cleanmarker_size;      /* Size of an _inline_ CLEANMARKER
@@ -87,6 +87,7 @@ struct jffs2_sb_info {
        struct list_head erasable_list;         /* Blocks which are completely dirty, and need erasing */
        struct list_head erasable_pending_wbuf_list;    /* Blocks which need erasing but only after the current wbuf is flushed */
        struct list_head erasing_list;          /* Blocks which are currently erasing */
+       struct list_head erase_checking_list;   /* Blocks which are being checked and marked */
        struct list_head erase_pending_list;    /* Blocks which need erasing now */
        struct list_head erase_complete_list;   /* Blocks which are erased and need the clean marker written to them */
        struct list_head free_list;             /* Blocks which are free and ready to be used */
@@ -104,7 +105,7 @@ struct jffs2_sb_info {
        /* Sem to allow jffs2_garbage_collect_deletion_dirent to
           drop the erase_completion_lock while it's holding a pointer
           to an obsoleted node. I don't like this. Alternatives welcomed. */
-       struct semaphore erase_free_sem;
+       struct mutex erase_free_sem;
 
        uint32_t wbuf_pagesize; /* 0 for NOR and other flashes with no wbuf */
 
index ec1aae9e695e9fc8955889659b59649a3fe45135..8219df6eb6d8c4191c78745c285272e5ce06f636 100644 (file)
@@ -87,7 +87,7 @@ struct jffs2_raw_node_ref
                xattr_ref or xattr_datum instead. The common part of those structures
                has NULL in the first word. See jffs2_raw_ref_to_ic() below */
        uint32_t flash_offset;
-#define TEST_TOTLEN
+#undef TEST_TOTLEN
 #ifdef TEST_TOTLEN
        uint32_t __totlen; /* This may die; use ref_totlen(c, jeb, ) below */
 #endif
index a0313fa8748e375d5ee1c2438794e5052faa360f..9df8f3ef20dfd8d8a39d59ee02d4e14ea2713927 100644 (file)
@@ -48,7 +48,7 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
        minsize = PAD(minsize);
 
        D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
-       down(&c->alloc_sem);
+       mutex_lock(&c->alloc_sem);
 
        D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));
 
@@ -57,7 +57,6 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
        /* this needs a little more thought (true <tglx> :)) */
        while(ret == -EAGAIN) {
                while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
-                       int ret;
                        uint32_t dirty, avail;
 
                        /* calculate real dirty size
@@ -82,7 +81,7 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
                                          dirty, c->unchecked_size, c->sector_size));
 
                                spin_unlock(&c->erase_completion_lock);
-                               up(&c->alloc_sem);
+                               mutex_unlock(&c->alloc_sem);
                                return -ENOSPC;
                        }
 
@@ -105,11 +104,11 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
                                D1(printk(KERN_DEBUG "max. available size 0x%08x  < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
                                          avail, blocksneeded * c->sector_size));
                                spin_unlock(&c->erase_completion_lock);
-                               up(&c->alloc_sem);
+                               mutex_unlock(&c->alloc_sem);
                                return -ENOSPC;
                        }
 
-                       up(&c->alloc_sem);
+                       mutex_unlock(&c->alloc_sem);
 
                        D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
                                  c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
@@ -117,7 +116,10 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
                        spin_unlock(&c->erase_completion_lock);
 
                        ret = jffs2_garbage_collect_pass(c);
-                       if (ret)
+
+                       if (ret == -EAGAIN)
+                               jffs2_erase_pending_blocks(c, 1);
+                       else if (ret)
                                return ret;
 
                        cond_resched();
@@ -125,7 +127,7 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
                        if (signal_pending(current))
                                return -EINTR;
 
-                       down(&c->alloc_sem);
+                       mutex_lock(&c->alloc_sem);
                        spin_lock(&c->erase_completion_lock);
                }
 
@@ -138,7 +140,7 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
        if (!ret)
                ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
        if (ret)
-               up(&c->alloc_sem);
+               mutex_unlock(&c->alloc_sem);
        return ret;
 }
 
@@ -463,7 +465,7 @@ void jffs2_complete_reservation(struct jffs2_sb_info *c)
 {
        D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
        jffs2_garbage_collect_trigger(c);
-       up(&c->alloc_sem);
+       mutex_unlock(&c->alloc_sem);
 }
 
 static inline int on_list(struct list_head *obj, struct list_head *head)
@@ -512,7 +514,7 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
                   any jffs2_raw_node_refs. So we don't need to stop erases from
                   happening, or protect against people holding an obsolete
                   jffs2_raw_node_ref without the erase_completion_lock. */
-               down(&c->erase_free_sem);
+               mutex_lock(&c->erase_free_sem);
        }
 
        spin_lock(&c->erase_completion_lock);
@@ -715,7 +717,7 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
        }
 
  out_erase_sem:
-       up(&c->erase_free_sem);
+       mutex_unlock(&c->erase_free_sem);
 }
 
 int jffs2_thread_should_wake(struct jffs2_sb_info *c)
index e512a93d624954e04b70ab0a27bf85eb43a61fea..4cb4d76de07f4810771e33edb6c8021d0a7896d1 100644 (file)
@@ -825,8 +825,9 @@ static inline int read_dnode(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
        else // normal case...
                tn->fn->size = je32_to_cpu(rd->dsize);
 
-       dbg_readinode("dnode @%08x: ver %u, offset %#04x, dsize %#04x, csize %#04x\n",
-                 ref_offset(ref), je32_to_cpu(rd->version), je32_to_cpu(rd->offset), je32_to_cpu(rd->dsize), csize);
+       dbg_readinode2("dnode @%08x: ver %u, offset %#04x, dsize %#04x, csize %#04x\n",
+                      ref_offset(ref), je32_to_cpu(rd->version),
+                      je32_to_cpu(rd->offset), je32_to_cpu(rd->dsize), csize);
 
        ret = jffs2_add_tn_to_tree(c, rii, tn);
 
@@ -836,13 +837,13 @@ static inline int read_dnode(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
                jffs2_free_tmp_dnode_info(tn);
                return ret;
        }
-#ifdef JFFS2_DBG_READINODE_MESSAGES
-       dbg_readinode("After adding ver %d:\n", je32_to_cpu(rd->version));
+#ifdef JFFS2_DBG_READINODE2_MESSAGES
+       dbg_readinode2("After adding ver %d:\n", je32_to_cpu(rd->version));
        tn = tn_first(&rii->tn_root);
        while (tn) {
-               dbg_readinode("%p: v %d r 0x%x-0x%x ov %d\n",
-                            tn, tn->version, tn->fn->ofs,
-                            tn->fn->ofs+tn->fn->size, tn->overlapped);
+               dbg_readinode2("%p: v %d r 0x%x-0x%x ov %d\n",
+                              tn, tn->version, tn->fn->ofs,
+                              tn->fn->ofs+tn->fn->size, tn->overlapped);
                tn = tn_next(tn);
        }
 #endif
@@ -1193,7 +1194,7 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
                JFFS2_ERROR("failed to read from flash: error %d, %zd of %zd bytes read\n",
                        ret, retlen, sizeof(*latest_node));
                /* FIXME: If this fails, there seems to be a memory leak. Find it. */
-               up(&f->sem);
+               mutex_unlock(&f->sem);
                jffs2_do_clear_inode(c, f);
                return ret?ret:-EIO;
        }
@@ -1202,7 +1203,7 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
        if (crc != je32_to_cpu(latest_node->node_crc)) {
                JFFS2_ERROR("CRC failed for read_inode of inode %u at physical location 0x%x\n",
                        f->inocache->ino, ref_offset(rii.latest_ref));
-               up(&f->sem);
+               mutex_unlock(&f->sem);
                jffs2_do_clear_inode(c, f);
                return -EIO;
        }
@@ -1242,7 +1243,7 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
                        f->target = kmalloc(je32_to_cpu(latest_node->csize) + 1, GFP_KERNEL);
                        if (!f->target) {
                                JFFS2_ERROR("can't allocate %d bytes of memory for the symlink target path cache\n", je32_to_cpu(latest_node->csize));
-                               up(&f->sem);
+                               mutex_unlock(&f->sem);
                                jffs2_do_clear_inode(c, f);
                                return -ENOMEM;
                        }
@@ -1255,7 +1256,7 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
                                        ret = -EIO;
                                kfree(f->target);
                                f->target = NULL;
-                               up(&f->sem);
+                               mutex_unlock(&f->sem);
                                jffs2_do_clear_inode(c, f);
                                return -ret;
                        }
@@ -1273,14 +1274,14 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
                if (f->metadata) {
                        JFFS2_ERROR("Argh. Special inode #%u with mode 0%o had metadata node\n",
                               f->inocache->ino, jemode_to_cpu(latest_node->mode));
-                       up(&f->sem);
+                       mutex_unlock(&f->sem);
                        jffs2_do_clear_inode(c, f);
                        return -EIO;
                }
                if (!frag_first(&f->fragtree)) {
                        JFFS2_ERROR("Argh. Special inode #%u with mode 0%o has no fragments\n",
                               f->inocache->ino, jemode_to_cpu(latest_node->mode));
-                       up(&f->sem);
+                       mutex_unlock(&f->sem);
                        jffs2_do_clear_inode(c, f);
                        return -EIO;
                }
@@ -1289,7 +1290,7 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
                        JFFS2_ERROR("Argh. Special inode #%u with mode 0x%x had more than one node\n",
                               f->inocache->ino, jemode_to_cpu(latest_node->mode));
                        /* FIXME: Deal with it - check crc32, check for duplicate node, check times and discard the older one */
-                       up(&f->sem);
+                       mutex_unlock(&f->sem);
                        jffs2_do_clear_inode(c, f);
                        return -EIO;
                }
@@ -1379,12 +1380,13 @@ int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *i
        if (!f)
                return -ENOMEM;
 
-       init_MUTEX_LOCKED(&f->sem);
+       mutex_init(&f->sem);
+       mutex_lock(&f->sem);
        f->inocache = ic;
 
        ret = jffs2_do_read_inode_internal(c, f, &n);
        if (!ret) {
-               up(&f->sem);
+               mutex_unlock(&f->sem);
                jffs2_do_clear_inode(c, f);
        }
        kfree (f);
@@ -1398,7 +1400,7 @@ void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
 
        jffs2_clear_acl(f);
        jffs2_xattr_delete_inode(c, f->inocache);
-       down(&f->sem);
+       mutex_lock(&f->sem);
        deleted = f->inocache && !f->inocache->nlink;
 
        if (f->inocache && f->inocache->state != INO_STATE_CHECKING)
@@ -1430,5 +1432,5 @@ void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
                        jffs2_del_ino_cache(c, f->inocache);
        }
 
-       up(&f->sem);
+       mutex_unlock(&f->sem);
 }
index 4677355996cc8c49a284e56a909502fe788cf234..f3353df178e707e8072e048e46b23753e5c88d5d 100644 (file)
@@ -47,7 +47,7 @@ static void jffs2_i_init_once(struct kmem_cache *cachep, void *foo)
 {
        struct jffs2_inode_info *ei = (struct jffs2_inode_info *) foo;
 
-       init_MUTEX(&ei->sem);
+       mutex_init(&ei->sem);
        inode_init_once(&ei->vfs_inode);
 }
 
@@ -55,9 +55,9 @@ static int jffs2_sync_fs(struct super_block *sb, int wait)
 {
        struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
 
-       down(&c->alloc_sem);
+       mutex_lock(&c->alloc_sem);
        jffs2_flush_wbuf_pad(c);
-       up(&c->alloc_sem);
+       mutex_unlock(&c->alloc_sem);
        return 0;
 }
 
@@ -95,8 +95,8 @@ static int jffs2_fill_super(struct super_block *sb, void *data, int silent)
 
        /* Initialize JFFS2 superblock locks, the further initialization will
         * be done later */
-       init_MUTEX(&c->alloc_sem);
-       init_MUTEX(&c->erase_free_sem);
+       mutex_init(&c->alloc_sem);
+       mutex_init(&c->erase_free_sem);
        init_waitqueue_head(&c->erase_wait);
        init_waitqueue_head(&c->inocache_wq);
        spin_lock_init(&c->erase_completion_lock);
@@ -125,9 +125,9 @@ static void jffs2_put_super (struct super_block *sb)
 
        D2(printk(KERN_DEBUG "jffs2: jffs2_put_super()\n"));
 
-       down(&c->alloc_sem);
+       mutex_lock(&c->alloc_sem);
        jffs2_flush_wbuf_pad(c);
-       up(&c->alloc_sem);
+       mutex_unlock(&c->alloc_sem);
 
        jffs2_sum_exit(c);
 
index d1d4f27464baf879825eb72ac4ad4812acfaa0fd..8de52b6076785816abf1625fbb2b64534a7c5610 100644 (file)
@@ -578,8 +578,8 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
        if (!jffs2_is_writebuffered(c))
                return 0;
 
-       if (!down_trylock(&c->alloc_sem)) {
-               up(&c->alloc_sem);
+       if (mutex_trylock(&c->alloc_sem)) {
+               mutex_unlock(&c->alloc_sem);
                printk(KERN_CRIT "jffs2_flush_wbuf() called with alloc_sem not locked!\n");
                BUG();
        }
@@ -702,10 +702,10 @@ int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
        if (!c->wbuf)
                return 0;
 
-       down(&c->alloc_sem);
+       mutex_lock(&c->alloc_sem);
        if (!jffs2_wbuf_pending_for_ino(c, ino)) {
                D1(printk(KERN_DEBUG "Ino #%d not pending in wbuf. Returning\n", ino));
-               up(&c->alloc_sem);
+               mutex_unlock(&c->alloc_sem);
                return 0;
        }
 
@@ -725,14 +725,14 @@ int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
        } else while (old_wbuf_len &&
                      old_wbuf_ofs == c->wbuf_ofs) {
 
-               up(&c->alloc_sem);
+               mutex_unlock(&c->alloc_sem);
 
                D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() calls gc pass\n"));
 
                ret = jffs2_garbage_collect_pass(c);
                if (ret) {
                        /* GC failed. Flush it with padding instead */
-                       down(&c->alloc_sem);
+                       mutex_lock(&c->alloc_sem);
                        down_write(&c->wbuf_sem);
                        ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
                        /* retry flushing wbuf in case jffs2_wbuf_recover
@@ -742,12 +742,12 @@ int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
                        up_write(&c->wbuf_sem);
                        break;
                }
-               down(&c->alloc_sem);
+               mutex_lock(&c->alloc_sem);
        }
 
        D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() ends...\n"));
 
-       up(&c->alloc_sem);
+       mutex_unlock(&c->alloc_sem);
        return ret;
 }
 
@@ -1236,12 +1236,24 @@ int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
        if (!c->wbuf)
                return -ENOMEM;
 
+#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
+       c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
+       if (!c->wbuf_verify) {
+               kfree(c->oobbuf);
+               kfree(c->wbuf);
+               return -ENOMEM;
+       }
+#endif
+
        printk(KERN_INFO "JFFS2 write-buffering enabled buffer (%d) erasesize (%d)\n", c->wbuf_pagesize, c->sector_size);
 
        return 0;
 }
 
 void jffs2_dataflash_cleanup(struct jffs2_sb_info *c) {
+#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
+       kfree(c->wbuf_verify);
+#endif
        kfree(c->wbuf);
 }
 
index 776f13cbf2b5e62fef50abfee752e71f93b5e0ee..665fce9797d38d4569a92056f1391bdb5c0a9279 100644 (file)
@@ -137,12 +137,12 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
                                                             JFFS2_SUMMARY_INODE_SIZE);
                        } else {
                                /* Locking pain */
-                               up(&f->sem);
+                               mutex_unlock(&f->sem);
                                jffs2_complete_reservation(c);
 
                                ret = jffs2_reserve_space(c, sizeof(*ri) + datalen, &dummy,
                                                          alloc_mode, JFFS2_SUMMARY_INODE_SIZE);
-                               down(&f->sem);
+                               mutex_lock(&f->sem);
                        }
 
                        if (!ret) {
@@ -285,12 +285,12 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff
                                                             JFFS2_SUMMARY_DIRENT_SIZE(namelen));
                        } else {
                                /* Locking pain */
-                               up(&f->sem);
+                               mutex_unlock(&f->sem);
                                jffs2_complete_reservation(c);
 
                                ret = jffs2_reserve_space(c, sizeof(*rd) + namelen, &dummy,
                                                          alloc_mode, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
-                               down(&f->sem);
+                               mutex_lock(&f->sem);
                        }
 
                        if (!ret) {
@@ -353,7 +353,7 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
                        D1(printk(KERN_DEBUG "jffs2_reserve_space returned %d\n", ret));
                        break;
                }
-               down(&f->sem);
+               mutex_lock(&f->sem);
                datalen = min_t(uint32_t, writelen, PAGE_CACHE_SIZE - (offset & (PAGE_CACHE_SIZE-1)));
                cdatalen = min_t(uint32_t, alloclen - sizeof(*ri), datalen);
 
@@ -381,7 +381,7 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
 
                if (IS_ERR(fn)) {
                        ret = PTR_ERR(fn);
-                       up(&f->sem);
+                       mutex_unlock(&f->sem);
                        jffs2_complete_reservation(c);
                        if (!retried) {
                                /* Write error to be retried */
@@ -403,11 +403,11 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
                        jffs2_mark_node_obsolete(c, fn->raw);
                        jffs2_free_full_dnode(fn);
 
-                       up(&f->sem);
+                       mutex_unlock(&f->sem);
                        jffs2_complete_reservation(c);
                        break;
                }
-               up(&f->sem);
+               mutex_unlock(&f->sem);
                jffs2_complete_reservation(c);
                if (!datalen) {
                        printk(KERN_WARNING "Eep. We didn't actually write any data in jffs2_write_inode_range()\n");
@@ -439,7 +439,7 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str
                                JFFS2_SUMMARY_INODE_SIZE);
        D1(printk(KERN_DEBUG "jffs2_do_create(): reserved 0x%x bytes\n", alloclen));
        if (ret) {
-               up(&f->sem);
+               mutex_unlock(&f->sem);
                return ret;
        }
 
@@ -454,7 +454,7 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str
        if (IS_ERR(fn)) {
                D1(printk(KERN_DEBUG "jffs2_write_dnode() failed\n"));
                /* Eeek. Wave bye bye */
-               up(&f->sem);
+               mutex_unlock(&f->sem);
                jffs2_complete_reservation(c);
                return PTR_ERR(fn);
        }
@@ -463,7 +463,7 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str
        */
        f->metadata = fn;
 
-       up(&f->sem);
+       mutex_unlock(&f->sem);
        jffs2_complete_reservation(c);
 
        ret = jffs2_init_security(&f->vfs_inode, &dir_f->vfs_inode);
@@ -489,7 +489,7 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str
                return -ENOMEM;
        }
 
-       down(&dir_f->sem);
+       mutex_lock(&dir_f->sem);
 
        rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
        rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
@@ -513,7 +513,7 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str
                /* dirent failed to write. Delete the inode normally
                   as if it were the final unlink() */
                jffs2_complete_reservation(c);
-               up(&dir_f->sem);
+               mutex_unlock(&dir_f->sem);
                return PTR_ERR(fd);
        }
 
@@ -522,7 +522,7 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str
        jffs2_add_fd_to_list(c, fd, &dir_f->dents);
 
        jffs2_complete_reservation(c);
-       up(&dir_f->sem);
+       mutex_unlock(&dir_f->sem);
 
        return 0;
 }
@@ -551,7 +551,7 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
                        return ret;
                }
 
-               down(&dir_f->sem);
+               mutex_lock(&dir_f->sem);
 
                /* Build a deletion node */
                rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
@@ -574,21 +574,21 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
 
                if (IS_ERR(fd)) {
                        jffs2_complete_reservation(c);
-                       up(&dir_f->sem);
+                       mutex_unlock(&dir_f->sem);
                        return PTR_ERR(fd);
                }
 
                /* File it. This will mark the old one obsolete. */
                jffs2_add_fd_to_list(c, fd, &dir_f->dents);
-               up(&dir_f->sem);
+               mutex_unlock(&dir_f->sem);
        } else {
-               struct jffs2_full_dirent *fd = dir_f->dents;
                uint32_t nhash = full_name_hash(name, namelen);
 
+               fd = dir_f->dents;
                /* We don't actually want to reserve any space, but we do
                   want to be holding the alloc_sem when we write to flash */
-               down(&c->alloc_sem);
-               down(&dir_f->sem);
+               mutex_lock(&c->alloc_sem);
+               mutex_lock(&dir_f->sem);
 
                for (fd = dir_f->dents; fd; fd = fd->next) {
                        if (fd->nhash == nhash &&
@@ -607,7 +607,7 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
                                break;
                        }
                }
-               up(&dir_f->sem);
+               mutex_unlock(&dir_f->sem);
        }
 
        /* dead_f is NULL if this was a rename not a real unlink */
@@ -615,7 +615,7 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
           pointing to an inode which didn't exist. */
        if (dead_f && dead_f->inocache) {
 
-               down(&dead_f->sem);
+               mutex_lock(&dead_f->sem);
 
                if (S_ISDIR(OFNI_EDONI_2SFFJ(dead_f)->i_mode)) {
                        while (dead_f->dents) {
@@ -639,7 +639,7 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
 
                dead_f->inocache->nlink--;
                /* NB: Caller must set inode nlink if appropriate */
-               up(&dead_f->sem);
+               mutex_unlock(&dead_f->sem);
        }
 
        jffs2_complete_reservation(c);
@@ -666,7 +666,7 @@ int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint
                return ret;
        }
 
-       down(&dir_f->sem);
+       mutex_lock(&dir_f->sem);
 
        /* Build a deletion node */
        rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
@@ -691,7 +691,7 @@ int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint
 
        if (IS_ERR(fd)) {
                jffs2_complete_reservation(c);
-               up(&dir_f->sem);
+               mutex_unlock(&dir_f->sem);
                return PTR_ERR(fd);
        }
 
@@ -699,7 +699,7 @@ int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint
        jffs2_add_fd_to_list(c, fd, &dir_f->dents);
 
        jffs2_complete_reservation(c);
-       up(&dir_f->sem);
+       mutex_unlock(&dir_f->sem);
 
        return 0;
 }
index 1f122c1940afa069a6deb31f82103b990867136a..4d81553d29485e69dd19e746a1edadf4618c8d91 100644 (file)
@@ -632,7 +632,7 @@ nlmsvc_update_deferred_block(struct nlm_block *block, struct file_lock *conf,
                block->b_flags |= B_TIMED_OUT;
        if (conf) {
                if (block->b_fl)
-                       locks_copy_lock(block->b_fl, conf);
+                       __locks_copy_lock(block->b_fl, conf);
        }
 }
 
index dbbefbcd671255935c61783e5cf73d4122346cd7..d1c48b539df8723bd8979e502448eee35d4f9adf 100644 (file)
@@ -18,6 +18,8 @@
 #include <linux/lockd/lockd.h>
 #include <linux/lockd/share.h>
 #include <linux/lockd/sm_inter.h>
+#include <linux/module.h>
+#include <linux/mount.h>
 
 #define NLMDBG_FACILITY                NLMDBG_SVCSUBS
 
@@ -194,6 +196,12 @@ again:
        return 0;
 }
 
+static int
+nlmsvc_always_match(void *dummy1, struct nlm_host *dummy2)
+{
+       return 1;
+}
+
 /*
  * Inspect a single file
  */
@@ -230,7 +238,8 @@ nlm_file_inuse(struct nlm_file *file)
  * Loop over all files in the file table.
  */
 static int
-nlm_traverse_files(struct nlm_host *host, nlm_host_match_fn_t match)
+nlm_traverse_files(void *data, nlm_host_match_fn_t match,
+               int (*is_failover_file)(void *data, struct nlm_file *file))
 {
        struct hlist_node *pos, *next;
        struct nlm_file *file;
@@ -239,12 +248,14 @@ nlm_traverse_files(struct nlm_host *host, nlm_host_match_fn_t match)
        mutex_lock(&nlm_file_mutex);
        for (i = 0; i < FILE_NRHASH; i++) {
                hlist_for_each_entry_safe(file, pos, next, &nlm_files[i], f_list) {
+                       if (is_failover_file && !is_failover_file(data, file))
+                               continue;
                        file->f_count++;
                        mutex_unlock(&nlm_file_mutex);
 
                        /* Traverse locks, blocks and shares of this file
                         * and update file->f_locks count */
-                       if (nlm_inspect_file(host, file, match))
+                       if (nlm_inspect_file(data, file, match))
                                ret = 1;
 
                        mutex_lock(&nlm_file_mutex);
@@ -303,21 +314,27 @@ nlm_release_file(struct nlm_file *file)
  *     Used by nlmsvc_invalidate_all
  */
 static int
-nlmsvc_mark_host(struct nlm_host *host, struct nlm_host *dummy)
+nlmsvc_mark_host(void *data, struct nlm_host *dummy)
 {
+       struct nlm_host *host = data;
+
        host->h_inuse = 1;
        return 0;
 }
 
 static int
-nlmsvc_same_host(struct nlm_host *host, struct nlm_host *other)
+nlmsvc_same_host(void *data, struct nlm_host *other)
 {
+       struct nlm_host *host = data;
+
        return host == other;
 }
 
 static int
-nlmsvc_is_client(struct nlm_host *host, struct nlm_host *dummy)
+nlmsvc_is_client(void *data, struct nlm_host *dummy)
 {
+       struct nlm_host *host = data;
+
        if (host->h_server) {
                /* we are destroying locks even though the client
                 * hasn't asked us too, so don't unmonitor the
@@ -337,7 +354,7 @@ void
 nlmsvc_mark_resources(void)
 {
        dprintk("lockd: nlmsvc_mark_resources\n");
-       nlm_traverse_files(NULL, nlmsvc_mark_host);
+       nlm_traverse_files(NULL, nlmsvc_mark_host, NULL);
 }
 
 /*
@@ -348,7 +365,7 @@ nlmsvc_free_host_resources(struct nlm_host *host)
 {
        dprintk("lockd: nlmsvc_free_host_resources\n");
 
-       if (nlm_traverse_files(host, nlmsvc_same_host)) {
+       if (nlm_traverse_files(host, nlmsvc_same_host, NULL)) {
                printk(KERN_WARNING
                        "lockd: couldn't remove all locks held by %s\n",
                        host->h_name);
@@ -368,5 +385,41 @@ nlmsvc_invalidate_all(void)
         * turn, which is about as inefficient as it gets.
         * Now we just do it once in nlm_traverse_files.
         */
-       nlm_traverse_files(NULL, nlmsvc_is_client);
+       nlm_traverse_files(NULL, nlmsvc_is_client, NULL);
+}
+
+static int
+nlmsvc_match_sb(void *datap, struct nlm_file *file)
+{
+       struct super_block *sb = datap;
+
+       return sb == file->f_file->f_path.mnt->mnt_sb;
+}
+
+int
+nlmsvc_unlock_all_by_sb(struct super_block *sb)
+{
+       int ret;
+
+       ret = nlm_traverse_files(sb, nlmsvc_always_match, nlmsvc_match_sb);
+       return ret ? -EIO : 0;
+}
+EXPORT_SYMBOL_GPL(nlmsvc_unlock_all_by_sb);
+
+static int
+nlmsvc_match_ip(void *datap, struct nlm_host *host)
+{
+       __be32 *server_addr = datap;
+
+       return host->h_saddr.sin_addr.s_addr == *server_addr;
+}
+
+int
+nlmsvc_unlock_all_by_ip(__be32 server_addr)
+{
+       int ret;
+       ret = nlm_traverse_files(&server_addr, nlmsvc_match_ip, NULL);
+       return ret ? -EIO : 0;
+
 }
+EXPORT_SYMBOL_GPL(nlmsvc_unlock_all_by_ip);
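
The two EXPORT_SYMBOL_GPL()s above make this lock teardown available to other kernel code; the nfsd control-file handlers added further down in this patch are the intended callers. A hedged sketch of such a caller, assuming the prototypes are declared in <linux/lockd/lockd.h> as the nfsctl.c hunk below implies; the address and the function name are invented for illustration:

#include <linux/in.h>
#include <linux/lockd/lockd.h>

/* Hypothetical failover helper: drop every NLM lock that clients hold
 * through the server address 192.168.1.10 before that virtual IP is
 * migrated to another node.  Only nlmsvc_unlock_all_by_ip() itself is
 * from this patch. */
static int demo_release_ip_locks(void)
{
        __be32 addr = htonl((192u << 24) | (168u << 16) | (1u << 8) | 10u);

        return nlmsvc_unlock_all_by_ip(addr);   /* 0 on success, -EIO on failure */
}
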
index 592faadbcec12f19b2f6ea158e956e2a3b01c838..e1ea2fe036811a7a4f68702655a39c28111e033a 100644 (file)
@@ -224,7 +224,7 @@ static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
 /*
  * Initialize a new lock from an existing file_lock structure.
  */
-static void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
+void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
 {
        new->fl_owner = fl->fl_owner;
        new->fl_pid = fl->fl_pid;
@@ -833,7 +833,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
                        if (!posix_locks_conflict(request, fl))
                                continue;
                        if (conflock)
-                               locks_copy_lock(conflock, fl);
+                               __locks_copy_lock(conflock, fl);
                        error = -EAGAIN;
                        if (!(request->fl_flags & FL_SLEEP))
                                goto out;
@@ -1367,18 +1367,20 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
 
        lease = *flp;
 
-       error = -EAGAIN;
-       if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
-               goto out;
-       if ((arg == F_WRLCK)
-           && ((atomic_read(&dentry->d_count) > 1)
-               || (atomic_read(&inode->i_count) > 1)))
-               goto out;
+       if (arg != F_UNLCK) {
+               error = -ENOMEM;
+               new_fl = locks_alloc_lock();
+               if (new_fl == NULL)
+                       goto out;
 
-       error = -ENOMEM;
-       new_fl = locks_alloc_lock();
-       if (new_fl == NULL)
-               goto out;
+               error = -EAGAIN;
+               if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
+                       goto out;
+               if ((arg == F_WRLCK)
+                   && ((atomic_read(&dentry->d_count) > 1)
+                       || (atomic_read(&inode->i_count) > 1)))
+                       goto out;
+       }
 
        /*
         * At this point, we know that if there is an exclusive
@@ -1404,6 +1406,7 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
                        rdlease_count++;
        }
 
+       error = -EAGAIN;
        if ((arg == F_RDLCK && (wrlease_count > 0)) ||
            (arg == F_WRLCK && ((rdlease_count + wrlease_count) > 0)))
                goto out;
@@ -1490,8 +1493,7 @@ EXPORT_SYMBOL_GPL(vfs_setlease);
 int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
 {
        struct file_lock fl, *flp = &fl;
-       struct dentry *dentry = filp->f_path.dentry;
-       struct inode *inode = dentry->d_inode;
+       struct inode *inode = filp->f_path.dentry->d_inode;
        int error;
 
        locks_init_lock(&fl);
index 55dfdd71f1b0d8be42a3ce01a4a15e8228b9aaa0..8799b8708188a3acf901f1d09b8e773865d139c5 100644 (file)
@@ -2712,9 +2712,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        * Note: locks.c uses the BKL to protect the inode's lock list.
        */
 
-       /* XXX?: Just to divert the locks_release_private at the start of
-        * locks_copy_lock: */
-       locks_init_lock(&conflock);
        err = vfs_lock_file(filp, cmd, &file_lock, &conflock);
        switch (-err) {
        case 0: /* success! */
index 1ba7ad981935d6f455d5c3185c78ba0842913898..c513bbdf2d36a2188d286ae1e32cbb81fab16107 100644 (file)
@@ -376,20 +376,6 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, struct iattr *ia
                        goto xdr_error;
                }
        }
-       if (bmval[1] & FATTR4_WORD1_TIME_METADATA) {
-               /* We require the high 32 bits of 'seconds' to be 0, and we ignore
-                  all 32 bits of 'nseconds'. */
-               READ_BUF(12);
-               len += 12;
-               READ32(dummy32);
-               if (dummy32)
-                       return nfserr_inval;
-               READ32(iattr->ia_ctime.tv_sec);
-               READ32(iattr->ia_ctime.tv_nsec);
-               if (iattr->ia_ctime.tv_nsec >= (u32)1000000000)
-                       return nfserr_inval;
-               iattr->ia_valid |= ATTR_CTIME;
-       }
        if (bmval[1] & FATTR4_WORD1_TIME_MODIFY_SET) {
                READ_BUF(4);
                len += 4;
index 613bcb8171a5da1a649f5a5ab01601e42830a9f5..42f3820ee8f543ca022d019b3b1de49e5907648c 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/seq_file.h>
 #include <linux/pagemap.h>
 #include <linux/init.h>
+#include <linux/inet.h>
 #include <linux/string.h>
 #include <linux/smp_lock.h>
 #include <linux/ctype.h>
@@ -35,6 +36,7 @@
 #include <linux/nfsd/cache.h>
 #include <linux/nfsd/xdr.h>
 #include <linux/nfsd/syscall.h>
+#include <linux/lockd/lockd.h>
 
 #include <asm/uaccess.h>
 #include <net/ipv6.h>
@@ -53,6 +55,8 @@ enum {
        NFSD_Getfs,
        NFSD_List,
        NFSD_Fh,
+       NFSD_FO_UnlockIP,
+       NFSD_FO_UnlockFS,
        NFSD_Threads,
        NFSD_Pool_Threads,
        NFSD_Versions,
@@ -89,6 +93,9 @@ static ssize_t write_leasetime(struct file *file, char *buf, size_t size);
 static ssize_t write_recoverydir(struct file *file, char *buf, size_t size);
 #endif
 
+static ssize_t failover_unlock_ip(struct file *file, char *buf, size_t size);
+static ssize_t failover_unlock_fs(struct file *file, char *buf, size_t size);
+
 static ssize_t (*write_op[])(struct file *, char *, size_t) = {
        [NFSD_Svc] = write_svc,
        [NFSD_Add] = write_add,
@@ -98,6 +105,8 @@ static ssize_t (*write_op[])(struct file *, char *, size_t) = {
        [NFSD_Getfd] = write_getfd,
        [NFSD_Getfs] = write_getfs,
        [NFSD_Fh] = write_filehandle,
+       [NFSD_FO_UnlockIP] = failover_unlock_ip,
+       [NFSD_FO_UnlockFS] = failover_unlock_fs,
        [NFSD_Threads] = write_threads,
        [NFSD_Pool_Threads] = write_pool_threads,
        [NFSD_Versions] = write_versions,
@@ -298,6 +307,58 @@ static ssize_t write_getfd(struct file *file, char *buf, size_t size)
        return err;
 }
 
+static ssize_t failover_unlock_ip(struct file *file, char *buf, size_t size)
+{
+       __be32 server_ip;
+       char *fo_path, c;
+       int b1, b2, b3, b4;
+
+       /* sanity check */
+       if (size == 0)
+               return -EINVAL;
+
+       if (buf[size-1] != '\n')
+               return -EINVAL;
+
+       fo_path = buf;
+       if (qword_get(&buf, fo_path, size) < 0)
+               return -EINVAL;
+
+       /* get ipv4 address */
+       if (sscanf(fo_path, "%u.%u.%u.%u%c", &b1, &b2, &b3, &b4, &c) != 4)
+               return -EINVAL;
+       server_ip = htonl((((((b1<<8)|b2)<<8)|b3)<<8)|b4);
+
+       return nlmsvc_unlock_all_by_ip(server_ip);
+}
+
+static ssize_t failover_unlock_fs(struct file *file, char *buf, size_t size)
+{
+       struct nameidata nd;
+       char *fo_path;
+       int error;
+
+       /* sanity check */
+       if (size == 0)
+               return -EINVAL;
+
+       if (buf[size-1] != '\n')
+               return -EINVAL;
+
+       fo_path = buf;
+       if (qword_get(&buf, fo_path, size) < 0)
+               return -EINVAL;
+
+       error = path_lookup(fo_path, 0, &nd);
+       if (error)
+               return error;
+
+       error = nlmsvc_unlock_all_by_sb(nd.path.mnt->mnt_sb);
+
+       path_put(&nd.path);
+       return error;
+}
+
 static ssize_t write_filehandle(struct file *file, char *buf, size_t size)
 {
        /* request is:
@@ -700,6 +761,10 @@ static int nfsd_fill_super(struct super_block * sb, void * data, int silent)
                [NFSD_Getfd] = {".getfd", &transaction_ops, S_IWUSR|S_IRUSR},
                [NFSD_Getfs] = {".getfs", &transaction_ops, S_IWUSR|S_IRUSR},
                [NFSD_List] = {"exports", &exports_operations, S_IRUGO},
+               [NFSD_FO_UnlockIP] = {"unlock_ip",
+                                       &transaction_ops, S_IWUSR|S_IRUSR},
+               [NFSD_FO_UnlockFS] = {"unlock_filesystem",
+                                       &transaction_ops, S_IWUSR|S_IRUSR},
                [NFSD_Fh] = {"filehandle", &transaction_ops, S_IWUSR|S_IRUSR},
                [NFSD_Threads] = {"threads", &transaction_ops, S_IWUSR|S_IRUSR},
                [NFSD_Pool_Threads] = {"pool_threads", &transaction_ops, S_IWUSR|S_IRUSR},
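
Each of the new control files takes one newline-terminated line of input: a dotted-quad IPv4 address for unlock_ip, a path for unlock_filesystem. A hedged userspace sketch follows; it assumes the nfsd filesystem is mounted at its conventional /proc/fs/nfsd location, which this patch does not itself establish:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Ask the kernel to drop all NLM locks held via a failover IP.
 * Assumes the nfsd filesystem is mounted at /proc/fs/nfsd. */
static int demo_unlock_ip(const char *ipv4)
{
        char buf[64];
        int fd, n;

        fd = open("/proc/fs/nfsd/unlock_ip", O_WRONLY);
        if (fd < 0)
                return -1;
        n = snprintf(buf, sizeof(buf), "%s\n", ipv4);  /* trailing '\n' is required */
        if (write(fd, buf, n) != n) {
                close(fd);
                return -1;
        }
        return close(fd);
}
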
diff --git a/include/asm-arm/arch-pxa/pxa3xx_nand.h b/include/asm-arm/arch-pxa/pxa3xx_nand.h
new file mode 100644 (file)
index 0000000..81a8937
--- /dev/null
@@ -0,0 +1,18 @@
+#ifndef __ASM_ARCH_PXA3XX_NAND_H
+#define __ASM_ARCH_PXA3XX_NAND_H
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+struct pxa3xx_nand_platform_data {
+
+       /* the data flash bus is shared between the Static Memory
+        * Controller and the Data Flash Controller,  the arbiter
+        * controls the ownership of the bus
+        */
+       int     enable_arbiter;
+
+       struct mtd_partition *parts;
+       unsigned int    nr_parts;
+};
+#endif /* __ASM_ARCH_PXA3XX_NAND_H */
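
Board support code is expected to fill this structure in and hand it to the PXA3xx NAND driver as platform data. A minimal sketch under stated assumptions: the partition layout is invented for illustration, and the <asm/arch/pxa3xx_nand.h> include path simply reflects where the header above is added:

#include <linux/kernel.h>
#include <linux/mtd/partitions.h>
#include <asm/arch/pxa3xx_nand.h>       /* the header added above */

/* Invented partition map, purely for illustration. */
static struct mtd_partition demo_nand_parts[] = {
        {
                .name   = "bootloader",
                .offset = 0,
                .size   = 1024 * 1024,
        }, {
                .name   = "filesystem",
                .offset = MTDPART_OFS_APPEND,
                .size   = MTDPART_SIZ_FULL,
        },
};

static struct pxa3xx_nand_platform_data demo_nand_info = {
        .enable_arbiter = 1,    /* share the bus with the Static Memory Controller */
        .parts          = demo_nand_parts,
        .nr_parts       = ARRAY_SIZE(demo_nand_parts),
};
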
index 8816f7f9cee15add2e5614d281db721c374c0392..ad6bbe90616e2e026810deabffe71f90fafd2ab5 100644 (file)
 */
 
 struct s3c2410_nand_set {
+       unsigned int            disable_ecc : 1;
+
        int                     nr_chips;
        int                     nr_partitions;
        char                    *name;
        int                     *nr_map;
        struct mtd_partition    *partitions;
+       struct nand_ecclayout   *ecc_layout;
 };
 
 struct s3c2410_platform_nand {
@@ -36,6 +39,8 @@ struct s3c2410_platform_nand {
        int     twrph0; /* active time for nWE/nOE */
        int     twrph1; /* time for release CLE/ALE from nWE/nOE inactive */
 
+       unsigned int    ignore_unset_ecc : 1;
+
        int                     nr_sets;
        struct s3c2410_nand_set *sets;
 
index 382eb271a89203cc2439fe8a1fb4b38250725648..5bd206973dca3cf184b859a1c2031ce6d3501ff3 100644 (file)
@@ -1,5 +1,13 @@
+#ifndef _ASM_FIXMAP_H
+#define _ASM_FIXMAP_H
+
 #ifdef CONFIG_X86_32
 # include "fixmap_32.h"
 #else
 # include "fixmap_64.h"
 #endif
+
+#define clear_fixmap(idx)                      \
+       __set_fixmap(idx, 0, __pgprot(0))
+
+#endif
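
With clear_fixmap() hoisted into the shared header, 32-bit and 64-bit code can pair it with the existing set_fixmap*() helpers without duplicating the macro. A small hedged sketch; FIX_DEMO is an invented fixmap index, not one defined by this patch, and the real indices live in the per-arch fixed_addresses enum:

#include <asm/fixmap.h>

/* Map a physical page at a fixed virtual slot, use it, then tear the
 * mapping down again. */
static void demo_fixmap_use(unsigned long phys)
{
        set_fixmap_nocache(FIX_DEMO, phys);
        /* ... access the page through fix_to_virt(FIX_DEMO) ... */
        clear_fixmap(FIX_DEMO);
}
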
index eb1665125c443fac0cb1a474b2d7ae4d4c76f4de..4b96148e90c15587f715969f2ff4c6fd5a07d4d8 100644 (file)
@@ -10,8 +10,8 @@
  * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
  */
 
-#ifndef _ASM_FIXMAP_H
-#define _ASM_FIXMAP_H
+#ifndef _ASM_FIXMAP_32_H
+#define _ASM_FIXMAP_32_H
 
 
 /* used by vmalloc.c, vsyscall.lds.S.
@@ -121,9 +121,6 @@ extern void reserve_top_address(unsigned long reserve);
 #define set_fixmap_nocache(idx, phys)                  \
        __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
 
-#define clear_fixmap(idx)                      \
-       __set_fixmap(idx, 0, __pgprot(0))
-
 #define FIXADDR_TOP    ((unsigned long)__FIXADDR_TOP)
 
 #define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
index f3d76858c0e6f4b5c380736a1db9b79afcc4242f..355d26a75a821a87aa298ee52f79617015dc7a8e 100644 (file)
@@ -8,8 +8,8 @@
  * Copyright (C) 1998 Ingo Molnar
  */
 
-#ifndef _ASM_FIXMAP_H
-#define _ASM_FIXMAP_H
+#ifndef _ASM_FIXMAP_64_H
+#define _ASM_FIXMAP_64_H
 
 #include <linux/kernel.h>
 #include <asm/apicdef.h>
index 3d419398499b4a6fe14de3eec92761f2e80ba19a..0f13b945e2400323cb65dfd6f8a7cc5b035c3319 100644 (file)
@@ -220,11 +220,13 @@ struct pv_mmu_ops {
                                 unsigned long va);
 
        /* Hooks for allocating/releasing pagetable pages */
-       void (*alloc_pt)(struct mm_struct *mm, u32 pfn);
-       void (*alloc_pd)(struct mm_struct *mm, u32 pfn);
-       void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
-       void (*release_pt)(u32 pfn);
-       void (*release_pd)(u32 pfn);
+       void (*alloc_pte)(struct mm_struct *mm, u32 pfn);
+       void (*alloc_pmd)(struct mm_struct *mm, u32 pfn);
+       void (*alloc_pmd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
+       void (*alloc_pud)(struct mm_struct *mm, u32 pfn);
+       void (*release_pte)(u32 pfn);
+       void (*release_pmd)(u32 pfn);
+       void (*release_pud)(u32 pfn);
 
        /* Pagetable manipulation functions */
        void (*set_pte)(pte_t *ptep, pte_t pteval);
@@ -910,28 +912,37 @@ static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
        PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va);
 }
 
-static inline void paravirt_alloc_pt(struct mm_struct *mm, unsigned pfn)
+static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned pfn)
 {
-       PVOP_VCALL2(pv_mmu_ops.alloc_pt, mm, pfn);
+       PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
 }
-static inline void paravirt_release_pt(unsigned pfn)
+static inline void paravirt_release_pte(unsigned pfn)
 {
-       PVOP_VCALL1(pv_mmu_ops.release_pt, pfn);
+       PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
 }
 
-static inline void paravirt_alloc_pd(struct mm_struct *mm, unsigned pfn)
+static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned pfn)
 {
-       PVOP_VCALL2(pv_mmu_ops.alloc_pd, mm, pfn);
+       PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
 }
 
-static inline void paravirt_alloc_pd_clone(unsigned pfn, unsigned clonepfn,
-                                          unsigned start, unsigned count)
+static inline void paravirt_alloc_pmd_clone(unsigned pfn, unsigned clonepfn,
+                                           unsigned start, unsigned count)
 {
-       PVOP_VCALL4(pv_mmu_ops.alloc_pd_clone, pfn, clonepfn, start, count);
+       PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
 }
-static inline void paravirt_release_pd(unsigned pfn)
+static inline void paravirt_release_pmd(unsigned pfn)
 {
-       PVOP_VCALL1(pv_mmu_ops.release_pd, pfn);
+       PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
+}
+
+static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned pfn)
+{
+       PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
+}
+static inline void paravirt_release_pud(unsigned pfn)
+{
+       PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
 }
 
 #ifdef CONFIG_HIGHPTE
index 5886eed05886d897a97f59200c5f33c3af37be95..91e4641f3f319de6072c63b436c17fb252c9e5b6 100644 (file)
@@ -1,5 +1,110 @@
-#ifdef CONFIG_X86_32
-# include "pgalloc_32.h"
+#ifndef _ASM_X86_PGALLOC_H
+#define _ASM_X86_PGALLOC_H
+
+#include <linux/threads.h>
+#include <linux/mm.h>          /* for struct page */
+#include <linux/pagemap.h>
+
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
 #else
-# include "pgalloc_64.h"
+static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn) {}
+static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) {}
+static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
+                                           unsigned long start, unsigned long count) {}
+static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn) {}
+static inline void paravirt_release_pte(unsigned long pfn) {}
+static inline void paravirt_release_pmd(unsigned long pfn) {}
+static inline void paravirt_release_pud(unsigned long pfn) {}
 #endif
+
+/*
+ * Allocate and free page tables.
+ */
+extern pgd_t *pgd_alloc(struct mm_struct *);
+extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
+
+extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
+extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
+
+/* Should really implement gc for free page table pages. This could be
+   done with a reference count in struct page. */
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+       BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
+       free_page((unsigned long)pte);
+}
+
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
+{
+       __free_page(pte);
+}
+
+extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte);
+
+static inline void pmd_populate_kernel(struct mm_struct *mm,
+                                      pmd_t *pmd, pte_t *pte)
+{
+       paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
+       set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
+}
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
+                               struct page *pte)
+{
+       unsigned long pfn = page_to_pfn(pte);
+
+       paravirt_alloc_pte(mm, pfn);
+       set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
+}
+
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+#if PAGETABLE_LEVELS > 2
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+       return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+}
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+       BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
+       free_page((unsigned long)pmd);
+}
+
+extern void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
+
+#ifdef CONFIG_X86_PAE
+extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
+#else  /* !CONFIG_X86_PAE */
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+       paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
+       set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
+}
+#endif /* CONFIG_X86_PAE */
+
+#if PAGETABLE_LEVELS > 3
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+{
+       paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
+       set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
+}
+
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+       return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+}
+
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+{
+       BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
+       free_page((unsigned long)pud);
+}
+
+extern void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);
+#endif /* PAGETABLE_LEVELS > 3 */
+#endif /* PAGETABLE_LEVELS > 2 */
+
+#endif /* _ASM_X86_PGALLOC_H */
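
The unified header keeps the call pattern the generic mm code relies on: allocate a lower-level table page, then populate the parent entry, which in turn notifies any paravirt hooks. A minimal sketch of that pairing; the function name is illustrative, and locking plus the case where another thread races in and populates the entry first are omitted:

#include <linux/mm.h>
#include <asm/pgalloc.h>

/* Sketch: allocate a pte page and wire it beneath an existing pmd entry,
 * mirroring how the generic mm code uses this header.  The paravirt
 * notification happens inside pmd_populate_kernel(). */
static int demo_wire_pte_page(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
{
        pte_t *pte = pte_alloc_one_kernel(mm, addr);

        if (!pte)
                return -ENOMEM;
        pmd_populate_kernel(mm, pmd, pte);
        return 0;
}
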
diff --git a/include/asm-x86/pgalloc_32.h b/include/asm-x86/pgalloc_32.h
deleted file mode 100644 (file)
index 6bea6e5..0000000
+++ /dev/null
@@ -1,95 +0,0 @@
-#ifndef _I386_PGALLOC_H
-#define _I386_PGALLOC_H
-
-#include <linux/threads.h>
-#include <linux/mm.h>          /* for struct page */
-#include <linux/pagemap.h>
-#include <asm/tlb.h>
-#include <asm-generic/tlb.h>
-
-#ifdef CONFIG_PARAVIRT
-#include <asm/paravirt.h>
-#else
-#define paravirt_alloc_pt(mm, pfn) do { } while (0)
-#define paravirt_alloc_pd(mm, pfn) do { } while (0)
-#define paravirt_alloc_pd_clone(pfn, clonepfn, start, count) do { } while (0)
-#define paravirt_release_pt(pfn) do { } while (0)
-#define paravirt_release_pd(pfn) do { } while (0)
-#endif
-
-static inline void pmd_populate_kernel(struct mm_struct *mm,
-                                      pmd_t *pmd, pte_t *pte)
-{
-       paravirt_alloc_pt(mm, __pa(pte) >> PAGE_SHIFT);
-       set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
-}
-
-static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
-{
-       unsigned long pfn = page_to_pfn(pte);
-
-       paravirt_alloc_pt(mm, pfn);
-       set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
-}
-#define pmd_pgtable(pmd) pmd_page(pmd)
-
-/*
- * Allocate and free page tables.
- */
-extern pgd_t *pgd_alloc(struct mm_struct *);
-extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
-
-extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
-extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
-
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
-       free_page((unsigned long)pte);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
-{
-       pgtable_page_dtor(pte);
-       __free_page(pte);
-}
-
-
-extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte);
-
-#ifdef CONFIG_X86_PAE
-/*
- * In the PAE case we free the pmds as part of the pgd.
- */
-static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
-       return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
-}
-
-static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
-{
-       BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
-       free_page((unsigned long)pmd);
-}
-
-extern void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
-
-static inline void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
-{
-       paravirt_alloc_pd(mm, __pa(pmd) >> PAGE_SHIFT);
-
-       /* Note: almost everything apart from _PAGE_PRESENT is
-          reserved at the pmd (PDPT) level. */
-       set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));
-
-       /*
-        * According to Intel App note "TLBs, Paging-Structure Caches,
-        * and Their Invalidation", April 2007, document 317080-001,
-        * section 8.1: in PAE mode we explicitly have to flush the
-        * TLB via cr3 if the top-level pgd is changed...
-        */
-       if (mm == current->active_mm)
-               write_cr3(read_cr3());
-}
-#endif /* CONFIG_X86_PAE */
-
-#endif /* _I386_PGALLOC_H */
diff --git a/include/asm-x86/pgalloc_64.h b/include/asm-x86/pgalloc_64.h
deleted file mode 100644 (file)
index 8d67223..0000000
+++ /dev/null
@@ -1,133 +0,0 @@
-#ifndef _X86_64_PGALLOC_H
-#define _X86_64_PGALLOC_H
-
-#include <asm/pda.h>
-#include <linux/threads.h>
-#include <linux/mm.h>
-
-#define pmd_populate_kernel(mm, pmd, pte) \
-               set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)))
-#define pud_populate(mm, pud, pmd) \
-               set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)))
-#define pgd_populate(mm, pgd, pud) \
-               set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)))
-
-#define pmd_pgtable(pmd) pmd_page(pmd)
-
-static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
-{
-       set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
-}
-
-static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
-{
-       BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
-       free_page((unsigned long)pmd);
-}
-
-static inline pmd_t *pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
-{
-       return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
-}
-
-static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
-       return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
-}
-
-static inline void pud_free(struct mm_struct *mm, pud_t *pud)
-{
-       BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
-       free_page((unsigned long)pud);
-}
-
-static inline void pgd_list_add(pgd_t *pgd)
-{
-       struct page *page = virt_to_page(pgd);
-       unsigned long flags;
-
-       spin_lock_irqsave(&pgd_lock, flags);
-       list_add(&page->lru, &pgd_list);
-       spin_unlock_irqrestore(&pgd_lock, flags);
-}
-
-static inline void pgd_list_del(pgd_t *pgd)
-{
-       struct page *page = virt_to_page(pgd);
-       unsigned long flags;
-
-       spin_lock_irqsave(&pgd_lock, flags);
-       list_del(&page->lru);
-       spin_unlock_irqrestore(&pgd_lock, flags);
-}
-
-static inline pgd_t *pgd_alloc(struct mm_struct *mm)
-{
-       unsigned boundary;
-       pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-       if (!pgd)
-               return NULL;
-       pgd_list_add(pgd);
-       /*
-        * Copy kernel pointers in from init.
-        * Could keep a freelist or slab cache of those because the kernel
-        * part never changes.
-        */
-       boundary = pgd_index(__PAGE_OFFSET);
-       memset(pgd, 0, boundary * sizeof(pgd_t));
-       memcpy(pgd + boundary,
-              init_level4_pgt + boundary,
-              (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
-       return pgd;
-}
-
-static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
-{
-       BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
-       pgd_list_del(pgd);
-       free_page((unsigned long)pgd);
-}
-
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
-{
-       return (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
-}
-
-static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
-{
-       struct page *page;
-       void *p;
-
-       p = (void *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
-       if (!p)
-               return NULL;
-       page = virt_to_page(p);
-       pgtable_page_ctor(page);
-       return page;
-}
-
-/* Should really implement gc for free page table pages. This could be
-   done with a reference count in struct page. */
-
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
-       BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
-       free_page((unsigned long)pte); 
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
-{
-       pgtable_page_dtor(pte);
-       __free_page(pte);
-} 
-
-#define __pte_free_tlb(tlb,pte)                                \
-do {                                                   \
-       pgtable_page_dtor((pte));                               \
-       tlb_remove_page((tlb), (pte));                  \
-} while (0)
-
-#define __pmd_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
-#define __pud_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
-
-#endif /* _X86_64_PGALLOC_H */
index f1d9f4a03f6fafc0dfe8de3634196f571608d6e4..b8a08bd7bd48db84f62e7653a32e0c5e14fb3254 100644 (file)
@@ -1,7 +1,6 @@
 #ifndef _ASM_X86_PGTABLE_H
 #define _ASM_X86_PGTABLE_H
 
-#define USER_PTRS_PER_PGD      ((TASK_SIZE-1)/PGDIR_SIZE+1)
 #define FIRST_USER_ADDRESS     0
 
 #define _PAGE_BIT_PRESENT      0       /* is present */
@@ -330,6 +329,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 # include "pgtable_64.h"
 #endif
 
+#define KERNEL_PGD_BOUNDARY    pgd_index(PAGE_OFFSET)
+#define KERNEL_PGD_PTRS                (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
+
 #ifndef __ASSEMBLY__
 
 enum {
@@ -389,37 +391,17 @@ static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
  * bit at the same time.
  */
 #define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-#define ptep_set_access_flags(vma, address, ptep, entry, dirty)                \
-({                                                                     \
-       int __changed = !pte_same(*(ptep), entry);                      \
-       if (__changed && dirty) {                                       \
-               *ptep = entry;                                          \
-               pte_update_defer((vma)->vm_mm, (address), (ptep));      \
-               flush_tlb_page(vma, address);                           \
-       }                                                               \
-       __changed;                                                      \
-})
+extern int ptep_set_access_flags(struct vm_area_struct *vma,
+                                unsigned long address, pte_t *ptep,
+                                pte_t entry, int dirty);
 
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define ptep_test_and_clear_young(vma, addr, ptep) ({                  \
-       int __ret = 0;                                                  \
-       if (pte_young(*(ptep)))                                         \
-               __ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,          \
-                                          &(ptep)->pte);               \
-       if (__ret)                                                      \
-               pte_update((vma)->vm_mm, addr, ptep);                   \
-       __ret;                                                          \
-})
+extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
+                                    unsigned long addr, pte_t *ptep);
 
 #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
-#define ptep_clear_flush_young(vma, address, ptep)                     \
-({                                                                     \
-       int __young;                                                    \
-       __young = ptep_test_and_clear_young((vma), (address), (ptep));  \
-       if (__young)                                                    \
-               flush_tlb_page(vma, address);                           \
-       __young;                                                        \
-})
+extern int ptep_clear_flush_young(struct vm_area_struct *vma,
+                                 unsigned long address, pte_t *ptep);
 
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
@@ -456,6 +438,22 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
        pte_update(mm, addr, ptep);
 }
 
+/*
+ * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
+ *
+ *  dst - pointer to pgd range anywhere on a pgd page
+ *  src - ""
+ *  count - the number of pgds to copy.
+ *
+ * dst and src can be on the same page, but the range must not overlap,
+ * and must not cross a page boundary.
+ */
+static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
+{
+       memcpy(dst, src, count * sizeof(pgd_t));
+}
+
+
 #include <asm-generic/pgtable.h>
 #endif /* __ASSEMBLY__ */
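The two new macros plus the relocated clone_pgd_range() give 32-bit and 64-bit code a single way to split a pgd into its user and kernel halves. Below is a minimal sketch of how an allocator could combine them; seed_kernel_pgds() is a hypothetical helper, and swapper_pg_dir stands in for whichever reference page table the architecture actually copies from (the 64-bit path uses init_level4_pgt).

#include <linux/string.h>
#include <asm/pgtable.h>

/*
 * Hypothetical helper (not part of this patch): zero the user half of a
 * freshly allocated pgd and clone the kernel half from the reference
 * page tables.  swapper_pg_dir is an assumption for the 32-bit case.
 */
static void seed_kernel_pgds(pgd_t *pgd)
{
	memset(pgd, 0, KERNEL_PGD_BOUNDARY * sizeof(pgd_t));
	clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);
}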
 
index c4a6436744582e35676c06de5833402bc4de30d3..168b6447cf185891e40a25b5734d15713ad4f15f 100644 (file)
@@ -48,9 +48,6 @@ void paging_init(void);
 #define PGDIR_SIZE     (1UL << PGDIR_SHIFT)
 #define PGDIR_MASK     (~(PGDIR_SIZE - 1))
 
-#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
-#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
-
 /* Just any arbitrary offset to the start of the vmalloc VM area: the
  * current 8MB value just means that there will be a 8MB "hole" after the
  * physical memory until the kernel virtual memory starts.  That means that
@@ -108,21 +105,6 @@ extern int pmd_bad(pmd_t pmd);
 # include <asm/pgtable-2level.h>
 #endif
 
-/*
- * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
- *
- *  dst - pointer to pgd range anwhere on a pgd page
- *  src - ""
- *  count - the number of pgds to copy.
- *
- * dst and src can be on the same page, but the range must not overlap,
- * and must not cross a page boundary.
- */
-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
-{
-       memcpy(dst, src, count * sizeof(pgd_t));
-}
-
 /*
  * Macro to mark a page protection value as "uncacheable".
  * On processors which do not support it, this is a no-op.
index 9fd87d0b6477d209177790c169558b87599934a9..a3bbf8766c1d62790d2f999bba1edac5dab7fa64 100644 (file)
@@ -24,7 +24,7 @@ extern void paging_init(void);
 
 #endif /* !__ASSEMBLY__ */
 
-#define SHARED_KERNEL_PMD      1
+#define SHARED_KERNEL_PMD      0
 
 /*
  * PGDIR_SHIFT determines what a top-level page table entry can map
diff --git a/include/asm-x86/xen/events.h b/include/asm-x86/xen/events.h
new file mode 100644 (file)
index 0000000..596312a
--- /dev/null
@@ -0,0 +1,22 @@
+#ifndef __XEN_EVENTS_H
+#define __XEN_EVENTS_H
+
+enum ipi_vector {
+       XEN_RESCHEDULE_VECTOR,
+       XEN_CALL_FUNCTION_VECTOR,
+
+       XEN_NR_IPIS,
+};
+
+static inline int xen_irqs_disabled(struct pt_regs *regs)
+{
+       return raw_irqs_disabled_flags(regs->flags);
+}
+
+static inline void xen_do_IRQ(int irq, struct pt_regs *regs)
+{
+       regs->orig_ax = ~irq;
+       do_IRQ(regs);
+}
+
+#endif /* __XEN_EVENTS_H */
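The new asm-x86/xen/events.h keeps the architecture-specific pieces (IPI vectors, flag test, IRQ dispatch) out of the generic event-channel code. A sketch of how an upcall path might use the inline helpers above; handle_one_event() and the evtchn_to_irq[] array are illustrative stand-ins for the mapping kept by drivers/xen/events.c, not code from this patch.

#include <asm/xen/events.h>

/* Illustrative only: dispatch one decoded event-channel port. */
static int evtchn_to_irq[1024];		/* stand-in port->irq mapping */

static void handle_one_event(unsigned int port, struct pt_regs *regs)
{
	int irq = evtchn_to_irq[port];

	if (irq > 0)
		xen_do_IRQ(irq, regs);	/* sets regs->orig_ax, calls do_IRQ() */
}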
diff --git a/include/asm-x86/xen/grant_table.h b/include/asm-x86/xen/grant_table.h
new file mode 100644 (file)
index 0000000..2444d45
--- /dev/null
@@ -0,0 +1,7 @@
+#ifndef __XEN_GRANT_TABLE_H
+#define __XEN_GRANT_TABLE_H
+
+#define xen_alloc_vm_area(size)        alloc_vm_area(size)
+#define xen_free_vm_area(area) free_vm_area(area)
+
+#endif /* __XEN_GRANT_TABLE_H */
index bc0ee7d961ca8d55ac98ebcfb9607a3be920603b..c2ccd997ed3537f00ff3ea78195a845d3dd27d0c 100644 (file)
@@ -163,6 +163,12 @@ HYPERVISOR_set_callbacks(unsigned long event_selector,
                           failsafe_selector, failsafe_address);
 }
 
+static inline int
+HYPERVISOR_callback_op(int cmd, void *arg)
+{
+       return _hypercall2(int, callback_op, cmd, arg);
+}
+
 static inline int
 HYPERVISOR_fpu_taskswitch(int set)
 {
index 165c3968e1380df0d5b3a06ad774486c10364c46..6227000a1e840780512b31e1e246bf7cd14ee716 100644 (file)
 #define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
 #define GUEST_HANDLE(name)        __guest_handle_ ## name
 
+#ifdef __XEN__
+#if defined(__i386__)
+#define set_xen_guest_handle(hnd, val)                 \
+       do {                                            \
+               if (sizeof(hnd) == 8)                   \
+                       *(uint64_t *)&(hnd) = 0;        \
+               (hnd).p = val;                          \
+       } while (0)
+#elif defined(__x86_64__)
+#define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0)
+#endif
+#else
+#if defined(__i386__)
+#define set_xen_guest_handle(hnd, val)                 \
+       do {                                            \
+               if (sizeof(hnd) == 8)                   \
+                       *(uint64_t *)&(hnd) = 0;        \
+               (hnd) = val;                            \
+       } while (0)
+#elif defined(__x86_64__)
+#define set_xen_guest_handle(hnd, val) do { (hnd) = val; } while (0)
+#endif
+#endif
+
 #ifndef __ASSEMBLY__
 /* Guest handles for primitive C types. */
 __DEFINE_GUEST_HANDLE(uchar, unsigned char);
@@ -171,6 +195,10 @@ struct arch_vcpu_info {
     unsigned long pad[5]; /* sizeof(struct vcpu_info) == 64 */
 };
 
+struct xen_callback {
+       unsigned long cs;
+       unsigned long eip;
+};
 #endif /* !__ASSEMBLY__ */
 
 /*
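set_xen_guest_handle() hides the difference between the 32-bit handle (a padded struct with a .p member) and the flat 64-bit pointer. As a sketch of typical use, assuming the GNTTABOP_setup_table hypercall and the frame_list guest handle introduced further down in this merge; setup_grant_frames() is an invented wrapper name.

#include <xen/interface/xen.h>
#include <xen/interface/grant_table.h>
#include <asm/xen/hypercall.h>

/* Sketch only: hand Xen an array to fill with grant-table frame numbers. */
static int setup_grant_frames(unsigned long *frames, unsigned int nr_frames)
{
	struct gnttab_setup_table setup;

	setup.dom = DOMID_SELF;
	setup.nr_frames = nr_frames;
	set_xen_guest_handle(setup.frame_list, frames);

	return HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
}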
diff --git a/include/asm-x86/xen/page.h b/include/asm-x86/xen/page.h
new file mode 100644 (file)
index 0000000..0179930
--- /dev/null
@@ -0,0 +1,168 @@
+#ifndef __XEN_PAGE_H
+#define __XEN_PAGE_H
+
+#include <linux/pfn.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+
+#include <xen/features.h>
+
+/* Xen machine address */
+typedef struct xmaddr {
+       phys_addr_t maddr;
+} xmaddr_t;
+
+/* Xen pseudo-physical address */
+typedef struct xpaddr {
+       phys_addr_t paddr;
+} xpaddr_t;
+
+#define XMADDR(x)      ((xmaddr_t) { .maddr = (x) })
+#define XPADDR(x)      ((xpaddr_t) { .paddr = (x) })
+
+/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
+#define INVALID_P2M_ENTRY      (~0UL)
+#define FOREIGN_FRAME_BIT      (1UL<<31)
+#define FOREIGN_FRAME(m)       ((m) | FOREIGN_FRAME_BIT)
+
+extern unsigned long *phys_to_machine_mapping;
+
+static inline unsigned long pfn_to_mfn(unsigned long pfn)
+{
+       if (xen_feature(XENFEAT_auto_translated_physmap))
+               return pfn;
+
+       return phys_to_machine_mapping[(unsigned int)(pfn)] &
+               ~FOREIGN_FRAME_BIT;
+}
+
+static inline int phys_to_machine_mapping_valid(unsigned long pfn)
+{
+       if (xen_feature(XENFEAT_auto_translated_physmap))
+               return 1;
+
+       return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY);
+}
+
+static inline unsigned long mfn_to_pfn(unsigned long mfn)
+{
+       unsigned long pfn;
+
+       if (xen_feature(XENFEAT_auto_translated_physmap))
+               return mfn;
+
+#if 0
+       if (unlikely((mfn >> machine_to_phys_order) != 0))
+               return max_mapnr;
+#endif
+
+       pfn = 0;
+       /*
+        * The array access can fail (e.g., device space beyond end of RAM).
+        * In such cases it doesn't matter what we return (we return garbage),
+        * but we must handle the fault without crashing!
+        */
+       __get_user(pfn, &machine_to_phys_mapping[mfn]);
+
+       return pfn;
+}
+
+static inline xmaddr_t phys_to_machine(xpaddr_t phys)
+{
+       unsigned offset = phys.paddr & ~PAGE_MASK;
+       return XMADDR(PFN_PHYS((u64)pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset);
+}
+
+static inline xpaddr_t machine_to_phys(xmaddr_t machine)
+{
+       unsigned offset = machine.maddr & ~PAGE_MASK;
+       return XPADDR(PFN_PHYS((u64)mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
+}
+
+/*
+ * We detect special mappings in one of two ways:
+ *  1. If the MFN is an I/O page then Xen will set the m2p entry
+ *     to be outside our maximum possible pseudophys range.
+ *  2. If the MFN belongs to a different domain then we will certainly
+ *     not have MFN in our p2m table. Conversely, if the page is ours,
+ *     then we'll have p2m(m2p(MFN))==MFN.
+ * If we detect a special mapping then it doesn't have a 'struct page'.
+ * We force !pfn_valid() by returning an out-of-range pointer.
+ *
+ * NB. These checks require that, for any MFN that is not in our reservation,
+ * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
+ * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
+ * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
+ *
+ * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
+ *      use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
+ *      require. In all the cases we care about, the FOREIGN_FRAME bit is
+ *      masked (e.g., pfn_to_mfn()) so behaviour there is correct.
+ */
+static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
+{
+       extern unsigned long max_mapnr;
+       unsigned long pfn = mfn_to_pfn(mfn);
+       if ((pfn < max_mapnr)
+           && !xen_feature(XENFEAT_auto_translated_physmap)
+           && (phys_to_machine_mapping[pfn] != mfn))
+               return max_mapnr; /* force !pfn_valid() */
+       return pfn;
+}
+
+static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+{
+       if (xen_feature(XENFEAT_auto_translated_physmap)) {
+               BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
+               return;
+       }
+       phys_to_machine_mapping[pfn] = mfn;
+}
+
+/* VIRT <-> MACHINE conversion */
+#define virt_to_machine(v)     (phys_to_machine(XPADDR(__pa(v))))
+#define virt_to_mfn(v)         (pfn_to_mfn(PFN_DOWN(__pa(v))))
+#define mfn_to_virt(m)         (__va(mfn_to_pfn(m) << PAGE_SHIFT))
+
+static inline unsigned long pte_mfn(pte_t pte)
+{
+       return (pte.pte & ~_PAGE_NX) >> PAGE_SHIFT;
+}
+
+static inline pte_t mfn_pte(unsigned long page_nr, pgprot_t pgprot)
+{
+       pte_t pte;
+
+       pte.pte = ((phys_addr_t)page_nr << PAGE_SHIFT) |
+               (pgprot_val(pgprot) & __supported_pte_mask);
+
+       return pte;
+}
+
+static inline pteval_t pte_val_ma(pte_t pte)
+{
+       return pte.pte;
+}
+
+static inline pte_t __pte_ma(pteval_t x)
+{
+       return (pte_t) { .pte = x };
+}
+
+#ifdef CONFIG_X86_PAE
+#define pmd_val_ma(v) ((v).pmd)
+#define pud_val_ma(v) ((v).pgd.pgd)
+#define __pmd_ma(x)    ((pmd_t) { (x) } )
+#else  /* !X86_PAE */
+#define pmd_val_ma(v)  ((v).pud.pgd.pgd)
+#endif /* CONFIG_X86_PAE */
+
+#define pgd_val_ma(x)  ((x).pgd)
+
+
+xmaddr_t arbitrary_virt_to_machine(unsigned long address);
+void make_lowmem_page_readonly(void *vaddr);
+void make_lowmem_page_readwrite(void *vaddr);
+
+#endif /* __XEN_PAGE_H */
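These helpers separate the pseudo-physical frame numbers the kernel works with from the machine frames Xen actually hands out. A small sketch of the common direction, assuming a normally mapped kernel buffer; share_buffer_mfn() is an invented name.

#include <asm/xen/page.h>

/*
 * Sketch only: turn a kernel virtual address into the machine frame
 * number a backend in another domain would need in order to map it.
 */
static unsigned long share_buffer_mfn(void *buf)
{
	unsigned long pfn = PFN_DOWN(__pa(buf));

	/* p2m lookup; FOREIGN_FRAME_BIT is already masked off for us */
	return pfn_to_mfn(pfn);		/* equivalent to virt_to_mfn(buf) */
}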
index cb784579956bb35a3f11abde8178ce6ba2da1e44..ad3b787479a49ef54daff54e12f505b2833fd731 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2001 Sistina Software (UK) Limited.
- * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
  *
  * This file is released under the LGPL.
  */
@@ -10,6 +10,8 @@
 
 #ifdef __KERNEL__
 
+#include <linux/bio.h>
+
 struct dm_target;
 struct dm_table;
 struct dm_dev;
@@ -250,11 +252,97 @@ void dm_table_event(struct dm_table *t);
  */
 int dm_swap_table(struct mapped_device *md, struct dm_table *t);
 
+/*-----------------------------------------------------------------
+ * Macros.
+ *---------------------------------------------------------------*/
+#define DM_NAME "device-mapper"
+
+#define DMERR(f, arg...) \
+       printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
+#define DMERR_LIMIT(f, arg...) \
+       do { \
+               if (printk_ratelimit()) \
+                       printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " \
+                              f "\n", ## arg); \
+       } while (0)
+
+#define DMWARN(f, arg...) \
+       printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
+#define DMWARN_LIMIT(f, arg...) \
+       do { \
+               if (printk_ratelimit()) \
+                       printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " \
+                              f "\n", ## arg); \
+       } while (0)
+
+#define DMINFO(f, arg...) \
+       printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
+#define DMINFO_LIMIT(f, arg...) \
+       do { \
+               if (printk_ratelimit()) \
+                       printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f \
+                              "\n", ## arg); \
+       } while (0)
+
+#ifdef CONFIG_DM_DEBUG
+#  define DMDEBUG(f, arg...) \
+       printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX " DEBUG: " f "\n", ## arg)
+#  define DMDEBUG_LIMIT(f, arg...) \
+       do { \
+               if (printk_ratelimit()) \
+                       printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX ": " f \
+                              "\n", ## arg); \
+       } while (0)
+#else
+#  define DMDEBUG(f, arg...) do {} while (0)
+#  define DMDEBUG_LIMIT(f, arg...) do {} while (0)
+#endif
+
+#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
+                         0 : scnprintf(result + sz, maxlen - sz, x))
+
+#define SECTOR_SHIFT 9
+
+/*
+ * Definitions of return values from target end_io function.
+ */
+#define DM_ENDIO_INCOMPLETE    1
+#define DM_ENDIO_REQUEUE       2
+
+/*
+ * Definitions of return values from target map function.
+ */
+#define DM_MAPIO_SUBMITTED     0
+#define DM_MAPIO_REMAPPED      1
+#define DM_MAPIO_REQUEUE       DM_ENDIO_REQUEUE
+
+/*
+ * Ceiling(n / sz)
+ */
+#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))
+
+#define dm_sector_div_up(n, sz) ( \
+{ \
+       sector_t _r = ((n) + (sz) - 1); \
+       sector_div(_r, (sz)); \
+       _r; \
+} \
+)
+
 /*
- * Prepare a table for a device that will error all I/O.
- * To make it active, call dm_suspend(), dm_swap_table() then dm_resume().
+ * ceiling(n / size) * size
  */
-int dm_create_error_table(struct dm_table **result, struct mapped_device *md);
+#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))
+
+static inline sector_t to_sector(unsigned long n)
+{
+       return (n >> SECTOR_SHIFT);
+}
+
+static inline unsigned long to_bytes(sector_t n)
+{
+       return (n << SECTOR_SHIFT);
+}
 
 #endif /* __KERNEL__ */
 #endif /* _LINUX_DEVICE_MAPPER_H */
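With the logging and arithmetic macros moved into the exported header, targets outside drivers/md can use them too. A brief sketch; each target defines DM_MSG_PREFIX so the DMERR/DMWARN/DMINFO macros tag its messages, and "example" plus report_split() are placeholder names.

#define DM_MSG_PREFIX "example"
#include <linux/device-mapper.h>

/* Sketch only: round a length up into fixed-size chunks and log it. */
static void report_split(sector_t len, unsigned chunk_sectors)
{
	sector_t chunks = dm_sector_div_up(len, chunk_sectors);

	DMINFO("%llu bytes -> %llu chunks",
	       (unsigned long long)to_bytes(len),
	       (unsigned long long)chunks);
}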
similarity index 53%
rename from drivers/md/dm-log.h
rename to include/linux/dm-dirty-log.h
index 3fae87eb59631bcaf77c82b0924e363716643b0c..600c5fb2daad4c231b53a4654316469895af9b0c 100644 (file)
@@ -1,52 +1,56 @@
 /*
  * Copyright (C) 2003 Sistina Software
+ * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
+ *
+ * Device-Mapper dirty region log.
  *
  * This file is released under the LGPL.
  */
 
-#ifndef DM_DIRTY_LOG
-#define DM_DIRTY_LOG
+#ifndef _LINUX_DM_DIRTY_LOG
+#define _LINUX_DM_DIRTY_LOG
+
+#ifdef __KERNEL__
 
-#include "dm.h"
+#include <linux/types.h>
+#include <linux/device-mapper.h>
 
 typedef sector_t region_t;
 
-struct dirty_log_type;
+struct dm_dirty_log_type;
 
-struct dirty_log {
-       struct dirty_log_type *type;
+struct dm_dirty_log {
+       struct dm_dirty_log_type *type;
        void *context;
 };
 
-struct dirty_log_type {
-       struct list_head list;
+struct dm_dirty_log_type {
        const char *name;
        struct module *module;
-       unsigned int use_count;
 
-       int (*ctr)(struct dirty_log *log, struct dm_target *ti,
-                  unsigned int argc, char **argv);
-       void (*dtr)(struct dirty_log *log);
+       int (*ctr)(struct dm_dirty_log *log, struct dm_target *ti,
+                  unsigned argc, char **argv);
+       void (*dtr)(struct dm_dirty_log *log);
 
        /*
         * There are times when we don't want the log to touch
         * the disk.
         */
-       int (*presuspend)(struct dirty_log *log);
-       int (*postsuspend)(struct dirty_log *log);
-       int (*resume)(struct dirty_log *log);
+       int (*presuspend)(struct dm_dirty_log *log);
+       int (*postsuspend)(struct dm_dirty_log *log);
+       int (*resume)(struct dm_dirty_log *log);
 
        /*
         * Retrieves the smallest size of region that the log can
         * deal with.
         */
-       uint32_t (*get_region_size)(struct dirty_log *log);
+       uint32_t (*get_region_size)(struct dm_dirty_log *log);
 
-        /*
+       /*
         * A predicate to say whether a region is clean or not.
         * May block.
         */
-       int (*is_clean)(struct dirty_log *log, region_t region);
+       int (*is_clean)(struct dm_dirty_log *log, region_t region);
 
        /*
         *  Returns: 0, 1, -EWOULDBLOCK, < 0
@@ -59,13 +63,14 @@ struct dirty_log_type {
         * passed to a daemon to deal with, since a daemon is
         * allowed to block.
         */
-       int (*in_sync)(struct dirty_log *log, region_t region, int can_block);
+       int (*in_sync)(struct dm_dirty_log *log, region_t region,
+                      int can_block);
 
        /*
         * Flush the current log state (eg, to disk).  This
         * function may block.
         */
-       int (*flush)(struct dirty_log *log);
+       int (*flush)(struct dm_dirty_log *log);
 
        /*
         * Mark an area as clean or dirty.  These functions may
@@ -73,8 +78,8 @@ struct dirty_log_type {
         * be extremely rare (eg, allocating another chunk of
         * memory for some reason).
         */
-       void (*mark_region)(struct dirty_log *log, region_t region);
-       void (*clear_region)(struct dirty_log *log, region_t region);
+       void (*mark_region)(struct dm_dirty_log *log, region_t region);
+       void (*clear_region)(struct dm_dirty_log *log, region_t region);
 
        /*
         * Returns: <0 (error), 0 (no region), 1 (region)
@@ -88,44 +93,39 @@ struct dirty_log_type {
         * tells you if an area is synchronised, the other
         * assigns recovery work.
        */
-       int (*get_resync_work)(struct dirty_log *log, region_t *region);
+       int (*get_resync_work)(struct dm_dirty_log *log, region_t *region);
 
        /*
         * This notifies the log that the resync status of a region
         * has changed.  It also clears the region from the recovering
         * list (if present).
         */
-       void (*set_region_sync)(struct dirty_log *log,
+       void (*set_region_sync)(struct dm_dirty_log *log,
                                region_t region, int in_sync);
 
-        /*
+       /*
         * Returns the number of regions that are in sync.
-         */
-        region_t (*get_sync_count)(struct dirty_log *log);
+        */
+       region_t (*get_sync_count)(struct dm_dirty_log *log);
 
        /*
         * Support function for mirror status requests.
         */
-       int (*status)(struct dirty_log *log, status_type_t status_type,
-                     char *result, unsigned int maxlen);
+       int (*status)(struct dm_dirty_log *log, status_type_t status_type,
+                     char *result, unsigned maxlen);
 };
 
-int dm_register_dirty_log_type(struct dirty_log_type *type);
-int dm_unregister_dirty_log_type(struct dirty_log_type *type);
-
+int dm_dirty_log_type_register(struct dm_dirty_log_type *type);
+int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type);
 
 /*
  * Make sure you use these two functions, rather than calling
  * type->constructor/destructor() directly.
  */
-struct dirty_log *dm_create_dirty_log(const char *type_name, struct dm_target *ti,
-                                     unsigned int argc, char **argv);
-void dm_destroy_dirty_log(struct dirty_log *log);
-
-/*
- * init/exit functions.
- */
-int dm_dirty_log_init(void);
-void dm_dirty_log_exit(void);
+struct dm_dirty_log *dm_dirty_log_create(const char *type_name,
+                                        struct dm_target *ti,
+                                        unsigned argc, char **argv);
+void dm_dirty_log_destroy(struct dm_dirty_log *log);
 
-#endif
+#endif /* __KERNEL__ */
+#endif /* _LINUX_DM_DIRTY_LOG_H */
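The rename from dirty_log to dm_dirty_log also replaces the old register/unregister entry points. The skeleton below sketches the minimal shape of a log module against the new API; only ctr/dtr are shown, the remaining methods (is_clean, in_sync, flush, mark/clear_region, ...) are omitted, and every "example" identifier is a placeholder.

#include <linux/module.h>
#include <linux/dm-dirty-log.h>

static int example_ctr(struct dm_dirty_log *log, struct dm_target *ti,
		       unsigned argc, char **argv)
{
	log->context = NULL;	/* per-log state would be allocated here */
	return 0;
}

static void example_dtr(struct dm_dirty_log *log)
{
	/* free whatever ctr allocated */
}

static struct dm_dirty_log_type example_log_type = {
	.name   = "example",
	.module = THIS_MODULE,
	.ctr    = example_ctr,
	.dtr    = example_dtr,
	/* a real type must also fill in the I/O and sync methods */
};

static int __init example_log_init(void)
{
	return dm_dirty_log_type_register(&example_log_type);
}
module_init(example_log_init);

static void __exit example_log_exit(void)
{
	dm_dirty_log_type_unregister(&example_log_type);
}
module_exit(example_log_exit);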
similarity index 84%
rename from drivers/md/dm-io.h
rename to include/linux/dm-io.h
index f647e2cceaa673f8098fce058832a3cc55ce23c4..b6bf17ee2f619601964d33d9c3e7cbf323f79317 100644 (file)
@@ -1,15 +1,20 @@
 /*
  * Copyright (C) 2003 Sistina Software
+ * Copyright (C) 2004 - 2008 Red Hat, Inc. All rights reserved.
+ *
+ * Device-Mapper low-level I/O.
  *
  * This file is released under the GPL.
  */
 
-#ifndef _DM_IO_H
-#define _DM_IO_H
+#ifndef _LINUX_DM_IO_H
+#define _LINUX_DM_IO_H
+
+#ifdef __KERNEL__
 
-#include "dm.h"
+#include <linux/types.h>
 
-struct io_region {
+struct dm_io_region {
        struct block_device *bdev;
        sector_t sector;
        sector_t count;         /* If this is zero the region is ignored. */
@@ -74,6 +79,7 @@ void dm_io_client_destroy(struct dm_io_client *client);
  * error occurred doing io to the corresponding region.
  */
 int dm_io(struct dm_io_request *io_req, unsigned num_regions,
-         struct io_region *region, unsigned long *sync_error_bits);
+         struct dm_io_region *region, unsigned long *sync_error_bits);
 
-#endif
+#endif /* __KERNEL__ */
+#endif /* _LINUX_DM_IO_H */
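dm_io() takes an array of the renamed dm_io_region structs plus a request describing the memory and completion behaviour. A sketch of a synchronous read is below; the dm_io_request fields (bi_rw, mem, client) come from the part of this header not shown in the hunk, and read_one_region() is a made-up helper.

#include <linux/dm-io.h>

/* Sketch only: synchronously read `len` sectors into a kmalloc'd buffer. */
static int read_one_region(struct dm_io_client *client,
			   struct block_device *bdev, void *buf,
			   sector_t start, sector_t len)
{
	unsigned long error_bits = 0;
	struct dm_io_region where = {
		.bdev   = bdev,
		.sector = start,
		.count  = len,
	};
	struct dm_io_request req = {
		.bi_rw        = READ,
		.mem.type     = DM_IO_KMEM,
		.mem.ptr.addr = buf,
		.client       = client,
		/* .notify.fn left NULL => dm_io() completes synchronously */
	};

	return dm_io(&req, 1, &where, &error_bits);
}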
diff --git a/include/linux/dm-kcopyd.h b/include/linux/dm-kcopyd.h
new file mode 100644 (file)
index 0000000..5db2163
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2001 - 2003 Sistina Software
+ * Copyright (C) 2004 - 2008 Red Hat, Inc. All rights reserved.
+ *
+ * kcopyd provides a simple interface for copying an area of one
+ * block-device to one or more other block-devices, either synchronously
+ * or with an asynchronous completion notification.
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef _LINUX_DM_KCOPYD_H
+#define _LINUX_DM_KCOPYD_H
+
+#ifdef __KERNEL__
+
+#include <linux/dm-io.h>
+
+/* FIXME: make this configurable */
+#define DM_KCOPYD_MAX_REGIONS 8
+
+#define DM_KCOPYD_IGNORE_ERROR 1
+
+/*
+ * To use kcopyd you must first create a dm_kcopyd_client object.
+ */
+struct dm_kcopyd_client;
+int dm_kcopyd_client_create(unsigned num_pages,
+                           struct dm_kcopyd_client **result);
+void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc);
+
+/*
+ * Submit a copy job to kcopyd.  This is built on top of the
+ * previous three fns.
+ *
+ * read_err is a boolean,
+ * write_err is a bitset, with 1 bit for each destination region
+ */
+typedef void (*dm_kcopyd_notify_fn)(int read_err, unsigned long write_err,
+                                   void *context);
+
+int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
+                  unsigned num_dests, struct dm_io_region *dests,
+                  unsigned flags, dm_kcopyd_notify_fn fn, void *context);
+
+#endif /* __KERNEL__ */
+#endif /* _LINUX_DM_KCOPYD_H */
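The kcopyd interface follows the same create-client / submit / destroy pattern as dm-io. The sketch below shows the typical call sequence, waiting for the asynchronous completion with a completion object; mirror_one_chunk(), copy_done() and the 32-page reservation are placeholders, not values from this patch.

#include <linux/completion.h>
#include <linux/dm-kcopyd.h>

/* Runs in kcopyd context once every destination write has finished. */
static void copy_done(int read_err, unsigned long write_err, void *context)
{
	complete(context);
}

/* Sketch only: copy one source region to one destination and wait. */
static int mirror_one_chunk(struct dm_io_region *from, struct dm_io_region *to)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct dm_kcopyd_client *kc;
	int r;

	r = dm_kcopyd_client_create(32 /* reserved pages */, &kc);
	if (r)
		return r;

	r = dm_kcopyd_copy(kc, from, 1, to, 0, copy_done, &done);
	if (!r)
		wait_for_completion(&done);

	dm_kcopyd_client_destroy(kc);
	return r;
}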
index cc2be2cf7d4158108965ce5f9394595c68c651e0..6556f2f967e5d32f255a0176764268b30b5f87d5 100644 (file)
@@ -973,6 +973,7 @@ extern int do_sync_mapping_range(struct address_space *mapping, loff_t offset,
 /* fs/locks.c */
 extern void locks_init_lock(struct file_lock *);
 extern void locks_copy_lock(struct file_lock *, struct file_lock *);
+extern void __locks_copy_lock(struct file_lock *, const struct file_lock *);
 extern void locks_remove_posix(struct file *, fl_owner_t);
 extern void locks_remove_flock(struct file *);
 extern void posix_test_lock(struct file *, struct file_lock *);
index cae2c35d1206593bc6a3154e44ae47ef1db6fb75..28a094fcfe201f0a05a344011f338cf7c3c7dc72 100644 (file)
@@ -1025,10 +1025,6 @@ struct ff_effect {
  * @node: used to place the device onto input_dev_list
  */
 struct input_dev {
-       /* private: */
-       void *private;  /* do not use */
-       /* public: */
-
        const char *name;
        const char *phys;
        const char *uniq;
@@ -1238,12 +1234,12 @@ static inline void input_put_device(struct input_dev *dev)
 
 static inline void *input_get_drvdata(struct input_dev *dev)
 {
-       return dev->private;
+       return dev_get_drvdata(&dev->dev);
 }
 
 static inline void input_set_drvdata(struct input_dev *dev, void *data)
 {
-       dev->private = data;
+       dev_set_drvdata(&dev->dev, data);
 }
 
 int __must_check input_register_device(struct input_dev *);
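Drivers keep calling the same two accessors; only the storage behind them moves from input_dev->private to the embedded struct device. A sketch of the usual pattern, where probe code has already called input_set_drvdata(input, kp); struct my_keypad and its keycode field are invented for illustration.

#include <linux/input.h>
#include <linux/interrupt.h>

struct my_keypad {
	int keycode;			/* hypothetical per-device state */
};

static irqreturn_t my_keypad_irq(int irq, void *dev_id)
{
	struct input_dev *input = dev_id;
	struct my_keypad *kp = input_get_drvdata(input); /* dev_get_drvdata() */

	input_report_key(input, kp->keycode, 1);
	input_report_key(input, kp->keycode, 0);
	input_sync(input);
	return IRQ_HANDLED;
}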
index 65c2d70853e96557dc13c3f8e0a2e1420b4ccbf0..a3c984d780f0144422da0e0e9a4a754dc208510f 100644 (file)
@@ -33,6 +33,7 @@ struct keyboard_notifier_param {
        struct vc_data *vc;     /* VC on which the keyboard press was done */
        int down;               /* Pressure of the key? */
        int shift;              /* Current shift mask */
+       int ledstate;           /* Current led state */
        unsigned int value;     /* keycode, unicode value or keysym */
 };
 
index 94649a8da01403975c0c24e657f8062b9ec5dc80..102d928f7206b4bce0e242c8779b2de01117e8d5 100644 (file)
@@ -194,7 +194,7 @@ void                  nsm_release(struct nsm_handle *);
  * This is used in garbage collection and resource reclaim
  * A return value != 0 means destroy the lock/block/share
  */
-typedef int      (*nlm_host_match_fn_t)(struct nlm_host *cur, struct nlm_host *ref);
+typedef int      (*nlm_host_match_fn_t)(void *cur, struct nlm_host *ref);
 
 /*
  * Server-side lock handling
@@ -220,6 +220,12 @@ void                 nlmsvc_mark_resources(void);
 void             nlmsvc_free_host_resources(struct nlm_host *);
 void             nlmsvc_invalidate_all(void);
 
+/*
+ * Cluster failover support
+ */
+int           nlmsvc_unlock_all_by_sb(struct super_block *sb);
+int           nlmsvc_unlock_all_by_ip(__be32 server_addr);
+
 static inline struct inode *nlmsvc_file_inode(struct nlm_file *file)
 {
        return file->f_file->f_path.dentry->d_inode;
index 6977780e548f96809a2c5df708490f7bae81bff0..85fd041d44ad30ca9fea26eef7dfafb164d611f2 100644 (file)
@@ -57,6 +57,11 @@ extern char inftlmountrev[];
 void INFTL_dumptables(struct INFTLrecord *s);
 void INFTL_dumpVUchains(struct INFTLrecord *s);
 
+int inftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
+                  size_t *retlen, uint8_t *buf);
+int inftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
+                   size_t *retlen, uint8_t *buf);
+
 #endif /* __KERNEL__ */
 
 #endif /* __MTD_INFTL_H__ */
index bcf2fb3fa4a786300ca2a3d23e470bd25843faa2..001eec50cac6d12c12e3362ef1ca9687400357b8 100644 (file)
@@ -43,6 +43,11 @@ struct NFTLrecord {
 int NFTL_mount(struct NFTLrecord *s);
 int NFTL_formatblock(struct NFTLrecord *s, int block);
 
+int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
+                 size_t *retlen, uint8_t *buf);
+int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
+                  size_t *retlen, uint8_t *buf);
+
 #ifndef NFTL_MAJOR
 #define NFTL_MAJOR 93
 #endif
index fd0a260e070b483b7be93288d088367949366b47..9aa2a9149b583991c75054ffd15b583b8e540f3e 100644 (file)
@@ -187,4 +187,7 @@ struct onenand_manufacturers {
         char *name;
 };
 
+int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
+                        struct mtd_oob_ops *ops);
+
 #endif /* __LINUX_MTD_ONENAND_H */
index 9667863bd7e3c95496bf0cd0b47e1e589b5ed9ca..0e37ad07bce2bb4895d9913350958d80016b83d5 100644 (file)
@@ -21,8 +21,9 @@
 #define PLATRAM_RW (1)
 
 struct platdata_mtd_ram {
-       char                    *mapname;
-       char                   **probes;
+       const char              *mapname;
+       const char              **map_probes;
+       const char              **probes;
        struct mtd_partition    *partitions;
        int                      nr_partitions;
        int                      bankwidth;
index 21ee440dd3e768eb2bbb1e72f3a33d54c31b69da..41d30c9c9de6e4b33045d232239f0366d2349916 100644 (file)
@@ -329,7 +329,7 @@ extern struct timeval       nfssvc_boot;
 (FATTR4_WORD0_SIZE              | FATTR4_WORD0_ACL                                         )
 #define NFSD_WRITEABLE_ATTRS_WORD1                                                          \
 (FATTR4_WORD1_MODE              | FATTR4_WORD1_OWNER         | FATTR4_WORD1_OWNER_GROUP     \
- | FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_METADATA | FATTR4_WORD1_TIME_MODIFY_SET)
+ | FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET)
 
 #endif /* CONFIG_NFSD_V4 */
 
index 779cbcd65f62d50ae56b897deff6adca44496cad..02df20f085fe9e89637affb8cb880d2a1f153628 100644 (file)
@@ -379,6 +379,18 @@ struct phy_driver {
 };
 #define to_phy_driver(d) container_of(d, struct phy_driver, driver)
 
+#define PHY_ANY_ID "MATCH ANY PHY"
+#define PHY_ANY_UID 0xffffffff
+
+/* A Structure for boards to register fixups with the PHY Lib */
+struct phy_fixup {
+       struct list_head list;
+       char bus_id[BUS_ID_SIZE];
+       u32 phy_uid;
+       u32 phy_uid_mask;
+       int (*run)(struct phy_device *phydev);
+};
+
 int phy_read(struct phy_device *phydev, u16 regnum);
 int phy_write(struct phy_device *phydev, u16 regnum, u16 val);
 int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id);
@@ -386,8 +398,8 @@ struct phy_device* get_phy_device(struct mii_bus *bus, int addr);
 int phy_clear_interrupt(struct phy_device *phydev);
 int phy_config_interrupt(struct phy_device *phydev, u32 interrupts);
 struct phy_device * phy_attach(struct net_device *dev,
-               const char *phy_id, u32 flags, phy_interface_t interface);
-struct phy_device * phy_connect(struct net_device *dev, const char *phy_id,
+               const char *bus_id, u32 flags, phy_interface_t interface);
+struct phy_device * phy_connect(struct net_device *dev, const char *bus_id,
                void (*handler)(struct net_device *), u32 flags,
                phy_interface_t interface);
 void phy_disconnect(struct phy_device *phydev);
@@ -427,5 +439,13 @@ void phy_print_status(struct phy_device *phydev);
 struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id);
 void phy_device_free(struct phy_device *phydev);
 
+int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
+               int (*run)(struct phy_device *));
+int phy_register_fixup_for_id(const char *bus_id,
+               int (*run)(struct phy_device *));
+int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask,
+               int (*run)(struct phy_device *));
+int phy_scan_fixups(struct phy_device *phydev);
+
 extern struct bus_type mdio_bus_type;
 #endif /* __PHY_H */
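The fixup hooks let board code patch up a specific PHY before it is attached to a network driver. A sketch of registering one is below; the MDIO bus id "0:01", register 0x18 and value 0x4101 are invented, and example_phy_fixup() is not code from this patch.

#include <linux/phy.h>

/* Hypothetical board quirk: tweak a vendor register on one PHY. */
static int example_phy_fixup(struct phy_device *phydev)
{
	return phy_write(phydev, 0x18, 0x4101);
}

static int __init example_board_phy_init(void)
{
	/* match this bus/address regardless of the PHY's UID */
	return phy_register_fixup_for_id("0:01", example_phy_fixup);
}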
index 9f38250146744a605736879d1ba23fe969a07ad0..95674d97dabda27a1cdcb361546844b2b993da26 100644 (file)
@@ -211,5 +211,6 @@ static inline void serio_unpin_driver(struct serio *serio)
 #define SERIO_TOUCHWIN 0x33
 #define SERIO_TAOSEVM  0x34
 #define SERIO_FUJITSU  0x35
+#define SERIO_ZHENHUA  0x36
 
 #endif
index 334d3141162966a4daacc7233d4bdf843b4d4e6a..daf744017a31c86bfaacf0a98ad1ebfdd27a4595 100644 (file)
@@ -14,7 +14,8 @@ enum ads7846_filter {
 struct ads7846_platform_data {
        u16     model;                  /* 7843, 7845, 7846. */
        u16     vref_delay_usecs;       /* 0 for external vref; etc */
-       int     keep_vref_on:1;         /* set to keep vref on for differential
+       u16     vref_mv;                /* external vref value, milliVolts */
+       bool    keep_vref_on;           /* set to keep vref on for differential
                                         * measurements as well */
 
        /* Settling time of the analog signals; a function of Vcc and the
diff --git a/include/linux/wm97xx.h b/include/linux/wm97xx.h
new file mode 100644 (file)
index 0000000..4d13732
--- /dev/null
@@ -0,0 +1,314 @@
+
+/*
+ * Register bits and API for Wolfson WM97xx series of codecs
+ */
+
+#ifndef _LINUX_WM97XX_H
+#define _LINUX_WM97XX_H
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/ac97_codec.h>
+#include <sound/initval.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/input.h>       /* Input device layer */
+#include <linux/platform_device.h>
+
+/*
+ * WM97xx AC97 Touchscreen registers
+ */
+#define AC97_WM97XX_DIGITISER1         0x76
+#define AC97_WM97XX_DIGITISER2         0x78
+#define AC97_WM97XX_DIGITISER_RD       0x7a
+#define AC97_WM9713_DIG1               0x74
+#define AC97_WM9713_DIG2               AC97_WM97XX_DIGITISER1
+#define AC97_WM9713_DIG3               AC97_WM97XX_DIGITISER2
+
+/*
+ * WM97xx register bits
+ */
+#define WM97XX_POLL            0x8000  /* initiate a polling measurement */
+#define WM97XX_ADCSEL_X                0x1000  /* x coord measurement */
+#define WM97XX_ADCSEL_Y                0x2000  /* y coord measurement */
+#define WM97XX_ADCSEL_PRES     0x3000  /* pressure measurement */
+#define WM97XX_ADCSEL_MASK     0x7000
+#define WM97XX_COO             0x0800  /* enable coordinate mode */
+#define WM97XX_CTC             0x0400  /* enable continuous mode */
+#define WM97XX_CM_RATE_93      0x0000  /* 93.75Hz continuous rate */
+#define WM97XX_CM_RATE_187     0x0100  /* 187.5Hz continuous rate */
+#define WM97XX_CM_RATE_375     0x0200  /* 375Hz continuous rate */
+#define WM97XX_CM_RATE_750     0x0300  /* 750Hz continuous rate */
+#define WM97XX_CM_RATE_8K      0x00f0  /* 8kHz continuous rate */
+#define WM97XX_CM_RATE_12K     0x01f0  /* 12kHz continuous rate */
+#define WM97XX_CM_RATE_24K     0x02f0  /* 24kHz continuous rate */
+#define WM97XX_CM_RATE_48K     0x03f0  /* 48kHz continuous rate */
+#define WM97XX_CM_RATE_MASK    0x03f0
+#define WM97XX_RATE(i)         (((i & 3) << 8) | ((i & 4) ? 0xf0 : 0))
+#define WM97XX_DELAY(i)                ((i << 4) & 0x00f0)     /* sample delay times */
+#define WM97XX_DELAY_MASK      0x00f0
+#define WM97XX_SLEN            0x0008  /* slot read back enable */
+#define WM97XX_SLT(i)          ((i - 5) & 0x7) /* panel slot (5-11) */
+#define WM97XX_SLT_MASK                0x0007
+#define WM97XX_PRP_DETW                0x4000  /* detect on, digitise off, wake */
+#define WM97XX_PRP_DET         0x8000  /* detect on, digitise off, no wake */
+#define WM97XX_PRP_DET_DIG     0xc000  /* detect on, digitise on */
+#define WM97XX_RPR             0x2000  /* wake up on pen down */
+#define WM97XX_PEN_DOWN                0x8000  /* pen is down */
+#define WM97XX_ADCSRC_MASK     0x7000  /* ADC source mask */
+
+#define WM97XX_AUX_ID1         0x8001
+#define WM97XX_AUX_ID2         0x8002
+#define WM97XX_AUX_ID3         0x8003
+#define WM97XX_AUX_ID4         0x8004
+
+
+/* WM9712 Bits */
+#define WM9712_45W             0x1000  /* set for 5-wire touchscreen */
+#define WM9712_PDEN            0x0800  /* measure only when pen down */
+#define WM9712_WAIT            0x0200  /* wait until adc is read before next sample */
+#define WM9712_PIL             0x0100  /* current used for pressure measurement. set 400uA else 200uA */
+#define WM9712_MASK_HI         0x0040  /* hi on mask pin (47) stops conversions */
+#define WM9712_MASK_EDGE       0x0080  /* rising/falling edge on pin delays sample */
+#define        WM9712_MASK_SYNC        0x00c0  /* rising/falling edge on mask initiates sample */
+#define WM9712_RPU(i)          (i&0x3f)        /* internal pull up on pen detect (64k / rpu) */
+#define WM9712_PD(i)           (0x1 << i)      /* power management */
+
+/* WM9712 Registers */
+#define AC97_WM9712_POWER      0x24
+#define AC97_WM9712_REV                0x58
+
+/* WM9705 Bits */
+#define WM9705_PDEN            0x1000  /* measure only when pen is down */
+#define WM9705_PINV            0x0800  /* inverts sense of pen down output */
+#define WM9705_BSEN            0x0400  /* BUSY flag enable, pin47 is 1 when busy */
+#define WM9705_BINV            0x0200  /* invert BUSY (pin47) output */
+#define WM9705_WAIT            0x0100  /* wait until adc is read before next sample */
+#define WM9705_PIL             0x0080  /* current used for pressure measurement. set 400uA else 200uA */
+#define WM9705_PHIZ            0x0040  /* set PHONE and PCBEEP inputs to high impedance */
+#define WM9705_MASK_HI         0x0010  /* hi on mask stops conversions */
+#define WM9705_MASK_EDGE       0x0020  /* rising/falling edge on pin delays sample */
+#define        WM9705_MASK_SYNC        0x0030  /* rising/falling edge on mask initiates sample */
+#define WM9705_PDD(i)          (i & 0x000f)    /* pen detect comparator threshold */
+
+
+/* WM9713 Bits */
+#define WM9713_PDPOL           0x0400  /* Pen down polarity */
+#define WM9713_POLL            0x0200  /* initiate a polling measurement */
+#define WM9713_CTC             0x0100  /* enable continuous mode */
+#define WM9713_ADCSEL_X                0x0002  /* X measurement */
+#define WM9713_ADCSEL_Y                0x0004  /* Y measurement */
+#define WM9713_ADCSEL_PRES     0x0008  /* Pressure measurement */
+#define WM9713_COO             0x0001  /* enable coordinate mode */
+#define WM9713_PDEN            0x0800  /* measure only when pen down */
+#define WM9713_ADCSEL_MASK     0x00fe  /* ADC selection mask */
+#define WM9713_WAIT            0x0200  /* coordinate wait */
+
+/* AUX ADC ID's */
+#define TS_COMP1               0x0
+#define TS_COMP2               0x1
+#define TS_BMON                        0x2
+#define TS_WIPER               0x3
+
+/* ID numbers */
+#define WM97XX_ID1             0x574d
+#define WM9712_ID2             0x4c12
+#define WM9705_ID2             0x4c05
+#define WM9713_ID2             0x4c13
+
+/* Codec GPIO's */
+#define WM97XX_MAX_GPIO                16
+#define WM97XX_GPIO_1          (1 << 1)
+#define WM97XX_GPIO_2          (1 << 2)
+#define WM97XX_GPIO_3          (1 << 3)
+#define WM97XX_GPIO_4          (1 << 4)
+#define WM97XX_GPIO_5          (1 << 5)
+#define WM97XX_GPIO_6          (1 << 6)
+#define WM97XX_GPIO_7          (1 << 7)
+#define WM97XX_GPIO_8          (1 << 8)
+#define WM97XX_GPIO_9          (1 << 9)
+#define WM97XX_GPIO_10         (1 << 10)
+#define WM97XX_GPIO_11         (1 << 11)
+#define WM97XX_GPIO_12         (1 << 12)
+#define WM97XX_GPIO_13         (1 << 13)
+#define WM97XX_GPIO_14         (1 << 14)
+#define WM97XX_GPIO_15         (1 << 15)
+
+
+#define AC97_LINK_FRAME                21      /* time in uS for AC97 link frame */
+
+
+/*---------------- Return codes from sample reading functions ---------------*/
+
+/* More data is available; call the sample gathering function again */
+#define RC_AGAIN                       0x00000001
+/* The returned sample is valid */
+#define RC_VALID                       0x00000002
+/* The pen is up (the first RC_VALID without RC_PENUP means pen is down) */
+#define RC_PENUP                       0x00000004
+/* The pen is down (RC_VALID implies RC_PENDOWN, but sometimes it is helpful
+   to tell the handler that the pen is down but its coordinates are not yet
+   known, so the handler should not sleep or wait for the pen-down irq) */
+#define RC_PENDOWN                     0x00000008
+
+/*
+ * The wm97xx driver provides a private API for writing platform-specific
+ * drivers.
+ */
+
+/* The structure used to return arch specific sampled data into */
+struct wm97xx_data {
+    int x;
+    int y;
+    int p;
+};
+
+/*
+ * Codec GPIO status
+ */
+enum wm97xx_gpio_status {
+    WM97XX_GPIO_HIGH,
+    WM97XX_GPIO_LOW
+};
+
+/*
+ * Codec GPIO direction
+ */
+enum wm97xx_gpio_dir {
+    WM97XX_GPIO_IN,
+    WM97XX_GPIO_OUT
+};
+
+/*
+ * Codec GPIO polarity
+ */
+enum wm97xx_gpio_pol {
+    WM97XX_GPIO_POL_HIGH,
+    WM97XX_GPIO_POL_LOW
+};
+
+/*
+ * Codec GPIO sticky
+ */
+enum wm97xx_gpio_sticky {
+    WM97XX_GPIO_STICKY,
+    WM97XX_GPIO_NOTSTICKY
+};
+
+/*
+ * Codec GPIO wake
+ */
+enum wm97xx_gpio_wake {
+    WM97XX_GPIO_WAKE,
+    WM97XX_GPIO_NOWAKE
+};
+
+/*
+ * Digitiser ioctl commands
+ */
+#define WM97XX_DIG_START       0x1
+#define WM97XX_DIG_STOP                0x2
+#define WM97XX_PHY_INIT                0x3
+#define WM97XX_AUX_PREPARE     0x4
+#define WM97XX_DIG_RESTORE     0x5
+
+struct wm97xx;
+
+extern struct wm97xx_codec_drv wm9705_codec;
+extern struct wm97xx_codec_drv wm9712_codec;
+extern struct wm97xx_codec_drv wm9713_codec;
+
+/*
+ * Codec driver interface - allows mapping to WM9705/12/13 and newer codecs
+ */
+struct wm97xx_codec_drv {
+       u16 id;
+       char *name;
+
+       /* read 1 sample */
+       int (*poll_sample) (struct wm97xx *, int adcsel, int *sample);
+
+       /* read X,Y,[P] in poll */
+       int (*poll_touch) (struct wm97xx *, struct wm97xx_data *);
+
+       int (*acc_enable) (struct wm97xx *, int enable);
+       void (*phy_init) (struct wm97xx *);
+       void (*dig_enable) (struct wm97xx *, int enable);
+       void (*dig_restore) (struct wm97xx *);
+       void (*aux_prepare) (struct wm97xx *);
+};
+
+
+/* Machine specific and accelerated touch operations */
+struct wm97xx_mach_ops {
+
+       /* accelerated touch readback - coords are transmitted on AC97 link */
+       int acc_enabled;
+       void (*acc_pen_up) (struct wm97xx *);
+       int (*acc_pen_down) (struct wm97xx *);
+       int (*acc_startup) (struct wm97xx *);
+       void (*acc_shutdown) (struct wm97xx *);
+
+       /* interrupt mask control - required for accelerated operation */
+       void (*irq_enable) (struct wm97xx *, int enable);
+
+       /* GPIO pin used for accelerated operation */
+       int irq_gpio;
+
+       /* pre and post sample - can be used to minimise any analog noise */
+       void (*pre_sample) (int);  /* function to run before sampling */
+       void (*post_sample) (int);  /* function to run after sampling */
+};
+
+struct wm97xx {
+       u16 dig[3], id, gpio[6], misc;  /* Cached codec registers */
+       u16 dig_save[3];                /* saved during aux reading */
+       struct wm97xx_codec_drv *codec; /* attached codec driver*/
+       struct input_dev *input_dev;    /* touchscreen input device */
+       struct snd_ac97 *ac97;          /* ALSA codec access */
+       struct device *dev;             /* ALSA device */
+       struct platform_device *battery_dev;
+       struct platform_device *touch_dev;
+       struct wm97xx_mach_ops *mach_ops;
+       struct mutex codec_mutex;
+       struct delayed_work ts_reader;  /* Used to poll touchscreen */
+       unsigned long ts_reader_interval; /* Current interval for timer */
+       unsigned long ts_reader_min_interval; /* Minimum interval */
+       unsigned int pen_irq;           /* Pen IRQ number in use */
+       struct workqueue_struct *ts_workq;
+       struct work_struct pen_event_work;
+       u16 acc_slot;                   /* AC97 slot used for acc touch data */
+       u16 acc_rate;                   /* acc touch data rate */
+       unsigned pen_is_down:1;         /* Pen is down */
+       unsigned aux_waiting:1;         /* aux measurement waiting */
+       unsigned pen_probably_down:1;   /* used in polling mode */
+       u16 suspend_mode;               /* PRP in suspend mode */
+};
+
+/*
+ * Codec GPIO access (not supported on WM9705)
+ * This can be used to set/get codec GPIO and Virtual GPIO status.
+ */
+enum wm97xx_gpio_status wm97xx_get_gpio(struct wm97xx *wm, u32 gpio);
+void wm97xx_set_gpio(struct wm97xx *wm, u32 gpio,
+                         enum wm97xx_gpio_status status);
+void wm97xx_config_gpio(struct wm97xx *wm, u32 gpio,
+                                    enum wm97xx_gpio_dir dir,
+                                    enum wm97xx_gpio_pol pol,
+                                    enum wm97xx_gpio_sticky sticky,
+                                    enum wm97xx_gpio_wake wake);
+
+void wm97xx_set_suspend_mode(struct wm97xx *wm, u16 mode);
+
+/* codec AC97 IO access */
+int wm97xx_reg_read(struct wm97xx *wm, u16 reg);
+void wm97xx_reg_write(struct wm97xx *wm, u16 reg, u16 val);
+
+/* aux adc readback */
+int wm97xx_read_aux_adc(struct wm97xx *wm, u16 adcsel);
+
+/* machine ops */
+int wm97xx_register_mach_ops(struct wm97xx *, struct wm97xx_mach_ops *);
+void wm97xx_unregister_mach_ops(struct wm97xx *);
+
+#endif
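The codec driver's poll_touch() method returns the RC_* flags defined above, and callers are expected to keep polling while RC_AGAIN is set. The loop below is a sketch of how a reader might interpret those codes and forward events to the input layer; it is an assumption about usage, not a copy of the in-tree wm97xx core, and example_read_touch() is an invented name.

#include <linux/wm97xx.h>

/* Sketch only: poll the codec and report coordinates/pen state. */
static void example_read_touch(struct wm97xx *wm)
{
	struct wm97xx_data data;
	int rc;

	do {
		rc = wm->codec->poll_touch(wm, &data);

		if (rc & RC_PENUP) {
			input_report_key(wm->input_dev, BTN_TOUCH, 0);
		} else if (rc & RC_VALID) {
			/* low 12 bits carry the sample, high bits the source */
			input_report_abs(wm->input_dev, ABS_X, data.x & 0xfff);
			input_report_abs(wm->input_dev, ABS_Y, data.y & 0xfff);
			input_report_abs(wm->input_dev, ABS_PRESSURE,
					 data.p & 0xfff);
			input_report_key(wm->input_dev, BTN_TOUCH, 1);
		}
		input_sync(wm->input_dev);
	} while (rc & RC_AGAIN);
}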
index 0c82c80b277f935f25e43c2a83ea348b1481153a..2ca6bae88721920215a649bc437393733452aae9 100644 (file)
@@ -97,10 +97,10 @@ struct xfrm_algo {
 };
 
 struct xfrm_algo_aead {
-       char    alg_name[64];
-       int     alg_key_len;    /* in bits */
-       int     alg_icv_len;    /* in bits */
-       char    alg_key[0];
+       char            alg_name[64];
+       unsigned int    alg_key_len;    /* in bits */
+       unsigned int    alg_icv_len;    /* in bits */
+       char            alg_key[0];
 };
 
 struct xfrm_stats {
index 4d46b3bdebd86ce579c0c057e53f28be412651af..8eb018f96002d84f7ae3a0810023cffc65333f04 100644 (file)
@@ -3,5 +3,4 @@ header-y += jffs2-user.h
 header-y += mtd-abi.h
 header-y += mtd-user.h
 header-y += nftl-user.h
-header-y += ubi-header.h
 header-y += ubi-user.h
diff --git a/include/xen/balloon.h b/include/xen/balloon.h
new file mode 100644 (file)
index 0000000..fe43b0f
--- /dev/null
@@ -0,0 +1,61 @@
+/******************************************************************************
+ * balloon.h
+ *
+ * Xen balloon driver - enables returning/claiming memory to/from Xen.
+ *
+ * Copyright (c) 2003, B Dragovic
+ * Copyright (c) 2003-2004, M Williamson, K Fraser
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __XEN_BALLOON_H__
+#define __XEN_BALLOON_H__
+
+#include <linux/spinlock.h>
+
+#if 0
+/*
+ * Inform the balloon driver that it should allow some slop for device-driver
+ * memory activities.
+ */
+void balloon_update_driver_allowance(long delta);
+
+/* Allocate/free a set of empty pages in low memory (i.e., no RAM mapped). */
+struct page **alloc_empty_pages_and_pagevec(int nr_pages);
+void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages);
+
+void balloon_release_driver_page(struct page *page);
+
+/*
+ * Prevent the balloon driver from changing the memory reservation during
+ * a driver critical region.
+ */
+extern spinlock_t balloon_lock;
+#define balloon_lock(__flags)   spin_lock_irqsave(&balloon_lock, __flags)
+#define balloon_unlock(__flags) spin_unlock_irqrestore(&balloon_lock, __flags)
+#endif
+
+#endif /* __XEN_BALLOON_H__ */
index 2bde54d29be52f3b8ffc9558590cff65d4cacd31..acd8e062c85f381b5520c2ac27f13681053bedd0 100644 (file)
@@ -5,13 +5,7 @@
 
 #include <xen/interface/event_channel.h>
 #include <asm/xen/hypercall.h>
-
-enum ipi_vector {
-       XEN_RESCHEDULE_VECTOR,
-       XEN_CALL_FUNCTION_VECTOR,
-
-       XEN_NR_IPIS,
-};
+#include <asm/xen/events.h>
 
 int bind_evtchn_to_irq(unsigned int evtchn);
 int bind_evtchn_to_irqhandler(unsigned int evtchn,
@@ -37,6 +31,7 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
 void unbind_from_irqhandler(unsigned int irq, void *dev_id);
 
 void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector);
+int resend_irq_on_evtchn(unsigned int irq);
 
 static inline void notify_remote_via_evtchn(int port)
 {
index 761c83498e032f2a78e7972305bed0f421446e99..466204846121541fe4192131cf4142c315c8f19a 100644 (file)
@@ -39,6 +39,7 @@
 
 #include <asm/xen/hypervisor.h>
 #include <xen/interface/grant_table.h>
+#include <asm/xen/grant_table.h>
 
 /* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */
 #define NR_GRANT_FRAMES 4
@@ -102,6 +103,12 @@ void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
 void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid,
                                       unsigned long pfn);
 
+int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
+                          unsigned long max_nr_gframes,
+                          struct grant_entry **__shared);
+void arch_gnttab_unmap_shared(struct grant_entry *shared,
+                             unsigned long nr_gframes);
+
 #define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))
 
 #endif /* __ASM_GNTTAB_H__ */
diff --git a/include/xen/interface/callback.h b/include/xen/interface/callback.h
new file mode 100644 (file)
index 0000000..4aadcba
--- /dev/null
@@ -0,0 +1,102 @@
+/******************************************************************************
+ * callback.h
+ *
+ * Register guest OS callbacks with Xen.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2006, Ian Campbell
+ */
+
+#ifndef __XEN_PUBLIC_CALLBACK_H__
+#define __XEN_PUBLIC_CALLBACK_H__
+
+#include "xen.h"
+
+/*
+ * Prototype for this hypercall is:
+ *   long callback_op(int cmd, void *extra_args)
+ * @cmd        == CALLBACKOP_??? (callback operation).
+ * @extra_args == Operation-specific extra arguments (NULL if none).
+ */
+
+/* ia64, x86: Callback for event delivery. */
+#define CALLBACKTYPE_event                 0
+
+/* x86: Failsafe callback when guest state cannot be restored by Xen. */
+#define CALLBACKTYPE_failsafe              1
+
+/* x86/64 hypervisor: Syscall by 64-bit guest app ('64-on-64-on-64'). */
+#define CALLBACKTYPE_syscall               2
+
+/*
+ * x86/32 hypervisor: Only available on x86/32 when supervisor_mode_kernel
+ *     feature is enabled. Do not use this callback type in new code.
+ */
+#define CALLBACKTYPE_sysenter_deprecated   3
+
+/* x86: Callback for NMI delivery. */
+#define CALLBACKTYPE_nmi                   4
+
+/*
+ * x86: sysenter is only available as follows:
+ * - 32-bit hypervisor: with the supervisor_mode_kernel feature enabled
+ * - 64-bit hypervisor: 32-bit guest applications on Intel CPUs
+ *                      ('32-on-32-on-64', '32-on-64-on-64')
+ *                      [nb. also 64-bit guest applications on Intel CPUs
+ *                           ('64-on-64-on-64'), but syscall is preferred]
+ */
+#define CALLBACKTYPE_sysenter              5
+
+/*
+ * x86/64 hypervisor: Syscall by 32-bit guest app on AMD CPUs
+ *                    ('32-on-32-on-64', '32-on-64-on-64')
+ */
+#define CALLBACKTYPE_syscall32             7
+
+/*
+ * Disable event delivery during callback? This flag is ignored for event and
+ * NMI callbacks: event delivery is unconditionally disabled.
+ */
+#define _CALLBACKF_mask_events             0
+#define CALLBACKF_mask_events              (1U << _CALLBACKF_mask_events)
+
+/*
+ * Register a callback.
+ */
+#define CALLBACKOP_register                0
+struct callback_register {
+    uint16_t type;
+    uint16_t flags;
+    struct xen_callback address;
+};
+
+/*
+ * Unregister a callback.
+ *
+ * Not all callbacks can be unregistered. -EINVAL will be returned if
+ * you attempt to unregister such a callback.
+ */
+#define CALLBACKOP_unregister              1
+struct callback_unregister {
+    uint16_t type;
+    uint16_t _unused;
+};
+
+#endif /* __XEN_PUBLIC_CALLBACK_H__ */
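Together with the HYPERVISOR_callback_op() wrapper added earlier in this merge, these definitions let the guest register its entry points with Xen. A sketch of registering the event-delivery callback; register_event_callback() is an invented helper and the selector/entry values are whatever the guest actually uses, not values from this patch.

#include <xen/interface/callback.h>
#include <asm/xen/hypercall.h>

/* Sketch only: tell Xen where to deliver event upcalls. */
static void register_event_callback(unsigned long cs, unsigned long entry)
{
	struct callback_register event = {
		.type    = CALLBACKTYPE_event,
		.address = { .cs = cs, .eip = entry },
	};

	if (HYPERVISOR_callback_op(CALLBACKOP_register, &event))
		BUG();	/* the guest cannot run without event delivery */
}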
index 219049802cf25d28da2847fff536245925509954..39da93c21de05b3503c0b7e98c6bfefebee42b12 100644 (file)
@@ -185,6 +185,7 @@ struct gnttab_map_grant_ref {
     grant_handle_t handle;
     uint64_t dev_bus_addr;
 };
+DEFINE_GUEST_HANDLE_STRUCT(gnttab_map_grant_ref);
 
 /*
  * GNTTABOP_unmap_grant_ref: Destroy one or more grant-reference mappings
@@ -206,6 +207,7 @@ struct gnttab_unmap_grant_ref {
     /* OUT parameters. */
     int16_t  status;              /* GNTST_* */
 };
+DEFINE_GUEST_HANDLE_STRUCT(gnttab_unmap_grant_ref);
 
 /*
  * GNTTABOP_setup_table: Set up a grant table for <dom> comprising at least
@@ -223,8 +225,9 @@ struct gnttab_setup_table {
     uint32_t nr_frames;
     /* OUT parameters. */
     int16_t  status;              /* GNTST_* */
-    ulong *frame_list;
+    GUEST_HANDLE(ulong) frame_list;
 };
+DEFINE_GUEST_HANDLE_STRUCT(gnttab_setup_table);
 
 /*
  * GNTTABOP_dump_table: Dump the contents of the grant table to the
@@ -237,6 +240,7 @@ struct gnttab_dump_table {
     /* OUT parameters. */
     int16_t status;               /* GNTST_* */
 };
+DEFINE_GUEST_HANDLE_STRUCT(gnttab_dump_table);
 
 /*
  * GNTTABOP_transfer_grant_ref: Transfer <frame> to a foreign domain. The
@@ -255,7 +259,7 @@ struct gnttab_transfer {
     /* OUT parameters. */
     int16_t       status;
 };
-
+DEFINE_GUEST_HANDLE_STRUCT(gnttab_transfer);
 
 /*
  * GNTTABOP_copy: Hypervisor based copy
@@ -296,6 +300,7 @@ struct gnttab_copy {
        /* OUT parameters. */
        int16_t       status;
 };
+DEFINE_GUEST_HANDLE_STRUCT(gnttab_copy);
 
 /*
  * GNTTABOP_query_size: Query the current and maximum sizes of the shared
@@ -313,7 +318,7 @@ struct gnttab_query_size {
     uint32_t max_nr_frames;
     int16_t  status;              /* GNTST_* */
 };
-
+DEFINE_GUEST_HANDLE_STRUCT(gnttab_query_size);
 
 /*
  * Bitfield values for update_pin_status.flags.
diff --git a/include/xen/interface/io/fbif.h b/include/xen/interface/io/fbif.h
new file mode 100644 (file)
index 0000000..5a934dd
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ * fbif.h -- Xen virtual frame buffer device
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (C) 2005 Anthony Liguori <aliguori@us.ibm.com>
+ * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
+ */
+
+#ifndef __XEN_PUBLIC_IO_FBIF_H__
+#define __XEN_PUBLIC_IO_FBIF_H__
+
+/* Out events (frontend -> backend) */
+
+/*
+ * Out events may be sent only when requested by backend, and receipt
+ * of an unknown out event is an error.
+ */
+
+/* Event type 1 currently not used */
+/*
+ * Framebuffer update notification event
+ * Capable frontend sets feature-update in xenstore.
+ * Backend requests it by setting request-update in xenstore.
+ */
+#define XENFB_TYPE_UPDATE 2
+
+struct xenfb_update {
+       uint8_t type;           /* XENFB_TYPE_UPDATE */
+       int32_t x;              /* source x */
+       int32_t y;              /* source y */
+       int32_t width;          /* rect width */
+       int32_t height;         /* rect height */
+};
+
+#define XENFB_OUT_EVENT_SIZE 40
+
+union xenfb_out_event {
+       uint8_t type;
+       struct xenfb_update update;
+       char pad[XENFB_OUT_EVENT_SIZE];
+};
+
+/* In events (backend -> frontend) */
+
+/*
+ * Frontends should ignore unknown in events.
+ * No in events currently defined.
+ */
+
+#define XENFB_IN_EVENT_SIZE 40
+
+union xenfb_in_event {
+       uint8_t type;
+       char pad[XENFB_IN_EVENT_SIZE];
+};
+
+/* shared page */
+
+#define XENFB_IN_RING_SIZE 1024
+#define XENFB_IN_RING_LEN (XENFB_IN_RING_SIZE / XENFB_IN_EVENT_SIZE)
+#define XENFB_IN_RING_OFFS 1024
+#define XENFB_IN_RING(page) \
+       ((union xenfb_in_event *)((char *)(page) + XENFB_IN_RING_OFFS))
+#define XENFB_IN_RING_REF(page, idx) \
+       (XENFB_IN_RING((page))[(idx) % XENFB_IN_RING_LEN])
+
+#define XENFB_OUT_RING_SIZE 2048
+#define XENFB_OUT_RING_LEN (XENFB_OUT_RING_SIZE / XENFB_OUT_EVENT_SIZE)
+#define XENFB_OUT_RING_OFFS (XENFB_IN_RING_OFFS + XENFB_IN_RING_SIZE)
+#define XENFB_OUT_RING(page) \
+       ((union xenfb_out_event *)((char *)(page) + XENFB_OUT_RING_OFFS))
+#define XENFB_OUT_RING_REF(page, idx) \
+       (XENFB_OUT_RING((page))[(idx) % XENFB_OUT_RING_LEN])
+
+struct xenfb_page {
+       uint32_t in_cons, in_prod;
+       uint32_t out_cons, out_prod;
+
+       int32_t width;          /* width of the framebuffer (in pixels) */
+       int32_t height;         /* height of the framebuffer (in pixels) */
+       uint32_t line_length;   /* length of a row of pixels (in bytes) */
+       uint32_t mem_length;    /* length of the framebuffer (in bytes) */
+       uint8_t depth;          /* depth of a pixel (in bits) */
+
+       /*
+        * Framebuffer page directory
+        *
+        * Each directory page holds PAGE_SIZE / sizeof(*pd)
+        * framebuffer pages, and can thus map up to PAGE_SIZE *
+        * PAGE_SIZE / sizeof(*pd) bytes.  With PAGE_SIZE == 4096 and
+        * sizeof(unsigned long) == 4, that's 4 Megs.  Two directory
+        * pages should be enough for a while.
+        */
+       unsigned long pd[2];
+};
+
+/*
+ * Wart: xenkbd needs to know resolution.  Put it here until a better
+ * solution is found, but don't leak it to the backend.
+ */
+#ifdef __KERNEL__
+#define XENFB_WIDTH 800
+#define XENFB_HEIGHT 600
+#define XENFB_DEPTH 32
+#endif
+
+#endif
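
To make the shared-page layout above more concrete, here is a hedged, standalone user-space sketch of the frontend side producing one XENFB_TYPE_UPDATE event into the out ring. The macros and structs are reproduced from fbif.h; the shared page is simulated with a local 4096-byte buffer and no event channel is signalled, so this illustrates only the ring arithmetic, not driver code.

/* Illustration only: fbif.h definitions reproduced locally; the shared page is
 * simulated with a local buffer and no event channel is signalled. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define XENFB_TYPE_UPDATE 2
#define XENFB_OUT_EVENT_SIZE 40

struct xenfb_update {
        uint8_t type;
        int32_t x, y, width, height;
};

union xenfb_out_event {
        uint8_t type;
        struct xenfb_update update;
        char pad[XENFB_OUT_EVENT_SIZE];
};

struct xenfb_page {
        uint32_t in_cons, in_prod;
        uint32_t out_cons, out_prod;
        int32_t width, height;
        uint32_t line_length, mem_length;
        uint8_t depth;
        unsigned long pd[2];
};

#define XENFB_IN_RING_SIZE 1024
#define XENFB_IN_RING_OFFS 1024
#define XENFB_OUT_RING_SIZE 2048
#define XENFB_OUT_RING_LEN (XENFB_OUT_RING_SIZE / XENFB_OUT_EVENT_SIZE)
#define XENFB_OUT_RING_OFFS (XENFB_IN_RING_OFFS + XENFB_IN_RING_SIZE)
#define XENFB_OUT_RING(page) \
        ((union xenfb_out_event *)((char *)(page) + XENFB_OUT_RING_OFFS))
#define XENFB_OUT_RING_REF(page, idx) \
        (XENFB_OUT_RING((page))[(idx) % XENFB_OUT_RING_LEN])

int main(void)
{
        static union { struct xenfb_page hdr; char bytes[4096]; } shared;
        struct xenfb_page *page = &shared.hdr;
        uint32_t prod = page->out_prod;
        union xenfb_out_event *ev = &XENFB_OUT_RING_REF(page, prod);

        memset(ev, 0, sizeof(*ev));
        ev->update.type   = XENFB_TYPE_UPDATE;
        ev->update.x      = 0;
        ev->update.y      = 0;
        ev->update.width  = 800;
        ev->update.height = 600;

        page->out_prod = prod + 1;  /* a real frontend adds a write barrier and
                                     * notifies the backend via its event channel */
        printf("queued update, out_prod=%u (slot %u of %d)\n",
               page->out_prod, prod % XENFB_OUT_RING_LEN, XENFB_OUT_RING_LEN);
        return 0;
}
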
diff --git a/include/xen/interface/io/kbdif.h b/include/xen/interface/io/kbdif.h
new file mode 100644 (file)
index 0000000..fb97f42
--- /dev/null
@@ -0,0 +1,114 @@
+/*
+ * kbdif.h -- Xen virtual keyboard/mouse
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (C) 2005 Anthony Liguori <aliguori@us.ibm.com>
+ * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
+ */
+
+#ifndef __XEN_PUBLIC_IO_KBDIF_H__
+#define __XEN_PUBLIC_IO_KBDIF_H__
+
+/* In events (backend -> frontend) */
+
+/*
+ * Frontends should ignore unknown in events.
+ */
+
+/* Pointer movement event */
+#define XENKBD_TYPE_MOTION  1
+/* Event type 2 currently not used */
+/* Key event (includes pointer buttons) */
+#define XENKBD_TYPE_KEY     3
+/*
+ * Pointer position event
+ * Capable backend sets feature-abs-pointer in xenstore.
+ * Frontend requests it instead of XENKBD_TYPE_MOTION by setting
+ * request-abs-update in xenstore.
+ */
+#define XENKBD_TYPE_POS     4
+
+struct xenkbd_motion {
+       uint8_t type;           /* XENKBD_TYPE_MOTION */
+       int32_t rel_x;          /* relative X motion */
+       int32_t rel_y;          /* relative Y motion */
+};
+
+struct xenkbd_key {
+       uint8_t type;           /* XENKBD_TYPE_KEY */
+       uint8_t pressed;        /* 1 if pressed; 0 otherwise */
+       uint32_t keycode;       /* KEY_* from linux/input.h */
+};
+
+struct xenkbd_position {
+       uint8_t type;           /* XENKBD_TYPE_POS */
+       int32_t abs_x;          /* absolute X position (in FB pixels) */
+       int32_t abs_y;          /* absolute Y position (in FB pixels) */
+};
+
+#define XENKBD_IN_EVENT_SIZE 40
+
+union xenkbd_in_event {
+       uint8_t type;
+       struct xenkbd_motion motion;
+       struct xenkbd_key key;
+       struct xenkbd_position pos;
+       char pad[XENKBD_IN_EVENT_SIZE];
+};
+
+/* Out events (frontend -> backend) */
+
+/*
+ * Out events may be sent only when requested by backend, and receipt
+ * of an unknown out event is an error.
+ * No out events currently defined.
+ */
+
+#define XENKBD_OUT_EVENT_SIZE 40
+
+union xenkbd_out_event {
+       uint8_t type;
+       char pad[XENKBD_OUT_EVENT_SIZE];
+};
+
+/* shared page */
+
+#define XENKBD_IN_RING_SIZE 2048
+#define XENKBD_IN_RING_LEN (XENKBD_IN_RING_SIZE / XENKBD_IN_EVENT_SIZE)
+#define XENKBD_IN_RING_OFFS 1024
+#define XENKBD_IN_RING(page) \
+       ((union xenkbd_in_event *)((char *)(page) + XENKBD_IN_RING_OFFS))
+#define XENKBD_IN_RING_REF(page, idx) \
+       (XENKBD_IN_RING((page))[(idx) % XENKBD_IN_RING_LEN])
+
+#define XENKBD_OUT_RING_SIZE 1024
+#define XENKBD_OUT_RING_LEN (XENKBD_OUT_RING_SIZE / XENKBD_OUT_EVENT_SIZE)
+#define XENKBD_OUT_RING_OFFS (XENKBD_IN_RING_OFFS + XENKBD_IN_RING_SIZE)
+#define XENKBD_OUT_RING(page) \
+       ((union xenkbd_out_event *)((char *)(page) + XENKBD_OUT_RING_OFFS))
+#define XENKBD_OUT_RING_REF(page, idx) \
+       (XENKBD_OUT_RING((page))[(idx) % XENKBD_OUT_RING_LEN])
+
+struct xenkbd_page {
+       uint32_t in_cons, in_prod;
+       uint32_t out_cons, out_prod;
+};
+
+#endif
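
Complementing the framebuffer sketch, the following hedged, standalone illustration shows the consumer side: how a frontend might drain and decode events from the keyboard in ring. The kbdif.h definitions are reproduced locally and the shared page is simulated; real drivers also handle memory barriers and event-channel notification.

/* Illustration only: kbdif.h definitions reproduced locally; the shared page is
 * simulated, and one fake key event is queued so the drain loop has work to do. */
#include <stdint.h>
#include <stdio.h>

#define XENKBD_TYPE_MOTION 1
#define XENKBD_TYPE_KEY    3
#define XENKBD_TYPE_POS    4
#define XENKBD_IN_EVENT_SIZE 40
#define XENKBD_IN_RING_SIZE 2048
#define XENKBD_IN_RING_LEN (XENKBD_IN_RING_SIZE / XENKBD_IN_EVENT_SIZE)
#define XENKBD_IN_RING_OFFS 1024
#define XENKBD_IN_RING(page) \
        ((union xenkbd_in_event *)((char *)(page) + XENKBD_IN_RING_OFFS))
#define XENKBD_IN_RING_REF(page, idx) \
        (XENKBD_IN_RING((page))[(idx) % XENKBD_IN_RING_LEN])

struct xenkbd_motion   { uint8_t type; int32_t rel_x, rel_y; };
struct xenkbd_key      { uint8_t type; uint8_t pressed; uint32_t keycode; };
struct xenkbd_position { uint8_t type; int32_t abs_x, abs_y; };

union xenkbd_in_event {
        uint8_t type;
        struct xenkbd_motion motion;
        struct xenkbd_key key;
        struct xenkbd_position pos;
        char pad[XENKBD_IN_EVENT_SIZE];
};

struct xenkbd_page { uint32_t in_cons, in_prod, out_cons, out_prod; };

static void handle(const union xenkbd_in_event *ev)
{
        switch (ev->type) {
        case XENKBD_TYPE_MOTION:
                printf("motion dx=%d dy=%d\n", ev->motion.rel_x, ev->motion.rel_y);
                break;
        case XENKBD_TYPE_KEY:
                printf("key %u %s\n", ev->key.keycode,
                       ev->key.pressed ? "down" : "up");
                break;
        case XENKBD_TYPE_POS:
                printf("pos x=%d y=%d\n", ev->pos.abs_x, ev->pos.abs_y);
                break;
        default:
                break;  /* unknown in events are ignored, per the comment above */
        }
}

int main(void)
{
        static union { struct xenkbd_page hdr; char bytes[4096]; } shared;
        struct xenkbd_page *page = &shared.hdr;

        /* Pretend the backend produced one key event and bumped in_prod. */
        union xenkbd_in_event *slot = &XENKBD_IN_RING_REF(page, page->in_prod);
        slot->key.type = XENKBD_TYPE_KEY;
        slot->key.pressed = 1;
        slot->key.keycode = 30;  /* KEY_A from linux/input.h */
        page->in_prod++;

        while (page->in_cons != page->in_prod) {
                handle(&XENKBD_IN_RING_REF(page, page->in_cons));
                page->in_cons++;
        }
        return 0;
}
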
diff --git a/include/xen/interface/io/protocols.h b/include/xen/interface/io/protocols.h
new file mode 100644 (file)
index 0000000..01fc8ae
--- /dev/null
@@ -0,0 +1,21 @@
+#ifndef __XEN_PROTOCOLS_H__
+#define __XEN_PROTOCOLS_H__
+
+#define XEN_IO_PROTO_ABI_X86_32     "x86_32-abi"
+#define XEN_IO_PROTO_ABI_X86_64     "x86_64-abi"
+#define XEN_IO_PROTO_ABI_IA64       "ia64-abi"
+#define XEN_IO_PROTO_ABI_POWERPC64  "powerpc64-abi"
+
+#if defined(__i386__)
+# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_32
+#elif defined(__x86_64__)
+# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_64
+#elif defined(__ia64__)
+# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_IA64
+#elif defined(__powerpc64__)
+# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_POWERPC64
+#else
+# error arch fixup needed here
+#endif
+
+#endif
index af36ead168170c1ded3819d523318490342d848e..da768469aa92b1a3cd948fc5f03f3cf0bd69751d 100644 (file)
@@ -29,7 +29,7 @@ struct xen_memory_reservation {
      *   OUT: GMFN bases of extents that were allocated
      *   (NB. This command also updates the mach_to_phys translation table)
      */
-    GUEST_HANDLE(ulong) extent_start;
+    ulong extent_start;
 
     /* Number of extents, and size/alignment of each (2^extent_order pages). */
     unsigned long  nr_extents;
@@ -50,7 +50,6 @@ struct xen_memory_reservation {
     domid_t        domid;
 
 };
-DEFINE_GUEST_HANDLE_STRUCT(xen_memory_reservation);
 
 /*
  * Returns the maximum machine frame number of mapped RAM in this system.
@@ -86,7 +85,7 @@ struct xen_machphys_mfn_list {
      * any large discontiguities in the machine address space, 2MB gaps in
      * the machphys table will be represented by an MFN base of zero.
      */
-    GUEST_HANDLE(ulong) extent_start;
+    ulong extent_start;
 
     /*
      * Number of extents written to the above array. This will be smaller
@@ -94,7 +93,6 @@ struct xen_machphys_mfn_list {
      */
     unsigned int nr_extents;
 };
-DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_mfn_list);
 
 /*
  * Sets the GPFN at which a particular page appears in the specified guest's
@@ -117,7 +115,6 @@ struct xen_add_to_physmap {
     /* GPFN where the source mapping page should appear. */
     unsigned long gpfn;
 };
-DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap);
 
 /*
  * Translates a list of domain-specific GPFNs into MFNs. Returns a -ve error
@@ -132,14 +129,13 @@ struct xen_translate_gpfn_list {
     unsigned long nr_gpfns;
 
     /* List of GPFNs to translate. */
-    GUEST_HANDLE(ulong) gpfn_list;
+    ulong gpfn_list;
 
     /*
      * Output list to contain MFN translations. May be the same as the input
      * list (in which case each input GPFN is overwritten with the output MFN).
      */
-    GUEST_HANDLE(ulong) mfn_list;
+    ulong mfn_list;
 };
-DEFINE_GUEST_HANDLE_STRUCT(xen_translate_gpfn_list);
 
 #endif /* __XEN_PUBLIC_MEMORY_H__ */
index b05d8a6d91434f10b69b67e9c85d8835d92fda11..87e6f8a4866198d022930a2af17db0e23153a7b6 100644 (file)
@@ -85,6 +85,7 @@ struct vcpu_runstate_info {
                 */
                uint64_t time[4];
 };
+DEFINE_GUEST_HANDLE_STRUCT(vcpu_runstate_info);
 
 /* VCPU is currently running on a physical CPU. */
 #define RUNSTATE_running  0
@@ -119,6 +120,7 @@ struct vcpu_runstate_info {
 #define VCPUOP_register_runstate_memory_area 5
 struct vcpu_register_runstate_memory_area {
                union {
+                               GUEST_HANDLE(vcpu_runstate_info) h;
                                struct vcpu_runstate_info *v;
                                uint64_t p;
                } addr;
@@ -134,6 +136,7 @@ struct vcpu_register_runstate_memory_area {
 struct vcpu_set_periodic_timer {
                uint64_t period_ns;
 };
+DEFINE_GUEST_HANDLE_STRUCT(vcpu_set_periodic_timer);
 
 /*
  * Set or stop a VCPU's single-shot timer. Every VCPU has one single-shot
@@ -145,6 +148,7 @@ struct vcpu_set_singleshot_timer {
                uint64_t timeout_abs_ns;
                uint32_t flags;                    /* VCPU_SSHOTTMR_??? */
 };
+DEFINE_GUEST_HANDLE_STRUCT(vcpu_set_singleshot_timer);
 
 /* Flags to VCPUOP_set_singleshot_timer. */
  /* Require the timeout to be in the future (return -ETIME if it's passed). */
@@ -164,5 +168,6 @@ struct vcpu_register_vcpu_info {
     uint32_t offset; /* offset within page */
     uint32_t rsvd;   /* unused */
 };
+DEFINE_GUEST_HANDLE_STRUCT(vcpu_register_vcpu_info);
 
 #endif /* __XEN_PUBLIC_VCPU_H__ */
index 518a5bf79ed34becbf0b67e822421387124da534..9b018da48cf3145705af0a2f4f0b4228e8b1b889 100644 (file)
 #define __HYPERVISOR_physdev_op           33
 #define __HYPERVISOR_hvm_op               34
 
+/* Architecture-specific hypercall definitions. */
+#define __HYPERVISOR_arch_0               48
+#define __HYPERVISOR_arch_1               49
+#define __HYPERVISOR_arch_2               50
+#define __HYPERVISOR_arch_3               51
+#define __HYPERVISOR_arch_4               52
+#define __HYPERVISOR_arch_5               53
+#define __HYPERVISOR_arch_6               54
+#define __HYPERVISOR_arch_7               55
+
 /*
  * VIRTUAL INTERRUPTS
  *
 #define VIRQ_CONSOLE    2  /* (DOM0) Bytes received on emergency console. */
 #define VIRQ_DOM_EXC    3  /* (DOM0) Exceptional event for some domain.   */
 #define VIRQ_DEBUGGER   6  /* (DOM0) A domain has paused for debugging.   */
-#define NR_VIRQS        8
 
+/* Architecture-specific VIRQ definitions. */
+#define VIRQ_ARCH_0    16
+#define VIRQ_ARCH_1    17
+#define VIRQ_ARCH_2    18
+#define VIRQ_ARCH_3    19
+#define VIRQ_ARCH_4    20
+#define VIRQ_ARCH_5    21
+#define VIRQ_ARCH_6    22
+#define VIRQ_ARCH_7    23
+
+#define NR_VIRQS       24
 /*
  * MMU-UPDATE REQUESTS
  *
diff --git a/include/xen/interface/xencomm.h b/include/xen/interface/xencomm.h
new file mode 100644 (file)
index 0000000..ac45e07
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (C) IBM Corp. 2006
+ */
+
+#ifndef _XEN_XENCOMM_H_
+#define _XEN_XENCOMM_H_
+
+/* A xencomm descriptor is a scatter/gather list containing physical
+ * addresses corresponding to a virtually contiguous memory area. The
+ * hypervisor translates these physical addresses to machine addresses to copy
+ * to and from the virtually contiguous area.
+ */
+
+#define XENCOMM_MAGIC 0x58434F4D /* 'XCOM' */
+#define XENCOMM_INVALID (~0UL)
+
+struct xencomm_desc {
+    uint32_t magic;
+    uint32_t nr_addrs; /* the number of entries in address[] */
+    uint64_t address[0];
+};
+
+#endif /* _XEN_XENCOMM_H_ */
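
As a rough illustration of the descriptor format described above, the sketch below builds a xencomm_desc for a virtually contiguous buffer, one entry per page. It is standalone and hedged: the page size is hardcoded to 4096 and virt-to-phys translation is faked with an identity function where a real implementation would use the architecture's xencomm_vtop().

/* Illustration only: builds a xencomm_desc in user space; virt-to-phys is faked
 * with an identity function where real code would use the arch's xencomm_vtop(). */
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#define XENCOMM_MAGIC   0x58434F4D  /* 'XCOM' */
#define PAGE_SIZE_DEMO  4096UL

struct xencomm_desc {
        uint32_t magic;
        uint32_t nr_addrs;              /* number of entries in address[] */
        uint64_t address[];             /* address[0] in the header above */
};

static uint64_t fake_vtop(unsigned long vaddr)
{
        return vaddr;                   /* stand-in for xencomm_vtop() */
}

static struct xencomm_desc *build_desc(void *ptr, unsigned long bytes)
{
        unsigned long start = (unsigned long)ptr;
        unsigned long end = start + bytes;
        unsigned long v;
        uint32_t n = 0, i;
        struct xencomm_desc *desc;

        /* one entry per page touched by [ptr, ptr + bytes) */
        for (v = start & ~(PAGE_SIZE_DEMO - 1); v < end; v += PAGE_SIZE_DEMO)
                n++;

        desc = malloc(sizeof(*desc) + n * sizeof(desc->address[0]));
        if (!desc)
                return NULL;
        desc->magic = XENCOMM_MAGIC;
        desc->nr_addrs = n;
        for (i = 0, v = start & ~(PAGE_SIZE_DEMO - 1); i < n; i++, v += PAGE_SIZE_DEMO)
                desc->address[i] = fake_vtop(v);
        return desc;
}

int main(void)
{
        static char buf[3 * 4096];
        struct xencomm_desc *d = build_desc(buf, sizeof(buf));

        if (d) {
                printf("magic=%#x nr_addrs=%u first=%#llx\n",
                       d->magic, d->nr_addrs, (unsigned long long)d->address[0]);
                free(d);
        }
        return 0;
}
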
index 031ef22a971e551be070a15e61c4ad6f10ca225e..eaf85fab12632d45cc00d8976d9108a3eacc2850 100644 (file)
@@ -1,180 +1 @@
-#ifndef __XEN_PAGE_H
-#define __XEN_PAGE_H
-
-#include <linux/pfn.h>
-
-#include <asm/uaccess.h>
-#include <asm/pgtable.h>
-
-#include <xen/features.h>
-
-#ifdef CONFIG_X86_PAE
-/* Xen machine address */
-typedef struct xmaddr {
-       unsigned long long maddr;
-} xmaddr_t;
-
-/* Xen pseudo-physical address */
-typedef struct xpaddr {
-       unsigned long long paddr;
-} xpaddr_t;
-#else
-/* Xen machine address */
-typedef struct xmaddr {
-       unsigned long maddr;
-} xmaddr_t;
-
-/* Xen pseudo-physical address */
-typedef struct xpaddr {
-       unsigned long paddr;
-} xpaddr_t;
-#endif
-
-#define XMADDR(x)      ((xmaddr_t) { .maddr = (x) })
-#define XPADDR(x)      ((xpaddr_t) { .paddr = (x) })
-
-/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
-#define INVALID_P2M_ENTRY      (~0UL)
-#define FOREIGN_FRAME_BIT      (1UL<<31)
-#define FOREIGN_FRAME(m)       ((m) | FOREIGN_FRAME_BIT)
-
-extern unsigned long *phys_to_machine_mapping;
-
-static inline unsigned long pfn_to_mfn(unsigned long pfn)
-{
-       if (xen_feature(XENFEAT_auto_translated_physmap))
-               return pfn;
-
-       return phys_to_machine_mapping[(unsigned int)(pfn)] &
-               ~FOREIGN_FRAME_BIT;
-}
-
-static inline int phys_to_machine_mapping_valid(unsigned long pfn)
-{
-       if (xen_feature(XENFEAT_auto_translated_physmap))
-               return 1;
-
-       return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY);
-}
-
-static inline unsigned long mfn_to_pfn(unsigned long mfn)
-{
-       unsigned long pfn;
-
-       if (xen_feature(XENFEAT_auto_translated_physmap))
-               return mfn;
-
-#if 0
-       if (unlikely((mfn >> machine_to_phys_order) != 0))
-               return max_mapnr;
-#endif
-
-       pfn = 0;
-       /*
-        * The array access can fail (e.g., device space beyond end of RAM).
-        * In such cases it doesn't matter what we return (we return garbage),
-        * but we must handle the fault without crashing!
-        */
-       __get_user(pfn, &machine_to_phys_mapping[mfn]);
-
-       return pfn;
-}
-
-static inline xmaddr_t phys_to_machine(xpaddr_t phys)
-{
-       unsigned offset = phys.paddr & ~PAGE_MASK;
-       return XMADDR(PFN_PHYS((u64)pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset);
-}
-
-static inline xpaddr_t machine_to_phys(xmaddr_t machine)
-{
-       unsigned offset = machine.maddr & ~PAGE_MASK;
-       return XPADDR(PFN_PHYS((u64)mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
-}
-
-/*
- * We detect special mappings in one of two ways:
- *  1. If the MFN is an I/O page then Xen will set the m2p entry
- *     to be outside our maximum possible pseudophys range.
- *  2. If the MFN belongs to a different domain then we will certainly
- *     not have MFN in our p2m table. Conversely, if the page is ours,
- *     then we'll have p2m(m2p(MFN))==MFN.
- * If we detect a special mapping then it doesn't have a 'struct page'.
- * We force !pfn_valid() by returning an out-of-range pointer.
- *
- * NB. These checks require that, for any MFN that is not in our reservation,
- * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
- * we are foreign-mapping the MFN, and the other domain as m2p(MFN) == PFN.
- * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
- *
- * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
- *      use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
- *      require. In all the cases we care about, the FOREIGN_FRAME bit is
- *      masked (e.g., pfn_to_mfn()) so behaviour there is correct.
- */
-static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
-{
-       extern unsigned long max_mapnr;
-       unsigned long pfn = mfn_to_pfn(mfn);
-       if ((pfn < max_mapnr)
-           && !xen_feature(XENFEAT_auto_translated_physmap)
-           && (phys_to_machine_mapping[pfn] != mfn))
-               return max_mapnr; /* force !pfn_valid() */
-       return pfn;
-}
-
-static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-{
-       if (xen_feature(XENFEAT_auto_translated_physmap)) {
-               BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
-               return;
-       }
-       phys_to_machine_mapping[pfn] = mfn;
-}
-
-/* VIRT <-> MACHINE conversion */
-#define virt_to_machine(v)     (phys_to_machine(XPADDR(__pa(v))))
-#define virt_to_mfn(v)         (pfn_to_mfn(PFN_DOWN(__pa(v))))
-#define mfn_to_virt(m)         (__va(mfn_to_pfn(m) << PAGE_SHIFT))
-
-#ifdef CONFIG_X86_PAE
-#define pte_mfn(_pte) (((_pte).pte_low >> PAGE_SHIFT) |                        \
-                      (((_pte).pte_high & 0xfff) << (32-PAGE_SHIFT)))
-
-static inline pte_t mfn_pte(unsigned long page_nr, pgprot_t pgprot)
-{
-       pte_t pte;
-
-       pte.pte_high = (page_nr >> (32 - PAGE_SHIFT)) |
-               (pgprot_val(pgprot) >> 32);
-       pte.pte_high &= (__supported_pte_mask >> 32);
-       pte.pte_low = ((page_nr << PAGE_SHIFT) | pgprot_val(pgprot));
-       pte.pte_low &= __supported_pte_mask;
-
-       return pte;
-}
-
-static inline unsigned long long pte_val_ma(pte_t x)
-{
-       return x.pte;
-}
-#define pmd_val_ma(v) ((v).pmd)
-#define pud_val_ma(v) ((v).pgd.pgd)
-#define __pte_ma(x)    ((pte_t) { .pte = (x) })
-#define __pmd_ma(x)    ((pmd_t) { (x) } )
-#else  /* !X86_PAE */
-#define pte_mfn(_pte) ((_pte).pte_low >> PAGE_SHIFT)
-#define mfn_pte(pfn, prot)     __pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-#define pte_val_ma(x)  ((x).pte)
-#define pmd_val_ma(v)  ((v).pud.pgd.pgd)
-#define __pte_ma(x)    ((pte_t) { (x) } )
-#endif /* CONFIG_X86_PAE */
-
-#define pgd_val_ma(x)  ((x).pgd)
-
-
-xmaddr_t arbitrary_virt_to_machine(unsigned long address);
-void make_lowmem_page_readonly(void *vaddr);
-void make_lowmem_page_readwrite(void *vaddr);
-
-#endif /* __XEN_PAGE_H */
+#include <asm/xen/page.h>
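
The helpers removed here (now provided by asm/xen/page.h) split an address into a frame number plus an in-page offset, translate the frame through the p2m table, and reattach the offset. Below is a hedged, standalone sketch of that arithmetic only; the phys_to_machine_mapping array is replaced by a made-up four-entry table, so the numbers are purely illustrative.

/* Illustration of the pseudo-physical <-> machine split used by the helpers
 * being moved: frame number looked up in a (here, made-up) p2m table, in-page
 * offset carried across unchanged. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT_DEMO 12
#define PAGE_SIZE_DEMO  (1UL << PAGE_SHIFT_DEMO)

static const unsigned long fake_p2m[4] = { 7, 42, 3, 9 };  /* pfn -> mfn, invented */

static unsigned long pfn_to_mfn_demo(unsigned long pfn)
{
        return fake_p2m[pfn];           /* real code indexes phys_to_machine_mapping */
}

static uint64_t phys_to_machine_demo(uint64_t phys)
{
        uint64_t offset = phys & (PAGE_SIZE_DEMO - 1);
        uint64_t pfn = phys >> PAGE_SHIFT_DEMO;

        return ((uint64_t)pfn_to_mfn_demo(pfn) << PAGE_SHIFT_DEMO) | offset;
}

int main(void)
{
        uint64_t phys = (1UL << PAGE_SHIFT_DEMO) + 0x123;   /* pfn 1, offset 0x123 */

        printf("phys %#llx -> machine %#llx\n",
               (unsigned long long)phys,
               (unsigned long long)phys_to_machine_demo(phys));
        /* expected 0x2a123: mfn 42 from the table, offset preserved */
        return 0;
}
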
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
new file mode 100644 (file)
index 0000000..10ddfe0
--- /dev/null
@@ -0,0 +1,8 @@
+#ifndef INCLUDE_XEN_OPS_H
+#define INCLUDE_XEN_OPS_H
+
+#include <linux/percpu.h>
+
+DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);
+
+#endif /* INCLUDE_XEN_OPS_H */
index 6f7c290651ae49d752b9c01f2178875c742f5ab2..6369d89c25d5346a6e8dcc4948218c883d900f37 100644 (file)
@@ -97,6 +97,7 @@ struct xenbus_driver {
        int (*uevent)(struct xenbus_device *, char **, int, char *, int);
        struct device_driver driver;
        int (*read_otherend_details)(struct xenbus_device *dev);
+       int (*is_ready)(struct xenbus_device *dev);
 };
 
 static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv)
diff --git a/include/xen/xencomm.h b/include/xen/xencomm.h
new file mode 100644 (file)
index 0000000..e43b039
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ *
+ * Copyright (C) IBM Corp. 2006
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ *          Jerone Young <jyoung5@us.ibm.com>
+ */
+
+#ifndef _LINUX_XENCOMM_H_
+#define _LINUX_XENCOMM_H_
+
+#include <xen/interface/xencomm.h>
+
+#define XENCOMM_MINI_ADDRS 3
+struct xencomm_mini {
+       struct xencomm_desc _desc;
+       uint64_t address[XENCOMM_MINI_ADDRS];
+};
+
+/* To avoid additional virt to phys conversion, an opaque structure is
+   presented.  */
+struct xencomm_handle;
+
+extern void xencomm_free(struct xencomm_handle *desc);
+extern struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes);
+extern struct xencomm_handle *__xencomm_map_no_alloc(void *ptr,
+                       unsigned long bytes,  struct xencomm_mini *xc_area);
+
+#if 0
+#define XENCOMM_MINI_ALIGNED(xc_desc, n)                               \
+       struct xencomm_mini xc_desc ## _base[(n)]                       \
+       __attribute__((__aligned__(sizeof(struct xencomm_mini))));      \
+       struct xencomm_mini *xc_desc = &xc_desc ## _base[0];
+#else
+/*
+ * gcc bug workaround:
+ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=16660
+ * gcc doesn't properly handle stack variables with
+ * __attribute__((__aligned__(sizeof(struct xencomm_mini))))
+ */
+#define XENCOMM_MINI_ALIGNED(xc_desc, n)                               \
+       unsigned char xc_desc ## _base[((n) + 1 ) *                     \
+                                      sizeof(struct xencomm_mini)];    \
+       struct xencomm_mini *xc_desc = (struct xencomm_mini *)          \
+               ((unsigned long)xc_desc ## _base +                      \
+                (sizeof(struct xencomm_mini) -                         \
+                 ((unsigned long)xc_desc ## _base) %                   \
+                 sizeof(struct xencomm_mini)));
+#endif
+#define xencomm_map_no_alloc(ptr, bytes)                       \
+       ({ XENCOMM_MINI_ALIGNED(xc_desc, 1);                    \
+               __xencomm_map_no_alloc(ptr, bytes, xc_desc); })
+
+/* provided by architecture code: */
+extern unsigned long xencomm_vtop(unsigned long vaddr);
+
+static inline void *xencomm_pa(void *ptr)
+{
+       return (void *)xencomm_vtop((unsigned long)ptr);
+}
+
+#define xen_guest_handle(hnd)  ((hnd).p)
+
+#endif /* _LINUX_XENCOMM_H_ */
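
The open-coded XENCOMM_MINI_ALIGNED workaround above over-allocates by one element and then rounds the pointer up to the next multiple of sizeof(struct xencomm_mini). The hedged, standalone sketch below demonstrates that rounding in user space; the demo struct only stands in for struct xencomm_mini and its sizes are not meaningful.

/* Demonstrates the manual-alignment trick from XENCOMM_MINI_ALIGNED above:
 * over-allocate by one element, then round the pointer up to the element size.
 * Standalone sketch; the struct here only stands in for struct xencomm_mini. */
#include <stdint.h>
#include <stdio.h>

struct xencomm_mini_demo {
        uint64_t header[2];     /* stand-in for the embedded xencomm_desc */
        uint64_t address[3];    /* XENCOMM_MINI_ADDRS entries */
};

int main(void)
{
        enum { N = 2 };         /* number of aligned elements actually needed */
        unsigned char base[(N + 1) * sizeof(struct xencomm_mini_demo)];
        struct xencomm_mini_demo *aligned = (struct xencomm_mini_demo *)
                ((unsigned long)base +
                 (sizeof(struct xencomm_mini_demo) -
                  (unsigned long)base % sizeof(struct xencomm_mini_demo)));

        printf("base=%p aligned=%p remainder=%lu\n",
               (void *)base, (void *)aligned,
               (unsigned long)aligned % sizeof(struct xencomm_mini_demo));
        /* remainder is always 0, and at least N elements still fit after 'aligned' */
        return 0;
}
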
index 0014b03adaca0af7d09d25d3b5c0df3c007b2462..09ca69b2c17d4e4174d263bcbb35d64119bef04b 100644 (file)
@@ -8128,7 +8128,7 @@ void __init sched_init(void)
         * we use alloc_bootmem().
         */
        if (alloc_size) {
-               ptr = (unsigned long)alloc_bootmem_low(alloc_size);
+               ptr = (unsigned long)alloc_bootmem(alloc_size);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
                init_task_group.se = (struct sched_entity **)ptr;
index 623ef24c23812894c1617f0db781fb3065d6bdf2..754cc0027f2a8acc6f8358dd6a1ae0b1346845ba 100644 (file)
@@ -25,6 +25,17 @@ config ENABLE_MUST_CHECK
          suppress the "warning: ignoring return value of 'foo', declared with
          attribute warn_unused_result" messages.
 
+config FRAME_WARN
+       int "Warn for stack frames larger than (needs gcc 4.4)"
+       range 0 8192
+       default 1024 if !64BIT
+       default 2048 if 64BIT
+       help
+         Tell gcc to warn at build time for stack frames larger than this.
+         Setting this too low will cause a lot of warnings.
+         Setting it to 0 disables the warning.
+         Requires gcc 4.4
+
 config MAGIC_SYSRQ
        bool "Magic SysRq key"
        depends on !UML
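
The FRAME_WARN option added above only stores a byte limit; a plausible way to see its effect (assuming, as the "needs gcc 4.4" note suggests, that the build maps the value onto gcc's -Wframe-larger-than= switch — that wiring is not part of the hunk shown) is a function whose locals exceed the limit:

/* bigframe.c -- with the default 32-bit limit of 1024:
 *   gcc -c -Wframe-larger-than=1024 bigframe.c
 * warns that the frame of foo() is larger than 1024 bytes. */
void foo(char *out)
{
        char big[2048];         /* 2 KiB of locals, well over the 1024-byte limit */
        int i;

        for (i = 0; i < 2048; i++)
                big[i] = (char)i;
        out[0] = big[100];      /* keep the array live so it is not optimized away */
}
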
index ead50c7c0d40f54e8b05d7b1c9e04cdef5b92fb2..201cbfc6b9ece724b3cda9427f27d976534567c7 100644 (file)
@@ -573,7 +573,8 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
                        int fsize = ro->count * sizeof(struct can_filter);
                        if (len > fsize)
                                len = fsize;
-                       err = copy_to_user(optval, ro->filter, len);
+                       if (copy_to_user(optval, ro->filter, len))
+                               err = -EFAULT;
                } else
                        len = 0;
                release_sock(sk);
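
The change above, and the matching ones in dccp_probe, tcp_probe and TIPC further down, all correct the same mistake: copy_to_user() returns the number of bytes it could not copy, not a negative errno, so its raw result must be converted to -EFAULT before being handed back to user space. Here is a hedged, standalone model of that convention, with copy_to_user() replaced by a stub so it compiles outside the kernel:

/* User-space model of the copy_to_user() return convention; the stub pretends
 * the last 'bad' bytes of the destination are unwritable. */
#include <stdio.h>
#include <string.h>

#define EFAULT 14

static unsigned long copy_to_user_stub(void *to, const void *from,
                                       unsigned long n, unsigned long bad)
{
        unsigned long copied = n > bad ? n - bad : 0;

        memcpy(to, from, copied);
        return n - copied;              /* bytes NOT copied, like copy_to_user() */
}

int main(void)
{
        char src[8] = "abcdefg", dst[8];
        int err;

        /* Wrong: hands a positive byte count back as if it were a status code. */
        err = (int)copy_to_user_stub(dst, src, sizeof(src), 2);
        printf("raw return: %d\n", err);

        /* Right: any nonzero result means the copy faulted, so report -EFAULT. */
        err = copy_to_user_stub(dst, src, sizeof(src), 2) ? -EFAULT : 0;
        printf("converted : %d\n", err);
        return 0;
}
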
index a29b43d0b450a5e1a265375bd4f77797f0b23d79..0133b5ebd545287be9a2ee19d8fbf75db8e9ca59 100644 (file)
@@ -323,6 +323,11 @@ static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr)
                bytes_remaining -= eeprom.len;
        }
 
+       eeprom.len = userbuf - (useraddr + sizeof(eeprom));
+       eeprom.offset -= eeprom.len;
+       if (copy_to_user(useraddr, &eeprom, sizeof(eeprom)))
+               ret = -EFAULT;
+
        kfree(data);
        return ret;
 }
index 6e1df62bd7c9167dc6e123b20c5af79ce30537c3..0bcdc92502794867b7254bc338044555b6493245 100644 (file)
@@ -140,7 +140,7 @@ static ssize_t dccpprobe_read(struct file *file, char __user *buf,
                goto out_free;
 
        cnt = kfifo_get(dccpw.fifo, tbuf, len);
-       error = copy_to_user(buf, tbuf, cnt);
+       error = copy_to_user(buf, tbuf, cnt) ? -EFAULT : 0;
 
 out_free:
        vfree(tbuf);
index 1c509592574a79d6ee53a6104e8cfe6466058a5c..5ff0ce6e9d39d25538f5c98dff986231fbaebcc9 100644 (file)
@@ -190,19 +190,18 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
 
                width = tcpprobe_sprint(tbuf, sizeof(tbuf));
 
-               if (width < len)
+               if (cnt + width < len)
                        tcp_probe.tail = (tcp_probe.tail + 1) % bufsize;
 
                spin_unlock_bh(&tcp_probe.lock);
 
                /* if record greater than space available
                   return partial buffer (so far) */
-               if (width >= len)
+               if (cnt + width >= len)
                        break;
 
-               error = copy_to_user(buf + cnt, tbuf, width);
-               if (error)
-                       break;
+               if (copy_to_user(buf + cnt, tbuf, width))
+                       return -EFAULT;
                cnt += width;
        }
 
index 42814a2ec9d73f0249b53386e997104cde4dee64..b2c9becc02e8cad4e6453578587cabc6a7701c52 100644 (file)
@@ -167,7 +167,7 @@ config IPV6_SIT
          Tunneling means encapsulating data of one protocol type within
          another protocol and sending it over a channel that understands the
          encapsulating protocol. This driver implements encapsulation of IPv6
-         into IPv4 packets. This is useful if you want to connect two IPv6
+         into IPv4 packets. This is useful if you want to connect to IPv6
          networks over an IPv4-only path.
 
          Saying M here will produce a module called sit.ko. If unsure, say Y.
index 6193b124cbc764bd367df15addead75c07ae0322..396f0ea11090a98d6a07898d0e79d58fb7652313 100644 (file)
@@ -971,6 +971,19 @@ static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
 
        switch (optname) {
                case IPV6_CHECKSUM:
+                       if (inet_sk(sk)->num == IPPROTO_ICMPV6 &&
+                           level == IPPROTO_IPV6) {
+                               /*
+                                * RFC3542 tells that IPV6_CHECKSUM socket
+                                * option in the IPPROTO_IPV6 level is not
+                                * allowed on ICMPv6 sockets.
+                                * If you want to set it, use IPPROTO_RAW
+                                * level IPV6_CHECKSUM socket option
+                                * (Linux extension).
+                                */
+                               return -EINVAL;
+                       }
+
                        /* You may get strange result with a positive odd offset;
                           RFC2292bis agrees with me. */
                        if (val > 0 && (val&1))
@@ -1046,6 +1059,11 @@ static int do_rawv6_getsockopt(struct sock *sk, int level, int optname,
 
        switch (optname) {
        case IPV6_CHECKSUM:
+               /*
+                * We allow getsockopt() for IPPROTO_IPV6-level
+                * IPV6_CHECKSUM socket option on ICMPv6 sockets
+                * since RFC3542 is silent about it.
+                */
                if (rp->checksum == 0)
                        val = -1;
                else
index 81a8e5297ad113be9d1c09de897df283532c9632..2403a31fe0f64ea6de774982e2ba3c6b7ea36a35 100644 (file)
@@ -2356,7 +2356,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, struct sadb_msg
        struct xfrm_selector sel;
        struct km_event c;
        struct sadb_x_sec_ctx *sec_ctx;
-       struct xfrm_sec_ctx *pol_ctx;
+       struct xfrm_sec_ctx *pol_ctx = NULL;
 
        if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC-1],
                                     ext_hdrs[SADB_EXT_ADDRESS_DST-1]) ||
@@ -2396,8 +2396,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, struct sadb_msg
                kfree(uctx);
                if (err)
                        return err;
-       } else
-               pol_ctx = NULL;
+       }
 
        xp = xfrm_policy_bysel_ctx(XFRM_POLICY_TYPE_MAIN,
                                   pol->sadb_x_policy_dir - 1, &sel, pol_ctx,
index 05853159536a03b984708c400808dcf6e26fcb77..230f9ca2ad6b361dd4e78e6a2f1247334e9b9d22 100644 (file)
@@ -1756,8 +1756,8 @@ static int getsockopt(struct socket *sock,
        else if (len < sizeof(value)) {
                res = -EINVAL;
        }
-       else if ((res = copy_to_user(ov, &value, sizeof(value)))) {
-               /* couldn't return value */
+       else if (copy_to_user(ov, &value, sizeof(value))) {
+               res = -EFAULT;
        }
        else {
                res = put_user(sizeof(value), ol);
index 67fb4530a6ff88afb7e3a86945dc2837c87c72f5..277cfe0b71001e69bc67d3eda6407250d07d7ab8 100644 (file)
@@ -27,12 +27,12 @@ ccflags-y  :=
 cppflags-y :=
 ldflags-y  :=
 
-# Read .config if it exist, otherwise ignore
+# Read auto.conf if it exists, otherwise ignore
 -include include/config/auto.conf
 
 include scripts/Kbuild.include
 
-# For backward compatibility check that these variables does not change
+# For backward compatibility check that these variables do not change
 save-cflags := $(CFLAGS)
 
 # The filename Kbuild has precedence over Makefile
@@ -55,7 +55,7 @@ hostprogs-y += $(host-progs)
 endif
 endif
 
-# Do not include host rules unles needed
+# Do not include host rules unless needed
 ifneq ($(hostprogs-y)$(hostprogs-m),)
 include scripts/Makefile.host
 endif
index 2c647107c9ccadef446364a70dc16af552d3b237..6f89fbb56256987bf0c002683f85941ccddc861a 100644 (file)
@@ -37,7 +37,7 @@ subdir-ymn      := $(sort $(subdir-ym) $(subdir-n) $(subdir-))
 
 subdir-ymn     := $(addprefix $(obj)/,$(subdir-ymn))
 
-# build a list of files to remove, usually releative to the current
+# build a list of files to remove, usually relative to the current
 # directory
 
 __clean-files  := $(extra-y) $(always)                  \
index 6943a7a5bb989a9b9c3728fa267c818ab4e52a15..1ac414fd50300384663248c0ac651c9887bfe45d 100644 (file)
@@ -3,9 +3,9 @@
 # Binaries are used during the compilation of the kernel, for example
 # to preprocess a data file.
 #
-# Both C and C++ is supported, but preferred language is C for such utilities.
+# Both C and C++ are supported, but preferred language is C for such utilities.
 #
-# Samle syntax (see Documentation/kbuild/makefile.txt for reference)
+# Sample syntax (see Documentation/kbuild/makefiles.txt for reference)
 # hostprogs-y := bin2hex
 # Will compile bin2hex.c and create an executable named bin2hex
 #
 # hostprogs-y := conf
 # conf-objs  := conf.o libkconfig.so
 # libkconfig-objs := expr.o type.o
-# Will create a shared library named libkconfig.so that consist of
-# expr.o and type.o (they are both compiled as C code and the object file
+# Will create a shared library named libkconfig.so that consists of
+# expr.o and type.o (they are both compiled as C code and the object files
 # are made as position independent code).
-# conf.c is compiled as a c program, and conf.o is linked together with
+# conf.c is compiled as a C program, and conf.o is linked together with
 # libkconfig.so as the executable conf.
 # Note: Shared libraries consisting of C++ files are not supported
 
@@ -61,7 +61,7 @@ host-cobjs    := $(filter-out %.so,$(host-cobjs))
 host-cshobjs   := $(sort $(foreach m,$(host-cshlib),$($(m:.so=-objs))))
 
 # output directory for programs/.o files
-# hostprogs-y := tools/build may have been specified. Retreive directory
+# hostprogs-y := tools/build may have been specified. Retrieve directory
 host-objdirs := $(foreach f,$(__hostprogs), $(if $(dir $(f)),$(dir $(f))))
 # directory of .o files from prog-objs notation
 host-objdirs += $(foreach f,$(host-cmulti),                  \
index 2d20640854b7412bd46156f568a9a29443b6c0aa..24b3c8fe6bca9f7e679bb23045dcf6a24f77b0d3 100644 (file)
@@ -42,6 +42,13 @@ _modpost: __modpost
 
 include include/config/auto.conf
 include scripts/Kbuild.include
+
+ifneq ($(KBUILD_EXTMOD),)
+# Include the module's Makefile to find KBUILD_EXTRA_SYMBOLS
+include $(if $(wildcard $(KBUILD_EXTMOD)/Kbuild), \
+             $(KBUILD_EXTMOD)/Kbuild, $(KBUILD_EXTMOD)/Makefile)
+endif
+
 include scripts/Makefile.lib
 
 kernelsymfile := $(objtree)/Module.symvers
@@ -69,6 +76,7 @@ modpost = scripts/mod/modpost                    \
  $(if $(CONFIG_MODULE_SRCVERSION_ALL),-a,)       \
  $(if $(KBUILD_EXTMOD),-i,-o) $(kernelsymfile)   \
  $(if $(KBUILD_EXTMOD),-I $(modulesymfile))      \
+ $(if $(KBUILD_EXTRA_SYMBOLS), $(patsubst %, -e %,$(KBUILD_EXTRA_SYMBOLS))) \
  $(if $(KBUILD_EXTMOD),-o $(modulesymfile))      \
  $(if $(CONFIG_DEBUG_SECTION_MISMATCH),,-S)      \
  $(if $(CONFIG_MARKERS),-K $(kernelmarkersfile)) \
index 32e8c5a227c3a28bab34514da98e1491d057bef4..fa1a7d565903075ef39d12ba69ecf580b3c37de9 100644 (file)
@@ -36,10 +36,10 @@ update-po-config: $(obj)/kxgettext $(obj)/gconf.glade.h
            --output $(obj)/config.pot
        $(Q)sed -i s/CHARSET/UTF-8/ $(obj)/config.pot
        $(Q)ln -fs Kconfig.i386 arch/um/Kconfig.arch
-       $(Q)(for i in `ls arch/`;                        \
+       $(Q)(for i in `ls arch/*/Kconfig`;               \
            do                                           \
                echo "  GEN $$i";                        \
-               $(obj)/kxgettext arch/$$i/Kconfig        \
+               $(obj)/kxgettext $$i                     \
                     >> $(obj)/config.pot;               \
            done )
        $(Q)msguniq --sort-by-file --to-code=UTF-8 $(obj)/config.pot \
index 110cf243fa4e56d3749781c7b4703bf4c62ae9b7..f8b42ab0724be194a258a00e4e24f814ad3e7475 100644 (file)
@@ -1552,6 +1552,10 @@ static void read_symbols(char *modname)
        }
 
        license = get_modinfo(info.modinfo, info.modinfo_len, "license");
+       if (!license && !is_vmlinux(modname))
+               fatal("modpost: missing MODULE_LICENSE() in %s\n"
+                     "see include/linux/module.h for "
+                     "more information\n", modname);
        while (license) {
                if (license_is_gpl_compatible(license))
                        mod->gpl_compatible = 1;
@@ -2015,6 +2019,11 @@ static void write_markers(const char *fname)
        write_if_changed(&buf, fname);
 }
 
+struct ext_sym_list {
+       struct ext_sym_list *next;
+       const char *file;
+};
+
 int main(int argc, char **argv)
 {
        struct module *mod;
@@ -2025,8 +2034,10 @@ int main(int argc, char **argv)
        char *markers_write = NULL;
        int opt;
        int err;
+       struct ext_sym_list *extsym_iter;
+       struct ext_sym_list *extsym_start = NULL;
 
-       while ((opt = getopt(argc, argv, "i:I:cmsSo:awM:K:")) != -1) {
+       while ((opt = getopt(argc, argv, "i:I:e:cmsSo:awM:K:")) != -1) {
                switch (opt) {
                case 'i':
                        kernel_read = optarg;
@@ -2038,6 +2049,14 @@ int main(int argc, char **argv)
                case 'c':
                        cross_build = 1;
                        break;
+               case 'e':
+                       external_module = 1;
+                       extsym_iter =
+                          NOFAIL(malloc(sizeof(*extsym_iter)));
+                       extsym_iter->next = extsym_start;
+                       extsym_iter->file = optarg;
+                       extsym_start = extsym_iter;
+                       break;
                case 'm':
                        modversions = 1;
                        break;
@@ -2071,6 +2090,12 @@ int main(int argc, char **argv)
                read_dump(kernel_read, 1);
        if (module_read)
                read_dump(module_read, 0);
+       while (extsym_start) {
+               read_dump(extsym_start->file, 0);
+               extsym_iter = extsym_start->next;
+               free(extsym_start);
+               extsym_start = extsym_iter;
+       }
 
        while (optind < argc)
                read_symbols(argv[optind++]);