Merge master.kernel.org:/home/rmk/linux-2.6-mmc
author Linus Torvalds <torvalds@g5.osdl.org>
Fri, 28 Oct 2005 16:25:21 +0000 (09:25 -0700)
committer Linus Torvalds <torvalds@g5.osdl.org>
Fri, 28 Oct 2005 16:25:21 +0000 (09:25 -0700)
590 files changed:
Documentation/DocBook/libata.tmpl
Documentation/block/biodoc.txt
Documentation/networking/bonding.txt
Makefile
arch/alpha/kernel/pci-noop.c
arch/alpha/kernel/pci_iommu.c
arch/arm/Kconfig
arch/arm/boot/compressed/head.S
arch/arm/configs/mp1000_defconfig [new file with mode: 0644]
arch/arm/kernel/module.c
arch/arm/kernel/traps.c
arch/arm/lib/Makefile
arch/arm/lib/sha1.S [new file with mode: 0644]
arch/arm/mach-aaec2000/Makefile
arch/arm/mach-aaec2000/aaed2000.c
arch/arm/mach-aaec2000/clock.c [new file with mode: 0644]
arch/arm/mach-aaec2000/clock.h [new file with mode: 0644]
arch/arm/mach-aaec2000/core.c
arch/arm/mach-aaec2000/core.h
arch/arm/mach-clps711x/Kconfig
arch/arm/mach-clps711x/Makefile
arch/arm/mach-clps711x/autcpu12.c
arch/arm/mach-clps711x/cdb89712.c
arch/arm/mach-clps711x/ceiva.c
arch/arm/mach-clps711x/edb7211-mm.c
arch/arm/mach-clps711x/mm.c
arch/arm/mach-clps711x/mp1000-mach.c [new file with mode: 0644]
arch/arm/mach-clps711x/mp1000-mm.c [new file with mode: 0644]
arch/arm/mach-clps711x/mp1000-seprom.c [new file with mode: 0644]
arch/arm/mach-clps711x/p720t.c
arch/arm/mach-clps7500/core.c
arch/arm/mach-ebsa110/core.c
arch/arm/mach-ebsa110/io.c
arch/arm/mach-epxa10db/mm.c
arch/arm/mach-footbridge/common.c
arch/arm/mach-h720x/common.c
arch/arm/mach-imx/generic.c
arch/arm/mach-imx/mx1ads.c
arch/arm/mach-integrator/integrator_ap.c
arch/arm/mach-integrator/integrator_cp.c
arch/arm/mach-iop3xx/iop321-setup.c
arch/arm/mach-iop3xx/iop331-setup.c
arch/arm/mach-iop3xx/iq31244-mm.c
arch/arm/mach-iop3xx/iq80321-mm.c
arch/arm/mach-ixp2000/core.c
arch/arm/mach-ixp2000/ixdp2x00.c
arch/arm/mach-ixp2000/ixdp2x01.c
arch/arm/mach-ixp4xx/common.c
arch/arm/mach-lh7a40x/arch-kev7a400.c
arch/arm/mach-lh7a40x/arch-lpd7a40x.c
arch/arm/mach-omap1/board-innovator.c
arch/arm/mach-omap1/board-perseus2.c
arch/arm/mach-omap1/io.c
arch/arm/mach-pxa/generic.c
arch/arm/mach-pxa/idp.c
arch/arm/mach-pxa/lubbock.c
arch/arm/mach-pxa/mainstone.c
arch/arm/mach-pxa/pxa25x.c
arch/arm/mach-pxa/pxa27x.c
arch/arm/mach-pxa/sleep.S
arch/arm/mach-pxa/standby.S
arch/arm/mach-rpc/riscpc.c
arch/arm/mach-s3c2410/cpu.h
arch/arm/mach-s3c2410/devs.c
arch/arm/mach-s3c2410/gpio.c
arch/arm/mach-s3c2410/mach-bast.c
arch/arm/mach-s3c2410/mach-h1940.c
arch/arm/mach-s3c2410/mach-smdk2440.c
arch/arm/mach-sa1100/assabet.c
arch/arm/mach-sa1100/badge4.c
arch/arm/mach-sa1100/cerf.c
arch/arm/mach-sa1100/collie.c
arch/arm/mach-sa1100/generic.c
arch/arm/mach-sa1100/h3600.c
arch/arm/mach-sa1100/hackkit.c
arch/arm/mach-sa1100/jornada720.c
arch/arm/mach-sa1100/lart.c
arch/arm/mach-sa1100/neponset.c
arch/arm/mach-sa1100/simpad.c
arch/arm/mach-shark/core.c
arch/arm/mach-versatile/core.c
arch/arm/mm/consistent.c
arch/arm/mm/init.c
arch/arm/mm/ioremap.c
arch/arm/mm/mm-armv.c
arch/arm/oprofile/Makefile
arch/arm/oprofile/common.c
arch/arm/oprofile/init.c [deleted file]
arch/arm/oprofile/op_arm_model.h
arch/arm/plat-omap/sram.c
arch/frv/mb93090-mb00/pci-dma-nommu.c
arch/frv/mb93090-mb00/pci-dma.c
arch/frv/mm/dma-alloc.c
arch/ia64/hp/common/hwsw_iommu.c
arch/ia64/hp/common/sba_iommu.c
arch/ia64/lib/swiotlb.c
arch/ia64/sn/kernel/xpc.h
arch/ia64/sn/pci/pci_dma.c
arch/mips/mm/dma-coherent.c
arch/mips/mm/dma-ip27.c
arch/mips/mm/dma-ip32.c
arch/mips/mm/dma-noncoherent.c
arch/parisc/kernel/pci-dma.c
arch/ppc/8xx_io/cs4218.h
arch/ppc/8xx_io/cs4218_tdm.c
arch/ppc/kernel/dma-mapping.c
arch/ppc/mm/pgtable.c
arch/sh/boards/renesas/rts7751r2d/mach.c
arch/sh/cchips/voyagergx/consistent.c
arch/sh/drivers/pci/dma-dreamcast.c
arch/sh/mm/consistent.c
arch/sparc64/solaris/socksys.c
arch/sparc64/solaris/timod.c
arch/um/kernel/mem.c
arch/um/kernel/process_kern.c
arch/x86_64/kernel/pci-gart.c
arch/x86_64/kernel/pci-nommu.c
arch/xtensa/kernel/pci-dma.c
drivers/block/as-iosched.c
drivers/block/cfq-iosched.c
drivers/block/deadline-iosched.c
drivers/block/elevator.c
drivers/block/ll_rw_blk.c
drivers/block/loop.c
drivers/block/noop-iosched.c
drivers/block/rd.c
drivers/block/sx8.c
drivers/char/n_tty.c
drivers/char/nvram.c
drivers/ieee1394/eth1394.c
drivers/infiniband/hw/mthca/mthca_cmd.c
drivers/infiniband/hw/mthca/mthca_cmd.h
drivers/infiniband/hw/mthca/mthca_memfree.c
drivers/infiniband/hw/mthca/mthca_memfree.h
drivers/md/bitmap.c
drivers/md/dm-crypt.c
drivers/mmc/mmci.c
drivers/mtd/maps/sa1100-flash.c
drivers/net/8139cp.c
drivers/net/8139too.c
drivers/net/Kconfig
drivers/net/Makefile
drivers/net/arm/am79c961a.c
drivers/net/au1000_eth.c
drivers/net/b44.c
drivers/net/b44.h
drivers/net/bonding/bond_main.c
drivers/net/cassini.c
drivers/net/cs89x0.c
drivers/net/cs89x0.h
drivers/net/declance.c
drivers/net/e100.c
drivers/net/e1000/e1000.h
drivers/net/e1000/e1000_ethtool.c
drivers/net/e1000/e1000_hw.c
drivers/net/e1000/e1000_hw.h
drivers/net/e1000/e1000_main.c
drivers/net/e1000/e1000_param.c
drivers/net/epic100.c
drivers/net/forcedeth.c
drivers/net/gianfar.c
drivers/net/gianfar.h
drivers/net/gianfar_ethtool.c
drivers/net/gianfar_mii.c [new file with mode: 0644]
drivers/net/gianfar_mii.h [new file with mode: 0644]
drivers/net/gianfar_phy.c [deleted file]
drivers/net/gianfar_phy.h [deleted file]
drivers/net/hamradio/Kconfig
drivers/net/hamradio/bpqether.c
drivers/net/hamradio/mkiss.c
drivers/net/hamradio/mkiss.h [deleted file]
drivers/net/hp100.c
drivers/net/irda/Kconfig
drivers/net/irda/Makefile
drivers/net/irda/pxaficp_ir.c [new file with mode: 0644]
drivers/net/irda/stir4200.c
drivers/net/ixgb/ixgb_ethtool.c
drivers/net/ixgb/ixgb_main.c
drivers/net/lance.c
drivers/net/lne390.c
drivers/net/mii.c
drivers/net/mipsnet.c [new file with mode: 0644]
drivers/net/mipsnet.h [new file with mode: 0644]
drivers/net/myri_sbus.c
drivers/net/myri_sbus.h
drivers/net/ne.c
drivers/net/ne2k-pci.c
drivers/net/ns83820.c
drivers/net/pcnet32.c
drivers/net/phy/Kconfig
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/r8169.c
drivers/net/rionet.c [new file with mode: 0644]
drivers/net/s2io-regs.h
drivers/net/s2io.c
drivers/net/s2io.h
drivers/net/sb1250-mac.c
drivers/net/sgiseeq.c
drivers/net/skge.c
drivers/net/sunbmac.c
drivers/net/sunbmac.h
drivers/net/sundance.c
drivers/net/tokenring/ibmtr.c
drivers/net/tokenring/olympic.c
drivers/net/tokenring/tms380tr.c
drivers/net/tulip/de2104x.c
drivers/net/typhoon.c
drivers/net/via-rhine.c
drivers/net/wan/cosa.c
drivers/net/wan/cycx_drv.c
drivers/net/wan/cycx_main.c
drivers/net/wan/cycx_x25.c
drivers/net/wan/dscc4.c
drivers/net/wan/farsync.c
drivers/net/wan/hdlc_fr.c
drivers/net/wan/lmc/lmc_debug.c
drivers/net/wan/lmc/lmc_media.c
drivers/net/wan/pc300.h
drivers/net/wan/pc300_drv.c
drivers/net/wan/pc300_tty.c
drivers/net/wan/sdla.c
drivers/net/wan/sdla_fr.c
drivers/net/wan/sdla_x25.c
drivers/net/wan/sdladrv.c
drivers/net/wan/syncppp.c
drivers/net/wireless/airo.c
drivers/net/wireless/airport.c
drivers/net/wireless/atmel.c
drivers/net/wireless/hermes.c
drivers/net/wireless/hermes.h
drivers/net/wireless/hostap/hostap.c
drivers/net/wireless/hostap/hostap_80211_rx.c
drivers/net/wireless/hostap/hostap_80211_tx.c
drivers/net/wireless/hostap/hostap_ap.c
drivers/net/wireless/hostap/hostap_ap.h
drivers/net/wireless/hostap/hostap_cs.c
drivers/net/wireless/hostap/hostap_hw.c
drivers/net/wireless/hostap/hostap_ioctl.c
drivers/net/wireless/hostap/hostap_pci.c
drivers/net/wireless/hostap/hostap_plx.c
drivers/net/wireless/hostap/hostap_wlan.h
drivers/net/wireless/ipw2100.c
drivers/net/wireless/ipw2100.h
drivers/net/wireless/ipw2200.c
drivers/net/wireless/ipw2200.h
drivers/net/wireless/netwave_cs.c
drivers/net/wireless/orinoco.c
drivers/net/wireless/orinoco.h
drivers/net/wireless/orinoco_cs.c
drivers/net/wireless/orinoco_nortel.c
drivers/net/wireless/orinoco_pci.c
drivers/net/wireless/orinoco_plx.c
drivers/net/wireless/orinoco_tmd.c
drivers/net/wireless/prism54/isl_ioctl.c
drivers/net/wireless/prism54/islpci_dev.c
drivers/net/wireless/prism54/islpci_dev.h
drivers/net/wireless/prism54/islpci_mgt.c
drivers/net/wireless/ray_cs.c
drivers/net/wireless/spectrum_cs.c
drivers/net/wireless/wavelan.c
drivers/net/wireless/wavelan.p.h
drivers/net/wireless/wavelan_cs.c
drivers/net/wireless/wavelan_cs.p.h
drivers/net/wireless/wl3501.h
drivers/parisc/ccio-dma.c
drivers/parisc/sba_iommu.c
drivers/pcmcia/sa1111_generic.c
drivers/s390/net/fsm.c
drivers/s390/net/fsm.h
drivers/s390/net/qeth.h
drivers/s390/net/qeth_fs.h
drivers/s390/net/qeth_main.c
drivers/s390/net/qeth_mpc.c
drivers/s390/net/qeth_mpc.h
drivers/s390/net/qeth_sys.c
drivers/scsi/Kconfig
drivers/scsi/Makefile
drivers/scsi/ahci.c
drivers/scsi/ata_piix.c
drivers/scsi/eata.c
drivers/scsi/hosts.c
drivers/scsi/libata-core.c
drivers/scsi/libata-scsi.c
drivers/scsi/libata.h
drivers/scsi/lpfc/lpfc_mem.c
drivers/scsi/osst.c
drivers/scsi/pdc_adma.c [new file with mode: 0644]
drivers/scsi/qla2xxx/qla_gbl.h
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_rscn.c
drivers/scsi/sata_mv.c
drivers/scsi/sata_nv.c
drivers/scsi/sata_promise.c
drivers/scsi/sata_qstor.c
drivers/scsi/sata_sil.c
drivers/scsi/sata_sil24.c [new file with mode: 0644]
drivers/scsi/sata_sis.c
drivers/scsi/sata_svw.c
drivers/scsi/sata_sx4.c
drivers/scsi/sata_uli.c
drivers/scsi/sata_via.c
drivers/scsi/sata_vsc.c
drivers/scsi/scsi.c
drivers/scsi/scsi_ioctl.c
drivers/scsi/scsi_lib.c
drivers/scsi/sg.c
drivers/scsi/st.c
drivers/serial/amba-pl010.c
drivers/serial/amba-pl011.c
drivers/serial/clps711x.c
drivers/serial/pxa.c
drivers/usb/core/buffer.c
drivers/usb/core/hcd.c
drivers/usb/core/hcd.h
drivers/usb/core/message.c
drivers/usb/core/urb.c
drivers/usb/core/usb.c
drivers/usb/gadget/dummy_hcd.c
drivers/usb/gadget/ether.c
drivers/usb/gadget/goku_udc.c
drivers/usb/gadget/lh7a40x_udc.c
drivers/usb/gadget/net2280.c
drivers/usb/gadget/omap_udc.c
drivers/usb/gadget/pxa2xx_udc.c
drivers/usb/gadget/pxa2xx_udc.h
drivers/usb/gadget/serial.c
drivers/usb/gadget/zero.c
drivers/usb/host/ehci-hcd.c
drivers/usb/host/ehci-mem.c
drivers/usb/host/ehci-q.c
drivers/usb/host/ehci-sched.c
drivers/usb/host/isp116x-hcd.c
drivers/usb/host/ohci-hcd.c
drivers/usb/host/ohci-mem.c
drivers/usb/host/sl811-hcd.c
drivers/usb/host/uhci-q.c
drivers/usb/misc/uss720.c
drivers/usb/net/asix.c
drivers/usb/net/gl620a.c
drivers/usb/net/kaweth.c
drivers/usb/net/net1080.c
drivers/usb/net/rndis_host.c
drivers/usb/net/usbnet.c
drivers/usb/net/usbnet.h
drivers/usb/net/zaurus.c
drivers/usb/net/zd1201.c
drivers/video/amba-clcd.c
fs/afs/file.c
fs/bio.c
fs/buffer.c
fs/dcache.c
fs/dquot.c
fs/exec.c
fs/ext3/inode.c
fs/hfs/inode.c
fs/hfsplus/inode.c
fs/inode.c
fs/jbd/journal.c
fs/jbd/transaction.c
fs/jfs/jfs_metapage.c
fs/lockd/host.c
fs/locks.c
fs/mbcache.c
fs/namei.c
fs/nfs/delegation.c
fs/nfs/delegation.h
fs/nfs/dir.c
fs/nfs/file.c
fs/nfs/inode.c
fs/nfs/nfs2xdr.c
fs/nfs/nfs3proc.c
fs/nfs/nfs3xdr.c
fs/nfs/nfs4_fs.h
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/nfs/nfs4xdr.c
fs/nfs/proc.c
fs/nfs/read.c
fs/nfs/write.c
fs/open.c
fs/partitions/check.c
fs/reiserfs/fix_node.c
fs/reiserfs/inode.c
fs/reiserfs/xattr.c
fs/xfs/linux-2.6/kmem.c
fs/xfs/linux-2.6/kmem.h
fs/xfs/linux-2.6/xfs_aops.c
fs/xfs/linux-2.6/xfs_buf.c
include/asm-alpha/dma-mapping.h
include/asm-arm/arch-aaec2000/aaec2000.h
include/asm-arm/arch-aaec2000/aaed2000.h [new file with mode: 0644]
include/asm-arm/arch-aaec2000/hardware.h
include/asm-arm/arch-aaec2000/io.h
include/asm-arm/arch-cl7500/io.h
include/asm-arm/arch-clps711x/hardware.h
include/asm-arm/arch-clps711x/io.h
include/asm-arm/arch-clps711x/mp1000-seprom.h [new file with mode: 0644]
include/asm-arm/arch-ebsa285/io.h
include/asm-arm/arch-epxa10db/io.h
include/asm-arm/arch-h720x/io.h
include/asm-arm/arch-imx/io.h
include/asm-arm/arch-integrator/hardware.h
include/asm-arm/arch-integrator/io.h
include/asm-arm/arch-iop3xx/io.h
include/asm-arm/arch-ixp2000/io.h
include/asm-arm/arch-ixp2000/ixp2000-regs.h
include/asm-arm/arch-l7200/io.h
include/asm-arm/arch-lh7a40x/io.h
include/asm-arm/arch-omap/io.h
include/asm-arm/arch-pxa/hardware.h
include/asm-arm/arch-pxa/io.h
include/asm-arm/arch-pxa/irda.h [new file with mode: 0644]
include/asm-arm/arch-pxa/pxa-regs.h
include/asm-arm/arch-pxa/uncompress.h
include/asm-arm/arch-rpc/io.h
include/asm-arm/arch-s3c2410/fb.h
include/asm-arm/arch-s3c2410/io.h
include/asm-arm/arch-s3c2410/regs-gpio.h
include/asm-arm/arch-sa1100/hardware.h
include/asm-arm/arch-sa1100/io.h
include/asm-arm/arch-sa1100/system.h
include/asm-arm/arch-shark/io.h
include/asm-arm/dma-mapping.h
include/asm-arm/io.h
include/asm-arm/mach/arch.h
include/asm-arm/mach/map.h
include/asm-cris/dma-mapping.h
include/asm-frv/dma-mapping.h
include/asm-frv/pci.h
include/asm-generic/dma-mapping-broken.h
include/asm-ia64/machvec.h
include/asm-m32r/dma-mapping.h
include/asm-mips/dma-mapping.h
include/asm-mips/sgi/hpc3.h
include/asm-parisc/dma-mapping.h
include/asm-ppc/dma-mapping.h
include/asm-sh/dma-mapping.h
include/asm-sh/machvec.h
include/asm-sh64/dma-mapping.h
include/asm-sparc/dma-mapping.h
include/asm-sparc64/dma-mapping.h
include/asm-um/dma-mapping.h
include/asm-um/page.h
include/asm-x86_64/dma-mapping.h
include/asm-x86_64/swiotlb.h
include/asm-xtensa/dma-mapping.h
include/linux/ata.h
include/linux/audit.h
include/linux/bio.h
include/linux/blkdev.h
include/linux/buffer_head.h
include/linux/cyclomx.h
include/linux/cycx_drv.h
include/linux/elevator.h
include/linux/fs.h
include/linux/genhd.h
include/linux/gfp.h
include/linux/i2o.h
include/linux/ibmtr.h
include/linux/idr.h
include/linux/if_arp.h
include/linux/jbd.h
include/linux/kobject.h
include/linux/libata.h
include/linux/loop.h
include/linux/mbcache.h
include/linux/mii.h
include/linux/mm.h
include/linux/mmzone.h
include/linux/namei.h
include/linux/netdevice.h
include/linux/nfs_fs.h
include/linux/nfs_xdr.h
include/linux/pagemap.h
include/linux/radix-tree.h
include/linux/reiserfs_fs.h
include/linux/sdladrv.h
include/linux/security.h
include/linux/skbuff.h
include/linux/slab.h
include/linux/sunrpc/auth.h
include/linux/sunrpc/debug.h
include/linux/sunrpc/gss_api.h
include/linux/sunrpc/gss_err.h
include/linux/sunrpc/gss_krb5.h
include/linux/sunrpc/gss_spkm3.h
include/linux/sunrpc/msg_prot.h
include/linux/sunrpc/xdr.h
include/linux/sunrpc/xprt.h
include/linux/suspend.h
include/linux/swap.h
include/linux/textsearch.h
include/linux/types.h
include/linux/usb.h
include/linux/usb_gadget.h
include/linux/wanpipe.h
include/net/dst.h
include/net/ieee80211.h
include/net/ieee80211_crypt.h
include/net/ieee80211_radiotap.h [new file with mode: 0644]
include/net/sock.h
include/net/syncppp.h
include/scsi/scsi_cmnd.h
include/scsi/scsi_request.h
include/sound/memalloc.h
kernel/audit.c
kernel/auditsc.c
kernel/kexec.c
kernel/power/swsusp.c
lib/idr.c
lib/kobject.c
lib/kobject_uevent.c
lib/textsearch.c
mm/filemap.c
mm/highmem.c
mm/mempolicy.c
mm/mempool.c
mm/page_alloc.c
mm/shmem.c
mm/slab.c
mm/vmscan.c
net/core/neighbour.c
net/core/pktgen.c
net/core/skbuff.c
net/core/sock.c
net/dccp/output.c
net/decnet/af_decnet.c
net/ieee80211/Makefile
net/ieee80211/ieee80211_crypt.c
net/ieee80211/ieee80211_crypt_ccmp.c
net/ieee80211/ieee80211_crypt_tkip.c
net/ieee80211/ieee80211_crypt_wep.c
net/ieee80211/ieee80211_geo.c [new file with mode: 0644]
net/ieee80211/ieee80211_module.c
net/ieee80211/ieee80211_rx.c
net/ieee80211/ieee80211_tx.c
net/ieee80211/ieee80211_wx.c
net/ipv4/devinet.c
net/ipv4/fib_trie.c
net/ipv4/icmp.c
net/ipv4/ip_output.c
net/ipv4/netfilter/ip_conntrack_core.c
net/ipv4/proc.c
net/ipv6/icmp.c
net/ipv6/proc.c
net/netlink/af_netlink.c
net/rose/rose_route.c
net/sctp/proc.c
net/sunrpc/Makefile
net/sunrpc/auth.c
net/sunrpc/auth_gss/Makefile
net/sunrpc/auth_gss/auth_gss.c
net/sunrpc/auth_gss/gss_krb5_crypto.c
net/sunrpc/auth_gss/gss_krb5_mech.c
net/sunrpc/auth_gss/gss_krb5_seal.c
net/sunrpc/auth_gss/gss_krb5_unseal.c
net/sunrpc/auth_gss/gss_krb5_wrap.c [new file with mode: 0644]
net/sunrpc/auth_gss/gss_mech_switch.c
net/sunrpc/auth_gss/gss_spkm3_mech.c
net/sunrpc/auth_gss/gss_spkm3_seal.c
net/sunrpc/auth_gss/gss_spkm3_unseal.c
net/sunrpc/auth_gss/svcauth_gss.c
net/sunrpc/auth_null.c
net/sunrpc/auth_unix.c
net/sunrpc/clnt.c
net/sunrpc/pmap_clnt.c
net/sunrpc/rpc_pipe.c
net/sunrpc/socklib.c [new file with mode: 0644]
net/sunrpc/sunrpc_syms.c
net/sunrpc/svcsock.c
net/sunrpc/sysctl.c
net/sunrpc/xdr.c
net/sunrpc/xprt.c
net/sunrpc/xprtsock.c [new file with mode: 0644]
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_state.c
security/dummy.c
security/selinux/hooks.c
sound/arm/aaci.c
sound/core/memalloc.c
sound/core/seq/instr/ainstr_gf1.c
sound/core/seq/instr/ainstr_iw.c
sound/core/seq/instr/ainstr_simple.c
sound/oss/dmasound/dmasound.h
sound/oss/dmasound/dmasound_atari.c
sound/oss/dmasound/dmasound_awacs.c
sound/oss/dmasound/dmasound_paula.c
sound/oss/dmasound/dmasound_q40.c
sound/usb/usbmidi.c

Documentation/DocBook/libata.tmpl
index 375ae760dc1ed1112d47874adde4de738ba5079e..d260d92089ade348879bda74b2bf9ae48714d110 100644
@@ -415,6 +415,362 @@ and other resources, etc.
      </sect1>
   </chapter>
 
+  <chapter id="libataEH">
+        <title>Error handling</title>
+
+       <para>
+       This chapter describes how errors are handled under libata.
+       Readers are advised to read SCSI EH
+       (Documentation/scsi/scsi_eh.txt) and ATA exceptions doc first.
+       </para>
+
+       <sect1><title>Origins of commands</title>
+       <para>
+       In libata, a command is represented by struct ata_queued_cmd,
+       or qc.  qc's are preallocated during port initialization and
+       reused repeatedly for command execution.  Currently only one
+       qc is allocated per port, but the yet-to-be-merged NCQ branch
+       allocates one for each tag and maps each qc to an NCQ tag 1-to-1.
+       </para>
+       <para>
+       libata commands can originate from two sources - libata itself
+       and the SCSI midlayer.  libata internal commands are used for
+       initialization and error handling.  All normal blk requests
+       and commands for SCSI emulation are passed as SCSI commands
+       through the queuecommand callback of the SCSI host template.
+       </para>
+       </sect1>
+
+       <sect1><title>How commands are issued</title>
+
+       <variablelist>
+
+       <varlistentry><term>Internal commands</term>
+       <listitem>
+       <para>
+       First, qc is allocated and initialized using
+       ata_qc_new_init().  Although ata_qc_new_init() doesn't
+       implement any wait or retry mechanism when qc is not
+       available, internal commands are currently issued only during
+       initialization and error recovery, so no other command is
+       active and allocation is guaranteed to succeed.
+       </para>
+       <para>
+       Once allocated, the qc's taskfile is initialized for the command
+       to be executed.  qc currently has two mechanisms to notify
+       completion.  One is via the qc->complete_fn() callback and the
+       other is the completion qc->waiting.  The qc->complete_fn()
+       callback is the asynchronous path used by normal SCSI translated
+       commands and qc->waiting is the synchronous (issuer sleeps in
+       process context) path used by internal commands.
+       </para>
+       <para>
+       Once initialization is complete, host_set lock is acquired
+       and the qc is issued.
+       </para>
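+       <para>
+       The flow above can be pictured with the following simplified C
+       fragment.  It is only a sketch of the sequence described in this
+       section, not the actual libata code; apart from ata_qc_new_init(),
+       ata_qc_issue(), qc->waiting and the host_set lock, the helper
+       name is made up for illustration, and ap/dev are assumed to be
+       already known.
+       </para>
+
+       <programlisting>
+/* Sketch only -- not the actual libata code. */
+DECLARE_COMPLETION(wait);
+struct ata_queued_cmd *qc;
+unsigned long flags;
+
+qc = ata_qc_new_init(ap, dev);      /* only init/EH issue internal commands,
+                                     * so allocation is guaranteed to succeed */
+setup_internal_taskfile(qc);        /* hypothetical helper: fill the taskfile */
+qc->waiting = &amp;wait;               /* synchronous completion path */
+
+spin_lock_irqsave(&amp;ap->host_set->lock, flags);
+ata_qc_issue(qc);                   /* issue while holding host_set lock */
+spin_unlock_irqrestore(&amp;ap->host_set->lock, flags);
+
+wait_for_completion(&amp;wait);         /* issuer sleeps in process context */
+       </programlisting>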
+       </listitem>
+       </varlistentry>
+
+       <varlistentry><term>SCSI commands</term>
+       <listitem>
+       <para>
+       All libata drivers use ata_scsi_queuecmd() as
+       hostt->queuecommand callback.  scmds can either be simulated
+       or translated.  No qc is involved in processing a simulated
+       scmd.  The result is computed right away and the scmd is
+       completed.
+       </para>
+       <para>
+       For a translated scmd, ata_qc_new_init() is invoked to
+       allocate a qc and the scmd is translated into the qc.  SCSI
+       midlayer's completion notification function pointer is stored
+       into qc->scsidone.
+       </para>
+       <para>
+       qc->complete_fn() callback is used for completion
+       notification.  ATA commands use ata_scsi_qc_complete() while
+       ATAPI commands use atapi_qc_complete().  Both functions end up
+       calling qc->scsidone to notify upper layer when the qc is
+       finished.  After translation is completed, the qc is issued
+       with ata_qc_issue().
+       </para>
+       <para>
+       Note that the SCSI midlayer invokes hostt->queuecommand while
+       holding the host_set lock, so all of the above occurs while
+       holding the host_set lock.
+       </para>
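+       <para>
+       The translated-command path can be sketched as follows.  This is
+       a hedged illustration, not the body of ata_scsi_queuecmd(); the
+       wrapper and translation helper names are invented, while
+       ata_qc_new_init(), qc->scsidone, ata_scsi_qc_complete(),
+       atapi_qc_complete() and ata_qc_issue() come from the text above.
+       </para>
+
+       <programlisting>
+/* Sketch only -- names other than those mentioned in the text are
+ * hypothetical stand-ins. */
+static int queue_translated_scmd(struct ata_port *ap, struct ata_device *dev,
+                                 struct scsi_cmnd *scmd,
+                                 void (*done)(struct scsi_cmnd *))
+{
+       struct ata_queued_cmd *qc;
+
+       qc = ata_qc_new_init(ap, dev);
+       if (!qc)
+               return 1;                       /* ask the midlayer to retry later */
+
+       qc->scsidone = done;                    /* SCSI midlayer completion callback */
+       translate_scmd_to_qc(scmd, qc);         /* hypothetical: build the taskfile */
+       qc->complete_fn = ata_scsi_qc_complete; /* atapi_qc_complete() for ATAPI */
+
+       ata_qc_issue(qc);                       /* host_set lock held by the midlayer */
+       return 0;
+}
+       </programlisting>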
+       </listitem>
+       </varlistentry>
+
+       </variablelist>
+       </sect1>
+
+       <sect1><title>How commands are processed</title>
+       <para>
+       Depending on which protocol and which controller are used,
+       commands are processed differently.  For the purpose of
+       discussion, a controller which uses taskfile interface and all
+       standard callbacks is assumed.
+       </para>
+       <para>
+       Currently 6 ATA command protocols are used.  They can be
+       sorted into the following four categories according to how
+       they are processed.
+       </para>
+
+       <variablelist>
+          <varlistentry><term>ATA NO DATA or DMA</term>
+          <listitem>
+          <para>
+          ATA_PROT_NODATA and ATA_PROT_DMA fall into this category.
+          These types of commands don't require any software
+          intervention once issued.  Device will raise interrupt on
+          completion.
+          </para>
+          </listitem>
+          </varlistentry>
+
+          <varlistentry><term>ATA PIO</term>
+          <listitem>
+          <para>
+          ATA_PROT_PIO is in this category.  libata currently
+          implements PIO with polling.  ATA_NIEN bit is set to turn
+          off interrupt and pio_task on ata_wq performs polling and
+          IO.
+          </para>
+          </listitem>
+          </varlistentry>
+
+          <varlistentry><term>ATAPI NODATA or DMA</term>
+          <listitem>
+          <para>
+          ATA_PROT_ATAPI_NODATA and ATA_PROT_ATAPI_DMA are in this
+          category.  packet_task is used to poll BSY bit after
+          issuing PACKET command.  Once BSY is turned off by the
+          device, packet_task transfers CDB and hands off processing
+          to interrupt handler.
+          </para>
+          </listitem>
+          </varlistentry>
+
+          <varlistentry><term>ATAPI PIO</term>
+          <listitem>
+          <para>
+          ATA_PROT_ATAPI is in this category.  ATA_NIEN bit is set
+          and, as in ATAPI NODATA or DMA, packet_task submits the CDB.
+          However, after submitting the CDB, further processing (data
+          transfer) is handed off to pio_task.
+          </para>
+          </listitem>
+          </varlistentry>
+       </variablelist>
+        </sect1>
+
+       <sect1><title>How commands are completed</title>
+       <para>
+       Once issued, all qc's are either completed with
+       ata_qc_complete() or time out.  For commands which are handled
+       by interrupts, ata_host_intr() invokes ata_qc_complete(), and,
+       for PIO tasks, pio_task invokes ata_qc_complete().  In error
+       cases, packet_task may also complete commands.
+       </para>
+       <para>
+       ata_qc_complete() does the following.
+       </para>
+
+       <orderedlist>
+
+       <listitem>
+       <para>
+       DMA memory is unmapped.
+       </para>
+       </listitem>
+
+       <listitem>
+       <para>
+       ATA_QCFLAG_ACTIVE is cleared from qc->flags.
+       </para>
+       </listitem>
+
+       <listitem>
+       <para>
+       qc->complete_fn() callback is invoked.  If the return value of
+       the callback is not zero, completion is short-circuited and
+       ata_qc_complete() returns.
+       </para>
+       </listitem>
+
+       <listitem>
+       <para>
+       __ata_qc_complete() is called, which does
+          <orderedlist>
+
+          <listitem>
+          <para>
+          qc->flags is cleared to zero.
+          </para>
+          </listitem>
+
+          <listitem>
+          <para>
+          ap->active_tag and qc->tag are poisoned.
+          </para>
+          </listitem>
+
+          <listitem>
+          <para>
+          qc->waiting is cleared &amp; completed (in that order).
+          </para>
+          </listitem>
+
+          <listitem>
+          <para>
+          qc is deallocated by clearing the appropriate bit in ap->qactive.
+          </para>
+          </listitem>
+
+          </orderedlist>
+       </para>
+       </listitem>
+
+       </orderedlist>
+
+       <para>
+       So, it basically notifies the upper layer and deallocates the
+       qc.  One exception is the short-circuit path in step 3, which
+       is used by atapi_qc_complete().
+       </para>
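+       <para>
+       Put together, the completion path looks roughly like the sketch
+       below.  It only restates the numbered steps above in C form; the
+       drv_stat parameter and the unmap_dma() helper are assumptions,
+       not the real implementation.
+       </para>
+
+       <programlisting>
+/* Condensed sketch of the steps listed above -- not the real body. */
+void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
+{
+       unmap_dma(qc);                          /* 1. unmap DMA memory */
+       qc->flags &amp;= ~ATA_QCFLAG_ACTIVE;        /* 2. clear ACTIVE flag */
+
+       if (qc->complete_fn(qc, drv_stat))      /* 3. notify; non-zero short-circuits */
+               return;
+
+       __ata_qc_complete(qc);                  /* 4. clear flags, poison tags, complete
+                                                *    qc->waiting, free the qactive bit */
+}
+       </programlisting>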
+       <para>
+       For all non-ATAPI commands, whether they fail or not, almost
+       the same code path is taken and very little error handling
+       takes place.  A qc is completed with success status if it
+       succeeded, with failed status otherwise.
+       </para>
+       <para>
+       However, failed ATAPI commands require more handling as
+       REQUEST SENSE is needed to acquire sense data.  If an ATAPI
+       command fails, ata_qc_complete() is invoked with error status,
+       which in turn invokes atapi_qc_complete() via
+       qc->complete_fn() callback.
+       </para>
+       <para>
+       This makes atapi_qc_complete() set scmd->result to
+       SAM_STAT_CHECK_CONDITION, complete the scmd and return 1.  As
+       the sense data is empty but scmd->result is CHECK CONDITION,
+       the SCSI midlayer will invoke EH for the scmd, and returning 1
+       makes ata_qc_complete() return without deallocating the qc.
+       This leads us to ata_scsi_error() with a partially completed qc.
+       </para>
+
+       </sect1>
+
+       <sect1><title>ata_scsi_error()</title>
+       <para>
+       ata_scsi_error() is the current hostt->eh_strategy_handler()
+       for libata.  As discussed above, this will be entered in two
+       cases - timeout and ATAPI error completion.  This function
+       calls the low-level libata driver's eng_timeout() callback, the
+       standard callback for which is ata_eng_timeout().  It checks
+       if a qc is active and calls ata_qc_timeout() on the qc if so.
+       Actual error handling occurs in ata_qc_timeout().
+       </para>
+       <para>
+       If EH is invoked for timeout, ata_qc_timeout() stops BMDMA and
+       completes the qc.  Note that as we're currently in EH, we
+       cannot call scsi_done.  As described in SCSI EH doc, a
+       recovered scmd should be either retried with
+       scsi_queue_insert() or finished with scsi_finish_command().
+       Here, we override qc->scsidone with scsi_finish_command() and
+       call ata_qc_complete().
+       </para>
+       <para>
+       If EH is invoked due to a failed ATAPI qc, the qc here is
+       completed but not deallocated.  The purpose of this
+       half-completion is to use the qc as a placeholder to make the EH
+       code reach this place.  This is a bit hackish, but it works.
+       </para>
+       <para>
+       Once control reaches here, the qc is deallocated by invoking
+       __ata_qc_complete() explicitly.  Then, an internal qc for
+       REQUEST SENSE is issued.  Once sense data is acquired, the scmd
+       is finished by directly invoking scsi_finish_command() on it.
+       Note that as we have already completed and deallocated the qc
+       which was associated with the scmd, we don't need to (and
+       cannot) call ata_qc_complete() again.
+       </para>
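+
+       <para>
+       The overall flow of this section can be summed up with the
+       hedged sketch below.  The eh_strategy_handler prototype and the
+       port lookup helper are assumptions; the rest only restates the
+       call sequence described above and is not the real code.
+       </para>
+
+       <programlisting>
+/* Hedged sketch of the EH entry path -- not the real implementations. */
+static void ata_scsi_error(struct Scsi_Host *host)
+{
+       struct ata_port *ap = port_from_shost(host);    /* hypothetical lookup */
+
+       ap->ops->eng_timeout(ap);       /* standard callback: ata_eng_timeout() */
+}
+
+/* ata_eng_timeout() checks for an active qc and calls ata_qc_timeout(),
+ * where the real work happens:
+ *  - plain timeout:   stop BMDMA, point qc->scsidone at scsi_finish_command()
+ *    (scsi_done must not be called from EH) and call ata_qc_complete();
+ *  - failed ATAPI qc: free the half-completed qc with __ata_qc_complete(),
+ *    issue an internal REQUEST SENSE qc, then scsi_finish_command() the scmd. */
+       </programlisting>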
+
+       </sect1>
+
+       <sect1><title>Problems with the current EH</title>
+
+       <itemizedlist>
+
+       <listitem>
+       <para>
+       Error representation is too crude.  Currently any and all
+       error conditions are represented with the ATA STATUS and ERROR
+       registers.  Errors which aren't ATA device errors are treated
+       as ATA device errors by setting the ATA_ERR bit.  A better
+       error descriptor which can properly represent ATA and other
+       errors/exceptions is needed.
+       </para>
+       </listitem>
+
+       <listitem>
+       <para>
+       When handling timeouts, no action is taken to make the device
+       forget about the timed-out command and become ready for new
+       commands.
+       </para>
+       </listitem>
+
+       <listitem>
+       <para>
+       EH handling via ata_scsi_error() is not properly protected
+       from usual command processing.  On EH entrance, the device is
+       not in a quiescent state.  Timed-out commands may succeed or
+       fail at any time.  pio_task and atapi_task may still be running.
+       </para>
+       </listitem>
+
+       <listitem>
+       <para>
+       Error recovery is too weak.  Devices / controllers causing HSM
+       mismatch errors and other errors quite often require a reset to
+       return to a known state.  Also, advanced error handling is
+       necessary to support features like NCQ and hotplug.
+       </para>
+       </listitem>
+
+       <listitem>
+       <para>
+       ATA errors are directly handled in the interrupt handler and
+       PIO errors in pio_task.  This is problematic for advanced
+       error handling for the following reasons.
+       </para>
+       <para>
+       First, advanced error handling often requires context and
+       internal qc execution.
+       </para>
+       <para>
+       Second, even a simple failure (say, CRC error) needs
+       information gathering and could trigger complex error handling
+       (say, resetting &amp; reconfiguring).  Having multiple code
+       paths to gather information, enter EH and trigger actions
+       makes life painful.
+       </para>
+       <para>
+       Third, scattered EH code makes implementing low level drivers
+       difficult.  Low level drivers override libata callbacks.  If
+       EH is scattered over several places, each affected callback
+       should perform its part of error handling.  This can be error
+       prone and painful.
+       </para>
+       </listitem>
+
+       </itemizedlist>
+       </sect1>
+  </chapter>
+
   <chapter id="libataExt">
      <title>libata Library</title>
 !Edrivers/scsi/libata-core.c
@@ -431,6 +787,722 @@ and other resources, etc.
 !Idrivers/scsi/libata-scsi.c
   </chapter>
 
+  <chapter id="ataExceptions">
+     <title>ATA errors &amp; exceptions</title>
+
+  <para>
+  This chapter tries to identify what error/exception conditions exist
+  for ATA/ATAPI devices and describe how they should be handled in
+  an implementation-neutral way.
+  </para>
+
+  <para>
+  The term 'error' is used to describe conditions where either an
+  explicit error condition is reported by the device or a command has
+  timed out.
+  </para>
+
+  <para>
+  The term 'exception' is either used to describe exceptional
+  conditions which are not errors (say, power or hotplug events), or
+  to describe both errors and non-error exceptional conditions.  Where
+  explicit distinction between error and exception is necessary, the
+  term 'non-error exception' is used.
+  </para>
+
+  <sect1 id="excat">
+     <title>Exception categories</title>
+     <para>
+     Exceptions are described primarily with respect to the legacy
+     taskfile + bus master IDE interface.  If a controller provides
+     another, better mechanism for error reporting, mapping it onto the
+     categories described below shouldn't be difficult.
+     </para>
+
+     <para>
+     In the following sections, two recovery actions - reset and
+     reconfiguring transport - are mentioned.  These are described
+     further in <xref linkend="exrec"/>.
+     </para>
+
+     <sect2 id="excatHSMviolation">
+        <title>HSM violation</title>
+        <para>
+        This error is indicated when the STATUS value doesn't match the
+        HSM requirement during the issue or execution of any ATA/ATAPI
+        command.
+        </para>
+
+       <itemizedlist>
+       <title>Examples</title>
+
+        <listitem>
+       <para>
+       ATA_STATUS doesn't contain !BSY &amp;&amp; DRDY &amp;&amp; !DRQ while trying
+       to issue a command.
+        </para>
+       </listitem>
+
+        <listitem>
+       <para>
+       !BSY &amp;&amp; !DRQ during PIO data transfer.
+        </para>
+       </listitem>
+
+        <listitem>
+       <para>
+       DRQ on command completion.
+        </para>
+       </listitem>
+
+        <listitem>
+       <para>
+       !BSY &amp;&amp; ERR after CDB transfer starts but before the
+        last byte of CDB is transferred.  ATA/ATAPI standard states
+        that &quot;The device shall not terminate the PACKET command
+        with an error before the last byte of the command packet has
+        been written&quot; in the error outputs description of PACKET
+        command and the state diagram doesn't include such
+        transitions.
+       </para>
+       </listitem>
+
+       </itemizedlist>
+
+       <para>
+       In these cases, HSM is violated and not much information
+       regarding the error can be acquired from the STATUS or ERROR
+       register.  IOW, this error can be caused by anything - a driver
+       bug, a faulty device, the controller and/or the cable.
+       </para>
+
+       <para>
+       As HSM is violated, a reset is necessary to restore a known
+       state.  Reconfiguring the transport for a lower speed might help
+       too, as transmission errors sometimes cause this kind of error.
+       </para>
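+
+       <para>
+       As an illustration, the &quot;ready to issue&quot; condition from
+       the first example above could be checked with the sketch below.
+       The bit constants are the usual ATA status bits from
+       include/linux/ata.h; the function itself is hypothetical and only
+       shows how such a violation would be detected.
+       </para>
+
+       <programlisting>
+/* Sketch: non-zero if STATUS violates the "ready to accept a command"
+ * HSM requirement (!BSY &amp;&amp; DRDY &amp;&amp; !DRQ). */
+static int hsm_violation_on_issue(u8 status)
+{
+       if (status &amp; (ATA_BUSY | ATA_DRQ))      /* BSY or DRQ set: not allowed */
+               return 1;
+       if (!(status &amp; ATA_DRDY))               /* DRDY must be set */
+               return 1;
+       return 0;
+}
+       </programlisting>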
+     </sect2>
+     
+     <sect2 id="excatDevErr">
+        <title>ATA/ATAPI device error (non-NCQ / non-CHECK CONDITION)</title>
+
+       <para>
+       These are errors detected and reported by ATA/ATAPI devices
+       indicating device problems.  For this type of error, the STATUS
+       and ERROR register values are valid and describe the error
+       condition.  Note that some ATA bus errors are detected by
+       ATA/ATAPI devices and reported using the same mechanism as
+       device errors.  Those cases are described later in this
+       section.
+       </para>
+
+       <para>
+       For ATA commands, this type of error is indicated by !BSY
+       &amp;&amp; ERR during command execution and on completion.
+       </para>
+
+       <para>For ATAPI commands,</para>
+
+       <itemizedlist>
+
+       <listitem>
+       <para>
+       !BSY &amp;&amp; ERR &amp;&amp; ABRT right after issuing PACKET
+       indicates that PACKET command is not supported and falls in
+       this category.
+       </para>
+       </listitem>
+
+       <listitem>
+       <para>
+       !BSY &amp;&amp; ERR(==CHK) &amp;&amp; !ABRT after the last
+       byte of CDB is transferred indicates CHECK CONDITION and
+       doesn't fall in this category.
+       </para>
+       </listitem>
+
+       <listitem>
+       <para>
+       !BSY &amp;&amp; ERR(==CHK) &amp;&amp; ABRT after the last byte
+        of CDB is transferred *probably* indicates CHECK CONDITION and
+        doesn't fall in this category.
+       </para>
+       </listitem>
+
+       </itemizedlist>
+
+       <para>
+       Of the errors detected as above, the following are not ATA/ATAPI
+       device errors but ATA bus errors and should be handled
+       according to <xref linkend="excatATAbusErr"/>.
+       </para>
+
+       <variablelist>
+
+          <varlistentry>
+          <term>CRC error during data transfer</term>
+          <listitem>
+          <para>
+          This is indicated by the ICRC bit in the ERROR register and
+          means that corruption occurred during data transfer.  Up to
+          ATA/ATAPI-7, the standard specifies that this bit is only
+          applicable to UDMA transfers, but ATA/ATAPI-8 draft revision
+          1f says that the bit may be applicable to multiword DMA and
+          PIO.
+          </para>
+          </listitem>
+          </varlistentry>
+
+          <varlistentry>
+          <term>ABRT error during data transfer or on completion</term>
+          <listitem>
+          <para>
+          Up to ATA/ATAPI-7, the standard specifies that ABRT could be
+          set on ICRC errors and in cases where a device is not able
+          to complete a command.  Combined with the fact that MWDMA
+          and PIO transfer errors aren't allowed to use the ICRC bit up
+          to ATA/ATAPI-7, this seems to imply that the ABRT bit alone
+          could indicate transfer errors.
+          </para>
+          <para>
+          However, ATA/ATAPI-8 draft revision 1f removes the part
+          stating that ICRC errors can turn on ABRT.  So, this is kind
+          of a gray area.  Some heuristics are needed here.
+          </para>
+          </listitem>
+          </varlistentry>
+
+       </variablelist>
+
+       <para>
+       ATA/ATAPI device errors can be further categorized as follows.
+       </para>
+
+       <variablelist>
+
+          <varlistentry>
+          <term>Media errors</term>
+          <listitem>
+          <para>
+          This is indicated by the UNC bit in the ERROR register.  An
+          ATA device reports a UNC error only after a certain number of
+          retries cannot recover the data, so there's not much else to
+          do other than notifying the upper layer.
+          </para>
+          <para>
+          READ and WRITE commands report the CHS or LBA of the first
+          failed sector, but the ATA/ATAPI standard specifies that the
+          amount of transferred data on error completion is
+          indeterminate, so we cannot assume that sectors preceding
+          the failed sector have been transferred and thus cannot
+          complete those sectors successfully as SCSI does.
+          </para>
+          </listitem>
+          </varlistentry>
+
+          <varlistentry>
+          <term>Media changed / media change requested error</term>
+          <listitem>
+          <para>
+          &lt;&lt;TODO: fill here&gt;&gt;
+          </para>
+          </listitem>
+          </varlistentry>
+
+          <varlistentry><term>Address error</term>
+          <listitem>
+          <para>
+          This is indicated by IDNF bit in the ERROR register.
+          Report to upper layer.
+          </para>
+          </listitem>
+          </varlistentry>
+
+          <varlistentry><term>Other errors</term>
+          <listitem>
+          <para>
+          This can be an invalid command or parameter, indicated by the
+          ABRT ERROR bit, or some other error condition.  Note that the
+          ABRT bit can indicate a lot of things including ICRC and
+          Address errors.  Heuristics are needed.
+          </para>
+          </listitem>
+          </varlistentry>
+
+       </variablelist>
+
+       <para>
+       Depending on commands, not all STATUS/ERROR bits are
+       applicable.  These non-applicable bits are marked with
+       &quot;na&quot; in the output descriptions, but up to ATA/ATAPI-7
+       no definition of &quot;na&quot; can be found.  However,
+       ATA/ATAPI-8 draft revision 1f describes &quot;N/A&quot; as
+       follows.
+       </para>
+
+       <blockquote>
+       <variablelist>
+          <varlistentry><term>3.2.3.3a N/A</term>
+          <listitem>
+          <para>
+          A keyword that indicates a field has no defined value in
+          this standard and should not be checked by the host or
+          device. N/A fields should be cleared to zero.
+          </para>
+          </listitem>
+          </varlistentry>
+       </variablelist>
+       </blockquote>
+
+       <para>
+       So, it seems reasonable to assume that &quot;na&quot; bits are
+       cleared to zero by devices and thus need no explicit masking.
+       </para>
+
+     </sect2>
+
+     <sect2 id="excatATAPIcc">
+        <title>ATAPI device CHECK CONDITION</title>
+
+       <para>
+       An ATAPI device CHECK CONDITION error is indicated by a set CHK
+       bit (ERR bit) in the STATUS register after the last byte of the
+       CDB is transferred for a PACKET command.  For this kind of
+       error, sense data should be acquired to gather information
+       regarding the error.  The REQUEST SENSE packet command should be
+       used to acquire the sense data.
+       </para>
+
+       <para>
+       Once sense data is acquired, this type of error can be
+       handled similarly to other SCSI errors.  Note that sense data
+       may indicate ATA bus error (e.g. Sense Key 04h HARDWARE ERROR
+       &amp;&amp; ASC/ASCQ 47h/00h SCSI PARITY ERROR).  In such
+       cases, the error should be considered as an ATA bus error and
+       handled according to <xref linkend="excatATAbusErr"/>.
+       </para>
+
+     </sect2>
+
+     <sect2 id="excatNCQerr">
+        <title>ATA device error (NCQ)</title>
+
+       <para>
+       NCQ command error is indicated by cleared BSY and set ERR bit
+       during NCQ command phase (one or more NCQ commands
+       outstanding).  Although STATUS and ERROR registers will
+       contain valid values describing the error, READ LOG EXT is
+       required to clear the error condition, determine which command
+       has failed and acquire more information.
+       </para>
+
+       <para>
+       READ LOG EXT Log Page 10h reports which tag has failed and the
+       taskfile register values describing the error.  With this
+       information, the failed command can be handled as a normal ATA
+       command error as in <xref linkend="excatDevErr"/> and all
+       other in-flight commands must be retried.  Note that this
+       retry should not be counted - it's likely that commands
+       retried this way would have completed normally if it were not
+       for the failed command.
+       </para>
+
+       <para>
+       Note that ATA bus errors can be reported as ATA device NCQ
+       errors.  This should be handled as described in <xref
+       linkend="excatATAbusErr"/>.
+       </para>
+
+       <para>
+       If READ LOG EXT Log Page 10h fails or reports NQ, we're
+       thoroughly screwed.  This condition should be treated
+       according to <xref linkend="excatHSMviolation"/>.
+       </para>
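+
+       <para>
+       In rough pseudo-C, NCQ error recovery as described above would
+       proceed as in the sketch below.  All of the helper names are
+       invented for illustration; only READ LOG EXT page 10h itself and
+       the recovery order come from the text.
+       </para>
+
+       <programlisting>
+/* Sketch of NCQ error handling -- helpers are hypothetical. */
+read_log_ext_page_10h(dev, buf);         /* internal command; clears the error condition */
+if (page_10h_reports_nq(buf))
+       handle_as_hsm_violation(ap);      /* see "HSM violation" above */
+else {
+       tag = page_10h_failed_tag(buf);
+       handle_device_error(qc_for_tag(ap, tag), buf);  /* normal ATA device error */
+       retry_other_inflight_commands(ap);              /* retries should not be counted */
+}
+       </programlisting>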
+
+     </sect2>
+
+     <sect2 id="excatATAbusErr">
+        <title>ATA bus error</title>
+
+       <para>
+       An ATA bus error means that data corruption occurred during
+       transmission over the ATA bus (SATA or PATA).  This type of
+       error can be indicated by
+       </para>
+
+       <itemizedlist>
+
+       <listitem>
+       <para>
+       ICRC or ABRT error as described in <xref linkend="excatDevErr"/>.
+       </para>
+       </listitem>
+
+       <listitem>
+       <para>
+       Controller-specific error completion with error information
+       indicating transmission error.
+       </para>
+       </listitem>
+
+       <listitem>
+       <para>
+       On some controllers, command timeout.  In this case, there may
+       be a mechanism to determine that the timeout is due to
+       transmission error.
+       </para>
+       </listitem>
+
+       <listitem>
+       <para>
+       Unknown/random errors, timeouts and all sorts of weirdities.
+       </para>
+       </listitem>
+
+       </itemizedlist>
+
+       <para>
+       As described above, transmission errors can cause a wide variety
+       of symptoms ranging from device ICRC errors to random device
+       lockups, and, in many cases, there is no way to tell if an
+       error condition is due to a transmission error or not;
+       therefore, it's necessary to employ some kind of heuristic
+       when dealing with errors and timeouts.  For example,
+       encountering repetitive ABRT errors for a known supported
+       command is likely to indicate an ATA bus error.
+       </para>
+
+       <para>
+       Once it's determined that ATA bus errors have possibly
+       occurred, lowering the ATA bus transmission speed is one of the
+       actions which may alleviate the problem.  See <xref
+       linkend="exrecReconf"/> for more information.
+       </para>
+
+     </sect2>
+
+     <sect2 id="excatPCIbusErr">
+        <title>PCI bus error</title>
+
+       <para>
+       Data corruption or other failures during transmission over PCI
+       (or another system bus).  For standard BMDMA, this is indicated
+       by the Error bit in the BMDMA Status register.  This type of
+       error must be logged as it indicates something is very wrong
+       with the system.  Resetting the host controller is recommended.
+       </para>
+
+     </sect2>
+
+     <sect2 id="excatLateCompletion">
+        <title>Late completion</title>
+
+       <para>
+       This occurs when a timeout fires and the timeout handler finds
+       out that the timed-out command has already completed,
+       successfully or with an error.  This is usually caused by lost
+       interrupts.  This type of error must be logged.  Resetting the
+       host controller is recommended.
+       </para>
+
+     </sect2>
+
+     <sect2 id="excatUnknown">
+        <title>Unknown error (timeout)</title>
+
+       <para>
+       This is when a timeout occurs and the command is still being
+       processed or the host and device are in an unknown state.  When
+       this occurs, the HSM could be in any valid or invalid state.  To
+       bring the device to a known state and make it forget about the
+       timed-out command, resetting is necessary.  The timed-out
+       command may be retried.
+       </para>
+
+       <para>
+       Timeouts can also be caused by transmission errors.  Refer to
+       <xref linkend="excatATAbusErr"/> for more details.
+       </para>
+
+     </sect2>
+
+     <sect2 id="excatHoplugPM">
+        <title>Hotplug and power management exceptions</title>
+
+       <para>
+       &lt;&lt;TODO: fill here&gt;&gt;
+       </para>
+
+     </sect2>
+
+  </sect1>
+
+  <sect1 id="exrec">
+     <title>EH recovery actions</title>
+
+     <para>
+     This section discusses several important recovery actions.
+     </para>
+
+     <sect2 id="exrecClr">
+        <title>Clearing error condition</title>
+
+       <para>
+       Many controllers require their error registers to be cleared by
+       the error handler.  Different controllers may have different
+       requirements.
+       </para>
+
+       <para>
+       For SATA, it's strongly recommended to clear at least the SError
+       register during error handling.
+       </para>
+     </sect2>
+
+     <sect2 id="exrecRst">
+        <title>Reset</title>
+
+       <para>
+       During EH, resetting is necessary in the following cases.
+       </para>
+
+       <itemizedlist>
+
+       <listitem>
+       <para>
+       HSM is in unknown or invalid state
+       </para>
+       </listitem>
+
+       <listitem>
+       <para>
+       HBA is in unknown or invalid state
+       </para>
+       </listitem>
+
+       <listitem>
+       <para>
+       EH needs to make HBA/device forget about in-flight commands
+       </para>
+       </listitem>
+
+       <listitem>
+       <para>
+       HBA/device behaves weirdly
+       </para>
+       </listitem>
+
+       </itemizedlist>
+
+       <para>
+       Resetting during EH might be a good idea regardless of the error
+       condition to improve EH robustness.  Whether to reset both or
+       either one of the HBA and device depends on the situation, but the
+       following scheme is recommended.
+       </para>
+
+       <itemizedlist>
+
+       <listitem>
+       <para>
+       When it's known that the HBA is in a ready state but the
+       ATA/ATAPI device is in an unknown state, reset only the device.
+       </para>
+       </listitem>
+
+       <listitem>
+       <para>
+       If HBA is in unknown state, reset both HBA and device.
+       </para>
+       </listitem>
+
+       </itemizedlist>
+
+       <para>
+       HBA resetting is implementation specific.  For a controller
+       complying with taskfile/BMDMA PCI IDE, stopping the active DMA
+       transaction may be sufficient iff BMDMA state is the only HBA
+       context.  But even mostly taskfile/BMDMA PCI IDE compliant
+       controllers may have implementation-specific requirements and
+       mechanisms to reset themselves.  This must be addressed by
+       specific drivers.
+       </para>
+
+       <para>
+       OTOH, ATA/ATAPI standard describes in detail ways to reset
+       ATA/ATAPI devices.
+       </para>
+
+       <variablelist>
+
+          <varlistentry><term>PATA hardware reset</term>
+          <listitem>
+          <para>
+          This is a hardware-initiated device reset signalled by
+          asserting the PATA RESET- signal.  There is no standard way
+          to initiate a hardware reset from software, although some
+          hardware provides registers that allow the driver to directly
+          tweak the RESET- signal.
+          </para>
+          </listitem>
+          </varlistentry>
+
+          <varlistentry><term>Software reset</term>
+          <listitem>
+          <para>
+          This is achieved by turning the CONTROL SRST bit on for at
+          least 5us.  Both PATA and SATA support it but, in the case of
+          SATA, this may require controller-specific support as the
+          second Register FIS to clear SRST should be transmitted
+          while the BSY bit is still set.  Note that on PATA, this
+          resets both master and slave devices on a channel.
+          </para>
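+          <para>
+          On a legacy taskfile interface the sequence boils down to the
+          sketch below.  Register access details vary by controller;
+          this is an illustration, not the libata implementation, and
+          wait_for_bsy_clear() is a hypothetical helper.
+          </para>
+          <programlisting>
+/* Sketch of an SRST software reset on a legacy taskfile interface. */
+outb(ap->ctl | ATA_SRST, ap->ioaddr.ctl_addr);  /* assert SRST */
+udelay(10);                                     /* standard requires at least 5us */
+outb(ap->ctl, ap->ioaddr.ctl_addr);             /* deassert SRST */
+msleep(150);                                    /* give the device(s) time to start the reset */
+wait_for_bsy_clear(ap);                         /* poll STATUS until BSY clears */
+          </programlisting>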
+          </listitem>
+          </varlistentry>
+
+          <varlistentry><term>EXECUTE DEVICE DIAGNOSTIC command</term>
+          <listitem>
+          <para>
+          Although the ATA/ATAPI standard doesn't describe it exactly,
+          EDD implies some level of resetting, possibly a level similar
+          to software reset.  The host-side EDD protocol can be handled
+          with normal command processing and most SATA controllers
+          should be able to handle EDDs just like other commands.
+          As in software reset, EDD affects both devices on a PATA
+          bus.
+          </para>
+          <para>
+          Although EDD does reset devices, it doesn't suit error
+          handling as EDD cannot be issued while BSY is set and it's
+          unclear how it will act when the device is in an
+          unknown/weird state.
+          </para>
+          </listitem>
+          </varlistentry>
+
+          <varlistentry><term>ATAPI DEVICE RESET command</term>
+          <listitem>
+          <para>
+          This is very similar to software reset except that reset
+          can be restricted to the selected device without affecting
+          the other device sharing the cable.
+          </para>
+          </listitem>
+          </varlistentry>
+
+          <varlistentry><term>SATA phy reset</term>
+          <listitem>
+          <para>
+          This is the preferred way of resetting a SATA device.  In
+          effect, it's identical to PATA hardware reset.  Note that
+          this can be done with the standard SCR Control register.
+          As such, it's usually easier to implement than software
+          reset.
+          </para>
+          </listitem>
+          </varlistentry>
+
+       </variablelist>
+
+       <para>
+       One more thing to consider when resetting devices is that
+       resetting clears certain configuration parameters and they
+       need to be set to their previous or newly adjusted values
+       after reset.
+       </para>
+
+       <para>
+       The affected parameters are:
+       </para>
+
+       <itemizedlist>
+
+       <listitem>
+       <para>
+       CHS set up with INITIALIZE DEVICE PARAMETERS (seldom used)
+       </para>
+       </listitem>
+
+       <listitem>
+       <para>
+       Parameters set with SET FEATURES including transfer mode setting
+       </para>
+       </listitem>
+
+       <listitem>
+       <para>
+       Block count set with SET MULTIPLE MODE
+       </para>
+       </listitem>
+
+       <listitem>
+       <para>
+       Other parameters (SET MAX, MEDIA LOCK...)
+       </para>
+       </listitem>
+
+       </itemizedlist>
+
+       <para>
+       The ATA/ATAPI standard specifies that some parameters must be
+       maintained across a hardware or software reset, but doesn't
+       strictly specify all of them.  Always reconfiguring the needed
+       parameters after a reset is required for robustness.  Note that
+       this also applies when resuming from deep sleep (power-off).
+       </para>
+
+       <para>
+       Also, the ATA/ATAPI standard requires that IDENTIFY DEVICE /
+       IDENTIFY PACKET DEVICE be issued after any configuration
+       parameter is updated or after a hardware reset, and that the
+       result be used for further operation.  The OS driver is
+       required to implement a revalidation mechanism to support this.
+       </para>
+
+     </sect2>
+
+     <sect2 id="exrecReconf">
+        <title>Reconfigure transport</title>
+
+       <para>
+       For both PATA and SATA, a lot of corners are cut in cheap
+       connectors, cables or controllers, and it's quite common to see
+       a high transmission error rate.  This can be mitigated by
+       lowering the transmission speed.
+       </para>
+
+       <para>
+       The following is a possible scheme Jeff Garzik suggested.
+       </para>
+
+       <blockquote>
+       <para>
+       If more than $N (3?) transmission errors happen in 15 minutes,
+       </para> 
+       <itemizedlist>
+       <listitem>
+       <para>
+       if SATA, decrease SATA PHY speed.  if speed cannot be decreased,
+       </para>
+       </listitem>
+       <listitem>
+       <para>
+       decrease UDMA xfer speed.  if at UDMA0, switch to PIO4,
+       </para>
+       </listitem>
+       <listitem>
+       <para>
+       decrease PIO xfer speed.  if at PIO3, complain, but continue
+       </para>
+       </listitem>
+       </itemizedlist>
+       </blockquote>
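+
+       <para>
+       A minimal sketch of such a policy is shown below.  Everything
+       here (the state structure, the counters and the helper names) is
+       hypothetical glue; only the 15 minute window, the error limit and
+       the stepping order SATA PHY, then UDMA, then PIO come from the
+       scheme above.
+       </para>
+
+       <programlisting>
+/* Hypothetical sketch of the downgrade policy suggested above. */
+struct xfer_err_state { unsigned long window_start; int count; };
+
+static void note_transmission_error(struct ata_device *dev,
+                                    struct xfer_err_state *st)
+{
+       if (time_after(jiffies, st->window_start + 15 * 60 * HZ)) {
+               st->window_start = jiffies;     /* start a new 15 minute window */
+               st->count = 0;
+       }
+
+       if (++st->count &lt;= 3)                   /* $N == 3 in the scheme above */
+               return;
+       st->count = 0;
+
+       if (is_sata(dev) &amp;&amp; lower_sata_phy_speed(dev))
+               return;                         /* step 1: lower SATA PHY speed */
+       if (lower_udma_speed(dev))
+               return;                         /* step 2: lower UDMA; at UDMA0 go to PIO4 */
+       if (!lower_pio_speed(dev))
+               complain_but_continue(dev);     /* step 3: lower PIO; at PIO3, complain */
+}
+       </programlisting>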
+
+     </sect2>
+
+  </sect1>
+
+  </chapter>
+
   <chapter id="PiixInt">
      <title>ata_piix Internals</title>
 !Idrivers/scsi/ata_piix.c
Documentation/block/biodoc.txt
index 6dd274d7e1cfb6562e2001787b2fb1ec757f8962..2d65c2182161723d97ce3b2d3f81f157fed1af69 100644
@@ -906,9 +906,20 @@ Aside:
 
 
 4. The I/O scheduler
-I/O schedulers are now per queue. They should be runtime switchable and modular
-but aren't yet. Jens has most bits to do this, but the sysfs implementation is
-missing.
+The I/O scheduler, a.k.a. the elevator, is implemented in two layers: the
+generic dispatch queue and the specific I/O schedulers.  Unless stated
+otherwise, "elevator" is used to refer to both parts and "I/O scheduler" to
+the specific I/O schedulers.
+
+The block layer implements the generic dispatch queue in ll_rw_blk.c and
+elevator.c.  The generic dispatch queue is responsible for properly ordering
+barrier requests, requeueing, handling non-fs requests and all other
+subtleties.
+
+Specific I/O schedulers are responsible for ordering normal filesystem
+requests.  They can also choose to delay certain requests to improve
+throughput or for other purposes.  As the plural form indicates, there are
+multiple I/O schedulers.  They can be built as modules, but at least one
+should be built into the kernel.  Each queue can choose a different one and
+can also switch to another one dynamically.
 
 A block layer call to the i/o scheduler follows the convention elv_xxx(). This
 calls elevator_xxx_fn in the elevator switch (drivers/block/elevator.c). Oh,
@@ -921,44 +932,36 @@ keeping work.
 The functions an elevator may implement are: (* are mandatory)
 elevator_merge_fn              called to query requests for merge with a bio
 
-elevator_merge_req_fn          " " "  with another request
+elevator_merge_req_fn          called when two requests get merged. The one
+                               which gets merged into the other one will
+                               never be seen by the I/O scheduler again.
+                               IOW, after being merged, the request is gone.
 
 elevator_merged_fn             called when a request in the scheduler has been
                                involved in a merge. It is used in the deadline
                                scheduler for example, to reposition the request
                                if its sorting order has changed.
 
-*elevator_next_req_fn          returns the next scheduled request, or NULL
-                               if there are none (or none are ready).
+elevator_dispatch_fn           fills the dispatch queue with ready requests.
+                               I/O schedulers are free to postpone requests by
+                               not filling the dispatch queue unless @force
+                               is non-zero.  Once dispatched, I/O schedulers
+                               are not allowed to manipulate the requests -
+                               they belong to generic dispatch queue.
 
-*elevator_add_req_fn           called to add a new request into the scheduler
+elevator_add_req_fn            called to add a new request into the scheduler
 
 elevator_queue_empty_fn                returns true if the merge queue is empty.
                                Drivers shouldn't use this, but rather check
                                if elv_next_request is NULL (without losing the
                                request if one exists!)
 
-elevator_remove_req_fn         This is called when a driver claims ownership of
-                               the target request - it now belongs to the
-                               driver. It must not be modified or merged.
-                               Drivers must not lose the request! A subsequent
-                               call of elevator_next_req_fn must  return the
-                               _next_ request.
-
-elevator_requeue_req_fn                called to add a request to the scheduler. This
-                               is used when the request has alrnadebeen
-                               returned by elv_next_request, but hasn't
-                               completed. If this is not implemented then
-                               elevator_add_req_fn is called instead.
-
 elevator_former_req_fn
 elevator_latter_req_fn         These return the request before or after the
                                one specified in disk sort order. Used by the
                                block layer to find merge possibilities.
 
-elevator_completed_req_fn      called when a request is completed. This might
-                               come about due to being merged with another or
-                               when the device completes the request.
+elevator_completed_req_fn      called when a request is completed.
 
 elevator_may_queue_fn          returns true if the scheduler wants to allow the
                                current context to queue a new request even if
@@ -967,13 +970,33 @@ elevator_may_queue_fn             returns true if the scheduler wants to allow the
 
 elevator_set_req_fn
 elevator_put_req_fn            Must be used to allocate and free any elevator
-                               specific storate for a request.
+                               specific storage for a request.
+
+elevator_activate_req_fn       Called when the device driver first sees a
+                               request. I/O schedulers can use this callback
+                               to determine when actual execution of a
+                               request starts.
+
+elevator_deactivate_req_fn     Called when the device driver decides to delay
+                               a request by requeueing it.
 
 elevator_init_fn
 elevator_exit_fn               Allocate and free any elevator specific storage
                                for a queue.
 
-4.2 I/O scheduler implementation
+4.2 Request flows seen by I/O schedulers
+All requests seen by I/O schedulers strictly follow one of the following three
+flows.
+
+ set_req_fn ->
+
+ i.   add_req_fn -> (merged_fn ->)* -> dispatch_fn -> activate_req_fn ->
+      (deactivate_req_fn -> activate_req_fn ->)* -> completed_req_fn
+ ii.  add_req_fn -> (merged_fn ->)* -> merge_req_fn
+ iii. [none]
+
+ -> put_req_fn
+
+4.3 I/O scheduler implementation
 The generic i/o scheduler algorithm attempts to sort/merge/batch requests for
 optimal disk scan and request servicing performance (based on generic
 principles and device capabilities), optimized for:
@@ -993,18 +1016,7 @@ request in sort order to prevent binary tree lookups.
 This arrangement is not a generic block layer characteristic however, so
 elevators may implement queues as they please.
 
-ii. Last merge hint
-The last merge hint is part of the generic queue layer. I/O schedulers must do
-some management on it. For the most part, the most important thing is to make
-sure q->last_merge is cleared (set to NULL) when the request on it is no longer
-a candidate for merging (for example if it has been sent to the driver).
-
-The last merge performed is cached as a hint for the subsequent request. If
-sequential data is being submitted, the hint is used to perform merges without
-any scanning. This is not sufficient when there are multiple processes doing
-I/O though, so a "merge hash" is used by some schedulers.
-
-iii. Merge hash
+ii. Merge hash
 AS and deadline use a hash table indexed by the last sector of a request. This
 enables merging code to quickly look up "back merge" candidates, even when
 multiple I/O streams are being performed at once on one disk.
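As a rough illustration of the hash described above, here is a self-contained C sketch; the structure and function are hypothetical stand-ins, not the kernel's actual merge-hash code:

/*
 * Requests are indexed by the sector at which they end, so a bio that
 * starts exactly there can be found without scanning the whole queue.
 */
struct req_stub {
	unsigned long start_sector;
	unsigned long nr_sectors;
	struct req_stub *hash_next;
};

#define MERGE_HASH_SIZE			64
#define MERGE_HASH_FN(end_sector)	((end_sector) % MERGE_HASH_SIZE)

static struct req_stub *merge_hash[MERGE_HASH_SIZE];

/* find a request whose end matches the start of a new bio ("back merge") */
static struct req_stub *find_back_merge(unsigned long bio_start_sector)
{
	struct req_stub *rq;

	for (rq = merge_hash[MERGE_HASH_FN(bio_start_sector)]; rq; rq = rq->hash_next)
		if (rq->start_sector + rq->nr_sectors == bio_start_sector)
			return rq;
	return 0;	/* no candidate; other merge strategies may still apply */
}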
@@ -1013,29 +1025,8 @@ multiple I/O streams are being performed at once on one disk.
 are far less common than "back merges" due to the nature of most I/O patterns.
 Front merges are handled by the binary trees in AS and deadline schedulers.
 
-iv. Handling barrier cases
-A request with flags REQ_HARDBARRIER or REQ_SOFTBARRIER must not be ordered
-around. That is, they must be processed after all older requests, and before
-any newer ones. This includes merges!
-
-In AS and deadline schedulers, barriers have the effect of flushing the reorder
-queue. The performance cost of this will vary from nothing to a lot depending
-on i/o patterns and device characteristics. Obviously they won't improve
-performance, so their use should be kept to a minimum.
-
-v. Handling insertion position directives
-A request may be inserted with a position directive. The directives are one of
-ELEVATOR_INSERT_BACK, ELEVATOR_INSERT_FRONT, ELEVATOR_INSERT_SORT.
-
-ELEVATOR_INSERT_SORT is a general directive for non-barrier requests.
-ELEVATOR_INSERT_BACK is used to insert a barrier to the back of the queue.
-ELEVATOR_INSERT_FRONT is used to insert a barrier to the front of the queue, and
-overrides the ordering requested by any previous barriers. In practice this is
-harmless and required, because it is used for SCSI requeueing. This does not
-require flushing the reorder queue, so does not impose a performance penalty.
-
-vi. Plugging the queue to batch requests in anticipation of opportunities for
-  merge/sort optimizations
+iii. Plugging the queue to batch requests in anticipation of opportunities for
+     merge/sort optimizations
 
 This is just the same as in 2.4 so far, though per-device unplugging
 support is anticipated for 2.5. Also with a priority-based i/o scheduler,
@@ -1069,7 +1060,7 @@ Aside:
   blk_kick_queue() to unplug a specific queue (right away ?)
   or optionally, all queues, is in the plan.
 
-4.3 I/O contexts
+4.4 I/O contexts
 I/O contexts provide a dynamically allocated per process data area. They may
 be used in I/O schedulers, and in the block layer (could be used for IO statis,
 priorities for example). See *io_context in drivers/block/ll_rw_blk.c, and
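To tie the elevator_*_fn table above together, here is a minimal sketch of how a specific I/O scheduler plugs into the elevator switch, modeled loosely on the noop scheduler of this era. The callback bodies are assumed to be defined elsewhere, and the exact prototypes and registration details are assumptions that may differ between kernel versions:

/* Sketch only: the example_* callbacks are assumed to exist with the
 * prototypes this kernel version expects. */
static struct elevator_type iosched_example = {
	.ops = {
		.elevator_merge_req_fn   = example_merge_requests,
		.elevator_dispatch_fn    = example_dispatch,	/* fill the dispatch queue */
		.elevator_add_req_fn     = example_add_request,
		.elevator_queue_empty_fn = example_queue_empty,
	},
	.elevator_name  = "example",
	.elevator_owner = THIS_MODULE,
};

static int __init example_iosched_init(void)
{
	/* once registered, "example" can be selected per queue at runtime */
	return elv_register(&iosched_example);
}
module_init(example_iosched_init);

Each request such a scheduler accepts then follows one of the three flows listed in section 4.2: dispatched and completed, merged into another request, or simply allocated and freed.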
index a55f0f95b171a3bbe80b5e19c535a2707e957f64..b0fe41da007bf847e8ae2da4c4b2b840461d9353 100644 (file)
@@ -777,7 +777,7 @@ doing so is the same as described in the "Configuring Multiple Bonds
 Manually" section, below.
 
        NOTE: It has been observed that some Red Hat supplied kernels
-are apparently unable to rename modules at load time (the "-obonding1"
+are apparently unable to rename modules at load time (the "-o bond1"
 part).  Attempts to pass that option to modprobe will produce an
 "Operation not permitted" error.  This has been reported on some
 Fedora Core kernels, and has been seen on RHEL 4 as well.  On kernels
@@ -883,7 +883,8 @@ the above does not work, and the second bonding instance never sees
 its options.  In that case, the second options line can be substituted
 as follows:
 
-install bonding1 /sbin/modprobe bonding -obond1 mode=balance-alb miimon=50
+install bond1 /sbin/modprobe --ignore-install bonding -o bond1 \
+       mode=balance-alb miimon=50
 
        This may be repeated any number of times, specifying a new and
 unique name in place of bond1 for each subsequent instance.
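Repeating that pattern, a hypothetical third instance could be added with one more install line; the name and options below are purely illustrative:

install bond2 /sbin/modprobe --ignore-install bonding -o bond2 \
	mode=active-backup miimon=100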
index 1fa7e5343464d69373858c598154da813e66285d..f1d121f23025c664ebd5e6bbebb29c62bce49c5b 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -334,7 +334,7 @@ KALLSYMS    = scripts/kallsyms
 PERL           = perl
 CHECK          = sparse
 
-CHECKFLAGS     := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ $(CF)
+CHECKFLAGS     := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ -Wbitwise $(CF)
 MODFLAGS       = -DMODULE
 CFLAGS_MODULE   = $(MODFLAGS)
 AFLAGS_MODULE   = $(MODFLAGS)
index 582a3519fb28d24bbf42dd8e1cb46c700b5f2c22..9903e3a79102486184dc66e282fab60a9048fe6d 100644 (file)
@@ -154,7 +154,7 @@ pci_dma_supported(struct pci_dev *hwdev, dma_addr_t mask)
 
 void *
 dma_alloc_coherent(struct device *dev, size_t size,
-                  dma_addr_t *dma_handle, int gfp)
+                  dma_addr_t *dma_handle, gfp_t gfp)
 {
        void *ret;
 
index 7cb23f12ecbd03a15cd8a6d52f84d887f96736df..c468e312e5f815bd67b19c7354bf65422ea51d4b 100644 (file)
@@ -397,7 +397,7 @@ pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
 {
        void *cpu_addr;
        long order = get_order(size);
-       int gfp = GFP_ATOMIC;
+       gfp_t gfp = GFP_ATOMIC;
 
 try_again:
        cpu_addr = (void *)__get_free_pages(gfp, order);
index 11fff042aa817d022139947e82093d24a3e567a4..682367bd0f653d63a19b7120959e18cc3d1da383 100644 (file)
@@ -204,6 +204,7 @@ config ARCH_H720X
 
 config ARCH_AAEC2000
        bool "Agilent AAEC-2000 based"
+       select ARM_AMBA
        help
          This enables support for systems based on the Agilent AAEC-2000
 
@@ -687,7 +688,8 @@ source "drivers/acorn/block/Kconfig"
 
 if PCMCIA || ARCH_CLPS7500 || ARCH_IOP3XX || ARCH_IXP4XX \
        || ARCH_L7200 || ARCH_LH7A40X || ARCH_PXA || ARCH_RPC \
-       || ARCH_S3C2410 || ARCH_SA1100 || ARCH_SHARK || FOOTBRIDGE
+       || ARCH_S3C2410 || ARCH_SA1100 || ARCH_SHARK || FOOTBRIDGE \
+       || MACH_MP1000
 source "drivers/ide/Kconfig"
 endif
 
index 7c7f475e213ea3f85779165679e06feb1aea7109..a54d2eb648920554bd975a5f52773dcbac5746c0 100644 (file)
@@ -39,7 +39,8 @@
     defined(CONFIG_ARCH_IXP4XX) || \
     defined(CONFIG_ARCH_IXP2000) || \
     defined(CONFIG_ARCH_LH7A40X) || \
-    defined(CONFIG_ARCH_OMAP)
+    defined(CONFIG_ARCH_OMAP) || \
+    defined(CONFIG_MACH_MP1000)
                .macro  loadsp, rb
                addruart \rb
                .endm
diff --git a/arch/arm/configs/mp1000_defconfig b/arch/arm/configs/mp1000_defconfig
new file mode 100644 (file)
index 0000000..d2cbc6f
--- /dev/null
@@ -0,0 +1,897 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.14-rc1
+# Fri Sep 16 15:48:13 2005
+#
+CONFIG_ARM=y
+CONFIG_MMU=y
+CONFIG_UID16=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+# CONFIG_CLEAN_COMPILE is not set
+CONFIG_BROKEN=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_LOCK_KERNEL=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+# CONFIG_HOTPLUG is not set
+CONFIG_KOBJECT_UEVENT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_EMBEDDED=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_OBSOLETE_MODPARM=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+
+#
+# System Type
+#
+# CONFIG_ARCH_CLPS7500 is not set
+CONFIG_ARCH_CLPS711X=y
+# CONFIG_ARCH_CO285 is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_CAMELOT is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_IOP3XX is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_OMAP is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_IMX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_AAEC2000 is not set
+
+#
+# CLPS711X/EP721X Implementations
+#
+# CONFIG_ARCH_AUTCPU12 is not set
+# CONFIG_ARCH_CDB89712 is not set
+# CONFIG_ARCH_CEIVA is not set
+# CONFIG_ARCH_CLEP7312 is not set
+# CONFIG_ARCH_EDB7211 is not set
+# CONFIG_ARCH_P720T is not set
+# CONFIG_ARCH_FORTUNET is not set
+CONFIG_MACH_MP1000=y
+CONFIG_MP1000_90MHZ=y
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_ARM720T=y
+CONFIG_CPU_32v4=y
+CONFIG_CPU_ABRT_LV4T=y
+CONFIG_CPU_CACHE_V4=y
+CONFIG_CPU_CACHE_VIVT=y
+CONFIG_CPU_COPY_V4WT=y
+CONFIG_CPU_TLB_V4WT=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+
+#
+# Bus support
+#
+CONFIG_ISA_DMA_API=y
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+# CONFIG_SMP is not set
+CONFIG_PREEMPT=y
+# CONFIG_NO_IDLE_HZ is not set
+# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_ALIGNMENT_TRAP=y
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="console=ttyCL,38400 root=/dev/discs/disc0/part1 ip=any cs89x0_media=rj45"
+# CONFIG_XIP_KERNEL is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+CONFIG_FPE_NWFPE=y
+# CONFIG_FPE_NWFPE_XP is not set
+# CONFIG_FPE_FASTFPE is not set
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_AOUT is not set
+CONFIG_BINFMT_MISC=y
+# CONFIG_ARTHUR is not set
+
+#
+# Power management options
+#
+# CONFIG_PM is not set
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_BIC=y
+CONFIG_IPV6=y
+# CONFIG_IPV6_PRIVACY is not set
+# CONFIG_INET6_AH is not set
+# CONFIG_INET6_ESP is not set
+# CONFIG_INET6_IPCOMP is not set
+# CONFIG_INET6_TUNNEL is not set
+# CONFIG_IPV6_TUNNEL is not set
+# CONFIG_NETFILTER is not set
+
+#
+# DCCP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_NET_CLS_ROUTE is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NETFILTER_NETLINK is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_IEEE80211 is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+CONFIG_MTD=y
+CONFIG_MTD_DEBUG=y
+CONFIG_MTD_DEBUG_VERBOSE=3
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_REDBOOT_PARTS=m
+CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-2
+CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED=y
+# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AFS_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=m
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=m
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_NOSWAP=y
+# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set
+# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set
+CONFIG_MTD_CFI_GEOMETRY=y
+# CONFIG_MTD_MAP_BANK_WIDTH_1 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_2 is not set
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+# CONFIG_MTD_CFI_I1 is not set
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_OTP is not set
+CONFIG_MTD_CFI_INTELEXT=m
+# CONFIG_MTD_CFI_AMDSTD is not set
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=m
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+# CONFIG_MTD_OBSOLETE_CHIPS is not set
+# CONFIG_MTD_XIP is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+CONFIG_MTD_PHYSMAP=m
+CONFIG_MTD_PHYSMAP_START=0x0000000
+CONFIG_MTD_PHYSMAP_LEN=0x4000000
+CONFIG_MTD_PHYSMAP_BANKWIDTH=2
+# CONFIG_MTD_ARM_INTEGRATOR is not set
+CONFIG_MTD_EDB7312=m
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLKMTD is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+
+#
+# NAND Flash Device Drivers
+#
+CONFIG_MTD_NAND=y
+# CONFIG_MTD_NAND_VERIFY_WRITE is not set
+CONFIG_MTD_NAND_MP1000=y
+CONFIG_MTD_NAND_IDS=y
+# CONFIG_MTD_NAND_DISKONCHIP is not set
+# CONFIG_MTD_NAND_NANDSIM is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=m
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=2
+CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_CDROM_PKTCDVD is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDE=y
+
+#
+# Please see Documentation/ide.txt for help/info on IDE drives
+#
+# CONFIG_BLK_DEV_IDE_SATA is not set
+# CONFIG_BLK_DEV_HD_IDE is not set
+CONFIG_BLK_DEV_IDEDISK=y
+# CONFIG_IDEDISK_MULTI_MODE is not set
+# CONFIG_BLK_DEV_IDECD is not set
+# CONFIG_BLK_DEV_IDETAPE is not set
+# CONFIG_BLK_DEV_IDEFLOPPY is not set
+# CONFIG_IDE_TASK_IOCTL is not set
+
+#
+# IDE chipset support/bugfixes
+#
+# CONFIG_IDE_GENERIC is not set
+CONFIG_IDE_ARM=y
+CONFIG_BLK_DEV_IDE_MP1000=y
+# CONFIG_BLK_DEV_IDEDMA is not set
+# CONFIG_IDEDMA_AUTO is not set
+# CONFIG_BLK_DEV_HD is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+CONFIG_MD=y
+# CONFIG_BLK_DEV_MD is not set
+CONFIG_BLK_DEV_DM=y
+# CONFIG_DM_CRYPT is not set
+# CONFIG_DM_SNAPSHOT is not set
+# CONFIG_DM_MIRROR is not set
+# CONFIG_DM_ZERO is not set
+# CONFIG_DM_MULTIPATH is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_IEEE1394 is not set
+
+#
+# I2O device support
+#
+
+#
+# Network device support
+#
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# PHY device support
+#
+# CONFIG_PHYLIB is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+# CONFIG_MII is not set
+# CONFIG_SMC91X is not set
+# CONFIG_DM9000 is not set
+CONFIG_CS89x0=y
+
+#
+# Ethernet (1000 Mbit)
+#
+
+#
+# Ethernet (10000 Mbit)
+#
+
+#
+# Token Ring devices
+#
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+CONFIG_INPUT_EVBUG=y
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_LIBPS2 is not set
+# CONFIG_SERIO_RAW is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=2
+# CONFIG_SERIAL_8250_EXTENDED is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_CLPS711X=y
+CONFIG_SERIAL_CLPS711X_CONSOLE=y
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+CONFIG_NVRAM=y
+CONFIG_RTC=y
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_RAW_DRIVER is not set
+
+#
+# TPM devices
+#
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# Hardware Monitoring support
+#
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Misc devices
+#
+
+#
+# Multimedia Capabilities Port drivers
+#
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+# CONFIG_FB is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+CONFIG_USB_ARCH_HAS_HCD=y
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+# CONFIG_USB is not set
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+# CONFIG_EXT2_FS_POSIX_ACL is not set
+# CONFIG_EXT2_FS_SECURITY is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+CONFIG_REISERFS_FS=m
+# CONFIG_REISERFS_CHECK is not set
+# CONFIG_REISERFS_PROC_INFO is not set
+# CONFIG_REISERFS_FS_XATTR is not set
+# CONFIG_JFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+# CONFIG_XFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_INOTIFY=y
+CONFIG_QUOTA=y
+# CONFIG_QFMT_V1 is not set
+# CONFIG_QFMT_V2 is not set
+CONFIG_QUOTACTL=y
+CONFIG_DNOTIFY=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_HUGETLBFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+# CONFIG_RELAYFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS_FS is not set
+CONFIG_JFFS2_FS=m
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+CONFIG_CRAMFS=m
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+CONFIG_NFS_V4=y
+# CONFIG_NFS_DIRECTIO is not set
+CONFIG_NFSD=y
+CONFIG_NFSD_V3=y
+# CONFIG_NFSD_V3_ACL is not set
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_TCP=y
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+CONFIG_SMB_FS=m
+# CONFIG_SMB_NLS_DEFAULT is not set
+CONFIG_CIFS=m
+# CONFIG_CIFS_STATS is not set
+# CONFIG_CIFS_XATTR is not set
+# CONFIG_CIFS_EXPERIMENTAL is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+# CONFIG_NLS_ISO8859_1 is not set
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+
+#
+# Profiling support
+#
+# CONFIG_PROFILING is not set
+
+#
+# Kernel hacking
+#
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_MAGIC_SYSRQ is not set
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_DEBUG_SLAB is not set
+CONFIG_DEBUG_PREEMPT=y
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_FS is not set
+CONFIG_FRAME_POINTER=y
+CONFIG_DEBUG_USER=y
+CONFIG_DEBUG_WAITQ=y
+CONFIG_DEBUG_ERRORS=y
+CONFIG_DEBUG_LL=y
+# CONFIG_DEBUG_ICEDCC is not set
+# CONFIG_DEBUG_CLPS711X_UART2 is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_WP512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Hardware crypto devices
+#
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+CONFIG_CRC32=y
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=m
+CONFIG_ZLIB_DEFLATE=m
index 1a85cfdad5acabcce57a96b7d35aacf0a50a5093..6055e1427ba35819132623b297583cb2484215c6 100644 (file)
@@ -11,6 +11,7 @@
  */
 #include <linux/config.h>
 #include <linux/module.h>
+#include <linux/moduleloader.h>
 #include <linux/kernel.h>
 #include <linux/elf.h>
 #include <linux/vmalloc.h>
index f6de76e0a45d388e1974f2015f2e3ba7de6c6094..baa09601a64ee5c908ad356b51bc8109ff574cdc 100644 (file)
@@ -345,7 +345,9 @@ static int bad_syscall(int n, struct pt_regs *regs)
        struct thread_info *thread = current_thread_info();
        siginfo_t info;
 
-       if (current->personality != PER_LINUX && thread->exec_domain->handler) {
+       if (current->personality != PER_LINUX &&
+           current->personality != PER_LINUX_32BIT &&
+           thread->exec_domain->handler) {
                thread->exec_domain->handler(n, regs);
                return regs->ARM_r0;
        }
index 8725d63e4219801eadf0b4c7704b5384d4077af4..71e5b99e519ea07c0969ea8ae38e57e8b6f0bf9e 100644 (file)
@@ -11,7 +11,7 @@ lib-y         := backtrace.o changebit.o csumipv6.o csumpartial.o   \
                   strnlen_user.o strchr.o strrchr.o testchangebit.o  \
                   testclearbit.o testsetbit.o uaccess.o getuser.o    \
                   putuser.o ashldi3.o ashrdi3.o lshrdi3.o muldi3.o   \
-                  ucmpdi2.o lib1funcs.o div64.o                      \
+                  ucmpdi2.o lib1funcs.o div64.o sha1.o               \
                   io-readsb.o io-writesb.o io-readsl.o io-writesl.o
 
 ifeq ($(CONFIG_CPU_32v3),y)
diff --git a/arch/arm/lib/sha1.S b/arch/arm/lib/sha1.S
new file mode 100644 (file)
index 0000000..ff6ece4
--- /dev/null
@@ -0,0 +1,206 @@
+/*
+ *  linux/arch/arm/lib/sha1.S
+ *
+ *  SHA transform optimized for ARM
+ *
+ *  Copyright: (C) 2005 by Nicolas Pitre <nico@cam.org>
+ *  Created:   September 17, 2005
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ *
+ *  The reference implementation for this code is linux/lib/sha1.c
+ */
+
+#include <linux/linkage.h>
+
+       .text
+
+
+/*
+ * void sha_transform(__u32 *digest, const char *in, __u32 *W)
+ *
+ * Note: the "in" ptr may be unaligned.
+ */
+
+ENTRY(sha_transform)
+
+       stmfd   sp!, {r4 - r8, lr}
+
+       @ for (i = 0; i < 16; i++)
+       @         W[i] = be32_to_cpu(in[i]);
+
+#ifdef __ARMEB__
+       mov     r4, r0
+       mov     r0, r2
+       mov     r2, #64
+       bl      memcpy
+       mov     r2, r0
+       mov     r0, r4
+#else
+       mov     r3, r2
+       mov     lr, #16
+1:     ldrb    r4, [r1], #1
+       ldrb    r5, [r1], #1
+       ldrb    r6, [r1], #1
+       ldrb    r7, [r1], #1
+       subs    lr, lr, #1
+       orr     r5, r5, r4, lsl #8
+       orr     r6, r6, r5, lsl #8
+       orr     r7, r7, r6, lsl #8
+       str     r7, [r3], #4
+       bne     1b
+#endif
+
+       @ for (i = 0; i < 64; i++)
+       @         W[i+16] = ror(W[i+13] ^ W[i+8] ^ W[i+2] ^ W[i], 31);
+
+       sub     r3, r2, #4
+       mov     lr, #64
+2:     ldr     r4, [r3, #4]!
+       subs    lr, lr, #1
+       ldr     r5, [r3, #8]
+       ldr     r6, [r3, #32]
+       ldr     r7, [r3, #52]
+       eor     r4, r4, r5
+       eor     r4, r4, r6
+       eor     r4, r4, r7
+       mov     r4, r4, ror #31
+       str     r4, [r3, #64]
+       bne     2b
+
+       /*
+        * The SHA functions are:
+        *
+        * f1(B,C,D) = (D ^ (B & (C ^ D)))
+        * f2(B,C,D) = (B ^ C ^ D)
+        * f3(B,C,D) = ((B & C) | (D & (B | C)))
+        *
+        * Then the sub-blocks are processed as follows:
+        *
+        * A' = ror(A, 27) + f(B,C,D) + E + K + *W++
+        * B' = A
+        * C' = ror(B, 2)
+        * D' = C
+        * E' = D
+        *
+        * We therefore unroll each loop 5 times to avoid register shuffling.
+        * Also the ror for C (and also D and E which are successively derived
+        * from it) is applied in place to cut on an additional mov insn for
+        * each round.
+        */
+
+       .macro  sha_f1, A, B, C, D, E
+       ldr     r3, [r2], #4
+       eor     ip, \C, \D
+       add     \E, r1, \E, ror #2
+       and     ip, \B, ip, ror #2
+       add     \E, \E, \A, ror #27
+       eor     ip, ip, \D, ror #2
+       add     \E, \E, r3
+       add     \E, \E, ip
+       .endm
+
+       .macro  sha_f2, A, B, C, D, E
+       ldr     r3, [r2], #4
+       add     \E, r1, \E, ror #2
+       eor     ip, \B, \C, ror #2
+       add     \E, \E, \A, ror #27
+       eor     ip, ip, \D, ror #2
+       add     \E, \E, r3
+       add     \E, \E, ip
+       .endm
+
+       .macro  sha_f3, A, B, C, D, E
+       ldr     r3, [r2], #4
+       add     \E, r1, \E, ror #2
+       orr     ip, \B, \C, ror #2
+       add     \E, \E, \A, ror #27
+       and     ip, ip, \D, ror #2
+       add     \E, \E, r3
+       and     r3, \B, \C, ror #2
+       orr     ip, ip, r3
+       add     \E, \E, ip
+       .endm
+
+       ldmia   r0, {r4 - r8}
+
+       mov     lr, #4
+       ldr     r1, .L_sha_K + 0
+
+       /* adjust initial values */
+       mov     r6, r6, ror #30
+       mov     r7, r7, ror #30
+       mov     r8, r8, ror #30
+
+3:     subs    lr, lr, #1
+       sha_f1  r4, r5, r6, r7, r8
+       sha_f1  r8, r4, r5, r6, r7
+       sha_f1  r7, r8, r4, r5, r6
+       sha_f1  r6, r7, r8, r4, r5
+       sha_f1  r5, r6, r7, r8, r4
+       bne     3b
+
+       ldr     r1, .L_sha_K + 4
+       mov     lr, #4
+
+4:     subs    lr, lr, #1
+       sha_f2  r4, r5, r6, r7, r8
+       sha_f2  r8, r4, r5, r6, r7
+       sha_f2  r7, r8, r4, r5, r6
+       sha_f2  r6, r7, r8, r4, r5
+       sha_f2  r5, r6, r7, r8, r4
+       bne     4b
+
+       ldr     r1, .L_sha_K + 8
+       mov     lr, #4
+
+5:     subs    lr, lr, #1
+       sha_f3  r4, r5, r6, r7, r8
+       sha_f3  r8, r4, r5, r6, r7
+       sha_f3  r7, r8, r4, r5, r6
+       sha_f3  r6, r7, r8, r4, r5
+       sha_f3  r5, r6, r7, r8, r4
+       bne     5b
+
+       ldr     r1, .L_sha_K + 12
+       mov     lr, #4
+
+6:     subs    lr, lr, #1
+       sha_f2  r4, r5, r6, r7, r8
+       sha_f2  r8, r4, r5, r6, r7
+       sha_f2  r7, r8, r4, r5, r6
+       sha_f2  r6, r7, r8, r4, r5
+       sha_f2  r5, r6, r7, r8, r4
+       bne     6b
+
+       ldmia   r0, {r1, r2, r3, ip, lr}
+       add     r4, r1, r4
+       add     r5, r2, r5
+       add     r6, r3, r6, ror #2
+       add     r7, ip, r7, ror #2
+       add     r8, lr, r8, ror #2
+       stmia   r0, {r4 - r8}
+
+       ldmfd   sp!, {r4 - r8, pc}
+
+.L_sha_K:
+       .word   0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6
+
+
+/*
+ * void sha_init(__u32 *buf)
+ */
+
+.L_sha_initial_digest:
+       .word   0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476, 0xc3d2e1f0
+
+ENTRY(sha_init)
+
+       str     lr, [sp, #-4]!
+       adr     r1, .L_sha_initial_digest
+       ldmia   r1, {r1, r2, r3, ip, lr}
+       stmia   r0, {r1, r2, r3, ip, lr}
+       ldr     pc, [sp], #4
+
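For reference, the round recurrence that the code above unrolls five at a time looks like this in C. This is only a sketch of the standard SHA-1 round (the in-tree reference is linux/lib/sha1.c); rol32() stands in for the rotations the assembly folds into its operands, and a 32-bit unsigned int is assumed:

static inline unsigned int rol32(unsigned int x, int n)
{
	return (x << n) | (x >> (32 - n));
}

/*
 * One f1-type round: K is the round constant from .L_sha_K, w the next word
 * of the schedule.  Note that ror #27 == rol #5 and ror #2 == rol #30.
 */
#define SHA_ROUND_F1(A, B, C, D, E, K, w) do {				\
	(E) += rol32((A), 5) + ((D) ^ ((B) & ((C) ^ (D)))) + (K) + (w);	\
	(B) = rol32((B), 30);						\
} while (0)
/* the five-fold unrolling then rotates the roles: (A,B,C,D,E) <- (E,A,B,C,D) */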
index 20ec83896c375992bfcae3c00c6b3952ae975643..a8e462f58bc9c4087cd064a7560377b85d8d938d 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 # Common support (must be linked before board specific support)
-obj-y += core.o
+obj-y += core.o clock.o
 
 # Specific board support
 obj-$(CONFIG_MACH_AAED2000) += aaed2000.o
index c9d89988664857da50dc1fcb2576813faa4ebde7..f5ef697022962fe7d95117cc44a43b662d40233c 100644 (file)
 #include <asm/mach/map.h>
 #include <asm/mach/irq.h>
 
+#include <asm/arch/aaed2000.h>
+
 #include "core.h"
 
+static void aaed2000_clcd_disable(struct clcd_fb *fb)
+{
+       AAED_EXT_GPIO &= ~AAED_EGPIO_LCD_PWR_EN;
+}
+
+static void aaed2000_clcd_enable(struct clcd_fb *fb)
+{
+       AAED_EXT_GPIO |= AAED_EGPIO_LCD_PWR_EN;
+}
+
+struct aaec2000_clcd_info clcd_info = {
+       .enable = aaed2000_clcd_enable,
+       .disable = aaed2000_clcd_disable,
+       .panel = {
+               .mode   = {
+                       .name           = "Sharp",
+                       .refresh        = 60,
+                       .xres           = 640,
+                       .yres           = 480,
+                       .pixclock       = 39721,
+                       .left_margin    = 20,
+                       .right_margin   = 44,
+                       .upper_margin   = 21,
+                       .lower_margin   = 34,
+                       .hsync_len      = 96,
+                       .vsync_len      = 2,
+                       .sync           = 0,
+                       .vmode  = FB_VMODE_NONINTERLACED,
+               },
+               .width  = -1,
+               .height = -1,
+               .tim2   = TIM2_IVS | TIM2_IHS,
+               .cntl   = CNTL_LCDTFT,
+               .bpp    = 16,
+       },
+};
+
 static void __init aaed2000_init_irq(void)
 {
        aaec2000_init_irq();
 }
 
+static void __init aaed2000_init(void)
+{
+       aaec2000_set_clcd_plat_data(&clcd_info);
+}
+
+static struct map_desc aaed2000_io_desc[] __initdata = {
+  { EXT_GPIO_VBASE, EXT_GPIO_PBASE, EXT_GPIO_LENGTH, MT_DEVICE }, /* Ext GPIO */
+};
+
 static void __init aaed2000_map_io(void)
 {
        aaec2000_map_io();
+       iotable_init(aaed2000_io_desc, ARRAY_SIZE(aaed2000_io_desc));
 }
 
 MACHINE_START(AAED2000, "Agilent AAED-2000 Development Platform")
@@ -47,4 +96,5 @@ MACHINE_START(AAED2000, "Agilent AAED-2000 Development Platform")
        .map_io         = aaed2000_map_io,
        .init_irq       = aaed2000_init_irq,
        .timer          = &aaec2000_timer,
+       .init_machine   = aaed2000_init,
 MACHINE_END
diff --git a/arch/arm/mach-aaec2000/clock.c b/arch/arm/mach-aaec2000/clock.c
new file mode 100644 (file)
index 0000000..99e0191
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+ *  linux/arch/arm/mach-aaec2000/clock.c
+ *
+ *  Copyright (C) 2005 Nicolas Bellido Y Ortega
+ *
+ *  Based on linux/arch/arm/mach-integrator/clock.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+
+#include <asm/semaphore.h>
+#include <asm/hardware/clock.h>
+
+#include "clock.h"
+
+static LIST_HEAD(clocks);
+static DECLARE_MUTEX(clocks_sem);
+
+struct clk *clk_get(struct device *dev, const char *id)
+{
+       struct clk *p, *clk = ERR_PTR(-ENOENT);
+
+       down(&clocks_sem);
+       list_for_each_entry(p, &clocks, node) {
+               if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
+                       clk = p;
+                       break;
+               }
+       }
+       up(&clocks_sem);
+
+       return clk;
+}
+EXPORT_SYMBOL(clk_get);
+
+void clk_put(struct clk *clk)
+{
+       module_put(clk->owner);
+}
+EXPORT_SYMBOL(clk_put);
+
+int clk_enable(struct clk *clk)
+{
+       return 0;
+}
+EXPORT_SYMBOL(clk_enable);
+
+void clk_disable(struct clk *clk)
+{
+}
+EXPORT_SYMBOL(clk_disable);
+
+int clk_use(struct clk *clk)
+{
+       return 0;
+}
+EXPORT_SYMBOL(clk_use);
+
+void clk_unuse(struct clk *clk)
+{
+}
+EXPORT_SYMBOL(clk_unuse);
+
+unsigned long clk_get_rate(struct clk *clk)
+{
+       return clk->rate;
+}
+EXPORT_SYMBOL(clk_get_rate);
+
+long clk_round_rate(struct clk *clk, unsigned long rate)
+{
+       return rate;
+}
+EXPORT_SYMBOL(clk_round_rate);
+
+int clk_set_rate(struct clk *clk, unsigned long rate)
+{
+       return 0;
+}
+EXPORT_SYMBOL(clk_set_rate);
+
+int clk_register(struct clk *clk)
+{
+       down(&clocks_sem);
+       list_add(&clk->node, &clocks);
+       up(&clocks_sem);
+       return 0;
+}
+EXPORT_SYMBOL(clk_register);
+
+void clk_unregister(struct clk *clk)
+{
+       down(&clocks_sem);
+       list_del(&clk->node);
+       up(&clocks_sem);
+}
+EXPORT_SYMBOL(clk_unregister);
+
+static int __init clk_init(void)
+{
+       return 0;
+}
+arch_initcall(clk_init);
diff --git a/arch/arm/mach-aaec2000/clock.h b/arch/arm/mach-aaec2000/clock.h
new file mode 100644 (file)
index 0000000..d4bb74f
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ *  linux/arch/arm/mach-aaec2000/clock.h
+ *
+ *  Copyright (C) 2005 Nicolas Bellido Y Ortega
+ *
+ *  Based on linux/arch/arm/mach-integrator/clock.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+struct module;
+
+struct clk {
+       struct list_head        node;
+       unsigned long           rate;
+       struct module           *owner;
+       const char              *name;
+       void                    *data;
+};
+
+int clk_register(struct clk *clk);
+void clk_unregister(struct clk *clk);
index aece0cd4f0a3f324cb203476db3d0bc9921794a1..0c53dab8090593028381a6c23cdf265779745eb6 100644 (file)
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/device.h>
 #include <linux/list.h>
 #include <linux/errno.h>
+#include <linux/dma-mapping.h>
 #include <linux/interrupt.h>
 #include <linux/timex.h>
 #include <linux/signal.h>
 
 #include <asm/hardware.h>
 #include <asm/irq.h>
+#include <asm/sizes.h>
+#include <asm/hardware/amba.h>
 
+#include <asm/mach/flash.h>
 #include <asm/mach/irq.h>
 #include <asm/mach/time.h>
 #include <asm/mach/map.h>
 
+#include "core.h"
+#include "clock.h"
+
 /*
  * Common I/O mapping:
  *
  * default mapping provided here.
  */
 static struct map_desc standard_io_desc[] __initdata = {
- /* virtual         physical       length           type */
-  { VIO_APB_BASE,   PIO_APB_BASE,  IO_APB_LENGTH,   MT_DEVICE },
-  { VIO_AHB_BASE,   PIO_AHB_BASE,  IO_AHB_LENGTH,   MT_DEVICE }
+       {
+               .virtual        = VIO_APB_BASE,
+               .pfn            = __phys_to_pfn(PIO_APB_BASE),
+               .length         = IO_APB_LENGTH,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = VIO_AHB_BASE,
+               .pfn            = __phys_to_pfn(PIO_AHB_BASE),
+               .length         = IO_AHB_LENGTH,
+               .type           = MT_DEVICE
+       }
 };
 
 void __init aaec2000_map_io(void)
@@ -155,3 +171,116 @@ struct sys_timer aaec2000_timer = {
        .offset         = aaec2000_gettimeoffset,
 };
 
+static struct clcd_panel mach_clcd_panel;
+
+static int aaec2000_clcd_setup(struct clcd_fb *fb)
+{
+       dma_addr_t dma;
+
+       fb->panel = &mach_clcd_panel;
+
+       fb->fb.screen_base = dma_alloc_writecombine(&fb->dev->dev, SZ_1M,
+                       &dma, GFP_KERNEL);
+
+       if (!fb->fb.screen_base) {
+               printk(KERN_ERR "CLCD: unable to map framebuffer\n");
+               return -ENOMEM;
+       }
+
+       fb->fb.fix.smem_start = dma;
+       fb->fb.fix.smem_len = SZ_1M;
+
+       return 0;
+}
+
+static int aaec2000_clcd_mmap(struct clcd_fb *fb, struct vm_area_struct *vma)
+{
+       return dma_mmap_writecombine(&fb->dev->dev, vma,
+                       fb->fb.screen_base,
+                       fb->fb.fix.smem_start,
+                       fb->fb.fix.smem_len);
+}
+
+static void aaec2000_clcd_remove(struct clcd_fb *fb)
+{
+       dma_free_writecombine(&fb->dev->dev, fb->fb.fix.smem_len,
+                       fb->fb.screen_base, fb->fb.fix.smem_start);
+}
+
+static struct clcd_board clcd_plat_data = {
+       .name   = "AAEC-2000",
+       .check  = clcdfb_check,
+       .decode = clcdfb_decode,
+       .setup  = aaec2000_clcd_setup,
+       .mmap   = aaec2000_clcd_mmap,
+       .remove = aaec2000_clcd_remove,
+};
+
+static struct amba_device clcd_device = {
+       .dev            = {
+               .bus_id                 = "mb:16",
+               .coherent_dma_mask      = ~0,
+               .platform_data          = &clcd_plat_data,
+       },
+       .res            = {
+               .start                  = AAEC_CLCD_PHYS,
+               .end                    = AAEC_CLCD_PHYS + SZ_4K - 1,
+               .flags                  = IORESOURCE_MEM,
+       },
+       .irq            = { INT_LCD, NO_IRQ },
+       .periphid       = 0x41110,
+};
+
+static struct amba_device *amba_devs[] __initdata = {
+       &clcd_device,
+};
+
+static struct clk aaec2000_clcd_clk = {
+       .name = "CLCDCLK",
+};
+
+void __init aaec2000_set_clcd_plat_data(struct aaec2000_clcd_info *clcd)
+{
+       clcd_plat_data.enable = clcd->enable;
+       clcd_plat_data.disable = clcd->disable;
+       memcpy(&mach_clcd_panel, &clcd->panel, sizeof(struct clcd_panel));
+}
+
+static struct flash_platform_data aaec2000_flash_data = {
+       .map_name       = "cfi_probe",
+       .width          = 4,
+};
+
+static struct resource aaec2000_flash_resource = {
+       .start          = AAEC_FLASH_BASE,
+       .end            = AAEC_FLASH_BASE + AAEC_FLASH_SIZE - 1,
+       .flags          = IORESOURCE_MEM,
+};
+
+static struct platform_device aaec2000_flash_device = {
+       .name           = "armflash",
+       .id             = 0,
+       .dev            = {
+               .platform_data  = &aaec2000_flash_data,
+       },
+       .num_resources  = 1,
+       .resource       = &aaec2000_flash_resource,
+};
+
+static int __init aaec2000_init(void)
+{
+       int i;
+
+       clk_register(&aaec2000_clcd_clk);
+
+       for (i = 0; i < ARRAY_SIZE(amba_devs); i++) {
+               struct amba_device *d = amba_devs[i];
+               amba_device_register(d, &iomem_resource);
+       }
+
+       platform_device_register(&aaec2000_flash_device);
+
+       return 0;
+};
+arch_initcall(aaec2000_init);
+
index 91893d848c165e67ff16079e9dbac8d4c626f4bf..daefc0ea14a10236c8aca9e068dda22f6cd24540 100644 (file)
@@ -9,8 +9,19 @@
  *
  */
 
+#include <asm/hardware/amba_clcd.h>
+
 struct sys_timer;
 
 extern struct sys_timer aaec2000_timer;
 extern void __init aaec2000_map_io(void);
 extern void __init aaec2000_init_irq(void);
+
+struct aaec2000_clcd_info {
+       struct clcd_panel panel;
+       void (*disable)(struct clcd_fb *);
+       void (*enable)(struct clcd_fb *);
+};
+
+extern void __init aaec2000_set_clcd_plat_data(struct aaec2000_clcd_info *);
+
index 0793dcf54f2e51dd30be757962db1483824a1d9d..d5c155045762cfffc2d685601bc01870aadf64d6 100644 (file)
@@ -69,6 +69,17 @@ config EP72XX_ROM_BOOT
 
          You almost surely want to say N here.
 
+config MACH_MP1000
+       bool "MACH_MP1000"
+       help
+         Say Y if you intend to run the kernel on the Comdial MP1000 platform.
+
+config MP1000_90MHZ
+       bool "MP1000_90MHZ"
+       depends on MACH_MP1000
+       help
+         Say Y if you have the MP1000 configured to run at 90MHz rather than 74MHz.
+
 endmenu
 
 endif
index 4a197315f0cf0c6c03c3d5b4f92acf14408039f2..8a6dc1ccf8feef8c59f7f4def9a4583c9c11b59b 100644 (file)
@@ -15,6 +15,7 @@ obj-$(CONFIG_ARCH_CDB89712) += cdb89712.o
 obj-$(CONFIG_ARCH_CLEP7312) += clep7312.o
 obj-$(CONFIG_ARCH_EDB7211)  += edb7211-arch.o edb7211-mm.o
 obj-$(CONFIG_ARCH_FORTUNET) += fortunet.o
+obj-$(CONFIG_MACH_MP1000)   += mp1000-mach.o mp1000-mm.o mp1000-seprom.o
 obj-$(CONFIG_ARCH_P720T)    += p720t.o
 leds-$(CONFIG_ARCH_P720T)   += p720t-leds.o
 obj-$(CONFIG_LEDS)          += $(leds-y)
index dc73feb1ffb0d9ed58b90a64a1518525cb63b17d..43b9423d1440147e0d70d661142ad0c3e63da2c2 100644 (file)
 */
 
 static struct map_desc autcpu12_io_desc[] __initdata = {
- /* virtual, physical, length, type */
- /* memory-mapped extra io and CS8900A Ethernet chip */
- /* ethernet chip */
-       { AUTCPU12_VIRT_CS8900A, AUTCPU12_PHYS_CS8900A, SZ_1M, MT_DEVICE }
+       /* memory-mapped extra io and CS8900A Ethernet chip */
+       /* ethernet chip */
+       {
+               .virtual        = AUTCPU12_VIRT_CS8900A,
+               .pfn            = __phys_to_pfn(AUTCPU12_PHYS_CS8900A),
+               .length         = SZ_1M,
+               .type           = MT_DEVICE
+       }
 };
 
 void __init autcpu12_map_io(void)
index a46c82cd2711d5a6c150a58a628acd499416d5c9..cba7be5a06c34423fa311ceef465267ddc7aa1d7 100644 (file)
  * ethernet driver, perhaps.
  */
 static struct map_desc cdb89712_io_desc[] __initdata = {
-       { ETHER_BASE, ETHER_START, ETHER_SIZE, MT_DEVICE }
+       {
+               .virtual        = ETHER_BASE,
+               .pfn            = __phys_to_pfn(ETHER_START),
+               .length         = ETHER_SIZE,
+               .type           = MT_DEVICE
+       }
 };
 
 static void __init cdb89712_map_io(void)
index 780d918059848c1d6f9d6caff8f27026d10df11f..35d51a759b592c359e86aa7277a3444bce512279 100644 (file)
 #include "common.h"
 
 static struct map_desc ceiva_io_desc[] __initdata = {
- /* virtual, physical, length, type */
-
- /* SED1355 controlled video RAM & registers */
- { CEIVA_VIRT_SED1355, CEIVA_PHYS_SED1355, SZ_2M, MT_DEVICE }
-
+       /* SED1355 controlled video RAM & registers */
+       {
+               .virtual        = CEIVA_VIRT_SED1355,
+               .pfn            = __phys_to_pfn(CEIVA_PHYS_SED1355),
+               .length         = SZ_2M,
+               .type           = MT_DEVICE
+       }
 };
 
 
index 7fd7b01822d0806d910948dd532ce002db2f719d..72f8bb05d55e896c9ccbacc7cec82c3a2d2142a8 100644 (file)
@@ -51,15 +51,27 @@ extern void clps711x_map_io(void);
  *     happens).
  */
 static struct map_desc edb7211_io_desc[] __initdata = {
- /* virtual, physical, length, type */
-
- /* memory-mapped extra keyboard row and CS8900A Ethernet chip */
- { EP7211_VIRT_EXTKBD,  EP7211_PHYS_EXTKBD,  SZ_1M, MT_DEVICE }, 
- { EP7211_VIRT_CS8900A, EP7211_PHYS_CS8900A, SZ_1M, MT_DEVICE },
-
- /* flash banks */
- { EP7211_VIRT_FLASH1,  EP7211_PHYS_FLASH1,  SZ_8M, MT_DEVICE },
- { EP7211_VIRT_FLASH2,  EP7211_PHYS_FLASH2,  SZ_8M, MT_DEVICE }
+       {       /* memory-mapped extra keyboard row */
+               .virtual        = EP7211_VIRT_EXTKBD,
+               .pfn            = __phys_to_pfn(EP7211_PHYS_EXTKBD),
+               .length         = SZ_1M,
+               .type           = MT_DEVICE
+       }, {    /* and CS8900A Ethernet chip */
+               .virtual        = EP7211_VIRT_CS8900A,
+               .pfn            = __phys_to_pfn(EP7211_PHYS_CS8900A),
+               .length         = SZ_1M,
+               .type           = MT_DEVICE
+       }, {    /* flash banks */
+               .virtual        = EP7211_VIRT_FLASH1,
+               .pfn            = __phys_to_pfn(EP7211_PHYS_FLASH1),
+               .length         = SZ_8M,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = EP7211_VIRT_FLASH2,
+               .pfn            = __phys_to_pfn(EP7211_PHYS_FLASH2),
+               .length         = SZ_8M,
+               .type           = MT_DEVICE
+       }
 };
 
 void __init edb7211_map_io(void)
index 120b7cac84b5bf8975a0b985a9ec7879066ba363..a00f77ef8df83cdd3a6d530b7e1414eb0dfe0b75 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/init.h>
 #include <linux/bootmem.h>
 
+#include <asm/sizes.h>
 #include <asm/hardware.h>
 #include <asm/pgtable.h>
 #include <asm/page.h>
  * This maps the generic CLPS711x registers
  */
 static struct map_desc clps711x_io_desc[] __initdata = {
- { CLPS7111_VIRT_BASE, CLPS7111_PHYS_BASE,     1048576, MT_DEVICE }
+       {
+               .virtual        = CLPS7111_VIRT_BASE,
+               .pfn            = __phys_to_pfn(CLPS7111_PHYS_BASE),
+               .length         = SZ_1M,
+               .type           = MT_DEVICE
+       }
 };
 
 void __init clps711x_map_io(void)
diff --git a/arch/arm/mach-clps711x/mp1000-mach.c b/arch/arm/mach-clps711x/mp1000-mach.c
new file mode 100644 (file)
index 0000000..c2816bc
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+ *  linux/arch/arm/mach-mp1000/mp1000.c
+ *
+ *  Copyright (C) 2005 Comdial Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/string.h>
+
+#include <asm/setup.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/arch/mp1000-seprom.h>
+
+#include "common.h"
+
+extern void mp1000_map_io(void);
+
+static void __init mp1000_init(void)
+{
+    seprom_init();
+}
+
+MACHINE_START(MP1000, "Comdial MP1000")
+       /* Maintainer: Jon Ringle */
+       .phys_ram       = 0xc0000000,
+       .phys_io        = 0x80000000,
+       .io_pg_offst    = ((0xff000000) >> 18) & 0xfffc,
+       .boot_params    = 0xc0015100,
+       .map_io         = mp1000_map_io,
+       .init_irq       = clps711x_init_irq,
+       .init_machine   = mp1000_init,
+       .timer          = &clps711x_timer,
+MACHINE_END
+
diff --git a/arch/arm/mach-clps711x/mp1000-mm.c b/arch/arm/mach-clps711x/mp1000-mm.c
new file mode 100644 (file)
index 0000000..20e810b
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ *  linux/arch/arm/mach-mp1000/mm.c
+ *
+ *  Extra MM routines for the MP1000
+ *
+ *  Copyright (C) 2005 Comdial Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <asm/hardware.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/sizes.h>
+
+#include <asm/mach/map.h>
+
+extern void clps711x_map_io(void);
+
+static struct map_desc mp1000_io_desc[] __initdata = {
+    { MP1000_EIO_BASE, MP1000_EIO_START,       MP1000_EIO_SIZE, MT_DEVICE },
+    { MP1000_FIO_BASE, MP1000_FIO_START,       MP1000_FIO_SIZE, MT_DEVICE },
+    { MP1000_LIO_BASE, MP1000_LIO_START,       MP1000_LIO_SIZE, MT_DEVICE },
+    { MP1000_NIO_BASE, MP1000_NIO_START,       MP1000_NIO_SIZE, MT_DEVICE },
+    { MP1000_IDE_BASE, MP1000_IDE_START,       MP1000_IDE_SIZE, MT_DEVICE },
+    { MP1000_DSP_BASE, MP1000_DSP_START,       MP1000_DSP_SIZE, MT_DEVICE }
+};
+
+void __init mp1000_map_io(void)
+{
+       clps711x_map_io();
+       iotable_init(mp1000_io_desc, ARRAY_SIZE(mp1000_io_desc));
+}
diff --git a/arch/arm/mach-clps711x/mp1000-seprom.c b/arch/arm/mach-clps711x/mp1000-seprom.c
new file mode 100644 (file)
index 0000000..b22d0be
--- /dev/null
@@ -0,0 +1,195 @@
+/*
+ * mp1000-seprom.c
+ *
+ *  This file contains the Serial EEPROM code for the MP1000 board
+ *
+ *  Copyright (C) 2005 Comdial Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/hardware.h>
+#include <asm/hardware/clps7111.h>
+#include <asm/arch/mp1000-seprom.h>
+
+/* If SepromInit() can initialize and checksum the seprom successfully, */
+/* then it will point seprom_data_ptr at the shadow copy.  */
+
+static eeprom_struct seprom_data;                      /* shadow copy of seprom content */
+
+eeprom_struct *seprom_data_ptr = 0;            /* 0 => not initialized */
+
+/*
+ * Port D Bit 5 is Chip Select for EEPROM
+ * Port E Bit 0 is Input, Data out from EEPROM
+ * Port E Bit 1 is Output, Data in to EEPROM
+ * Port E Bit 2 is Output, CLK to EEPROM
+ */
+
+static char *port_d_ptr = (char *)(CLPS7111_VIRT_BASE + PDDR);
+static char *port_e_ptr = (char *)(CLPS7111_VIRT_BASE + PEDR);
+
+#define NO_OF_SHORTS   64      // Device is 64 x 16 bits
+#define ENABLE_RW      0
+#define DISABLE_RW     1
+
+static inline void toggle_seprom_clock(void)
+{
+       *port_e_ptr |= HwPortESepromCLK;
+       *port_e_ptr &= ~(HwPortESepromCLK);
+}
+
+static inline void select_eeprom(void)
+{
+       *port_d_ptr |= HwPortDEECS;
+       *port_e_ptr &= ~(HwPortESepromCLK);
+}
+
+static inline void deselect_eeprom(void)
+{
+       *port_d_ptr &= ~(HwPortDEECS);
+       *port_e_ptr &= ~(HwPortESepromDIn);
+}
+
+/*
+ * get_seprom_ptr - returns a pointer to the shadow (RAM) copy of the
+ *                  seprom, or 0 if the seprom is not initialized or
+ *                  has a checksum error.
+ */
+
+eeprom_struct* get_seprom_ptr(void)
+{
+       return seprom_data_ptr;
+}
+
+unsigned char* get_eeprom_mac_address(void)
+{
+       return seprom_data_ptr->variant.eprom_struct.mac_Address;
+}
+
+/*
+ * read_sprom - physically reads data from the Serial PROM
+ */
+static void read_sprom(short address, int length, eeprom_struct *buffer)
+{
+       short data = COMMAND_READ | (address & 0x3F);
+       short bit;
+       int i;
+
+       select_eeprom();
+
+       // Clock in 9 bits of the command
+       for (i = 0, bit = 0x100; i < 9; i++, bit >>= 1) {
+               if (data & bit)
+                       *port_e_ptr |= HwPortESepromDIn;
+               else
+                       *port_e_ptr &= ~(HwPortESepromDIn);
+
+               toggle_seprom_clock();
+       }
+
+       //
+       // Now read one or more shorts of data from the Seprom
+       //
+       while (length-- > 0) {
+               data = 0;
+
+               // Read 16 bits at a time
+               for (i = 0; i < 16; i++) {
+                       data <<= 1;
+                       toggle_seprom_clock();
+                       data |= *port_e_ptr & HwPortESepromDOut;
+
+               }
+
+               buffer->variant.eprom_short_data[address++] = data;
+       }
+
+       deselect_eeprom();
+
+       return;
+}
+
+
+
+/*
+ * read_serial_prom
+ *
+ * Input: pointer to an array of 64 x 16 bits
+ *
+ * Output: on a successful read the array is filled with the seprom contents
+ */
+static void read_serial_prom(eeprom_struct *data)
+{
+       read_sprom(0, 64, data);
+}
+
+
+//
+// Compute Serial EEPROM checksum
+//
+// Input: Pointer to struct with Eprom data
+//
+// Output: The computed Eprom checksum
+//
+static short compute_seprom_checksum(eeprom_struct *data)
+{
+       short checksum = 0;
+       int i;
+
+       for (i = 0; i < 126; i++) {
+               checksum += (short)data->variant.eprom_byte_data[i];
+       }
+
+       return((short)(0x5555 - (checksum & 0xFFFF)));
+}
+
+//
+// Make sure the data port bits for the SEPROM are correctly initialised
+//
+
+void __init seprom_init(void)
+{
+       short checksum;
+
+       // Init Port D
+       *(char *)(CLPS7111_VIRT_BASE + PDDDR) = 0x0;
+       *(char *)(CLPS7111_VIRT_BASE + PDDR) = 0x15;
+
+       // Init Port E
+       *(int *)(CLPS7111_VIRT_BASE + PEDDR) = 0x06;
+       *(int *)(CLPS7111_VIRT_BASE + PEDR) = 0x04;
+
+       //
+       // Make sure that EEPROM struct size never exceeds 128 bytes
+       //
+       if (sizeof(eeprom_struct) > 128) {
+               panic("Serial PROM struct size > 128, aborting read\n");
+       }
+
+       read_serial_prom(&seprom_data);
+
+       checksum = compute_seprom_checksum(&seprom_data);
+
+       if (checksum != seprom_data.variant.eprom_short_data[63]) {
+               panic("Serial EEPROM checksum failed\n");
+       }
+
+       seprom_data_ptr = &seprom_data;
+}
+
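To show how the interface added above is meant to be consumed once mp1000_init() has run seprom_init(): the sketch below is hypothetical (the helper name and the 6-byte copy are assumptions); only seprom_init(), get_seprom_ptr() and get_eeprom_mac_address() come from the patch itself.

/* Hypothetical consumer of the seprom API; assumes <asm/arch/mp1000-seprom.h>
 * declares the helpers used here. */
#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/arch/mp1000-seprom.h>

static void example_load_mac(unsigned char *dev_addr)
{
	if (get_seprom_ptr() == NULL) {
		printk(KERN_WARNING "seprom not initialised, MAC unavailable\n");
		return;
	}
	/* Copy the factory-programmed MAC out of the shadow copy. */
	memcpy(dev_addr, get_eeprom_mac_address(), 6);
}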
index 5bdb90edf9922956fe8bc194215559ed2dc8322d..a1acb945fb5199305acc14951349038ef5ce1968 100644 (file)
@@ -29,6 +29,7 @@
 #include <asm/pgtable.h>
 #include <asm/page.h>
 #include <asm/setup.h>
+#include <asm/sizes.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
  * We map both here.
  */
 static struct map_desc p720t_io_desc[] __initdata = {
-       { SYSPLD_VIRT_BASE,     SYSPLD_PHYS_BASE, 1048576, MT_DEVICE },
-       { 0xfe400000,           0x10400000,       1048576, MT_DEVICE }
+       {
+               .virtual        = SYSPLD_VIRT_BASE,
+               .pfn            = __phys_to_pfn(SYSPLD_PHYS_BASE),
+               .length         = SZ_1M,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = 0xfe400000,
+               .pfn            = __phys_to_pfn(0x10400000),
+               .length         = SZ_1M,
+               .type           = MT_DEVICE
+       }
 };
 
 static void __init
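The remaining hunks in this merge repeat the same mechanical conversion shown here for p720t.c: positional map_desc initializers become designated ones, and the physical address is supplied as a page frame number. As a reference for reading them, the conversion helpers the new entries rely on amount to the sketch below (their real definitions live in the ARM asm/memory.h):

/* Sketch of the pfn conversion helpers: a pfn is the physical address with
 * the page-offset bits shifted away (PAGE_SHIFT is 12 for 4 KB pages). */
#define __phys_to_pfn(paddr)	((paddr) >> PAGE_SHIFT)
#define __pfn_to_phys(pfn)	((pfn) << PAGE_SHIFT)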
index e216ab8b9e8f7c048a83d057c7af36d7575b6826..0364ba4b539e075e5ecf893a6a141948fd4fa998 100644 (file)
@@ -259,10 +259,27 @@ static void __init clps7500_init_irq(void)
 }
 
 static struct map_desc cl7500_io_desc[] __initdata = {
-       { IO_BASE,      IO_START,       IO_SIZE,    MT_DEVICE },        /* IO space     */
-       { ISA_BASE,     ISA_START,      ISA_SIZE,   MT_DEVICE },        /* ISA space    */
-       { FLASH_BASE,   FLASH_START,    FLASH_SIZE, MT_DEVICE },        /* Flash        */
-       { LED_BASE,     LED_START,      LED_SIZE,   MT_DEVICE }         /* LED          */
+       {       /* IO space     */
+               .virtual        = IO_BASE,
+               .pfn            = __phys_to_pfn(IO_START),
+               .length         = IO_SIZE,
+               .type           = MT_DEVICE
+       }, {    /* ISA space    */
+               .virtual        = ISA_BASE,
+               .pfn            = __phys_to_pfn(ISA_START),
+               .length         = ISA_SIZE,
+               .type           = MT_DEVICE
+       }, {    /* Flash        */
+               .virtual        = FLASH_BASE,
+               .pfn            = __phys_to_pfn(FLASH_START),
+               .length         = FLASH_SIZE,
+               .type           = MT_DEVICE
+       }, {    /* LED          */
+               .virtual        = LED_BASE,
+               .pfn            = __phys_to_pfn(LED_START),
+               .length         = LED_SIZE,
+               .type           = MT_DEVICE
+       }
 };
 
 static void __init clps7500_map_io(void)
index 5aeadfd721431466ba66307c3437a716b7b32b25..15261646dcdd58bd509b86ac926e6796b6ad171b 100644 (file)
@@ -76,16 +76,42 @@ static struct map_desc ebsa110_io_desc[] __initdata = {
        /*
         * sparse external-decode ISAIO space
         */
-       { IRQ_STAT,    TRICK4_PHYS, PGDIR_SIZE,  MT_DEVICE }, /* IRQ_STAT/IRQ_MCLR */
-       { IRQ_MASK,    TRICK3_PHYS, PGDIR_SIZE,  MT_DEVICE }, /* IRQ_MASK/IRQ_MSET */
-       { SOFT_BASE,   TRICK1_PHYS, PGDIR_SIZE,  MT_DEVICE }, /* SOFT_BASE */
-       { PIT_BASE,    TRICK0_PHYS, PGDIR_SIZE,  MT_DEVICE }, /* PIT_BASE */
+       {       /* IRQ_STAT/IRQ_MCLR */
+               .virtual        = IRQ_STAT,
+               .pfn            = __phys_to_pfn(TRICK4_PHYS),
+               .length         = PGDIR_SIZE,
+               .type           = MT_DEVICE
+       }, {    /* IRQ_MASK/IRQ_MSET */
+               .virtual        = IRQ_MASK,
+               .pfn            = __phys_to_pfn(TRICK3_PHYS),
+               .length         = PGDIR_SIZE,
+               .type           = MT_DEVICE
+       }, {    /* SOFT_BASE */
+               .virtual        = SOFT_BASE,
+               .pfn            = __phys_to_pfn(TRICK1_PHYS),
+               .length         = PGDIR_SIZE,
+               .type           = MT_DEVICE
+       }, {    /* PIT_BASE */
+               .virtual        = PIT_BASE,
+               .pfn            = __phys_to_pfn(TRICK0_PHYS),
+               .length         = PGDIR_SIZE,
+               .type           = MT_DEVICE
+       },
 
        /*
         * self-decode ISAIO space
         */
-       { ISAIO_BASE,  ISAIO_PHYS,  ISAIO_SIZE,  MT_DEVICE },
-       { ISAMEM_BASE, ISAMEM_PHYS, ISAMEM_SIZE, MT_DEVICE }
+       {
+               .virtual        = ISAIO_BASE,
+               .pfn            = __phys_to_pfn(ISAIO_PHYS),
+               .length         = ISAIO_SIZE,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = ISAMEM_BASE,
+               .pfn            = __phys_to_pfn(ISAMEM_PHYS),
+               .length         = ISAMEM_SIZE,
+               .type           = MT_DEVICE
+       }
 };
 
 static void __init ebsa110_map_io(void)
index ef7eb5dc91bd9560a80251fa33ddc338b46e8710..c648bfb676a100fbcf67d4791884305af3f6d716 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/kernel.h>
 #include <linux/types.h>
 
+#include <asm/hardware.h>
 #include <asm/io.h>
 #include <asm/page.h>
 
index 2aa57fa46da30520d4d7cbf3a180a75a0f9a1a57..e8832d0910ee52117673bb66b3a51aefbd0515fa 100644 (file)
 /* Page table mapping for I/O region */
  
 static struct map_desc epxa10db_io_desc[] __initdata = {
- { IO_ADDRESS(EXC_REGISTERS_BASE),   EXC_REGISTERS_BASE,    SZ_16K, MT_DEVICE }, 
- { IO_ADDRESS(EXC_PLD_BLOCK0_BASE),  EXC_PLD_BLOCK0_BASE,   SZ_16K, MT_DEVICE }, 
- { IO_ADDRESS(EXC_PLD_BLOCK1_BASE),  EXC_PLD_BLOCK1_BASE,   SZ_16K, MT_DEVICE }, 
- { IO_ADDRESS(EXC_PLD_BLOCK2_BASE),  EXC_PLD_BLOCK2_BASE,   SZ_16K, MT_DEVICE }, 
- { IO_ADDRESS(EXC_PLD_BLOCK3_BASE),  EXC_PLD_BLOCK3_BASE,   SZ_16K, MT_DEVICE }, 
- { FLASH_VADDR(EXC_EBI_BLOCK0_BASE), EXC_EBI_BLOCK0_BASE,   SZ_16M, MT_DEVICE }
+       {
+               .virtual        = IO_ADDRESS(EXC_REGISTERS_BASE),
+               .pfn            = __phys_to_pfn(EXC_REGISTERS_BASE),
+               .length         = SZ_16K,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = IO_ADDRESS(EXC_PLD_BLOCK0_BASE),
+               .pfn            = __phys_to_pfn(EXC_PLD_BLOCK0_BASE),
+               .length         = SZ_16K,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = IO_ADDRESS(EXC_PLD_BLOCK1_BASE),
+               .pfn            = __phys_to_pfn(EXC_PLD_BLOCK1_BASE),
+               .length         = SZ_16K,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = IO_ADDRESS(EXC_PLD_BLOCK2_BASE),
+               .pfn            = __phys_to_pfn(EXC_PLD_BLOCK2_BASE),
+               .length         = SZ_16K,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = IO_ADDRESS(EXC_PLD_BLOCK3_BASE),
+               .pfn            = __phys_to_pfn(EXC_PLD_BLOCK3_BASE),
+               .length         = SZ_16K,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = FLASH_VADDR(EXC_EBI_BLOCK0_BASE),
+               .pfn            = __phys_to_pfn(EXC_EBI_BLOCK0_BASE),
+               .length         = SZ_16M,
+               .type           = MT_DEVICE
+       }
 };
 
 void __init epxa10db_map_io(void)
index eb8238c1ef0684334e457cc1bd432e6846b285ef..dc09fd200c164543f87f26bb665aa492f3650e65 100644 (file)
@@ -130,8 +130,17 @@ void __init footbridge_init_irq(void)
  * it means that we have extra bullet protection on our feet.
  */
 static struct map_desc fb_common_io_desc[] __initdata = {
- { ARMCSR_BASE,         DC21285_ARMCSR_BASE,       ARMCSR_SIZE,  MT_DEVICE },
- { XBUS_BASE,    0x40000000,               XBUS_SIZE,    MT_DEVICE }
+       {
+               .virtual        = ARMCSR_BASE,
+               .pfn            = __phys_to_pfn(DC21285_ARMCSR_BASE),
+               .length         = ARMCSR_SIZE,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = XBUS_BASE,
+               .pfn            = __phys_to_pfn(0x40000000),
+               .length         = XBUS_SIZE,
+               .type           = MT_DEVICE
+       }
 };
 
 /*
@@ -140,11 +149,32 @@ static struct map_desc fb_common_io_desc[] __initdata = {
  */
 static struct map_desc ebsa285_host_io_desc[] __initdata = {
 #if defined(CONFIG_ARCH_FOOTBRIDGE) && defined(CONFIG_FOOTBRIDGE_HOST)
- { PCIMEM_BASE,  DC21285_PCI_MEM,          PCIMEM_SIZE,  MT_DEVICE },
- { PCICFG0_BASE, DC21285_PCI_TYPE_0_CONFIG, PCICFG0_SIZE, MT_DEVICE },
- { PCICFG1_BASE, DC21285_PCI_TYPE_1_CONFIG, PCICFG1_SIZE, MT_DEVICE },
- { PCIIACK_BASE, DC21285_PCI_IACK,         PCIIACK_SIZE, MT_DEVICE },
- { PCIO_BASE,    DC21285_PCI_IO,           PCIO_SIZE,    MT_DEVICE }
+       {
+               .virtual        = PCIMEM_BASE,
+               .pfn            = __phys_to_pfn(DC21285_PCI_MEM),
+               .length         = PCIMEM_SIZE,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = PCICFG0_BASE,
+               .pfn            = __phys_to_pfn(DC21285_PCI_TYPE_0_CONFIG),
+               .length         = PCICFG0_SIZE,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = PCICFG1_BASE,
+               .pfn            = __phys_to_pfn(DC21285_PCI_TYPE_1_CONFIG),
+               .length         = PCICFG1_SIZE,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = PCIIACK_BASE,
+               .pfn            = __phys_to_pfn(DC21285_PCI_IACK),
+               .length         = PCIIACK_SIZE,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = PCIO_BASE,
+               .pfn            = __phys_to_pfn(DC21285_PCI_IO),
+               .length         = PCIO_SIZE,
+               .type           = MT_DEVICE
+       }
 #endif
 };
 
@@ -153,8 +183,17 @@ static struct map_desc ebsa285_host_io_desc[] __initdata = {
  */
 static struct map_desc co285_io_desc[] __initdata = {
 #ifdef CONFIG_ARCH_CO285
- { PCIO_BASE,   DC21285_PCI_IO,            PCIO_SIZE,    MT_DEVICE },
- { PCIMEM_BASE,         DC21285_PCI_MEM,           PCIMEM_SIZE,  MT_DEVICE }
+       {
+               .virtual        = PCIO_BASE,
+               .pfn            = __phys_to_pfn(DC21285_PCI_IO),
+               .length         = PCIO_SIZE,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = PCIMEM_BASE,
+               .pfn            = __phys_to_pfn(DC21285_PCI_MEM),
+               .length         = PCIMEM_SIZE,
+               .type           = MT_DEVICE
+       }
 #endif
 };
 
index 5110e2e65ddd3176a78c4880daaec219d4eae138..c096b45693084f4bbab9a15ed3c20dadb32ce0c2 100644 (file)
@@ -237,7 +237,12 @@ void __init h720x_init_irq (void)
 }
 
 static struct map_desc h720x_io_desc[] __initdata = {
-       { IO_VIRT, IO_PHYS, IO_SIZE, MT_DEVICE },
+       {
+               .virtual        = IO_VIRT,
+               .pfn            = __phys_to_pfn(IO_PHYS),
+               .length         = IO_SIZE,
+               .type           = MT_DEVICE
+       },
 };
 
 /* Initialize io tables */
index f8a742bb2d5b6f57b90f7c3960f66323e59dd413..cb14b0682cef09892d9035b0ebd6f1bb02b43730 100644 (file)
@@ -273,8 +273,12 @@ static struct platform_device *devices[] __initdata = {
 };
 
 static struct map_desc imx_io_desc[] __initdata = {
-       /* virtual     physical    length      type */
-       {IMX_IO_BASE, IMX_IO_PHYS, IMX_IO_SIZE, MT_DEVICE},
+       {
+               .virtual        = IMX_IO_BASE,
+               .pfn            = __phys_to_pfn(IMX_IO_PHYS),
+               .length         = IMX_IO_SIZE,
+               .type           = MT_DEVICE
+       }
 };
 
 void __init
index a7511ddfe364b2123686af22746ce36ba5cc0a24..4cbdc1fe04b1cff7e0f9315b1367f6a0d24b396d 100644 (file)
@@ -61,13 +61,37 @@ mx1ads_init(void)
 }
 
 static struct map_desc mx1ads_io_desc[] __initdata = {
-       /* virtual     physical    length      type */
-       {IMX_CS0_VIRT, IMX_CS0_PHYS, IMX_CS0_SIZE, MT_DEVICE},
-       {IMX_CS1_VIRT, IMX_CS1_PHYS, IMX_CS1_SIZE, MT_DEVICE},
-       {IMX_CS2_VIRT, IMX_CS2_PHYS, IMX_CS2_SIZE, MT_DEVICE},
-       {IMX_CS3_VIRT, IMX_CS3_PHYS, IMX_CS3_SIZE, MT_DEVICE},
-       {IMX_CS4_VIRT, IMX_CS4_PHYS, IMX_CS4_SIZE, MT_DEVICE},
-       {IMX_CS5_VIRT, IMX_CS5_PHYS, IMX_CS5_SIZE, MT_DEVICE},
+       {
+               .virtual        = IMX_CS0_VIRT,
+               .pfn            = __phys_to_pfn(IMX_CS0_PHYS),
+               .length         = IMX_CS0_SIZE,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = IMX_CS1_VIRT,
+               .pfn            = __phys_to_pfn(IMX_CS1_PHYS),
+               .length         = IMX_CS1_SIZE,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = IMX_CS2_VIRT,
+               .pfn            = __phys_to_pfn(IMX_CS2_PHYS),
+               .length         = IMX_CS2_SIZE,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = IMX_CS3_VIRT,
+               .pfn            = __phys_to_pfn(IMX_CS3_PHYS),
+               .length         = IMX_CS3_SIZE,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = IMX_CS4_VIRT,
+               .pfn            = __phys_to_pfn(IMX_CS4_PHYS),
+               .length         = IMX_CS4_SIZE,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = IMX_CS5_VIRT,
+               .pfn            = __phys_to_pfn(IMX_CS5_PHYS),
+               .length         = IMX_CS5_SIZE,
+               .type           = MT_DEVICE
+       }
 };
 
 static void __init
index 36e2b6eb67b7a0342ca0c07bf41bb8844e670dc8..f368b85f0447c220b601f44b68db2beef8a67612 100644 (file)
  */
 
 static struct map_desc ap_io_desc[] __initdata = {
- { IO_ADDRESS(INTEGRATOR_HDR_BASE),   INTEGRATOR_HDR_BASE,   SZ_4K,  MT_DEVICE },
- { IO_ADDRESS(INTEGRATOR_SC_BASE),    INTEGRATOR_SC_BASE,    SZ_4K,  MT_DEVICE },
- { IO_ADDRESS(INTEGRATOR_EBI_BASE),   INTEGRATOR_EBI_BASE,   SZ_4K,  MT_DEVICE },
- { IO_ADDRESS(INTEGRATOR_CT_BASE),    INTEGRATOR_CT_BASE,    SZ_4K,  MT_DEVICE },
- { IO_ADDRESS(INTEGRATOR_IC_BASE),    INTEGRATOR_IC_BASE,    SZ_4K,  MT_DEVICE },
- { IO_ADDRESS(INTEGRATOR_UART0_BASE), INTEGRATOR_UART0_BASE, SZ_4K,  MT_DEVICE },
- { IO_ADDRESS(INTEGRATOR_UART1_BASE), INTEGRATOR_UART1_BASE, SZ_4K,  MT_DEVICE },
- { IO_ADDRESS(INTEGRATOR_DBG_BASE),   INTEGRATOR_DBG_BASE,   SZ_4K,  MT_DEVICE },
- { IO_ADDRESS(INTEGRATOR_GPIO_BASE),  INTEGRATOR_GPIO_BASE,  SZ_4K,  MT_DEVICE },
- { PCI_MEMORY_VADDR,                  PHYS_PCI_MEM_BASE,     SZ_16M, MT_DEVICE },
- { PCI_CONFIG_VADDR,                  PHYS_PCI_CONFIG_BASE,  SZ_16M, MT_DEVICE },
- { PCI_V3_VADDR,                      PHYS_PCI_V3_BASE,      SZ_64K, MT_DEVICE },
- { PCI_IO_VADDR,                      PHYS_PCI_IO_BASE,      SZ_64K, MT_DEVICE }
+       {
+               .virtual        = IO_ADDRESS(INTEGRATOR_HDR_BASE),
+               .pfn            = __phys_to_pfn(INTEGRATOR_HDR_BASE),
+               .length         = SZ_4K,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = IO_ADDRESS(INTEGRATOR_SC_BASE),
+               .pfn            = __phys_to_pfn(INTEGRATOR_SC_BASE),
+               .length         = SZ_4K,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = IO_ADDRESS(INTEGRATOR_EBI_BASE),
+               .pfn            = __phys_to_pfn(INTEGRATOR_EBI_BASE),
+               .length         = SZ_4K,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = IO_ADDRESS(INTEGRATOR_CT_BASE),
+               .pfn            = __phys_to_pfn(INTEGRATOR_CT_BASE),
+               .length         = SZ_4K,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = IO_ADDRESS(INTEGRATOR_IC_BASE),
+               .pfn            = __phys_to_pfn(INTEGRATOR_IC_BASE),
+               .length         = SZ_4K,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = IO_ADDRESS(INTEGRATOR_UART0_BASE),
+               .pfn            = __phys_to_pfn(INTEGRATOR_UART0_BASE),
+               .length         = SZ_4K,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = IO_ADDRESS(INTEGRATOR_UART1_BASE),
+               .pfn            = __phys_to_pfn(INTEGRATOR_UART1_BASE),
+               .length         = SZ_4K,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = IO_ADDRESS(INTEGRATOR_DBG_BASE),
+               .pfn            = __phys_to_pfn(INTEGRATOR_DBG_BASE),
+               .length         = SZ_4K,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = IO_ADDRESS(INTEGRATOR_GPIO_BASE),
+               .pfn            = __phys_to_pfn(INTEGRATOR_GPIO_BASE),
+               .length         = SZ_4K,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = PCI_MEMORY_VADDR,
+               .pfn            = __phys_to_pfn(PHYS_PCI_MEM_BASE),
+               .length         = SZ_16M,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = PCI_CONFIG_VADDR,
+               .pfn            = __phys_to_pfn(PHYS_PCI_CONFIG_BASE),
+               .length         = SZ_16M,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = PCI_V3_VADDR,
+               .pfn            = __phys_to_pfn(PHYS_PCI_V3_BASE),
+               .length         = SZ_64K,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = PCI_IO_VADDR,
+               .pfn            = __phys_to_pfn(PHYS_PCI_IO_BASE),
+               .length         = SZ_64K,
+               .type           = MT_DEVICE
+       }
 };
 
 static void __init ap_map_io(void)
index 2be5c03ab87f858c07168e646396566b8fef3106..aa34c58b96c411abda5198ffaa7d44ec53f290d2 100644 (file)
  */
 
 static struct map_desc intcp_io_desc[] __initdata = {
- { IO_ADDRESS(INTEGRATOR_HDR_BASE),   INTEGRATOR_HDR_BASE,   SZ_4K,  MT_DEVICE },
- { IO_ADDRESS(INTEGRATOR_SC_BASE),    INTEGRATOR_SC_BASE,    SZ_4K,  MT_DEVICE },
- { IO_ADDRESS(INTEGRATOR_EBI_BASE),   INTEGRATOR_EBI_BASE,   SZ_4K,  MT_DEVICE },
- { IO_ADDRESS(INTEGRATOR_CT_BASE),    INTEGRATOR_CT_BASE,    SZ_4K,  MT_DEVICE },
- { IO_ADDRESS(INTEGRATOR_IC_BASE),    INTEGRATOR_IC_BASE,    SZ_4K,  MT_DEVICE },
- { IO_ADDRESS(INTEGRATOR_UART0_BASE), INTEGRATOR_UART0_BASE, SZ_4K,  MT_DEVICE },
- { IO_ADDRESS(INTEGRATOR_UART1_BASE), INTEGRATOR_UART1_BASE, SZ_4K,  MT_DEVICE },
- { IO_ADDRESS(INTEGRATOR_DBG_BASE),   INTEGRATOR_DBG_BASE,   SZ_4K,  MT_DEVICE },
- { IO_ADDRESS(INTEGRATOR_GPIO_BASE),  INTEGRATOR_GPIO_BASE,  SZ_4K,  MT_DEVICE },
- { 0xfca00000, 0xca000000, SZ_4K, MT_DEVICE },
- { 0xfcb00000, 0xcb000000, SZ_4K, MT_DEVICE },
+       {
+               .virtual        = IO_ADDRESS(INTEGRATOR_HDR_BASE),
+               .pfn            = __phys_to_pfn(INTEGRATOR_HDR_BASE),
+               .length         = SZ_4K,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = IO_ADDRESS(INTEGRATOR_SC_BASE),
+               .pfn            = __phys_to_pfn(INTEGRATOR_SC_BASE),
+               .length         = SZ_4K,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = IO_ADDRESS(INTEGRATOR_EBI_BASE),
+               .pfn            = __phys_to_pfn(INTEGRATOR_EBI_BASE),
+               .length         = SZ_4K,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = IO_ADDRESS(INTEGRATOR_CT_BASE),
+               .pfn            = __phys_to_pfn(INTEGRATOR_CT_BASE),
+               .length         = SZ_4K,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = IO_ADDRESS(INTEGRATOR_IC_BASE),
+               .pfn            = __phys_to_pfn(INTEGRATOR_IC_BASE),
+               .length         = SZ_4K,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = IO_ADDRESS(INTEGRATOR_UART0_BASE),
+               .pfn            = __phys_to_pfn(INTEGRATOR_UART0_BASE),
+               .length         = SZ_4K,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = IO_ADDRESS(INTEGRATOR_UART1_BASE),
+               .pfn            = __phys_to_pfn(INTEGRATOR_UART1_BASE),
+               .length         = SZ_4K,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = IO_ADDRESS(INTEGRATOR_DBG_BASE),
+               .pfn            = __phys_to_pfn(INTEGRATOR_DBG_BASE),
+               .length         = SZ_4K,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = IO_ADDRESS(INTEGRATOR_GPIO_BASE),
+               .pfn            = __phys_to_pfn(INTEGRATOR_GPIO_BASE),
+               .length         = SZ_4K,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = 0xfca00000,
+               .pfn            = __phys_to_pfn(0xca000000),
+               .length         = SZ_4K,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = 0xfcb00000,
+               .pfn            = __phys_to_pfn(0xcb000000),
+               .length         = SZ_4K,
+               .type           = MT_DEVICE
+       }
 };
 
 static void __init intcp_map_io(void)
index 0f921ba2750c2f8a7cc0e2ce9ca94c53c4753b41..bb5091223b638120291b9ee15e78693ff227638f 100644 (file)
  * Standard IO mapping for all IOP321 based systems
  */
 static struct map_desc iop321_std_desc[] __initdata = {
- /* virtual     physical      length      type */
-
- /* mem mapped registers */
- { IOP321_VIRT_MEM_BASE,  IOP321_PHYS_MEM_BASE,   0x00002000,  MT_DEVICE },
-
- /* PCI IO space */
- { IOP321_PCI_LOWER_IO_VA,  IOP321_PCI_LOWER_IO_PA,   IOP321_PCI_IO_WINDOW_SIZE,  MT_DEVICE }
+        {      /* mem mapped registers */
+               .virtual        = IOP321_VIRT_MEM_BASE,
+               .pfn            = __phys_to_pfn(IOP321_PHYS_MEM_BASE),
+               .length         = 0x00002000,
+               .type           = MT_DEVICE
+        }, {   /* PCI IO space */
+               .virtual        = IOP321_PCI_LOWER_IO_VA,
+               .pfn            = __phys_to_pfn(IOP321_PCI_LOWER_IO_PA),
+               .length         = IOP321_PCI_IO_WINDOW_SIZE,
+               .type           = MT_DEVICE
+        }
 };
 
 #ifdef CONFIG_ARCH_IQ80321
index fc74b722f72f749be8fc81ae854f2fe150d9fce7..a2533c3ab42f6671f26d5d84250b30410e5ac1f7 100644 (file)
  * Standard IO mapping for all IOP331 based systems
  */
 static struct map_desc iop331_std_desc[] __initdata = {
- /* virtual     physical      length      type */
-
- /* mem mapped registers */
- { IOP331_VIRT_MEM_BASE,  IOP331_PHYS_MEM_BASE,   0x00002000,  MT_DEVICE },
-
- /* PCI IO space */
- { IOP331_PCI_LOWER_IO_VA,  IOP331_PCI_LOWER_IO_PA,   IOP331_PCI_IO_WINDOW_SIZE,  MT_DEVICE }
+       {       /* mem mapped registers */
+               .virtual        = IOP331_VIRT_MEM_BASE,
+               .pfn            = __phys_to_pfn(IOP331_PHYS_MEM_BASE),
+               .length         = 0x00002000,
+               .type           = MT_DEVICE
+       }, {    /* PCI IO space */
+               .virtual        = IOP331_PCI_LOWER_IO_VA,
+               .pfn            = __phys_to_pfn(IOP331_PCI_LOWER_IO_PA),
+               .length         = IOP331_PCI_IO_WINDOW_SIZE,
+               .type           = MT_DEVICE
+       }
 };
 
 static struct uart_port iop331_serial_ports[] = {
index 55992ab586baf87d2418f2f91c865460321b04a0..e874b54eefe3782c7d2e9c48ffcf550e246e1223 100644 (file)
  * We use RedBoot's setup for the onboard devices.
  */
 static struct map_desc iq31244_io_desc[] __initdata = {
- /* virtual     physical      length        type */
-
- /* on-board devices */
- { IQ31244_UART, IQ31244_UART,   0x00100000,   MT_DEVICE }
+       {       /* on-board devices */
+               .virtual        = IQ31244_UART,
+               .pfn            = __phys_to_pfn(IQ31244_UART),
+               .length         = 0x00100000,
+               .type           = MT_DEVICE
+       }
 };
 
 void __init iq31244_map_io(void)
index bb3e9e5a9aff0e218ec0e9967e680cdecd2160e7..d9cac5e1fc3d9b7c197d3fb5e6af731ae16b53ef 100644 (file)
  * We use RedBoot's setup for the onboard devices.
  */
 static struct map_desc iq80321_io_desc[] __initdata = {
- /* virtual     physical      length        type */
-
- /* on-board devices */
- { IQ80321_UART, IQ80321_UART,   0x00100000,   MT_DEVICE }
+       {       /* on-board devices */
+               .virtual        = IQ80321_UART,
+               .pfn            = __phys_to_pfn(IQ80321_UART),
+               .length         = 0x00100000,
+               .type           = MT_DEVICE
+       }
 };
 
 void __init iq80321_map_io(void)
index f4d7f1f6ef851253d3466b39d403815dd07209fb..01c393c504d0ba3cb4447af6d19e141b47ad25e0 100644 (file)
@@ -83,42 +83,42 @@ void ixp2000_release_slowport(struct slowport_cfg *old_cfg)
 static struct map_desc ixp2000_io_desc[] __initdata = {
        {
                .virtual        = IXP2000_CAP_VIRT_BASE,
-               .physical       = IXP2000_CAP_PHYS_BASE,
+               .pfn            = __phys_to_pfn(IXP2000_CAP_PHYS_BASE),
                .length         = IXP2000_CAP_SIZE,
                .type           = MT_DEVICE
        }, {
                .virtual        = IXP2000_INTCTL_VIRT_BASE,
-               .physical       = IXP2000_INTCTL_PHYS_BASE,
+               .pfn            = __phys_to_pfn(IXP2000_INTCTL_PHYS_BASE),
                .length         = IXP2000_INTCTL_SIZE,
                .type           = MT_DEVICE
        }, {
                .virtual        = IXP2000_PCI_CREG_VIRT_BASE,
-               .physical       = IXP2000_PCI_CREG_PHYS_BASE,
+               .pfn            = __phys_to_pfn(IXP2000_PCI_CREG_PHYS_BASE),
                .length         = IXP2000_PCI_CREG_SIZE,
                .type           = MT_DEVICE
        }, {
                .virtual        = IXP2000_PCI_CSR_VIRT_BASE,
-               .physical       = IXP2000_PCI_CSR_PHYS_BASE,
+               .pfn            = __phys_to_pfn(IXP2000_PCI_CSR_PHYS_BASE),
                .length         = IXP2000_PCI_CSR_SIZE,
                .type           = MT_DEVICE
        }, {
                .virtual        = IXP2000_MSF_VIRT_BASE,
-               .physical       = IXP2000_MSF_PHYS_BASE,
+               .pfn            = __phys_to_pfn(IXP2000_MSF_PHYS_BASE),
                .length         = IXP2000_MSF_SIZE,
                .type           = MT_DEVICE
        }, {
                .virtual        = IXP2000_PCI_IO_VIRT_BASE,
-               .physical       = IXP2000_PCI_IO_PHYS_BASE,
+               .pfn            = __phys_to_pfn(IXP2000_PCI_IO_PHYS_BASE),
                .length         = IXP2000_PCI_IO_SIZE,
                .type           = MT_DEVICE
        }, {
                .virtual        = IXP2000_PCI_CFG0_VIRT_BASE,
-               .physical       = IXP2000_PCI_CFG0_PHYS_BASE,
+               .pfn            = __phys_to_pfn(IXP2000_PCI_CFG0_PHYS_BASE),
                .length         = IXP2000_PCI_CFG0_SIZE,
                .type           = MT_DEVICE
        }, {
                .virtual        = IXP2000_PCI_CFG1_VIRT_BASE,
-               .physical       = IXP2000_PCI_CFG1_PHYS_BASE,
+               .pfn            = __phys_to_pfn(IXP2000_PCI_CFG1_PHYS_BASE),
                .length         = IXP2000_PCI_CFG1_SIZE,
                .type           = MT_DEVICE
        }
index 63ba0191aa6572c5ee98db13713cd62096f2aed7..8b4a839b6279baf95c13eef696d4969623f7d418 100644 (file)
@@ -176,7 +176,7 @@ void ixdp2x00_init_irq(volatile unsigned long *stat_reg, volatile unsigned long
  *************************************************************************/
 static struct map_desc ixdp2x00_io_desc __initdata = {
        .virtual        = IXDP2X00_VIRT_CPLD_BASE, 
-       .physical       = IXDP2X00_PHYS_CPLD_BASE,
+       .pfn            = __phys_to_pfn(IXDP2X00_PHYS_CPLD_BASE),
        .length         = IXDP2X00_CPLD_SIZE,
        .type           = MT_DEVICE
 };
index 7a51099212877c94efef75908be49c37e312ecdf..fee1d7b73503f67853e1f86f65879de9c70590b9 100644 (file)
@@ -136,7 +136,7 @@ void __init ixdp2x01_init_irq(void)
  *************************************************************************/
 static struct map_desc ixdp2x01_io_desc __initdata = {
        .virtual        = IXDP2X01_VIRT_CPLD_BASE, 
-       .physical       = IXDP2X01_PHYS_CPLD_BASE,
+       .pfn            = __phys_to_pfn(IXDP2X01_PHYS_CPLD_BASE),
        .length         = IXDP2X01_CPLD_REGION_SIZE,
        .type           = MT_DEVICE
 };
index 36b6045213eec8b4265da0dda508590e565ff7fe..6c396447c4e078d8ce7e1807c072edfc8c704bb7 100644 (file)
 static struct map_desc ixp4xx_io_desc[] __initdata = {
        {       /* UART, Interrupt ctrl, GPIO, timers, NPEs, MACs, USB .... */
                .virtual        = IXP4XX_PERIPHERAL_BASE_VIRT,
-               .physical       = IXP4XX_PERIPHERAL_BASE_PHYS,
+               .pfn            = __phys_to_pfn(IXP4XX_PERIPHERAL_BASE_PHYS),
                .length         = IXP4XX_PERIPHERAL_REGION_SIZE,
                .type           = MT_DEVICE
        }, {    /* Expansion Bus Config Registers */
                .virtual        = IXP4XX_EXP_CFG_BASE_VIRT,
-               .physical       = IXP4XX_EXP_CFG_BASE_PHYS,
+               .pfn            = __phys_to_pfn(IXP4XX_EXP_CFG_BASE_PHYS),
                .length         = IXP4XX_EXP_CFG_REGION_SIZE,
                .type           = MT_DEVICE
        }, {    /* PCI Registers */
                .virtual        = IXP4XX_PCI_CFG_BASE_VIRT,
-               .physical       = IXP4XX_PCI_CFG_BASE_PHYS,
+               .pfn            = __phys_to_pfn(IXP4XX_PCI_CFG_BASE_PHYS),
                .length         = IXP4XX_PCI_CFG_REGION_SIZE,
                .type           = MT_DEVICE
        },
 #ifdef CONFIG_DEBUG_LL
        {       /* Debug UART mapping */
                .virtual        = IXP4XX_DEBUG_UART_BASE_VIRT,
-               .physical       = IXP4XX_DEBUG_UART_BASE_PHYS,
+               .pfn            = __phys_to_pfn(IXP4XX_DEBUG_UART_BASE_PHYS),
                .length         = IXP4XX_DEBUG_UART_REGION_SIZE,
                .type           = MT_DEVICE
        }
index cb3dcd3bd00a5a25fb4b39533ec9d6397ef284a3..19f2fa2244c451c8d203a7f8719164cef49b0ff6 100644 (file)
       /* This function calls the board specific IRQ initialization function. */
 
 static struct map_desc kev7a400_io_desc[] __initdata = {
-       { IO_VIRT,    IO_PHYS,    IO_SIZE,    MT_DEVICE },
-       { CPLD_VIRT,  CPLD_PHYS,  CPLD_SIZE,  MT_DEVICE },
+       {
+               .virtual        = IO_VIRT,
+               .pfn            = __phys_to_pfn(IO_PHYS),
+               .length         = IO_SIZE,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = CPLD_VIRT,
+               .pfn            = __phys_to_pfn(CPLD_PHYS),
+               .length         = CPLD_SIZE,
+               .type           = MT_DEVICE
+       }
 };
 
 void __init kev7a400_map_io(void)
index 6eb61a17c63be3cb9527dfb312fbf3b699a20f31..a20eabc132b08464c43d65d8915051316cf91b0a 100644 (file)
@@ -227,23 +227,79 @@ void __init lh7a40x_init_board_irq (void)
 }
 
 static struct map_desc lpd7a400_io_desc[] __initdata = {
-       {     IO_VIRT,      IO_PHYS,        IO_SIZE,    MT_DEVICE },
-       /* Mapping added to work around chip select problems */
-       { IOBARRIER_VIRT, IOBARRIER_PHYS, IOBARRIER_SIZE, MT_DEVICE },
-       { CF_VIRT,      CF_PHYS,        CF_SIZE,        MT_DEVICE },
+       {
+               .virtual        =     IO_VIRT,
+               .pfn            = __phys_to_pfn(IO_PHYS),
+               .length         =           IO_SIZE,
+               .type           = MT_DEVICE
+       }, {    /* Mapping added to work around chip select problems */
+               .virtual        = IOBARRIER_VIRT,
+               .pfn            = __phys_to_pfn(IOBARRIER_PHYS),
+               .length         = IOBARRIER_SIZE,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = CF_VIRT,
+               .pfn            = __phys_to_pfn(CF_PHYS),
+               .length         =       CF_SIZE,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = CPLD02_VIRT,
+               .pfn            = __phys_to_pfn(CPLD02_PHYS),
+               .length         =       CPLD02_SIZE,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = CPLD06_VIRT,
+               .pfn            = __phys_to_pfn(CPLD06_PHYS),
+               .length         =       CPLD06_SIZE,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = CPLD08_VIRT,
+               .pfn            = __phys_to_pfn(CPLD08_PHYS),
+               .length         =       CPLD08_SIZE,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = CPLD0C_VIRT,
+               .pfn            = __phys_to_pfn(CPLD0C_PHYS),
+               .length         =       CPLD0C_SIZE,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = CPLD0E_VIRT,
+               .pfn            = __phys_to_pfn(CPLD0E_PHYS),
+               .length         =       CPLD0E_SIZE,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = CPLD10_VIRT,
+               .pfn            = __phys_to_pfn(CPLD10_PHYS),
+               .length         =       CPLD10_SIZE,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = CPLD12_VIRT,
+               .pfn            = __phys_to_pfn(CPLD12_PHYS),
+               .length         =       CPLD12_SIZE,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = CPLD14_VIRT,
+               .pfn            = __phys_to_pfn(CPLD14_PHYS),
+               .length         =       CPLD14_SIZE,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = CPLD16_VIRT,
+               .pfn            = __phys_to_pfn(CPLD16_PHYS),
+               .length         =       CPLD16_SIZE,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = CPLD18_VIRT,
+               .pfn            = __phys_to_pfn(CPLD18_PHYS),
+               .length         =       CPLD18_SIZE,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = CPLD1A_VIRT,
+               .pfn            = __phys_to_pfn(CPLD1A_PHYS),
+               .length         =       CPLD1A_SIZE,
+               .type           = MT_DEVICE
+       },
        /* This mapping is redundant since the smc driver performs another. */
 /*     { CPLD00_VIRT,  CPLD00_PHYS,    CPLD00_SIZE,    MT_DEVICE }, */
-       { CPLD02_VIRT,  CPLD02_PHYS,    CPLD02_SIZE,    MT_DEVICE },
-       { CPLD06_VIRT,  CPLD06_PHYS,    CPLD06_SIZE,    MT_DEVICE },
-       { CPLD08_VIRT,  CPLD08_PHYS,    CPLD08_SIZE,    MT_DEVICE },
-       { CPLD0C_VIRT,  CPLD0C_PHYS,    CPLD0C_SIZE,    MT_DEVICE },
-       { CPLD0E_VIRT,  CPLD0E_PHYS,    CPLD0E_SIZE,    MT_DEVICE },
-       { CPLD10_VIRT,  CPLD10_PHYS,    CPLD10_SIZE,    MT_DEVICE },
-       { CPLD12_VIRT,  CPLD12_PHYS,    CPLD12_SIZE,    MT_DEVICE },
-       { CPLD14_VIRT,  CPLD14_PHYS,    CPLD14_SIZE,    MT_DEVICE },
-       { CPLD16_VIRT,  CPLD16_PHYS,    CPLD16_SIZE,    MT_DEVICE },
-       { CPLD18_VIRT,  CPLD18_PHYS,    CPLD18_SIZE,    MT_DEVICE },
-       { CPLD1A_VIRT,  CPLD1A_PHYS,    CPLD1A_SIZE,    MT_DEVICE },
 };
 
 void __init
index df0312b596e484a6e79cf5356c767020e8d55c54..fd9183ff2ed53c03252acbb5faa86f687c567ed4 100644 (file)
@@ -103,8 +103,12 @@ static struct platform_device innovator_flash_device = {
 
 /* Only FPGA needs to be mapped here. All others are done with ioremap */
 static struct map_desc innovator1510_io_desc[] __initdata = {
-{ OMAP1510_FPGA_BASE, OMAP1510_FPGA_START, OMAP1510_FPGA_SIZE,
-       MT_DEVICE },
+       {
+               .virtual        = OMAP1510_FPGA_BASE,
+               .pfn            = __phys_to_pfn(OMAP1510_FPGA_START),
+               .length         = OMAP1510_FPGA_SIZE,
+               .type           = MT_DEVICE
+       }
 };
 
 static struct resource innovator1510_smc91x_resources[] = {
index 107c68c8ab54b796fe7f5271adb11a40c52de760..2ba26e239108f87e9b51f38a6ade736256ffd366 100644 (file)
@@ -134,8 +134,12 @@ void omap_perseus2_init_irq(void)
 
 /* Only FPGA needs to be mapped here. All others are done with ioremap */
 static struct map_desc omap_perseus2_io_desc[] __initdata = {
-       {H2P2_DBG_FPGA_BASE, H2P2_DBG_FPGA_START, H2P2_DBG_FPGA_SIZE,
-        MT_DEVICE},
+       {
+               .virtual        = H2P2_DBG_FPGA_BASE,
+               .pfn            = __phys_to_pfn(H2P2_DBG_FPGA_START),
+               .length         = H2P2_DBG_FPGA_SIZE,
+               .type           = MT_DEVICE
+       }
 };
 
 static void __init omap_perseus2_map_io(void)
index eb8261d7dead9b0eb99ae98887f6a540c79450f8..79fb86535ebcb122baca48d47ee14a867c1706ae 100644 (file)
@@ -26,27 +26,59 @@ extern void omap_sram_init(void);
  * default mapping provided here.
  */
 static struct map_desc omap_io_desc[] __initdata = {
- { IO_VIRT,            IO_PHYS,             IO_SIZE,              MT_DEVICE },
+       {
+               .virtual        = IO_VIRT,
+               .pfn            = __phys_to_pfn(IO_PHYS),
+               .length         = IO_SIZE,
+               .type           = MT_DEVICE
+       }
 };
 
 #ifdef CONFIG_ARCH_OMAP730
 static struct map_desc omap730_io_desc[] __initdata = {
- { OMAP730_DSP_BASE,    OMAP730_DSP_START,    OMAP730_DSP_SIZE,    MT_DEVICE },
- { OMAP730_DSPREG_BASE, OMAP730_DSPREG_START, OMAP730_DSPREG_SIZE, MT_DEVICE },
+       {
+               .virtual        = OMAP730_DSP_BASE,
+               .pfn            = __phys_to_pfn(OMAP730_DSP_START),
+               .length         = OMAP730_DSP_SIZE,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = OMAP730_DSPREG_BASE,
+               .pfn            = __phys_to_pfn(OMAP730_DSPREG_START),
+               .length         = OMAP730_DSPREG_SIZE,
+               .type           = MT_DEVICE
+       }
 };
 #endif
 
 #ifdef CONFIG_ARCH_OMAP1510
 static struct map_desc omap1510_io_desc[] __initdata = {
- { OMAP1510_DSP_BASE,    OMAP1510_DSP_START,    OMAP1510_DSP_SIZE,    MT_DEVICE },
- { OMAP1510_DSPREG_BASE, OMAP1510_DSPREG_START, OMAP1510_DSPREG_SIZE, MT_DEVICE },
+       {
+               .virtual        = OMAP1510_DSP_BASE,
+               .pfn            = __phys_to_pfn(OMAP1510_DSP_START),
+               .length         = OMAP1510_DSP_SIZE,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = OMAP1510_DSPREG_BASE,
+               .pfn            = __phys_to_pfn(OMAP1510_DSPREG_START),
+               .length         = OMAP1510_DSPREG_SIZE,
+               .type           = MT_DEVICE
+       }
 };
 #endif
 
 #if defined(CONFIG_ARCH_OMAP16XX)
 static struct map_desc omap16xx_io_desc[] __initdata = {
- { OMAP16XX_DSP_BASE,    OMAP16XX_DSP_START,    OMAP16XX_DSP_SIZE,    MT_DEVICE },
- { OMAP16XX_DSPREG_BASE, OMAP16XX_DSPREG_START, OMAP16XX_DSPREG_SIZE, MT_DEVICE },
+       {
+               .virtual        = OMAP16XX_DSP_BASE,
+               .pfn            = __phys_to_pfn(OMAP16XX_DSP_START),
+               .length         = OMAP16XX_DSP_SIZE,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = OMAP16XX_DSPREG_BASE,
+               .pfn            = __phys_to_pfn(OMAP16XX_DSPREG_START),
+               .length         = OMAP16XX_DSPREG_SIZE,
+               .type           = MT_DEVICE
+       }
 };
 #endif
 
index 1d7677669a7628eb0830ab280feb5ce932e54097..3248bc9b94955ba934c914c6aef4763ae4e9a1c2 100644 (file)
@@ -34,6 +34,7 @@
 #include <asm/arch/udc.h>
 #include <asm/arch/pxafb.h>
 #include <asm/arch/mmc.h>
+#include <asm/arch/irda.h>
 #include <asm/arch/i2c.h>
 
 #include "generic.h"
@@ -92,14 +93,42 @@ EXPORT_SYMBOL(pxa_set_cken);
  *         and cache flush area.
  */
 static struct map_desc standard_io_desc[] __initdata = {
- /* virtual     physical    length      type */
-  { 0xf2000000, 0x40000000, 0x02000000, MT_DEVICE }, /* Devs */
-  { 0xf4000000, 0x44000000, 0x00100000, MT_DEVICE }, /* LCD */
-  { 0xf6000000, 0x48000000, 0x00100000, MT_DEVICE }, /* Mem Ctl */
-  { 0xf8000000, 0x4c000000, 0x00100000, MT_DEVICE }, /* USB host */
-  { 0xfa000000, 0x50000000, 0x00100000, MT_DEVICE }, /* Camera */
-  { 0xfe000000, 0x58000000, 0x00100000, MT_DEVICE }, /* IMem ctl */
-  { 0xff000000, 0x00000000, 0x00100000, MT_DEVICE }  /* UNCACHED_PHYS_0 */
+       {       /* Devs */
+               .virtual        =  0xf2000000,
+               .pfn            = __phys_to_pfn(0x40000000),
+               .length         = 0x02000000,
+               .type           = MT_DEVICE
+       }, {    /* LCD */
+               .virtual        =  0xf4000000,
+               .pfn            = __phys_to_pfn(0x44000000),
+               .length         = 0x00100000,
+               .type           = MT_DEVICE
+       }, {    /* Mem Ctl */
+               .virtual        =  0xf6000000,
+               .pfn            = __phys_to_pfn(0x48000000),
+               .length         = 0x00100000,
+               .type           = MT_DEVICE
+       }, {    /* USB host */
+               .virtual        =  0xf8000000,
+               .pfn            = __phys_to_pfn(0x4c000000),
+               .length         = 0x00100000,
+               .type           = MT_DEVICE
+       }, {    /* Camera */
+               .virtual        =  0xfa000000,
+               .pfn            = __phys_to_pfn(0x50000000),
+               .length         = 0x00100000,
+               .type           = MT_DEVICE
+       }, {    /* IMem ctl */
+               .virtual        =  0xfe000000,
+               .pfn            = __phys_to_pfn(0x58000000),
+               .length         = 0x00100000,
+               .type           = MT_DEVICE
+       }, {    /* UNCACHED_PHYS_0 */
+               .virtual        = 0xff000000,
+               .pfn            = __phys_to_pfn(0x00000000),
+               .length         = 0x00100000,
+               .type           = MT_DEVICE
+       }
 };
 
 void __init pxa_map_io(void)
@@ -225,6 +254,10 @@ static struct platform_device stuart_device = {
        .name           = "pxa2xx-uart",
        .id             = 2,
 };
+static struct platform_device hwuart_device = {
+       .name           = "pxa2xx-uart",
+       .id             = 3,
+};
 
 static struct resource i2c_resources[] = {
        {
@@ -265,10 +298,26 @@ static struct resource i2s_resources[] = {
 static struct platform_device i2s_device = {
        .name           = "pxa2xx-i2s",
        .id             = -1,
-       .resource       = i2c_resources,
+       .resource       = i2s_resources,
        .num_resources  = ARRAY_SIZE(i2s_resources),
 };
 
+static u64 pxaficp_dmamask = ~(u32)0;
+
+static struct platform_device pxaficp_device = {
+       .name           = "pxa2xx-ir",
+       .id             = -1,
+       .dev            = {
+               .dma_mask = &pxaficp_dmamask,
+               .coherent_dma_mask = 0xffffffff,
+       },
+};
+
+void __init pxa_set_ficp_info(struct pxaficp_platform_data *info)
+{
+       pxaficp_device.dev.platform_data = info;
+}
+
 static struct platform_device *devices[] __initdata = {
        &pxamci_device,
        &udc_device,
@@ -276,13 +325,26 @@ static struct platform_device *devices[] __initdata = {
        &ffuart_device,
        &btuart_device,
        &stuart_device,
+       &pxaficp_device,
        &i2c_device,
        &i2s_device,
 };
 
 static int __init pxa_init(void)
 {
-       return platform_add_devices(devices, ARRAY_SIZE(devices));
+       int cpuid, ret;
+
+       ret = platform_add_devices(devices, ARRAY_SIZE(devices));
+       if (ret)
+               return ret;
+
+       /* Only add HWUART for PXA255/26x; PXA210/250/27x do not have it. */
+       cpuid = read_cpuid(CPUID_ID);
+       if (((cpuid >> 4) & 0xfff) == 0x2d0 ||
+           ((cpuid >> 4) & 0xfff) == 0x290)
+               ret = platform_device_register(&hwuart_device);
+
+       return ret;
 }
 
 subsys_initcall(pxa_init);
index 386e107b53cc754fd0e7dfe8dfa01ed57e94bc5d..01a83ab09ac340a8f7fadb0792a948c067c93e08 100644 (file)
@@ -152,16 +152,17 @@ static void __init idp_init_irq(void)
 }
 
 static struct map_desc idp_io_desc[] __initdata = {
- /* virtual     physical    length      type */
-
-  { IDP_COREVOLT_VIRT,
-    IDP_COREVOLT_PHYS,
-    IDP_COREVOLT_SIZE,
-    MT_DEVICE },
-  { IDP_CPLD_VIRT,
-    IDP_CPLD_PHYS,
-    IDP_CPLD_SIZE,
-    MT_DEVICE }
+       {
+               .virtual        =  IDP_COREVOLT_VIRT,
+               .pfn            = __phys_to_pfn(IDP_COREVOLT_PHYS),
+               .length         = IDP_COREVOLT_SIZE,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        =  IDP_CPLD_VIRT,
+               .pfn            = __phys_to_pfn(IDP_CPLD_PHYS),
+               .length         = IDP_CPLD_SIZE,
+               .type           = MT_DEVICE
+       }
 };
 
 static void __init idp_map_io(void)
index 1f38033921e95c385446cbebf38b9a1d9fa4e4cd..beccf455f796f5a0b1d3be968ba63e2692ada432 100644 (file)
@@ -35,6 +35,7 @@
 #include <asm/arch/pxa-regs.h>
 #include <asm/arch/lubbock.h>
 #include <asm/arch/udc.h>
+#include <asm/arch/irda.h>
 #include <asm/arch/pxafb.h>
 #include <asm/arch/mmc.h>
 
@@ -237,16 +238,40 @@ static struct pxamci_platform_data lubbock_mci_platform_data = {
        .init           = lubbock_mci_init,
 };
 
+static void lubbock_irda_transceiver_mode(struct device *dev, int mode)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+       if (mode & IR_SIRMODE) {
+               LUB_MISC_WR &= ~(1 << 4);
+       } else if (mode & IR_FIRMODE) {
+               LUB_MISC_WR |= 1 << 4;
+       }
+       local_irq_restore(flags);
+}
+
+static struct pxaficp_platform_data lubbock_ficp_platform_data = {
+       .transceiver_cap  = IR_SIRMODE | IR_FIRMODE,
+       .transceiver_mode = lubbock_irda_transceiver_mode,
+};
+
 static void __init lubbock_init(void)
 {
        pxa_set_udc_info(&udc_info);
        set_pxa_fb_info(&sharp_lm8v31);
        pxa_set_mci_info(&lubbock_mci_platform_data);
+       pxa_set_ficp_info(&lubbock_ficp_platform_data);
        (void) platform_add_devices(devices, ARRAY_SIZE(devices));
 }
 
 static struct map_desc lubbock_io_desc[] __initdata = {
-  { LUBBOCK_FPGA_VIRT, LUBBOCK_FPGA_PHYS, 0x00100000, MT_DEVICE }, /* CPLD */
+       {       /* CPLD */
+               .virtual        =  LUBBOCK_FPGA_VIRT,
+               .pfn            = __phys_to_pfn(LUBBOCK_FPGA_PHYS),
+               .length         = 0x00100000,
+               .type           = MT_DEVICE
+       }
 };
 
 static void __init lubbock_map_io(void)
index 85fdb5b1470a140faa2e5b6a1d9fd02612ac337d..a48c64026e1fbb4be74321b305874b20ad7828b0 100644 (file)
@@ -37,6 +37,7 @@
 #include <asm/arch/audio.h>
 #include <asm/arch/pxafb.h>
 #include <asm/arch/mmc.h>
+#include <asm/arch/irda.h>
 
 #include "generic.h"
 
@@ -294,6 +295,29 @@ static struct pxamci_platform_data mainstone_mci_platform_data = {
        .exit           = mainstone_mci_exit,
 };
 
+static void mainstone_irda_transceiver_mode(struct device *dev, int mode)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+       if (mode & IR_SIRMODE) {
+               MST_MSCWR1 &= ~MST_MSCWR1_IRDA_FIR;
+       } else if (mode & IR_FIRMODE) {
+               MST_MSCWR1 |= MST_MSCWR1_IRDA_FIR;
+       }
+       if (mode & IR_OFF) {
+               MST_MSCWR1 = (MST_MSCWR1 & ~MST_MSCWR1_IRDA_MASK) | MST_MSCWR1_IRDA_OFF;
+       } else {
+               MST_MSCWR1 = (MST_MSCWR1 & ~MST_MSCWR1_IRDA_MASK) | MST_MSCWR1_IRDA_FULL;
+       }
+       local_irq_restore(flags);
+}
+
+static struct pxaficp_platform_data mainstone_ficp_platform_data = {
+       .transceiver_cap  = IR_SIRMODE | IR_FIRMODE | IR_OFF,
+       .transceiver_mode = mainstone_irda_transceiver_mode,
+};
+
 static void __init mainstone_init(void)
 {
        /*
@@ -313,11 +337,17 @@ static void __init mainstone_init(void)
                set_pxa_fb_info(&toshiba_ltm035a776c);
 
        pxa_set_mci_info(&mainstone_mci_platform_data);
+       pxa_set_ficp_info(&mainstone_ficp_platform_data);
 }
 
 
 static struct map_desc mainstone_io_desc[] __initdata = {
-  { MST_FPGA_VIRT, MST_FPGA_PHYS, 0x00100000, MT_DEVICE }, /* CPLD */
+       {       /* CPLD */
+               .virtual        =  MST_FPGA_VIRT,
+               .pfn            = __phys_to_pfn(MST_FPGA_PHYS),
+               .length         = 0x00100000,
+               .type           = MT_DEVICE
+       }
 };
 
 static void __init mainstone_map_io(void)
index 7869c3b4e62f0447eee5737dc855b0b99afe5d10..573a5758e781c366138de117529b92c04a71acce 100644 (file)
@@ -129,7 +129,7 @@ void pxa_cpu_pm_enter(suspend_state_t state)
        case PM_SUSPEND_MEM:
                /* set resume return address */
                PSPR = virt_to_phys(pxa_cpu_resume);
-               pxa_cpu_suspend(3);
+               pxa_cpu_suspend(PWRMODE_SLEEP);
                break;
        }
 }
index 9a791b07118df2a71163db05cb476d5a44b614b7..09a5d593f04b27c2558850e76c0e99dcee861967 100644 (file)
@@ -157,7 +157,7 @@ void pxa_cpu_pm_enter(suspend_state_t state)
        case PM_SUSPEND_MEM:
                /* set resume return address */
                PSPR = virt_to_phys(pxa_cpu_resume);
-               pxa_cpu_suspend(3);
+               pxa_cpu_suspend(PWRMODE_SLEEP);
                break;
        }
 }
index 5786ccad938cef12878a7c5a19db33617e16c50d..c9862688ff3d6694f6e812717bdb41eeb2e84cc6 100644 (file)
@@ -28,7 +28,9 @@
 /*
  * pxa_cpu_suspend()
  *
- * Forces CPU into sleep state
+ * Forces CPU into sleep state.
+ *
+ * r0 = value for PWRMODE M field for desired sleep state
  */
 
 ENTRY(pxa_cpu_suspend)
@@ -53,6 +55,7 @@ ENTRY(pxa_cpu_suspend)
        mov     r10, sp
        stmfd   sp!, {r3 - r10}
 
+       mov r5, r0                              @ save sleep mode
        @ preserve phys address of stack
        mov     r0, sp
        bl      sleep_phys_sp
@@ -66,7 +69,7 @@ ENTRY(pxa_cpu_suspend)
        @ (also workaround for sighting 28071)
 
        @ prepare value for sleep mode
-       mov     r1, #3                          @ sleep mode
+       mov     r1, r5                          @ sleep mode
 
        @ prepare pointer to physical address 0 (virtual mapping in generic.c)
        mov     r2, #UNCACHED_PHYS_0
index 8a3f27b76784e6bc1d8d6434865c906e6821ee89..6f6dbbd0802137f9d9cbe6d811226e54a2b3b238 100644 (file)
@@ -21,7 +21,7 @@
 ENTRY(pxa_cpu_standby)
        ldr     r0, =PSSR
        mov     r1, #(PSSR_PH | PSSR_STS)
-       mov     r2, #2
+       mov     r2, #PWRMODE_STANDBY
        mov     r3, #UNCACHED_PHYS_0    @ Read mem context in.
        ldr     ip, [r3]
        b       1f
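The pxa25x.c, pxa27x.c, sleep.S and standby.S hunks above replace bare power-mode constants with named macros; judging from the literals they replace, the definitions presumably look like the sketch below (their real home would be a PXA register header, not part of this diff).

/* Assumed definitions, inferred from the constants replaced above:
 * the PWRMODE register's M field selects standby (2) or sleep (3). */
#define PWRMODE_STANDBY		0x2	/* was: mov r2, #2 */
#define PWRMODE_SLEEP		0x3	/* was: pxa_cpu_suspend(3) */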
index e3587efec4bf685683d3f1a812e791cd41430f0a..5c4ac1c008a63439995ca2dcda55e5122ec5521b 100644 (file)
@@ -61,9 +61,22 @@ static int __init parse_tag_acorn(const struct tag *tag)
 __tagtable(ATAG_ACORN, parse_tag_acorn);
 
 static struct map_desc rpc_io_desc[] __initdata = {
- { SCREEN_BASE,        SCREEN_START,   2*1048576, MT_DEVICE }, /* VRAM         */
- { (u32)IO_BASE, IO_START,     IO_SIZE  , MT_DEVICE }, /* IO space     */
- { EASI_BASE,  EASI_START,     EASI_SIZE, MT_DEVICE }  /* EASI space   */
+       {       /* VRAM         */
+               .virtual        =  SCREEN_BASE,
+               .pfn            = __phys_to_pfn(SCREEN_START),
+               .length         =       2*1048576,
+               .type           = MT_DEVICE
+       }, {    /* IO space     */
+               .virtual        =  (u32)IO_BASE,
+               .pfn            = __phys_to_pfn(IO_START),
+               .length         =       IO_SIZE,
+               .type           = MT_DEVICE
+       }, {    /* EASI space   */
+               .virtual        = EASI_BASE,
+               .pfn            = __phys_to_pfn(EASI_START),
+               .length         = EASI_SIZE,
+               .type           = MT_DEVICE
+       }
 };
 
 static void __init rpc_map_io(void)
index 478c15c0e36a334d328ccab31c141b00ef20153f..9cbe5eef492b26a26a4d586ad6d96266f37de8fb 100644 (file)
@@ -21,7 +21,7 @@
 
 /* todo - fix when rmk changes iodescs to use `void __iomem *` */
 
-#define IODESC_ENT(x) { (unsigned long)S3C24XX_VA_##x, S3C2410_PA_##x, S3C24XX_SZ_##x, MT_DEVICE }
+#define IODESC_ENT(x) { (unsigned long)S3C24XX_VA_##x, __phys_to_pfn(S3C2410_PA_##x), S3C24XX_SZ_##x, MT_DEVICE }
 
 #ifndef MHZ
 #define MHZ (1000*1000)
index 0077937a7ab865f67faaca5ff684d2ca3cc0db6f..08bc7d95a45d78c0290b8d5c15eef4c934fd8105 100644 (file)
@@ -47,7 +47,7 @@ struct platform_device *s3c24xx_uart_devs[3];
 static struct resource s3c_usb_resource[] = {
        [0] = {
                .start = S3C2410_PA_USBHOST,
-               .end   = S3C2410_PA_USBHOST + S3C24XX_SZ_USBHOST,
+               .end   = S3C2410_PA_USBHOST + S3C24XX_SZ_USBHOST - 1,
                .flags = IORESOURCE_MEM,
        },
        [1] = {
@@ -77,7 +77,7 @@ EXPORT_SYMBOL(s3c_device_usb);
 static struct resource s3c_lcd_resource[] = {
        [0] = {
                .start = S3C2410_PA_LCD,
-               .end   = S3C2410_PA_LCD + S3C24XX_SZ_LCD,
+               .end   = S3C2410_PA_LCD + S3C24XX_SZ_LCD - 1,
                .flags = IORESOURCE_MEM,
        },
        [1] = {
@@ -103,21 +103,25 @@ struct platform_device s3c_device_lcd = {
 
 EXPORT_SYMBOL(s3c_device_lcd);
 
-static struct s3c2410fb_mach_info s3c2410fb_info;
-
-void __init set_s3c2410fb_info(struct s3c2410fb_mach_info *hard_s3c2410fb_info)
+void __init s3c24xx_fb_set_platdata(struct s3c2410fb_mach_info *pd)
 {
-       memcpy(&s3c2410fb_info,hard_s3c2410fb_info,sizeof(struct s3c2410fb_mach_info));
-       s3c_device_lcd.dev.platform_data = &s3c2410fb_info;
+       struct s3c2410fb_mach_info *npd;
+
+       npd = kmalloc(sizeof(*npd), GFP_KERNEL);
+       if (npd) {
+               memcpy(npd, pd, sizeof(*npd));
+               s3c_device_lcd.dev.platform_data = npd;
+       } else {
+               printk(KERN_ERR "no memory for LCD platform data\n");
+       }
 }
-EXPORT_SYMBOL(set_s3c2410fb_info);
 
 /* NAND Controller */
 
 static struct resource s3c_nand_resource[] = {
        [0] = {
                .start = S3C2410_PA_NAND,
-               .end   = S3C2410_PA_NAND + S3C24XX_SZ_NAND,
+               .end   = S3C2410_PA_NAND + S3C24XX_SZ_NAND - 1,
                .flags = IORESOURCE_MEM,
        }
 };
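s3c24xx_fb_set_platdata() replaces set_s3c2410fb_info(): instead of copying into a file-local static, it kmalloc()s a private copy, so board files can hand it an __initdata structure. A minimal caller sketch ("myboard" and the structure contents are placeholders; real examples follow in the BAST and SMDK2440 hunks below):

    /* Sketch only -- "myboard" is a placeholder board. */
    static struct s3c2410fb_mach_info myboard_lcd_info __initdata = {
            .width  = 240,
            .height = 320,
            /* display timings and register values elided */
    };

    static void __init myboard_init(void)
    {
            /* safe with __initdata: the data is copied at init time */
            s3c24xx_fb_set_platdata(&myboard_lcd_info);
    }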
@@ -136,7 +140,7 @@ EXPORT_SYMBOL(s3c_device_nand);
 static struct resource s3c_usbgadget_resource[] = {
        [0] = {
                .start = S3C2410_PA_USBDEV,
-               .end   = S3C2410_PA_USBDEV + S3C24XX_SZ_USBDEV,
+               .end   = S3C2410_PA_USBDEV + S3C24XX_SZ_USBDEV - 1,
                .flags = IORESOURCE_MEM,
        },
        [1] = {
@@ -161,7 +165,7 @@ EXPORT_SYMBOL(s3c_device_usbgadget);
 static struct resource s3c_wdt_resource[] = {
        [0] = {
                .start = S3C2410_PA_WATCHDOG,
-               .end   = S3C2410_PA_WATCHDOG + S3C24XX_SZ_WATCHDOG,
+               .end   = S3C2410_PA_WATCHDOG + S3C24XX_SZ_WATCHDOG - 1,
                .flags = IORESOURCE_MEM,
        },
        [1] = {
@@ -186,7 +190,7 @@ EXPORT_SYMBOL(s3c_device_wdt);
 static struct resource s3c_i2c_resource[] = {
        [0] = {
                .start = S3C2410_PA_IIC,
-               .end   = S3C2410_PA_IIC + S3C24XX_SZ_IIC,
+               .end   = S3C2410_PA_IIC + S3C24XX_SZ_IIC - 1,
                .flags = IORESOURCE_MEM,
        },
        [1] = {
@@ -211,7 +215,7 @@ EXPORT_SYMBOL(s3c_device_i2c);
 static struct resource s3c_iis_resource[] = {
        [0] = {
                .start = S3C2410_PA_IIS,
-               .end   = S3C2410_PA_IIS + S3C24XX_SZ_IIS,
+               .end   = S3C2410_PA_IIS + S3C24XX_SZ_IIS - 1,
                .flags = IORESOURCE_MEM,
        }
 };
@@ -265,7 +269,7 @@ EXPORT_SYMBOL(s3c_device_rtc);
 static struct resource s3c_adc_resource[] = {
        [0] = {
                .start = S3C2410_PA_ADC,
-               .end   = S3C2410_PA_ADC + S3C24XX_SZ_ADC,
+               .end   = S3C2410_PA_ADC + S3C24XX_SZ_ADC - 1,
                .flags = IORESOURCE_MEM,
        },
        [1] = {
@@ -288,7 +292,7 @@ struct platform_device s3c_device_adc = {
 static struct resource s3c_sdi_resource[] = {
        [0] = {
                .start = S3C2410_PA_SDI,
-               .end   = S3C2410_PA_SDI + S3C24XX_SZ_SDI,
+               .end   = S3C2410_PA_SDI + S3C24XX_SZ_SDI - 1,
                .flags = IORESOURCE_MEM,
        },
        [1] = {
@@ -465,7 +469,7 @@ EXPORT_SYMBOL(s3c_device_timer3);
 static struct resource s3c_camif_resource[] = {
        [0] = {
                .start = S3C2440_PA_CAMIF,
-               .end   = S3C2440_PA_CAMIF + S3C2440_SZ_CAMIF,
+               .end   = S3C2440_PA_CAMIF + S3C2440_SZ_CAMIF - 1,
                .flags = IORESOURCE_MEM,
        },
        [1] = {
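The repeated "- 1" changes above fix an off-by-one: struct resource describes an inclusive [start, end] range, so a block of SZ bytes starting at PA must end at PA + SZ - 1. A short sketch of the corrected pattern with placeholder constants:

    #include <linux/ioport.h>

    /* Sketch only -- MYDEV_PA and MYDEV_SZ stand in for the
     * S3C2410_PA_xxx and S3C24XX_SZ_xxx constants used above. */
    #define MYDEV_PA        0x50000000
    #define MYDEV_SZ        0x00100000

    static struct resource mydev_resource[] = {
            [0] = {
                    .start = MYDEV_PA,
                    .end   = MYDEV_PA + MYDEV_SZ - 1, /* inclusive end */
                    .flags = IORESOURCE_MEM,
            },
    };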
index 94f1776cf3127dd3169f785ef70d8dd543dbdaa7..23ea3d5fa09c1be9bcc9084c1c8203b72b7c80c4 100644 (file)
@@ -30,6 +30,7 @@
  *     04-Oct-2004  BJD  Added irq filter controls for GPIO
  *     05-Nov-2004  BJD  EXPORT_SYMBOL() added for all code
  *     13-Mar-2005  BJD  Updates for __iomem
+ *     26-Oct-2005  BJD  Added generic configuration types
  */
 
 
@@ -58,6 +59,27 @@ void s3c2410_gpio_cfgpin(unsigned int pin, unsigned int function)
                mask = 3 << S3C2410_GPIO_OFFSET(pin)*2;
        }
 
+       switch (function) {
+       case S3C2410_GPIO_LEAVE:
+               mask = 0;
+               function = 0;
+               break;
+
+       case S3C2410_GPIO_INPUT:
+       case S3C2410_GPIO_OUTPUT:
+       case S3C2410_GPIO_SFN2:
+       case S3C2410_GPIO_SFN3:
+               if (pin < S3C2410_GPIO_BANKB) {
+                       function &= 1;
+                       function <<= S3C2410_GPIO_OFFSET(pin);
+               } else {
+                       function &= 3;
+                       function <<= S3C2410_GPIO_OFFSET(pin)*2;
+               }
+       }
+
+       /* modify the specified register with IRQs off */
+
        local_irq_save(flags);
 
        con  = __raw_readl(base + 0x00);
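The new switch statement lets s3c2410_gpio_cfgpin() accept the generic configuration types (S3C2410_GPIO_INPUT, S3C2410_GPIO_OUTPUT, S3C2410_GPIO_SFN2/3) in addition to the raw bank-specific function values. A brief usage sketch, using a pin that appears elsewhere in this commit:

    /* Sketch only -- drive GPF6 low as a plain output. */
    s3c2410_gpio_cfgpin(S3C2410_GPF6, S3C2410_GPIO_OUTPUT);
    s3c2410_gpio_setpin(S3C2410_GPF6, 0);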
index 7b51bfd0ba6d51ea797f2779ebe5d9c1d1959e40..c1b5c63ec24a7b31bf7a838158a2e3cb6964a10b 100644 (file)
@@ -32,6 +32,7 @@
  *     25-Jul-2005 BJD  Removed ASIX static mappings
  *     27-Jul-2005 BJD  Ensure maximum frequency of i2c bus
  *     20-Sep-2005 BJD  Added static to non-exported items
+ *     26-Oct-2005 BJD  Added FB platform data
 */
 
 #include <linux/kernel.h>
 #include <asm/arch/regs-gpio.h>
 #include <asm/arch/regs-mem.h>
 #include <asm/arch/regs-lcd.h>
+
 #include <asm/arch/nand.h>
 #include <asm/arch/iic.h>
+#include <asm/arch/fb.h>
 
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/nand.h>
@@ -399,6 +402,38 @@ static struct s3c2410_platform_i2c bast_i2c_info = {
        .max_freq       = 130*1000,
 };
 
+
+static struct s3c2410fb_mach_info __initdata bast_lcd_info = {
+       .width          = 640,
+       .height         = 480,
+
+       .xres           = {
+               .min            = 320,
+               .max            = 1024,
+               .defval         = 640,
+       },
+
+       .yres           = {
+               .min            = 240,
+               .max            = 600,
+               .defval         = 480,
+       },
+
+       .bpp            = {
+               .min            = 4,
+               .max            = 16,
+               .defval         = 8,
+       },
+
+       .regs           = {
+               .lcdcon1        = 0x00000176,
+               .lcdcon2        = 0x1d77c7c2,
+               .lcdcon3        = 0x013a7f13,
+               .lcdcon4        = 0x00000057,
+               .lcdcon5        = 0x00014b02,
+       }
+};
+
 /* Standard BAST devices */
 
 static struct platform_device *bast_devices[] __initdata = {
@@ -454,6 +489,10 @@ static void __init bast_map_io(void)
        usb_simtec_init();
 }
 
+static void __init bast_init(void)
+{
+       s3c24xx_fb_set_platdata(&bast_lcd_info);
+}
 
 MACHINE_START(BAST, "Simtec-BAST")
        /* Maintainer: Ben Dooks <ben@simtec.co.uk> */
@@ -463,5 +502,6 @@ MACHINE_START(BAST, "Simtec-BAST")
        .boot_params    = S3C2410_SDRAM_PA + 0x100,
        .map_io         = bast_map_io,
        .init_irq       = s3c24xx_init_irq,
+       .init_machine   = bast_init,
        .timer          = &s3c24xx_timer,
 MACHINE_END
index fb3cb01266e513e27c5871d008eec550656fad73..7efeaaad2361e71f2b75d4b2beb29ae493a329b1 100644 (file)
@@ -25,6 +25,7 @@
  *     14-Jan-2005 BJD  Added clock init
  *     10-Mar-2005 LCVR Changed S3C2410_VA to S3C24XX_VA
  *     20-Sep-2005 BJD  Added static to non-exported items
+ *     26-Oct-2005 BJD  Changed name of fb init call
 */
 
 #include <linux/kernel.h>
@@ -164,7 +165,7 @@ static void __init h1940_init_irq(void)
 
 static void __init h1940_init(void)
 {
-       set_s3c2410fb_info(&h1940_lcdcfg);
+       s3c24xx_fb_set_platdata(&h1940_lcdcfg);
 }
 
 MACHINE_START(H1940, "IPAQ-H1940")
index 722ef46b630ac12bbb40eb495baef8dfae10b208..6950e61b79149022c368beac1a6d2b910c812f9c 100644 (file)
@@ -19,6 +19,7 @@
  *     10-Mar-2005 LCVR  Replaced S3C2410_VA by S3C24XX_VA
  *     14-Mar-2005 BJD   void __iomem fixes
  *     20-Sep-2005 BJD   Added static to non-exported items
+ *     26-Oct-2005 BJD   Added framebuffer data
 */
 
 #include <linux/kernel.h>
 //#include <asm/debug-ll.h>
 #include <asm/arch/regs-serial.h>
 #include <asm/arch/regs-gpio.h>
+#include <asm/arch/regs-lcd.h>
+
 #include <asm/arch/idle.h>
+#include <asm/arch/fb.h>
 
 #include "s3c2410.h"
 #include "s3c2440.h"
@@ -86,6 +90,70 @@ static struct s3c2410_uartcfg smdk2440_uartcfgs[] = {
        }
 };
 
+/* LCD driver info */
+
+static struct s3c2410fb_mach_info smdk2440_lcd_cfg __initdata = {
+       .regs   = {
+
+               .lcdcon1        = S3C2410_LCDCON1_TFT16BPP |
+                                 S3C2410_LCDCON1_TFT |
+                                 S3C2410_LCDCON1_CLKVAL(0x04),
+
+               .lcdcon2        = S3C2410_LCDCON2_VBPD(7) |
+                                 S3C2410_LCDCON2_LINEVAL(319) |
+                                 S3C2410_LCDCON2_VFPD(6) |
+                                 S3C2410_LCDCON2_VSPW(3),
+
+               .lcdcon3        = S3C2410_LCDCON3_HBPD(19) |
+                                 S3C2410_LCDCON3_HOZVAL(239) |
+                                 S3C2410_LCDCON3_HFPD(7),
+
+               .lcdcon4        = S3C2410_LCDCON4_MVAL(0) |
+                                 S3C2410_LCDCON4_HSPW(3),
+
+               .lcdcon5        = S3C2410_LCDCON5_FRM565 |
+                                 S3C2410_LCDCON5_INVVLINE |
+                                 S3C2410_LCDCON5_INVVFRAME |
+                                 S3C2410_LCDCON5_PWREN |
+                                 S3C2410_LCDCON5_HWSWP,
+       },
+
+#if 0
+       /* currently setup by downloader */
+       .gpccon         = 0xaa940659,
+       .gpccon_mask    = 0xffffffff,
+       .gpcup          = 0x0000ffff,
+       .gpcup_mask     = 0xffffffff,
+       .gpdcon         = 0xaa84aaa0,
+       .gpdcon_mask    = 0xffffffff,
+       .gpdup          = 0x0000faff,
+       .gpdup_mask     = 0xffffffff,
+#endif
+
+       .lpcsel         = ((0xCE6) & ~7) | 1<<4,
+
+       .width          = 240,
+       .height         = 320,
+
+       .xres           = {
+               .min    = 240,
+               .max    = 240,
+               .defval = 240,
+       },
+
+       .yres           = {
+               .min    = 320,
+               .max    = 320,
+               .defval = 320,
+       },
+
+       .bpp            = {
+               .min    = 16,
+               .max    = 16,
+               .defval = 16,
+       },
+};
+
 static struct platform_device *smdk2440_devices[] __initdata = {
        &s3c_device_usb,
        &s3c_device_lcd,
@@ -121,6 +189,8 @@ static void __init smdk2440_machine_init(void)
        s3c2410_gpio_setpin(S3C2410_GPF6, 0);
        s3c2410_gpio_setpin(S3C2410_GPF7, 0);
 
+       s3c24xx_fb_set_platdata(&smdk2440_lcd_cfg);
+
        s3c2410_pm_init();
 }
 
index 24687f511bf53c9bf74ee4d55a332a61a690f162..75efb5da5b6d2874867cd83c58fd37b82e456bde 100644 (file)
@@ -388,9 +388,17 @@ static struct sa1100_port_fns assabet_port_fns __initdata = {
 };
 
 static struct map_desc assabet_io_desc[] __initdata = {
- /* virtual     physical    length      type */
-  { 0xf1000000, 0x12000000, 0x00100000, MT_DEVICE }, /* Board Control Register */
-  { 0xf2800000, 0x4b800000, 0x00800000, MT_DEVICE }  /* MQ200 */
+       {       /* Board Control Register */
+               .virtual        =  0xf1000000,
+               .pfn            = __phys_to_pfn(0x12000000),
+               .length         = 0x00100000,
+               .type           = MT_DEVICE
+       }, {    /* MQ200 */
+               .virtual        =  0xf2800000,
+               .pfn            = __phys_to_pfn(0x4b800000),
+               .length         = 0x00800000,
+               .type           = MT_DEVICE
+       }
 };
 
 static void __init assabet_map_io(void)
index b6169cb091967d62c9c4b55920af89b2b51d4a84..c92cebff7f8e758b653b053c612b82cb8041ba92 100644 (file)
@@ -254,10 +254,22 @@ EXPORT_SYMBOL(badge4_set_5V);
 
 
 static struct map_desc badge4_io_desc[] __initdata = {
-  /*  virtual    physical    length    type */
-  {0xf1000000, 0x08000000, 0x00100000, MT_DEVICE },/* SRAM  bank 1 */
-  {0xf2000000, 0x10000000, 0x00100000, MT_DEVICE },/* SRAM  bank 2 */
-  {0xf4000000, 0x48000000, 0x00100000, MT_DEVICE } /* SA-1111      */
+       {       /* SRAM  bank 1 */
+               .virtual        = 0xf1000000,
+               .pfn            = __phys_to_pfn(0x08000000),
+               .length         = 0x00100000,
+               .type           = MT_DEVICE
+       }, {    /* SRAM  bank 2 */
+               .virtual        = 0xf2000000,
+               .pfn            = __phys_to_pfn(0x10000000),
+               .length         = 0x00100000,
+               .type           = MT_DEVICE
+       }, {    /* SA-1111      */
+               .virtual        = 0xf4000000,
+               .pfn            = __phys_to_pfn(0x48000000),
+               .length         = 0x00100000,
+               .type           = MT_DEVICE
+       }
 };
 
 static void
index 9484be7dc671a0ada6bb834ea480b48b73ff6863..23cb748852751db95f0437e97c4052f2dd1865c4 100644 (file)
@@ -100,8 +100,12 @@ static void __init cerf_init_irq(void)
 }
 
 static struct map_desc cerf_io_desc[] __initdata = {
-  /* virtual    physical    length      type */
-  { 0xf0000000, 0x08000000, 0x00100000, MT_DEVICE }  /* Crystal Ethernet Chip */
+       {       /* Crystal Ethernet Chip */
+               .virtual        =  0xf0000000,
+               .pfn            = __phys_to_pfn(0x08000000),
+               .length         = 0x00100000,
+               .type           = MT_DEVICE
+       }
 };
 
 static void __init cerf_map_io(void)
index 6ecab7e2c238789ee59f72830a495b68e13253ae..7fd6e29c36b79d6fb1cc7091695a6c5a4b56771a 100644 (file)
@@ -171,9 +171,17 @@ static void __init collie_init(void)
 }
 
 static struct map_desc collie_io_desc[] __initdata = {
-       /* virtual     physical    length      type */
-       {0xe8000000, 0x00000000, 0x02000000, MT_DEVICE},        /* 32M main flash (cs0) */
-       {0xea000000, 0x08000000, 0x02000000, MT_DEVICE},        /* 32M boot flash (cs1) */
+       {       /* 32M main flash (cs0) */
+               .virtual        = 0xe8000000,
+               .pfn            = __phys_to_pfn(0x00000000),
+               .length         = 0x02000000,
+               .type           = MT_DEVICE
+       }, {    /* 32M boot flash (cs1) */
+               .virtual        = 0xea000000,
+               .pfn            = __phys_to_pfn(0x08000000),
+               .length         = 0x02000000,
+               .type           = MT_DEVICE
+       }
 };
 
 static void __init collie_map_io(void)
index 3f1e358455e51578ca01f16d6cde1a3141fbfb50..93619497779cf46ec4992e454368bb365b1c56ec 100644 (file)
@@ -369,11 +369,27 @@ EXPORT_SYMBOL(sa1100fb_lcd_power);
  */
 
 static struct map_desc standard_io_desc[] __initdata = {
- /* virtual     physical    length      type */
-  { 0xf8000000, 0x80000000, 0x00100000, MT_DEVICE }, /* PCM */
-  { 0xfa000000, 0x90000000, 0x00100000, MT_DEVICE }, /* SCM */
-  { 0xfc000000, 0xa0000000, 0x00100000, MT_DEVICE }, /* MER */
-  { 0xfe000000, 0xb0000000, 0x00200000, MT_DEVICE }  /* LCD + DMA */
+       {       /* PCM */
+               .virtual        =  0xf8000000,
+               .pfn            = __phys_to_pfn(0x80000000),
+               .length         = 0x00100000,
+               .type           = MT_DEVICE
+       }, {    /* SCM */
+               .virtual        =  0xfa000000,
+               .pfn            = __phys_to_pfn(0x90000000),
+               .length         = 0x00100000,
+               .type           = MT_DEVICE
+       }, {    /* MER */
+               .virtual        =  0xfc000000,
+               .pfn            = __phys_to_pfn(0xa0000000),
+               .length         = 0x00100000,
+               .type           = MT_DEVICE
+       }, {    /* LCD + DMA */
+               .virtual        =  0xfe000000,
+               .pfn            = __phys_to_pfn(0xb0000000),
+               .length         = 0x00200000,
+               .type           = MT_DEVICE
+       },
 };
 
 void __init sa1100_map_io(void)
index e7aa2681ca6419a13255939bfa52beb3a65eaf72..e8352b7f74b04df8b02ca55900979e864a9e1d0e 100644 (file)
@@ -223,10 +223,22 @@ static void h3xxx_lcd_power(int enable)
 }
 
 static struct map_desc h3600_io_desc[] __initdata = {
- /* virtual           physical           length      type */
-  { H3600_BANK_2_VIRT, SA1100_CS2_PHYS,   0x02800000, MT_DEVICE }, /* static memory bank 2  CS#2 */
-  { H3600_BANK_4_VIRT, SA1100_CS4_PHYS,   0x00800000, MT_DEVICE }, /* static memory bank 4  CS#4 */
-  { H3600_EGPIO_VIRT,  H3600_EGPIO_PHYS,  0x01000000, MT_DEVICE }, /* EGPIO 0          CS#5 */
+       {       /* static memory bank 2  CS#2 */
+               .virtual        =  H3600_BANK_2_VIRT,
+               .pfn            = __phys_to_pfn(SA1100_CS2_PHYS),
+               .length         = 0x02800000,
+               .type           = MT_DEVICE
+       }, {    /* static memory bank 4  CS#4 */
+               .virtual        =  H3600_BANK_4_VIRT,
+               .pfn            = __phys_to_pfn(SA1100_CS4_PHYS),
+               .length         = 0x00800000,
+               .type           = MT_DEVICE
+       }, {    /* EGPIO 0              CS#5 */
+               .virtual        =  H3600_EGPIO_VIRT,
+               .pfn            = __phys_to_pfn(H3600_EGPIO_PHYS),
+               .length         = 0x01000000,
+               .type           = MT_DEVICE
+       }
 };
 
 /*
index 502d65cfe6543ba375ec4e6cf0386cfb0678a521..c922e043c4246166e01fca70bc1291fa345a7eb2 100644 (file)
@@ -57,8 +57,12 @@ static void hackkit_uart_pm(struct uart_port *port, u_int state, u_int oldstate)
  */
 
 static struct map_desc hackkit_io_desc[] __initdata = {
- /* virtual     physical    length      type */
-  { 0xe8000000, 0x00000000, 0x01000000, MT_DEVICE } /* Flash bank 0 */
+       {       /* Flash bank 0 */
+               .virtual        =  0xe8000000,
+               .pfn            = __phys_to_pfn(0x00000000),
+               .length         = 0x01000000,
+               .type           = MT_DEVICE
+       },
 };
 
 static struct sa1100_port_fns hackkit_port_fns __initdata = {
index 2f497112c96a176c0d72b90095c66ffb1d0b5536..9c363bfcf31077aa035e43fce6fcfac082032ca9 100644 (file)
@@ -81,10 +81,22 @@ static int __init jornada720_init(void)
 arch_initcall(jornada720_init);
 
 static struct map_desc jornada720_io_desc[] __initdata = {
- /* virtual     physical    length      type */
-  { 0xf0000000, 0x48000000, 0x00100000, MT_DEVICE }, /* Epson registers */
-  { 0xf1000000, 0x48200000, 0x00100000, MT_DEVICE }, /* Epson frame buffer */
-  { 0xf4000000, 0x40000000, 0x00100000, MT_DEVICE }  /* SA-1111 */
+       {       /* Epson registers */
+               .virtual        =  0xf0000000,
+               .pfn            = __phys_to_pfn(0x48000000),
+               .length         = 0x00100000,
+               .type           = MT_DEVICE
+       }, {    /* Epson frame buffer */
+               .virtual        =  0xf1000000,
+               .pfn            = __phys_to_pfn(0x48200000),
+               .length         = 0x00100000,
+               .type           = MT_DEVICE
+       }, {    /* SA-1111 */
+               .virtual        =  0xf4000000,
+               .pfn            = __phys_to_pfn(0x40000000),
+               .length         = 0x00100000,
+               .type           = MT_DEVICE
+       }
 };
 
 static void __init jornada720_map_io(void)
index ed6744d480aff0f04e2b1bd6a1f742b1e01d3b23..8c9e3dd5294269ce89084f0384055292eda9fe20 100644 (file)
@@ -31,9 +31,17 @@ static void __init lart_init(void)
 }
 
 static struct map_desc lart_io_desc[] __initdata = {
- /* virtual     physical    length      type */
-  { 0xe8000000, 0x00000000, 0x00400000, MT_DEVICE }, /* main flash memory */
-  { 0xec000000, 0x08000000, 0x00400000, MT_DEVICE }  /* main flash, alternative location */
+       {       /* main flash memory */
+               .virtual        =  0xe8000000,
+               .pfn            = __phys_to_pfn(0x00000000),
+               .length         = 0x00400000,
+               .type           = MT_DEVICE
+       }, {    /* main flash, alternative location */
+               .virtual        =  0xec000000,
+               .pfn            = __phys_to_pfn(0x08000000),
+               .length         = 0x00400000,
+               .type           = MT_DEVICE
+       }
 };
 
 static void __init lart_map_io(void)
index fc061641b7be083cb44d89e33fa22c3826e9c272..0c5eff3bdc09c14996a1c7c309de59cb31314425 100644 (file)
@@ -331,9 +331,17 @@ static int __init neponset_init(void)
 subsys_initcall(neponset_init);
 
 static struct map_desc neponset_io_desc[] __initdata = {
- /* virtual     physical    length type */
-  { 0xf3000000, 0x10000000, SZ_1M, MT_DEVICE }, /* System Registers */
-  { 0xf4000000, 0x40000000, SZ_1M, MT_DEVICE }  /* SA-1111 */
+       {       /* System Registers */
+               .virtual        =  0xf3000000,
+               .pfn            = __phys_to_pfn(0x10000000),
+               .length         = SZ_1M,
+               .type           = MT_DEVICE
+       }, {    /* SA-1111 */
+               .virtual        =  0xf4000000,
+               .pfn            = __phys_to_pfn(0x40000000),
+               .length         = SZ_1M,
+               .type           = MT_DEVICE
+       }
 };
 
 void __init neponset_map_io(void)
index 07f6d5fd7bb0a0108f149d8323073012dfe0a9d2..cfb6658e5cdf5fac50e4d9256947d47268483f02 100644 (file)
@@ -60,11 +60,17 @@ EXPORT_SYMBOL(set_cs3_bit);
 EXPORT_SYMBOL(clear_cs3_bit);
 
 static struct map_desc simpad_io_desc[] __initdata = {
-        /* virtual     physical    length      type */
-       /* MQ200 */
-       { 0xf2800000, 0x4b800000, 0x00800000, MT_DEVICE },
-       /* Paules CS3, write only */
-       { 0xf1000000, 0x18000000, 0x00100000, MT_DEVICE },
+       {       /* MQ200 */
+               .virtual        =  0xf2800000,
+               .pfn            = __phys_to_pfn(0x4b800000),
+               .length         = 0x00800000,
+               .type           = MT_DEVICE
+       }, {    /* Paules CS3, write only */
+               .virtual        =  0xf1000000,
+               .pfn            = __phys_to_pfn(0x18000000),
+               .length         = 0x00100000,
+               .type           = MT_DEVICE
+       },
 };
 
 
index 946c0d11c73b6d80c0dd8e60c8725ba478dd270b..2d428b6dbb58a808e3384f643dd1888d7fa06645 100644 (file)
@@ -62,7 +62,12 @@ arch_initcall(shark_init);
 extern void shark_init_irq(void);
 
 static struct map_desc shark_io_desc[] __initdata = {
-       { IO_BASE       , IO_START      , IO_SIZE       , MT_DEVICE }
+       {
+               .virtual        = IO_BASE,
+               .pfn            = __phys_to_pfn(IO_START),
+               .length         = IO_SIZE,
+               .type           = MT_DEVICE
+       }
 };
 
 static void __init shark_map_io(void)
index a30e0451df72230d731978257c23e8570de6f72c..7e4bdd07f4afb4c82778a4c3c796d39684587103 100644 (file)
@@ -186,25 +186,82 @@ void __init versatile_init_irq(void)
 }
 
 static struct map_desc versatile_io_desc[] __initdata = {
- { IO_ADDRESS(VERSATILE_SYS_BASE),   VERSATILE_SYS_BASE,   SZ_4K,      MT_DEVICE },
- { IO_ADDRESS(VERSATILE_SIC_BASE),   VERSATILE_SIC_BASE,   SZ_4K,      MT_DEVICE },
- { IO_ADDRESS(VERSATILE_VIC_BASE),   VERSATILE_VIC_BASE,   SZ_4K,      MT_DEVICE },
- { IO_ADDRESS(VERSATILE_SCTL_BASE),  VERSATILE_SCTL_BASE,  SZ_4K * 9,  MT_DEVICE },
+       {
+               .virtual        =  IO_ADDRESS(VERSATILE_SYS_BASE),
+               .pfn            = __phys_to_pfn(VERSATILE_SYS_BASE),
+               .length         = SZ_4K,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        =  IO_ADDRESS(VERSATILE_SIC_BASE),
+               .pfn            = __phys_to_pfn(VERSATILE_SIC_BASE),
+               .length         = SZ_4K,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        =  IO_ADDRESS(VERSATILE_VIC_BASE),
+               .pfn            = __phys_to_pfn(VERSATILE_VIC_BASE),
+               .length         = SZ_4K,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        =  IO_ADDRESS(VERSATILE_SCTL_BASE),
+               .pfn            = __phys_to_pfn(VERSATILE_SCTL_BASE),
+               .length         = SZ_4K * 9,
+               .type           = MT_DEVICE
+       },
 #ifdef CONFIG_MACH_VERSATILE_AB
- { IO_ADDRESS(VERSATILE_GPIO0_BASE), VERSATILE_GPIO0_BASE, SZ_4K,      MT_DEVICE },
- { IO_ADDRESS(VERSATILE_IB2_BASE),   VERSATILE_IB2_BASE,   SZ_64M,     MT_DEVICE },
+       {
+               .virtual        =  IO_ADDRESS(VERSATILE_GPIO0_BASE),
+               .pfn            = __phys_to_pfn(VERSATILE_GPIO0_BASE),
+               .length         = SZ_4K,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        =  IO_ADDRESS(VERSATILE_IB2_BASE),
+               .pfn            = __phys_to_pfn(VERSATILE_IB2_BASE),
+               .length         = SZ_64M,
+               .type           = MT_DEVICE
+       },
 #endif
 #ifdef CONFIG_DEBUG_LL
- { IO_ADDRESS(VERSATILE_UART0_BASE), VERSATILE_UART0_BASE, SZ_4K,      MT_DEVICE },
+       {
+               .virtual        =  IO_ADDRESS(VERSATILE_UART0_BASE),
+               .pfn            = __phys_to_pfn(VERSATILE_UART0_BASE),
+               .length         = SZ_4K,
+               .type           = MT_DEVICE
+       },
 #endif
 #ifdef CONFIG_PCI
- { IO_ADDRESS(VERSATILE_PCI_CORE_BASE), VERSATILE_PCI_CORE_BASE, SZ_4K, MT_DEVICE },
- { VERSATILE_PCI_VIRT_BASE,          VERSATILE_PCI_BASE,   VERSATILE_PCI_BASE_SIZE, MT_DEVICE },
- { VERSATILE_PCI_CFG_VIRT_BASE,      VERSATILE_PCI_CFG_BASE, VERSATILE_PCI_CFG_BASE_SIZE, MT_DEVICE },
+       {
+               .virtual        =  IO_ADDRESS(VERSATILE_PCI_CORE_BASE),
+               .pfn            = __phys_to_pfn(VERSATILE_PCI_CORE_BASE),
+               .length         = SZ_4K,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        =  VERSATILE_PCI_VIRT_BASE,
+               .pfn            = __phys_to_pfn(VERSATILE_PCI_BASE),
+               .length         = VERSATILE_PCI_BASE_SIZE,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        =  VERSATILE_PCI_CFG_VIRT_BASE,
+               .pfn            = __phys_to_pfn(VERSATILE_PCI_CFG_BASE),
+               .length         = VERSATILE_PCI_CFG_BASE_SIZE,
+               .type           = MT_DEVICE
+       },
 #if 0
- { VERSATILE_PCI_VIRT_MEM_BASE0,     VERSATILE_PCI_MEM_BASE0, SZ_16M,  MT_DEVICE },
- { VERSATILE_PCI_VIRT_MEM_BASE1,     VERSATILE_PCI_MEM_BASE1, SZ_16M,  MT_DEVICE },
- { VERSATILE_PCI_VIRT_MEM_BASE2,     VERSATILE_PCI_MEM_BASE2, SZ_16M,  MT_DEVICE },
+       {
+               .virtual        =  VERSATILE_PCI_VIRT_MEM_BASE0,
+               .pfn            = __phys_to_pfn(VERSATILE_PCI_MEM_BASE0),
+               .length         = SZ_16M,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        =  VERSATILE_PCI_VIRT_MEM_BASE1,
+               .pfn            = __phys_to_pfn(VERSATILE_PCI_MEM_BASE1),
+               .length         = SZ_16M,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        =  VERSATILE_PCI_VIRT_MEM_BASE2,
+               .pfn            = __phys_to_pfn(VERSATILE_PCI_MEM_BASE2),
+               .length         = SZ_16M,
+               .type           = MT_DEVICE
+       },
 #endif
 #endif
 };
index 26356ce4da5448165bcac172139deef54a008462..82f4d5e27c5492f98dac798f807194eb6800fc03 100644 (file)
@@ -75,7 +75,7 @@ static struct vm_region consistent_head = {
 };
 
 static struct vm_region *
-vm_region_alloc(struct vm_region *head, size_t size, int gfp)
+vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
 {
        unsigned long addr = head->vm_start, end = head->vm_end - size;
        unsigned long flags;
@@ -133,7 +133,7 @@ static struct vm_region *vm_region_find(struct vm_region *head, unsigned long ad
 #endif
 
 static void *
-__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, int gfp,
+__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
            pgprot_t prot)
 {
        struct page *page;
@@ -251,7 +251,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, int gfp,
  * virtual and bus address for that space.
  */
 void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, int gfp)
+dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
 {
        return __dma_alloc(dev, size, handle, gfp,
                           pgprot_noncached(pgprot_kernel));
@@ -263,7 +263,7 @@ EXPORT_SYMBOL(dma_alloc_coherent);
  * dma_alloc_coherent above.
  */
 void *
-dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, int gfp)
+dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
 {
        return __dma_alloc(dev, size, handle, gfp,
                           pgprot_writecombine(pgprot_kernel));
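The signature changes above are part of the tree-wide gfp_t annotation: allocation-flag parameters move from a plain int to the gfp_t typedef so that sparse can flag swapped or mismatched arguments. Callers that pass the GFP_* constants are unaffected; a hedged sketch:

    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>

    /* Sketch only -- "dev" is whatever device is doing the DMA. */
    static void *alloc_one_page(struct device *dev, dma_addr_t *dma)
    {
            /* GFP_KERNEL already has type gfp_t, so this compiles as
             * before; a bare integer here would now draw a sparse warning. */
            return dma_alloc_coherent(dev, PAGE_SIZE, dma, GFP_KERNEL);
    }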
index edffa47a4b2aab8b3e05f42f65bf20e496fc96d5..f4496813615ae01dba12cd8dced1a8ea82173c67 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  linux/arch/arm/mm/init.c
  *
- *  Copyright (C) 1995-2002 Russell King
+ *  Copyright (C) 1995-2005 Russell King
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -86,14 +86,19 @@ void show_mem(void)
        printk("%d pages swap cached\n", cached);
 }
 
-struct node_info {
-       unsigned int start;
-       unsigned int end;
-       int bootmap_pages;
-};
+static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
+{
+       return pmd_offset(pgd, virt);
+}
+
+static inline pmd_t *pmd_off_k(unsigned long virt)
+{
+       return pmd_off(pgd_offset_k(virt), virt);
+}
 
-#define O_PFN_DOWN(x)  ((x) >> PAGE_SHIFT)
-#define O_PFN_UP(x)    (PAGE_ALIGN(x) >> PAGE_SHIFT)
+#define for_each_nodebank(iter,mi,no)                  \
+       for (iter = 0; iter < mi->nr_banks; iter++)     \
+               if (mi->bank[iter].node == no)
 
 /*
  * FIXME: We really want to avoid allocating the bootmap bitmap
@@ -106,15 +111,12 @@ find_bootmap_pfn(int node, struct meminfo *mi, unsigned int bootmap_pages)
 {
        unsigned int start_pfn, bank, bootmap_pfn;
 
-       start_pfn   = O_PFN_UP(__pa(&_end));
+       start_pfn   = PAGE_ALIGN(__pa(&_end)) >> PAGE_SHIFT;
        bootmap_pfn = 0;
 
-       for (bank = 0; bank < mi->nr_banks; bank ++) {
+       for_each_nodebank(bank, mi, node) {
                unsigned int start, end;
 
-               if (mi->bank[bank].node != node)
-                       continue;
-
                start = mi->bank[bank].start >> PAGE_SHIFT;
                end   = (mi->bank[bank].size +
                         mi->bank[bank].start) >> PAGE_SHIFT;
@@ -140,92 +142,6 @@ find_bootmap_pfn(int node, struct meminfo *mi, unsigned int bootmap_pages)
        return bootmap_pfn;
 }
 
-/*
- * Scan the memory info structure and pull out:
- *  - the end of memory
- *  - the number of nodes
- *  - the pfn range of each node
- *  - the number of bootmem bitmap pages
- */
-static unsigned int __init
-find_memend_and_nodes(struct meminfo *mi, struct node_info *np)
-{
-       unsigned int i, bootmem_pages = 0, memend_pfn = 0;
-
-       for (i = 0; i < MAX_NUMNODES; i++) {
-               np[i].start = -1U;
-               np[i].end = 0;
-               np[i].bootmap_pages = 0;
-       }
-
-       for (i = 0; i < mi->nr_banks; i++) {
-               unsigned long start, end;
-               int node;
-
-               if (mi->bank[i].size == 0) {
-                       /*
-                        * Mark this bank with an invalid node number
-                        */
-                       mi->bank[i].node = -1;
-                       continue;
-               }
-
-               node = mi->bank[i].node;
-
-               /*
-                * Make sure we haven't exceeded the maximum number of nodes
-                * that we have in this configuration.  If we have, we're in
-                * trouble.  (maybe we ought to limit, instead of bugging?)
-                */
-               if (node >= MAX_NUMNODES)
-                       BUG();
-               node_set_online(node);
-
-               /*
-                * Get the start and end pfns for this bank
-                */
-               start = mi->bank[i].start >> PAGE_SHIFT;
-               end   = (mi->bank[i].start + mi->bank[i].size) >> PAGE_SHIFT;
-
-               if (np[node].start > start)
-                       np[node].start = start;
-
-               if (np[node].end < end)
-                       np[node].end = end;
-
-               if (memend_pfn < end)
-                       memend_pfn = end;
-       }
-
-       /*
-        * Calculate the number of pages we require to
-        * store the bootmem bitmaps.
-        */
-       for_each_online_node(i) {
-               if (np[i].end == 0)
-                       continue;
-
-               np[i].bootmap_pages = bootmem_bootmap_pages(np[i].end -
-                                                           np[i].start);
-               bootmem_pages += np[i].bootmap_pages;
-       }
-
-       high_memory = __va(memend_pfn << PAGE_SHIFT);
-
-       /*
-        * This doesn't seem to be used by the Linux memory
-        * manager any more.  If we can get rid of it, we
-        * also get rid of some of the stuff above as well.
-        *
-        * Note: max_low_pfn and max_pfn reflect the number
-        * of _pages_ in the system, not the maximum PFN.
-        */
-       max_low_pfn = memend_pfn - O_PFN_DOWN(PHYS_OFFSET);
-       max_pfn = memend_pfn - O_PFN_DOWN(PHYS_OFFSET);
-
-       return bootmem_pages;
-}
-
 static int __init check_initrd(struct meminfo *mi)
 {
        int initrd_node = -2;
@@ -266,9 +182,8 @@ static int __init check_initrd(struct meminfo *mi)
 /*
  * Reserve the various regions of node 0
  */
-static __init void reserve_node_zero(unsigned int bootmap_pfn, unsigned int bootmap_pages)
+static __init void reserve_node_zero(pg_data_t *pgdat)
 {
-       pg_data_t *pgdat = NODE_DATA(0);
        unsigned long res_size = 0;
 
        /*
@@ -288,13 +203,6 @@ static __init void reserve_node_zero(unsigned int bootmap_pfn, unsigned int boot
        reserve_bootmem_node(pgdat, __pa(swapper_pg_dir),
                             PTRS_PER_PGD * sizeof(pgd_t));
 
-       /*
-        * And don't forget to reserve the allocator bitmap,
-        * which will be freed later.
-        */
-       reserve_bootmem_node(pgdat, bootmap_pfn << PAGE_SHIFT,
-                            bootmap_pages << PAGE_SHIFT);
-
        /*
         * Hmm... This should go elsewhere, but we really really need to
         * stop things allocating the low memory; ideally we need a better
@@ -324,183 +232,276 @@ static __init void reserve_node_zero(unsigned int bootmap_pfn, unsigned int boot
                reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size);
 }
 
-/*
- * Register all available RAM in this node with the bootmem allocator.
- */
-static inline void free_bootmem_node_bank(int node, struct meminfo *mi)
+void __init build_mem_type_table(void);
+void __init create_mapping(struct map_desc *md);
+
+       mov     r5, r0                          @ save sleep mode
+bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
 {
-       pg_data_t *pgdat = NODE_DATA(node);
-       int bank;
+       unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
+       unsigned long start_pfn, end_pfn, boot_pfn;
+       unsigned int boot_pages;
+       pg_data_t *pgdat;
+       int i;
 
-       for (bank = 0; bank < mi->nr_banks; bank++)
-               if (mi->bank[bank].node == node)
-                       free_bootmem_node(pgdat, mi->bank[bank].start,
-                                         mi->bank[bank].size);
-}
+       start_pfn = -1UL;
+       end_pfn = 0;
 
-/*
- * Initialise the bootmem allocator for all nodes.  This is called
- * early during the architecture specific initialisation.
- */
-static void __init bootmem_init(struct meminfo *mi)
-{
-       struct node_info node_info[MAX_NUMNODES], *np = node_info;
-       unsigned int bootmap_pages, bootmap_pfn, map_pg;
-       int node, initrd_node;
+       /*
+        * Calculate the pfn range, and map the memory banks for this node.
+        */
+       for_each_nodebank(i, mi, node) {
+               unsigned long start, end;
+               struct map_desc map;
 
-       bootmap_pages = find_memend_and_nodes(mi, np);
-       bootmap_pfn   = find_bootmap_pfn(0, mi, bootmap_pages);
-       initrd_node   = check_initrd(mi);
+               start = mi->bank[i].start >> PAGE_SHIFT;
+               end = (mi->bank[i].start + mi->bank[i].size) >> PAGE_SHIFT;
 
-       map_pg = bootmap_pfn;
+               if (start_pfn > start)
+                       start_pfn = start;
+               if (end_pfn < end)
+                       end_pfn = end;
+
+               map.pfn = __phys_to_pfn(mi->bank[i].start);
+               map.virtual = __phys_to_virt(mi->bank[i].start);
+               map.length = mi->bank[i].size;
+               map.type = MT_MEMORY;
+
+               create_mapping(&map);
+       }
 
        /*
-        * Initialise the bootmem nodes.
-        *
-        * What we really want to do is:
-        *
-        *   unmap_all_regions_except_kernel();
-        *   for_each_node_in_reverse_order(node) {
-        *     map_node(node);
-        *     allocate_bootmem_map(node);
-        *     init_bootmem_node(node);
-        *     free_bootmem_node(node);
-        *   }
-        *
-        * but this is a 2.5-type change.  For now, we just set
-        * the nodes up in reverse order.
-        *
-        * (we could also do with rolling bootmem_init and paging_init
-        * into one generic "memory_init" type function).
+        * If there is no memory in this node, ignore it.
         */
-       np += num_online_nodes() - 1;
-       for (node = num_online_nodes() - 1; node >= 0; node--, np--) {
-               /*
-                * If there are no pages in this node, ignore it.
-                * Note that node 0 must always have some pages.
-                */
-               if (np->end == 0 || !node_online(node)) {
-                       if (node == 0)
-                               BUG();
-                       continue;
-               }
+       if (end_pfn == 0)
+               return end_pfn;
 
-               /*
-                * Initialise the bootmem allocator.
-                */
-               init_bootmem_node(NODE_DATA(node), map_pg, np->start, np->end);
-               free_bootmem_node_bank(node, mi);
-               map_pg += np->bootmap_pages;
+       /*
+        * Allocate the bootmem bitmap page.
+        */
+       boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
+       boot_pfn = find_bootmap_pfn(node, mi, boot_pages);
 
-               /*
-                * If this is node 0, we need to reserve some areas ASAP -
-                * we may use bootmem on node 0 to setup the other nodes.
-                */
-               if (node == 0)
-                       reserve_node_zero(bootmap_pfn, bootmap_pages);
-       }
+       /*
+        * Initialise the bootmem allocator for this node, handing the
+        * memory banks over to bootmem.
+        */
+       node_set_online(node);
+       pgdat = NODE_DATA(node);
+       init_bootmem_node(pgdat, boot_pfn, start_pfn, end_pfn);
 
+       for_each_nodebank(i, mi, node)
+               free_bootmem_node(pgdat, mi->bank[i].start, mi->bank[i].size);
+
+       /*
+        * Reserve the bootmem bitmap for this node.
+        */
+       reserve_bootmem_node(pgdat, boot_pfn << PAGE_SHIFT,
+                            boot_pages << PAGE_SHIFT);
 
 #ifdef CONFIG_BLK_DEV_INITRD
-       if (phys_initrd_size && initrd_node >= 0) {
-               reserve_bootmem_node(NODE_DATA(initrd_node), phys_initrd_start,
+       /*
+        * If the initrd is in this node, reserve its memory.
+        */
+       if (node == initrd_node) {
+               reserve_bootmem_node(pgdat, phys_initrd_start,
                                     phys_initrd_size);
                initrd_start = __phys_to_virt(phys_initrd_start);
                initrd_end = initrd_start + phys_initrd_size;
        }
 #endif
 
-       BUG_ON(map_pg != bootmap_pfn + bootmap_pages);
+       /*
+        * Finally, reserve any node zero regions.
+        */
+       if (node == 0)
+               reserve_node_zero(pgdat);
+
+       /*
+        * initialise the zones within this node.
+        */
+       memset(zone_size, 0, sizeof(zone_size));
+       memset(zhole_size, 0, sizeof(zhole_size));
+
+       /*
+        * The size of this node has already been determined.  If we need
+        * to do anything fancy with the allocation of this memory to the
+        * zones, now is the time to do it.
+        */
+       zone_size[0] = end_pfn - start_pfn;
+
+       /*
+        * For each bank in this node, calculate the size of the holes.
+        *  holes = node_size - sum(bank_sizes_in_node)
+        */
+       zhole_size[0] = zone_size[0];
+       for_each_nodebank(i, mi, node)
+               zhole_size[0] -= mi->bank[i].size >> PAGE_SHIFT;
+
+       /*
+        * Adjust the sizes according to any special requirements for
+        * this machine type.
+        */
+       arch_adjust_zones(node, zone_size, zhole_size);
+
+       free_area_init_node(node, pgdat, zone_size, start_pfn, zhole_size);
+
+       return end_pfn;
 }
 
-/*
- * paging_init() sets up the page tables, initialises the zone memory
- * maps, and sets up the zero page, bad page and bad page tables.
- */
-void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
+static void __init bootmem_init(struct meminfo *mi)
 {
-       void *zero_page;
-       int node;
+       unsigned long addr, memend_pfn = 0;
+       int node, initrd_node, i;
 
-       bootmem_init(mi);
+       /*
+        * Invalidate the node number for empty or invalid memory banks
+        */
+       for (i = 0; i < mi->nr_banks; i++)
+               if (mi->bank[i].size == 0 || mi->bank[i].node >= MAX_NUMNODES)
+                       mi->bank[i].node = -1;
 
        memcpy(&meminfo, mi, sizeof(meminfo));
 
+#ifdef CONFIG_XIP_KERNEL
+#error needs fixing
+       p->pfn        = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & PMD_MASK);
+       p->virtual    = (unsigned long)&_stext & PMD_MASK;
+       p->length     = ((unsigned long)&_etext - p->virtual + ~PMD_MASK) & PMD_MASK;
+       p->type       = MT_ROM;
+       p ++;
+#endif
+
        /*
-        * allocate the zero page.  Note that we count on this going ok.
+        * Clear out all the mappings below the kernel image.
+        * FIXME: what about XIP?
         */
-       zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
+       for (addr = 0; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
+               pmd_clear(pmd_off_k(addr));
 
        /*
-        * initialise the page tables.
+        * Clear out all the kernel space mappings, except for the first
+        * memory bank, up to the end of the vmalloc region.
         */
-       memtable_init(mi);
-       if (mdesc->map_io)
-               mdesc->map_io();
-       local_flush_tlb_all();
+       for (addr = __phys_to_virt(mi->bank[0].start + mi->bank[0].size);
+            addr < VMALLOC_END; addr += PGDIR_SIZE)
+               pmd_clear(pmd_off_k(addr));
 
        /*
-        * initialise the zones within each node
+        * Locate which node contains the ramdisk image, if any.
         */
-       for_each_online_node(node) {
-               unsigned long zone_size[MAX_NR_ZONES];
-               unsigned long zhole_size[MAX_NR_ZONES];
-               struct bootmem_data *bdata;
-               pg_data_t *pgdat;
-               int i;
+       initrd_node = check_initrd(mi);
 
-               /*
-                * Initialise the zone size information.
-                */
-               for (i = 0; i < MAX_NR_ZONES; i++) {
-                       zone_size[i]  = 0;
-                       zhole_size[i] = 0;
-               }
+       /*
+        * Run through each node initialising the bootmem allocator.
+        */
+       for_each_node(node) {
+               unsigned long end_pfn;
 
-               pgdat = NODE_DATA(node);
-               bdata = pgdat->bdata;
+               end_pfn = bootmem_init_node(node, initrd_node, mi);
 
                /*
-                * The size of this node has already been determined.
-                * If we need to do anything fancy with the allocation
-                * of this memory to the zones, now is the time to do
-                * it.
+                * Remember the highest memory PFN.
                 */
-               zone_size[0] = bdata->node_low_pfn -
-                               (bdata->node_boot_start >> PAGE_SHIFT);
+               if (end_pfn > memend_pfn)
+                       memend_pfn = end_pfn;
+       }
 
-               /*
-                * If this zone has zero size, skip it.
-                */
-               if (!zone_size[0])
-                       continue;
+       high_memory = __va(memend_pfn << PAGE_SHIFT);
 
-               /*
-                * For each bank in this node, calculate the size of the
-                * holes.  holes = node_size - sum(bank_sizes_in_node)
-                */
-               zhole_size[0] = zone_size[0];
-               for (i = 0; i < mi->nr_banks; i++) {
-                       if (mi->bank[i].node != node)
-                               continue;
+       /*
+        * This doesn't seem to be used by the Linux memory manager any
+        * more, but is used by ll_rw_block.  If we can get rid of it, we
+        * also get rid of some of the stuff above as well.
+        *
+        * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
+        * the system, not the maximum PFN.
+        */
+       max_pfn = max_low_pfn = memend_pfn - PHYS_PFN_OFFSET;
+}
 
-                       zhole_size[0] -= mi->bank[i].size >> PAGE_SHIFT;
-               }
+/*
+ * Set up the device mappings.  Since we clear out the page tables for all
+ * mappings above VMALLOC_END, we will remove any debug device mappings.
+ * This means you have to be careful how you debug this function, or any
+ * called function.  (Do it by code inspection!)
+ */
+static void __init devicemaps_init(struct machine_desc *mdesc)
+{
+       struct map_desc map;
+       unsigned long addr;
+       void *vectors;
 
-               /*
-                * Adjust the sizes according to any special
-                * requirements for this machine type.
-                */
-               arch_adjust_zones(node, zone_size, zhole_size);
+       for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
+               pmd_clear(pmd_off_k(addr));
 
-               free_area_init_node(node, pgdat, zone_size,
-                               bdata->node_boot_start >> PAGE_SHIFT, zhole_size);
+       /*
+        * Map the cache flushing regions.
+        */
+#ifdef FLUSH_BASE
+       map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
+       map.virtual = FLUSH_BASE;
+       map.length = PGDIR_SIZE;
+       map.type = MT_CACHECLEAN;
+       create_mapping(&map);
+#endif
+#ifdef FLUSH_BASE_MINICACHE
+       map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + PGDIR_SIZE);
+       map.virtual = FLUSH_BASE_MINICACHE;
+       map.length = PGDIR_SIZE;
+       map.type = MT_MINICLEAN;
+       create_mapping(&map);
+#endif
+
+       flush_cache_all();
+       local_flush_tlb_all();
+
+       vectors = alloc_bootmem_low_pages(PAGE_SIZE);
+       BUG_ON(!vectors);
+
+       /*
+        * Create a mapping for the machine vectors at the high-vectors
+        * location (0xffff0000).  If we aren't using high-vectors, also
+        * create a mapping at the low-vectors virtual address.
+        */
+       map.pfn = __phys_to_pfn(virt_to_phys(vectors));
+       map.virtual = 0xffff0000;
+       map.length = PAGE_SIZE;
+       map.type = MT_HIGH_VECTORS;
+       create_mapping(&map);
+
+       if (!vectors_high()) {
+               map.virtual = 0;
+               map.type = MT_LOW_VECTORS;
+               create_mapping(&map);
        }
 
        /*
-        * finish off the bad pages once
-        * the mem_map is initialised
+        * Ask the machine support to map in the statically mapped devices.
+        * After this point, we can start to touch devices again.
+        */
+       if (mdesc->map_io)
+               mdesc->map_io();
+}
+
+/*
+ * paging_init() sets up the page tables, initialises the zone memory
+ * maps, and sets up the zero page, bad page and bad page tables.
+ */
+void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
+{
+       void *zero_page;
+
+       build_mem_type_table();
+       bootmem_init(mi);
+       devicemaps_init(mdesc);
+
+       top_pmd = pmd_off_k(0xffff0000);
+
+       /*
+        * allocate the zero page.  Note that we count on this going ok.
         */
+       zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
        memzero(zero_page, PAGE_SIZE);
        empty_zero_page = virt_to_page(zero_page);
        flush_dcache_page(empty_zero_page);
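Most of the rework above leans on two small helpers added near the top of the file: pmd_off_k(), which returns the kernel pmd entry covering a virtual address, and for_each_nodebank(), which iterates only over the memory banks belonging to a given node. A hedged sketch of the iteration pattern:

    /* Sketch only -- counts the pages one node contributes, using the
     * for_each_nodebank() macro introduced in this patch. */
    static unsigned long __init node_pages(struct meminfo *mi, int node)
    {
            unsigned long pages = 0;
            int i;

            for_each_nodebank(i, mi, node)
                    pages += mi->bank[i].size >> PAGE_SHIFT;

            return pages;
    }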
@@ -562,10 +563,7 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi)
         * may not be the case, especially if the user has provided the
         * information on the command line.
         */
-       for (i = 0; i < mi->nr_banks; i++) {
-               if (mi->bank[i].size == 0 || mi->bank[i].node != node)
-                       continue;
-
+       for_each_nodebank(i, mi, node) {
                bank_start = mi->bank[i].start >> PAGE_SHIFT;
                if (bank_start < prev_bank_end) {
                        printk(KERN_ERR "MEM: unordered memory banks.  "
index 7110e54182b1e146ce6d26254965436cb971d6d4..6fb1258df1b5d03a279bf4023c1b9fb7083abb41 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/vmalloc.h>
 
 #include <asm/cacheflush.h>
+#include <asm/hardware.h>
 #include <asm/io.h>
 #include <asm/tlbflush.h>
 
index d125a3dc061c8fb5efb27ceed11ce731ddafe4a5..61bc2fa0511ec95380c9f8ccaa114d040290b622 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  linux/arch/arm/mm/mm-armv.c
  *
- *  Copyright (C) 1998-2002 Russell King
+ *  Copyright (C) 1998-2005 Russell King
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -305,16 +305,6 @@ alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pg
        set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
 }
 
-/*
- * Clear any PGD mapping.  On a two-level page table system,
- * the clearance is done by the middle-level functions (pmd)
- * rather than the top-level (pgd) functions.
- */
-static inline void clear_mapping(unsigned long virt)
-{
-       pmd_clear(pmd_off_k(virt));
-}
-
 struct mem_types {
        unsigned int    prot_pte;
        unsigned int    prot_l1;
@@ -373,7 +363,7 @@ static struct mem_types mem_types[] __initdata = {
 /*
  * Adjust the PMD section entries according to the CPU in use.
  */
-static void __init build_mem_type_table(void)
+void __init build_mem_type_table(void)
 {
        struct cachepolicy *cp;
        unsigned int cr = get_cr();
@@ -483,25 +473,25 @@ static void __init build_mem_type_table(void)
  * offsets, and we take full advantage of sections and
  * supersections.
  */
-static void __init create_mapping(struct map_desc *md)
+void __init create_mapping(struct map_desc *md)
 {
        unsigned long virt, length;
        int prot_sect, prot_l1, domain;
        pgprot_t prot_pte;
-       long off;
+       unsigned long off = (u32)__pfn_to_phys(md->pfn);
 
        if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
                printk(KERN_WARNING "BUG: not creating mapping for "
-                      "0x%08lx at 0x%08lx in user region\n",
-                      md->physical, md->virtual);
+                      "0x%016llx at 0x%08lx in user region\n",
+                      __pfn_to_phys((u64)md->pfn), md->virtual);
                return;
        }
 
        if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
            md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
-               printk(KERN_WARNING "BUG: mapping for 0x%08lx at 0x%08lx "
+               printk(KERN_WARNING "BUG: mapping for 0x%016llx at 0x%08lx "
                       "overlaps vmalloc space\n",
-                      md->physical, md->virtual);
+                      __pfn_to_phys((u64)md->pfn), md->virtual);
        }
 
        domain    = mem_types[md->type].domain;
@@ -509,15 +499,40 @@ static void __init create_mapping(struct map_desc *md)
        prot_l1   = mem_types[md->type].prot_l1 | PMD_DOMAIN(domain);
        prot_sect = mem_types[md->type].prot_sect | PMD_DOMAIN(domain);
 
+       /*
+        * Catch 36-bit addresses
+        */
+       if (md->pfn >= 0x100000) {
+               if (domain) {
+                       printk(KERN_ERR "MM: invalid domain in supersection "
+                               "mapping for 0x%016llx at 0x%08lx\n",
+                               __pfn_to_phys((u64)md->pfn), md->virtual);
+                       return;
+               }
+               if ((md->virtual | md->length | __pfn_to_phys(md->pfn))
+                       & ~SUPERSECTION_MASK) {
+                       printk(KERN_ERR "MM: cannot create mapping for "
+                               "0x%016llx at 0x%08lx invalid alignment\n",
+                               __pfn_to_phys((u64)md->pfn), md->virtual);
+                       return;
+               }
+
+               /*
+                * Shift bits [35:32] of address into bits [23:20] of PMD
+                * (See ARMv6 spec).
+                */
+               off |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
+       }
+
        virt   = md->virtual;
-       off    = md->physical - virt;
+       off   -= virt;
        length = md->length;
 
        if (mem_types[md->type].prot_l1 == 0 &&
            (virt & 0xfffff || (virt + off) & 0xfffff || (virt + length) & 0xfffff)) {
                printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
                       "be mapped using pages, ignoring.\n",
-                      md->physical, md->virtual);
+                      __pfn_to_phys(md->pfn), md->virtual);
                return;
        }
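For page frame numbers at or above 0x100000 (physical addresses of 4GB and up), the new code folds physical address bits [35:32] into bits [23:20] of the section offset, which is where the ARMv6 supersection descriptor expects them. A standalone sketch of that shift, assuming the usual PAGE_SHIFT of 12:

    /* Sketch only -- reproduces the bit manipulation added above. */
    #define PAGE_SHIFT      12

    static unsigned long supersection_high_bits(unsigned long pfn)
    {
            /* pfn bits [23:20] are physical address bits [35:32];
             * place them in bits [23:20] of the PMD entry. */
            return ((pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20;
    }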
 
@@ -535,13 +550,22 @@ static void __init create_mapping(struct map_desc *md)
         *      of the actual domain assignments in use.
         */
        if (cpu_architecture() >= CPU_ARCH_ARMv6 && domain == 0) {
-               /* Align to supersection boundary */
-               while ((virt & ~SUPERSECTION_MASK || (virt + off) &
-                       ~SUPERSECTION_MASK) && length >= (PGDIR_SIZE / 2)) {
-                       alloc_init_section(virt, virt + off, prot_sect);
-
-                       virt   += (PGDIR_SIZE / 2);
-                       length -= (PGDIR_SIZE / 2);
+               /*
+                * Align to supersection boundary if these are not high pages.
+                * High pages have already been checked for proper
+                * alignment above and they will fail the SUPERSECTION_MASK
+                * check because of the way the address is encoded into
+                * offset.
+                */
+               if (md->pfn <= 0x100000) {
+                       while ((virt & ~SUPERSECTION_MASK ||
+                               (virt + off) & ~SUPERSECTION_MASK) &&
+                               length >= (PGDIR_SIZE / 2)) {
+                               alloc_init_section(virt, virt + off, prot_sect);
+
+                               virt   += (PGDIR_SIZE / 2);
+                               length -= (PGDIR_SIZE / 2);
+                       }
                }
 
                while (length >= SUPERSECTION_SIZE) {
@@ -601,100 +625,6 @@ void setup_mm_for_reboot(char mode)
        }
 }
 
-extern void _stext, _etext;
-
-/*
- * Setup initial mappings.  We use the page we allocated for zero page to hold
- * the mappings, which will get overwritten by the vectors in traps_init().
- * The mappings must be in virtual address order.
- */
-void __init memtable_init(struct meminfo *mi)
-{
-       struct map_desc *init_maps, *p, *q;
-       unsigned long address = 0;
-       int i;
-
-       build_mem_type_table();
-
-       init_maps = p = alloc_bootmem_low_pages(PAGE_SIZE);
-
-#ifdef CONFIG_XIP_KERNEL
-       p->physical   = CONFIG_XIP_PHYS_ADDR & PMD_MASK;
-       p->virtual    = (unsigned long)&_stext & PMD_MASK;
-       p->length     = ((unsigned long)&_etext - p->virtual + ~PMD_MASK) & PMD_MASK;
-       p->type       = MT_ROM;
-       p ++;
-#endif
-
-       for (i = 0; i < mi->nr_banks; i++) {
-               if (mi->bank[i].size == 0)
-                       continue;
-
-               p->physical   = mi->bank[i].start;
-               p->virtual    = __phys_to_virt(p->physical);
-               p->length     = mi->bank[i].size;
-               p->type       = MT_MEMORY;
-               p ++;
-       }
-
-#ifdef FLUSH_BASE
-       p->physical   = FLUSH_BASE_PHYS;
-       p->virtual    = FLUSH_BASE;
-       p->length     = PGDIR_SIZE;
-       p->type       = MT_CACHECLEAN;
-       p ++;
-#endif
-
-#ifdef FLUSH_BASE_MINICACHE
-       p->physical   = FLUSH_BASE_PHYS + PGDIR_SIZE;
-       p->virtual    = FLUSH_BASE_MINICACHE;
-       p->length     = PGDIR_SIZE;
-       p->type       = MT_MINICLEAN;
-       p ++;
-#endif
-
-       /*
-        * Go through the initial mappings, but clear out any
-        * pgdir entries that are not in the description.
-        */
-       q = init_maps;
-       do {
-               if (address < q->virtual || q == p) {
-                       clear_mapping(address);
-                       address += PGDIR_SIZE;
-               } else {
-                       create_mapping(q);
-
-                       address = q->virtual + q->length;
-                       address = (address + PGDIR_SIZE - 1) & PGDIR_MASK;
-
-                       q ++;
-               }
-       } while (address != 0);
-
-       /*
-        * Create a mapping for the machine vectors at the high-vectors
-        * location (0xffff0000).  If we aren't using high-vectors, also
-        * create a mapping at the low-vectors virtual address.
-        */
-       init_maps->physical   = virt_to_phys(init_maps);
-       init_maps->virtual    = 0xffff0000;
-       init_maps->length     = PAGE_SIZE;
-       init_maps->type       = MT_HIGH_VECTORS;
-       create_mapping(init_maps);
-
-       if (!vectors_high()) {
-               init_maps->virtual = 0;
-               init_maps->type = MT_LOW_VECTORS;
-               create_mapping(init_maps);
-       }
-
-       flush_cache_all();
-       local_flush_tlb_all();
-
-       top_pmd = pmd_off_k(0xffff0000);
-}
-
 /*
  * Create the architecture specific mappings
  */
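
A rough sketch of the 36-bit handling added above (assuming PAGE_SHIFT == 12, as on ARM): bits [35:32] of the physical address are folded into bits [23:20] of the 32-bit section offset used for supersection PMD entries. The helper name below is illustrative only, not part of the patch.

	/* Illustrative only: encode a >4GB page frame number into the offset
	 * used for supersection mappings. */
	static unsigned long supersection_offset(unsigned long pfn)
	{
		unsigned long off = pfn << 12;			/* low 32 bits of the physical address */

		if (pfn >= 0x100000)				/* physical address at or above 4GB */
			off |= ((pfn >> (32 - 12)) & 0xF) << 20;	/* phys[35:32] -> off[23:20] */

		return off;
	}
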
index 8ffb523e6c77345c097da32221aa5fdf3f34a58e..6a94e54848fd7a965294c1be8fc72e350878aaee 100644 (file)
@@ -6,6 +6,6 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
                oprofilefs.o oprofile_stats.o \
                timer_int.o )
 
-oprofile-y                             := $(DRIVER_OBJS) init.o backtrace.o
-oprofile-$(CONFIG_CPU_XSCALE)          += common.o op_model_xscale.o
+oprofile-y                             := $(DRIVER_OBJS) common.o backtrace.o
+oprofile-$(CONFIG_CPU_XSCALE)          += op_model_xscale.o
 
index e57dde88289813d785b332de04be2eaebd98a6dd..1415930ceee1a0908d32d540fb8867a1975cdedc 100644 (file)
 #include <linux/init.h>
 #include <linux/oprofile.h>
 #include <linux/errno.h>
-#include <asm/semaphore.h>
 #include <linux/sysdev.h>
+#include <asm/semaphore.h>
 
 #include "op_counter.h"
 #include "op_arm_model.h"
 
-static struct op_arm_model_spec *pmu_model;
-static int pmu_enabled;
-static struct semaphore pmu_sem;
-
-static int pmu_start(void);
-static int pmu_setup(void);
-static void pmu_stop(void);
-static int pmu_create_files(struct super_block *, struct dentry *);
-
-#ifdef CONFIG_PM
-static int pmu_suspend(struct sys_device *dev, pm_message_t state)
-{
-       if (pmu_enabled)
-               pmu_stop();
-       return 0;
-}
-
-static int pmu_resume(struct sys_device *dev)
-{
-       if (pmu_enabled)
-               pmu_start();
-       return 0;
-}
-
-static struct sysdev_class oprofile_sysclass = {
-       set_kset_name("oprofile"),
-       .resume         = pmu_resume,
-       .suspend        = pmu_suspend,
-};
-
-static struct sys_device device_oprofile = {
-       .id             = 0,
-       .cls            = &oprofile_sysclass,
-};
-
-static int __init init_driverfs(void)
-{
-       int ret;
-
-       if (!(ret = sysdev_class_register(&oprofile_sysclass)))
-               ret = sysdev_register(&device_oprofile);
-
-       return ret;
-}
-
-static void  exit_driverfs(void)
-{
-       sysdev_unregister(&device_oprofile);
-       sysdev_class_unregister(&oprofile_sysclass);
-}
-#else
-#define init_driverfs()        do { } while (0)
-#define exit_driverfs() do { } while (0)
-#endif /* CONFIG_PM */
+static struct op_arm_model_spec *op_arm_model;
+static int op_arm_enabled;
+static struct semaphore op_arm_sem;
 
 struct op_counter_config counter_config[OP_MAX_COUNTER];
 
-static int pmu_create_files(struct super_block *sb, struct dentry *root)
+static int op_arm_create_files(struct super_block *sb, struct dentry *root)
 {
        unsigned int i;
 
-       for (i = 0; i < pmu_model->num_counters; i++) {
+       for (i = 0; i < op_arm_model->num_counters; i++) {
                struct dentry *dir;
                char buf[2];
 
@@ -94,63 +43,123 @@ static int pmu_create_files(struct super_block *sb, struct dentry *root)
        return 0;
 }
 
-static int pmu_setup(void)
+static int op_arm_setup(void)
 {
        int ret;
 
        spin_lock(&oprofilefs_lock);
-       ret = pmu_model->setup_ctrs();
+       ret = op_arm_model->setup_ctrs();
        spin_unlock(&oprofilefs_lock);
        return ret;
 }
 
-static int pmu_start(void)
+static int op_arm_start(void)
 {
        int ret = -EBUSY;
 
-       down(&pmu_sem);
-       if (!pmu_enabled) {
-               ret = pmu_model->start();
-               pmu_enabled = !ret;
+       down(&op_arm_sem);
+       if (!op_arm_enabled) {
+               ret = op_arm_model->start();
+               op_arm_enabled = !ret;
        }
-       up(&pmu_sem);
+       up(&op_arm_sem);
        return ret;
 }
 
-static void pmu_stop(void)
+static void op_arm_stop(void)
+{
+       down(&op_arm_sem);
+       if (op_arm_enabled)
+               op_arm_model->stop();
+       op_arm_enabled = 0;
+       up(&op_arm_sem);
+}
+
+#ifdef CONFIG_PM
+static int op_arm_suspend(struct sys_device *dev, pm_message_t state)
 {
-       down(&pmu_sem);
-       if (pmu_enabled)
-               pmu_model->stop();
-       pmu_enabled = 0;
-       up(&pmu_sem);
+       down(&op_arm_sem);
+       if (op_arm_enabled)
+               op_arm_model->stop();
+       up(&op_arm_sem);
+       return 0;
 }
 
-int __init pmu_init(struct oprofile_operations *ops, struct op_arm_model_spec *spec)
+static int op_arm_resume(struct sys_device *dev)
 {
-       init_MUTEX(&pmu_sem);
+       down(&op_arm_sem);
+       if (op_arm_enabled && op_arm_model->start())
+               op_arm_enabled = 0;
+       up(&op_arm_sem);
+       return 0;
+}
+
+static struct sysdev_class oprofile_sysclass = {
+       set_kset_name("oprofile"),
+       .resume         = op_arm_resume,
+       .suspend        = op_arm_suspend,
+};
 
-       if (spec->init() < 0)
-               return -ENODEV;
+static struct sys_device device_oprofile = {
+       .id             = 0,
+       .cls            = &oprofile_sysclass,
+};
 
-       pmu_model = spec;
-       init_driverfs();
-       ops->create_files = pmu_create_files;
-       ops->setup = pmu_setup;
-       ops->shutdown = pmu_stop;
-       ops->start = pmu_start;
-       ops->stop = pmu_stop;
-       ops->cpu_type = pmu_model->name;
-       printk(KERN_INFO "oprofile: using %s PMU\n", spec->name);
+static int __init init_driverfs(void)
+{
+       int ret;
 
-       return 0;
+       if (!(ret = sysdev_class_register(&oprofile_sysclass)))
+               ret = sysdev_register(&device_oprofile);
+
+       return ret;
+}
+
+static void  exit_driverfs(void)
+{
+       sysdev_unregister(&device_oprofile);
+       sysdev_class_unregister(&oprofile_sysclass);
+}
+#else
+#define init_driverfs()        do { } while (0)
+#define exit_driverfs() do { } while (0)
+#endif /* CONFIG_PM */
+
+int __init oprofile_arch_init(struct oprofile_operations *ops)
+{
+       struct op_arm_model_spec *spec = NULL;
+       int ret = -ENODEV;
+
+#ifdef CONFIG_CPU_XSCALE
+       spec = &op_xscale_spec;
+#endif
+
+       if (spec) {
+               init_MUTEX(&op_arm_sem);
+
+               if (spec->init() < 0)
+                       return -ENODEV;
+
+               op_arm_model = spec;
+               init_driverfs();
+               ops->create_files = op_arm_create_files;
+               ops->setup = op_arm_setup;
+               ops->shutdown = op_arm_stop;
+               ops->start = op_arm_start;
+               ops->stop = op_arm_stop;
+               ops->cpu_type = op_arm_model->name;
+               ops->backtrace = arm_backtrace;
+               printk(KERN_INFO "oprofile: using %s\n", spec->name);
+       }
+
+       return ret;
 }
 
-void pmu_exit(void)
+void oprofile_arch_exit(void)
 {
-       if (pmu_model) {
+       if (op_arm_model) {
                exit_driverfs();
-               pmu_model = NULL;
+               op_arm_model = NULL;
        }
 }
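
The renamed start/stop paths above keep the same locking pattern as before: one semaphore serialises state changes while the enabled flag records whether the model is running. A minimal sketch of that pattern, with the names mirroring the patch but the body purely illustrative:

	static struct semaphore state_sem;	/* stands in for op_arm_sem, set up with init_MUTEX() */
	static int state_enabled;		/* stands in for op_arm_enabled */

	static int example_start(int (*start)(void))
	{
		int ret = -EBUSY;

		down(&state_sem);		/* serialise against stop/suspend/resume */
		if (!state_enabled) {
			ret = start();		/* model-specific start hook */
			state_enabled = !ret;	/* only mark enabled on success */
		}
		up(&state_sem);
		return ret;
	}
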
 
diff --git a/arch/arm/oprofile/init.c b/arch/arm/oprofile/init.c
deleted file mode 100644 (file)
index d315a3a..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * @file init.c
- *
- * @remark Copyright 2004 Oprofile Authors
- * @remark Read the file COPYING
- *
- * @author Zwane Mwaikambo
- */
-
-#include <linux/oprofile.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include "op_arm_model.h"
-
-int __init oprofile_arch_init(struct oprofile_operations *ops)
-{
-       int ret = -ENODEV;
-
-#ifdef CONFIG_CPU_XSCALE
-       ret = pmu_init(ops, &op_xscale_spec);
-#endif
-
-       ops->backtrace = arm_backtrace;
-
-       return ret;
-}
-
-void oprofile_arch_exit(void)
-{
-#ifdef CONFIG_CPU_XSCALE
-       pmu_exit();
-#endif
-}
index 2148d07484b7b5999b7f5402bef5aa1a5d9e24c4..38c6ad158547e2e615620888c59848be57784f46 100644 (file)
@@ -26,6 +26,6 @@ extern struct op_arm_model_spec op_xscale_spec;
 
 extern void arm_backtrace(struct pt_regs * const regs, unsigned int depth);
 
-extern int __init pmu_init(struct oprofile_operations *ops, struct op_arm_model_spec *spec);
-extern void pmu_exit(void);
+extern int __init op_arm_init(struct oprofile_operations *ops, struct op_arm_model_spec *spec);
+extern void op_arm_exit(void);
 #endif /* OP_ARM_MODEL_H */
index 7719a4062e3ae075d78b151fc56c255abc112809..7ad69f14a3e7fa16f8e7a4e74134a0bfb4c59869 100644 (file)
@@ -59,7 +59,11 @@ void __init omap_detect_sram(void)
 }
 
 static struct map_desc omap_sram_io_desc[] __initdata = {
-       { OMAP1_SRAM_BASE, OMAP1_SRAM_START, 0, MT_DEVICE }
+       {       /* .length gets filled in at runtime */
+               .virtual        = OMAP1_SRAM_BASE,
+               .pfn            = __phys_to_pfn(OMAP1_SRAM_START),
+               .type           = MT_DEVICE
+       }
 };
 
 /*
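
The map_desc conversion above replaces a positional physical-address entry with designated initializers keyed on a page frame number, leaving .length to be filled in once the SRAM size is known. A hedged sketch of how such an entry might be completed and registered; the size variable and the call site are illustrative, not taken from this patch:

	/* Illustrative only: fill in the runtime-detected length, then hand
	 * the table to the ARM static mapping code. */
	omap_sram_io_desc[0].length = omap_sram_size;	/* hypothetical detected size */
	iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc));
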
index 819895cf0b9eda0c773022cd347bf5d18dfb7e2d..2082a9647f4fb03172ee7ae33468e85077142598 100644 (file)
@@ -33,7 +33,7 @@ struct dma_alloc_record {
 static DEFINE_SPINLOCK(dma_alloc_lock);
 static LIST_HEAD(dma_alloc_list);
 
-void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, int gfp)
+void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t gfp)
 {
        struct dma_alloc_record *new;
        struct list_head *this = &dma_alloc_list;
index 27eb1206650761255054093bd9492662be0e056e..86fbdadc51b6b2eb01cbece116a7797e94467938 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/highmem.h>
 #include <asm/io.h>
 
-void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, int gfp)
+void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t gfp)
 {
        void *ret;
 
index 4b38d45435f69795809d449979aacd205568e9aa..cfc4f97490c693ca4517014343cbea904510a00d 100644 (file)
@@ -81,7 +81,7 @@ static int map_page(unsigned long va, unsigned long pa, pgprot_t prot)
  * portions of the kernel with single large page TLB entries, and
  * still get unique uncached pages for consistent DMA.
  */
-void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
+void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle)
 {
        struct vm_struct *area;
        unsigned long page, va, pa;
index 80f8ef013939f4bc5c55686a97fa013c3e8ad3e8..1ba02baf2f94fbc77a000374b0e859b411303008 100644 (file)
@@ -71,7 +71,7 @@ hwsw_init (void)
 }
 
 void *
-hwsw_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, int flags)
+hwsw_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
 {
        if (use_swiotlb(dev))
                return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
index 11957598a8b9447dc4852fdca8c810546f315fb1..21bffba78b6dfc36bc19de97da662590cfc9b064 100644 (file)
@@ -1076,7 +1076,7 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
  * See Documentation/DMA-mapping.txt
  */
 void *
-sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, int flags)
+sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
 {
        struct ioc *ioc;
        void *addr;
index a604efc7f6c9db2f859243dae31a35760b6406da..3ebbb3c8ba368f0ef818c659e9f9ecbe4d5797b7 100644 (file)
@@ -314,7 +314,7 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
 
 void *
 swiotlb_alloc_coherent(struct device *hwdev, size_t size,
-                      dma_addr_t *dma_handle, int flags)
+                      dma_addr_t *dma_handle, gfp_t flags)
 {
        unsigned long dev_addr;
        void *ret;
index d0ee635daf2e7f4104c8b0dca480d9c4712d7dda..e5f5a4e51f700eda856591624ea4c5e6f2f9ea9e 100644 (file)
@@ -939,7 +939,7 @@ xpc_map_bte_errors(bte_result_t error)
 
 
 static inline void *
-xpc_kmalloc_cacheline_aligned(size_t size, int flags, void **base)
+xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
 {
        /* see if kmalloc will give us cachline aligned memory by default */
        *base = kmalloc(size, flags);
index 0e4b9ad9ef0250f2a1898dcae7c4a79c6a4f4bcc..75e6e874bebff043ed33fdd3fcf34bab62d0cb7a 100644 (file)
@@ -75,7 +75,7 @@ EXPORT_SYMBOL(sn_dma_set_mask);
  * more information.
  */
 void *sn_dma_alloc_coherent(struct device *dev, size_t size,
-                           dma_addr_t * dma_handle, int flags)
+                           dma_addr_t * dma_handle, gfp_t flags)
 {
        void *cpuaddr;
        unsigned long phys_addr;
index 97a50d38c98f33e3bdfa47733008582dfb744cfc..a617f8c327e8700171de2eae55b684357be7dff0 100644 (file)
@@ -18,7 +18,7 @@
 #include <asm/io.h>
 
 void *dma_alloc_noncoherent(struct device *dev, size_t size,
-       dma_addr_t * dma_handle, int gfp)
+       dma_addr_t * dma_handle, gfp_t gfp)
 {
        void *ret;
        /* ignore region specifiers */
@@ -39,7 +39,7 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
 EXPORT_SYMBOL(dma_alloc_noncoherent);
 
 void *dma_alloc_coherent(struct device *dev, size_t size,
-       dma_addr_t * dma_handle, int gfp)
+       dma_addr_t * dma_handle, gfp_t gfp)
        __attribute__((alias("dma_alloc_noncoherent")));
 
 EXPORT_SYMBOL(dma_alloc_coherent);
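
The int-to-gfp_t changes in this and the surrounding files are type annotations only; the flag values are unchanged, but sparse can now warn when a caller passes the wrong kind of flag. A hedged illustration of a caller under the new prototypes, with the function name invented for the example:

	#include <linux/dma-mapping.h>
	#include <linux/gfp.h>

	static void *example_coherent_alloc(struct device *dev, dma_addr_t *handle)
	{
		gfp_t flags = GFP_KERNEL | GFP_DMA;	/* typed allocation flags */

		return dma_alloc_coherent(dev, PAGE_SIZE, handle, flags);
	}
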
index aa7c94b5d7817af04538aefdbfc44c752061350b..8da19fd22ac6f0bbbf5b64d3408e611b5f215f99 100644 (file)
@@ -22,7 +22,7 @@
        pdev_to_baddr(to_pci_dev(dev), (addr))
 
 void *dma_alloc_noncoherent(struct device *dev, size_t size,
-       dma_addr_t * dma_handle, int gfp)
+       dma_addr_t * dma_handle, gfp_t gfp)
 {
        void *ret;
 
@@ -44,7 +44,7 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
 EXPORT_SYMBOL(dma_alloc_noncoherent);
 
 void *dma_alloc_coherent(struct device *dev, size_t size,
-       dma_addr_t * dma_handle, int gfp)
+       dma_addr_t * dma_handle, gfp_t gfp)
        __attribute__((alias("dma_alloc_noncoherent")));
 
 EXPORT_SYMBOL(dma_alloc_coherent);
index 2cbe196c35fb5884982afc5b34d9950f37a02a0b..a7e3072ff78d5b7570d394c6eac2bd0521b498dc 100644 (file)
@@ -37,7 +37,7 @@
 #define RAM_OFFSET_MASK        0x3fffffff
 
 void *dma_alloc_noncoherent(struct device *dev, size_t size,
-       dma_addr_t * dma_handle, int gfp)
+       dma_addr_t * dma_handle, gfp_t gfp)
 {
        void *ret;
        /* ignore region specifiers */
@@ -61,7 +61,7 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
 EXPORT_SYMBOL(dma_alloc_noncoherent);
 
 void *dma_alloc_coherent(struct device *dev, size_t size,
-       dma_addr_t * dma_handle, int gfp)
+       dma_addr_t * dma_handle, gfp_t gfp)
 {
        void *ret;
 
index 59e54f12212ecf86a41a22a4705936cdc3b6b2b2..4ce02028a292cb4e3d9f5f3d1fbdcea3b02813c1 100644 (file)
@@ -24,7 +24,7 @@
  */
 
 void *dma_alloc_noncoherent(struct device *dev, size_t size,
-       dma_addr_t * dma_handle, int gfp)
+       dma_addr_t * dma_handle, gfp_t gfp)
 {
        void *ret;
        /* ignore region specifiers */
@@ -45,7 +45,7 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
 EXPORT_SYMBOL(dma_alloc_noncoherent);
 
 void *dma_alloc_coherent(struct device *dev, size_t size,
-       dma_addr_t * dma_handle, int gfp)
+       dma_addr_t * dma_handle, gfp_t gfp)
 {
        void *ret;
 
index 368cc095c99fd4a0611708709c2c1f018aead9ec..844c2877a2e386a34169bc2e97b9bc42f866f950 100644 (file)
@@ -349,7 +349,7 @@ pcxl_dma_init(void)
 
 __initcall(pcxl_dma_init);
 
-static void * pa11_dma_alloc_consistent (struct device *dev, size_t size, dma_addr_t *dma_handle, int flag)
+static void * pa11_dma_alloc_consistent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag)
 {
        unsigned long vaddr;
        unsigned long paddr;
@@ -502,13 +502,13 @@ struct hppa_dma_ops pcxl_dma_ops = {
 };
 
 static void *fail_alloc_consistent(struct device *dev, size_t size,
-                                  dma_addr_t *dma_handle, int flag)
+                                  dma_addr_t *dma_handle, gfp_t flag)
 {
        return NULL;
 }
 
 static void *pa11_dma_alloc_noncoherent(struct device *dev, size_t size,
-                                         dma_addr_t *dma_handle, int flag)
+                                         dma_addr_t *dma_handle, gfp_t flag)
 {
        void *addr = NULL;
 
index a3c38c5a5db2dd69136491a29dfe04936b3fb694..f1c7392255f869693740a9a2cd5ff4ab353f0759 100644 (file)
@@ -78,7 +78,7 @@ typedef struct {
     const char *name2;
     void (*open)(void);
     void (*release)(void);
-    void *(*dma_alloc)(unsigned int, int);
+    void *(*dma_alloc)(unsigned int, gfp_t);
     void (*dma_free)(void *, unsigned int);
     int (*irqinit)(void);
 #ifdef MODULE
index 2ca9ec7ec3a7b4d161be57bf48f5dd3d76c18773..532caa388dc258b9a90407d96a0a978c71854f9b 100644 (file)
@@ -318,7 +318,7 @@ struct cs_sound_settings {
 
 static struct cs_sound_settings sound;
 
-static void *CS_Alloc(unsigned int size, int flags);
+static void *CS_Alloc(unsigned int size, gfp_t flags);
 static void CS_Free(void *ptr, unsigned int size);
 static int CS_IrqInit(void);
 #ifdef MODULE
@@ -959,7 +959,7 @@ static TRANS transCSNormalRead = {
 
 /*** Low level stuff *********************************************************/
 
-static void *CS_Alloc(unsigned int size, int flags)
+static void *CS_Alloc(unsigned int size, gfp_t flags)
 {
        int     order;
 
index 8edee806dae720f616c4d7fb7519c68225103fc7..0f710d2baec60923e0eb84daef8b5a00da5f2510 100644 (file)
@@ -115,7 +115,7 @@ static struct vm_region consistent_head = {
 };
 
 static struct vm_region *
-vm_region_alloc(struct vm_region *head, size_t size, int gfp)
+vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
 {
        unsigned long addr = head->vm_start, end = head->vm_end - size;
        unsigned long flags;
@@ -173,7 +173,7 @@ static struct vm_region *vm_region_find(struct vm_region *head, unsigned long ad
  * virtual and bus address for that space.
  */
 void *
-__dma_alloc_coherent(size_t size, dma_addr_t *handle, int gfp)
+__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
 {
        struct page *page;
        struct vm_region *c;
index 81a3d7446d3718576505f4cd847c75c644bd33e2..43505b1fc5d88bf95f3afaab02c89ac815bb3150 100644 (file)
@@ -114,9 +114,9 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
        struct page *ptepage;
 
 #ifdef CONFIG_HIGHPTE
-       int flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
+       gfp_t flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
 #else
-       int flags = GFP_KERNEL | __GFP_REPEAT;
+       gfp_t flags = GFP_KERNEL | __GFP_REPEAT;
 #endif
 
        ptepage = alloc_pages(flags, 0);
index 1efc18e786d51145db81e383cd0af96b0186762e..610740512d56aeb9c967ba24dea4273ae807288c 100644 (file)
@@ -23,7 +23,7 @@ extern void init_rts7751r2d_IRQ(void);
 extern void *rts7751r2d_ioremap(unsigned long, unsigned long);
 extern int rts7751r2d_irq_demux(int irq);
 
-extern void *voyagergx_consistent_alloc(struct device *, size_t, dma_addr_t *, int);
+extern void *voyagergx_consistent_alloc(struct device *, size_t, dma_addr_t *, gfp_t);
 extern int voyagergx_consistent_free(struct device *, size_t, void *, dma_addr_t);
 
 /*
index 5b92585a38d2b3512889557b9cb18e30f78921bc..3d9a02c093a39d948d0e43ff7b0e7da17199357e 100644 (file)
@@ -31,7 +31,7 @@ static LIST_HEAD(voya_alloc_list);
 #define OHCI_SRAM_SIZE 0x10000
 
 void *voyagergx_consistent_alloc(struct device *dev, size_t size,
-                                dma_addr_t *handle, int flag)
+                                dma_addr_t *handle, gfp_t flag)
 {
        struct list_head *list = &voya_alloc_list;
        struct voya_alloc_entry *entry;
index 83de7ef4e7df6722d6f04468e0007b773599042b..e12418bb1fa5ee3f753ee284a3a8cd6410ca4039 100644 (file)
@@ -33,7 +33,7 @@
 static int gapspci_dma_used = 0;
 
 void *dreamcast_consistent_alloc(struct device *dev, size_t size,
-                                dma_addr_t *dma_handle, int flag)
+                                dma_addr_t *dma_handle, gfp_t flag)
 {
        unsigned long buf;
 
index 1f7af0c73cf454820e9575c46ea037e2314d0368..df3a9e452cc55321c238143b05190557401bb867 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/dma-mapping.h>
 #include <asm/io.h>
 
-void *consistent_alloc(int gfp, size_t size, dma_addr_t *handle)
+void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle)
 {
        struct page *page, *end, *free;
        void *ret;
index d7c1c76582cc3698d3f2b028103166a3f8c31266..fc6669e8dde189640a053c40f629e63720795d9c 100644 (file)
@@ -49,7 +49,7 @@ IPPROTO_EGP, IPPROTO_PUP, IPPROTO_UDP, IPPROTO_IDP, IPPROTO_RAW,
 
 #else
 
-extern void * mykmalloc(size_t s, int gfp);
+extern void * mykmalloc(size_t s, gfp_t gfp);
 extern void mykfree(void *);
 
 #endif
index aaad29c35c83870a4a65cc710e8535765c27279d..b84e5456b0250b2f17dfd3fe8b78578348177dc8 100644 (file)
@@ -39,7 +39,7 @@ static char * page = NULL ;
 
 #else
 
-void * mykmalloc(size_t s, int gfp)
+void * mykmalloc(size_t s, gfp_t gfp)
 {
        static char * page;
        static size_t free;
index ea008b031a8f50203fa928bb88b40198322fe76e..462cc9d65386a60bcfc57969158c0fc1263f85c8 100644 (file)
@@ -252,7 +252,7 @@ void paging_init(void)
 #endif
 }
 
-struct page *arch_validate(struct page *page, int mask, int order)
+struct page *arch_validate(struct page *page, gfp_t mask, int order)
 {
        unsigned long addr, zero = 0;
        int i;
index ea65db679e9cc59d11a1985a0c935deebdf06fd1..0d73ceeece72cdc34c88e58ca0966d74b84d18a9 100644 (file)
@@ -80,7 +80,7 @@ void free_stack(unsigned long stack, int order)
 unsigned long alloc_stack(int order, int atomic)
 {
        unsigned long page;
-       int flags = GFP_KERNEL;
+       gfp_t flags = GFP_KERNEL;
 
        if (atomic)
                flags = GFP_ATOMIC;
index cf0a0315d586a15bc17dcd2de072956a00dc0d2e..88be97c96987ad17edc63673b1134ffa4864b896 100644 (file)
@@ -187,7 +187,7 @@ static void flush_gart(struct device *dev)
 
 /* Allocate DMA memory on node near device */
 noinline
-static void *dma_alloc_pages(struct device *dev, unsigned gfp, unsigned order)
+static void *dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
 {
        struct page *page;
        int node;
@@ -204,7 +204,7 @@ static void *dma_alloc_pages(struct device *dev, unsigned gfp, unsigned order)
  */
 void *
 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-                  unsigned gfp)
+                  gfp_t gfp)
 {
        void *memory;
        unsigned long dma_mask = 0;
index 67d90b89af0b0836a2c83364c4ecd9c72caa2710..5a981dca87ffb1c6f9abb2bec204d35b7f6903e2 100644 (file)
@@ -24,7 +24,7 @@ EXPORT_SYMBOL(iommu_sac_force);
  */
 
 void *dma_alloc_coherent(struct device *hwdev, size_t size,
-                        dma_addr_t *dma_handle, unsigned gfp)
+                        dma_addr_t *dma_handle, gfp_t gfp)
 {
        void *ret;
        u64 mask;
index 84fde258cf855393b6d5531f50b435fab767b8c0..1ff82268e8eacf354e5f16eae1a9ea182d3ca191 100644 (file)
@@ -29,7 +29,7 @@
  */
 
 void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, int gfp)
+dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
 {
        void *ret;
 
index 95c0a3690b0ffc15618acb6dfcae29f81234d846..4081c36c8c19eb6f95ba2bdb5e46990d5a7c3730 100644 (file)
@@ -98,7 +98,6 @@ struct as_data {
 
        struct as_rq *next_arq[2];      /* next in sort order */
        sector_t last_sector[2];        /* last REQ_SYNC & REQ_ASYNC sectors */
-       struct list_head *dispatch;     /* driver dispatch queue */
        struct list_head *hash;         /* request hash */
 
        unsigned long exit_prob;        /* probability a task will exit while
@@ -239,6 +238,25 @@ static struct io_context *as_get_io_context(void)
        return ioc;
 }
 
+static void as_put_io_context(struct as_rq *arq)
+{
+       struct as_io_context *aic;
+
+       if (unlikely(!arq->io_context))
+               return;
+
+       aic = arq->io_context->aic;
+
+       if (arq->is_sync == REQ_SYNC && aic) {
+               spin_lock(&aic->lock);
+               set_bit(AS_TASK_IORUNNING, &aic->state);
+               aic->last_end_request = jiffies;
+               spin_unlock(&aic->lock);
+       }
+
+       put_io_context(arq->io_context);
+}
+
 /*
  * the back merge hash support functions
  */
@@ -261,14 +279,6 @@ static inline void as_del_arq_hash(struct as_rq *arq)
                __as_del_arq_hash(arq);
 }
 
-static void as_remove_merge_hints(request_queue_t *q, struct as_rq *arq)
-{
-       as_del_arq_hash(arq);
-
-       if (q->last_merge == arq->request)
-               q->last_merge = NULL;
-}
-
 static void as_add_arq_hash(struct as_data *ad, struct as_rq *arq)
 {
        struct request *rq = arq->request;
@@ -312,7 +322,7 @@ static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset)
                BUG_ON(!arq->on_hash);
 
                if (!rq_mergeable(__rq)) {
-                       as_remove_merge_hints(ad->q, arq);
+                       as_del_arq_hash(arq);
                        continue;
                }
 
@@ -950,23 +960,12 @@ static void as_completed_request(request_queue_t *q, struct request *rq)
 
        WARN_ON(!list_empty(&rq->queuelist));
 
-       if (arq->state == AS_RQ_PRESCHED) {
-               WARN_ON(arq->io_context);
-               goto out;
-       }
-
-       if (arq->state == AS_RQ_MERGED)
-               goto out_ioc;
-
        if (arq->state != AS_RQ_REMOVED) {
                printk("arq->state %d\n", arq->state);
                WARN_ON(1);
                goto out;
        }
 
-       if (!blk_fs_request(rq))
-               goto out;
-
        if (ad->changed_batch && ad->nr_dispatched == 1) {
                kblockd_schedule_work(&ad->antic_work);
                ad->changed_batch = 0;
@@ -1001,21 +1000,7 @@ static void as_completed_request(request_queue_t *q, struct request *rq)
                }
        }
 
-out_ioc:
-       if (!arq->io_context)
-               goto out;
-
-       if (arq->is_sync == REQ_SYNC) {
-               struct as_io_context *aic = arq->io_context->aic;
-               if (aic) {
-                       spin_lock(&aic->lock);
-                       set_bit(AS_TASK_IORUNNING, &aic->state);
-                       aic->last_end_request = jiffies;
-                       spin_unlock(&aic->lock);
-               }
-       }
-
-       put_io_context(arq->io_context);
+       as_put_io_context(arq);
 out:
        arq->state = AS_RQ_POSTSCHED;
 }
@@ -1047,72 +1032,10 @@ static void as_remove_queued_request(request_queue_t *q, struct request *rq)
                ad->next_arq[data_dir] = as_find_next_arq(ad, arq);
 
        list_del_init(&arq->fifo);
-       as_remove_merge_hints(q, arq);
+       as_del_arq_hash(arq);
        as_del_arq_rb(ad, arq);
 }
 
-/*
- * as_remove_dispatched_request is called to remove a request which has gone
- * to the dispatch list.
- */
-static void as_remove_dispatched_request(request_queue_t *q, struct request *rq)
-{
-       struct as_rq *arq = RQ_DATA(rq);
-       struct as_io_context *aic;
-
-       if (!arq) {
-               WARN_ON(1);
-               return;
-       }
-
-       WARN_ON(arq->state != AS_RQ_DISPATCHED);
-       WARN_ON(ON_RB(&arq->rb_node));
-       if (arq->io_context && arq->io_context->aic) {
-               aic = arq->io_context->aic;
-               if (aic) {
-                       WARN_ON(!atomic_read(&aic->nr_dispatched));
-                       atomic_dec(&aic->nr_dispatched);
-               }
-       }
-}
-
-/*
- * as_remove_request is called when a driver has finished with a request.
- * This should be only called for dispatched requests, but for some reason
- * a POWER4 box running hwscan it does not.
- */
-static void as_remove_request(request_queue_t *q, struct request *rq)
-{
-       struct as_rq *arq = RQ_DATA(rq);
-
-       if (unlikely(arq->state == AS_RQ_NEW))
-               goto out;
-
-       if (ON_RB(&arq->rb_node)) {
-               if (arq->state != AS_RQ_QUEUED) {
-                       printk("arq->state %d\n", arq->state);
-                       WARN_ON(1);
-                       goto out;
-               }
-               /*
-                * We'll lose the aliased request(s) here. I don't think this
-                * will ever happen, but if it does, hopefully someone will
-                * report it.
-                */
-               WARN_ON(!list_empty(&rq->queuelist));
-               as_remove_queued_request(q, rq);
-       } else {
-               if (arq->state != AS_RQ_DISPATCHED) {
-                       printk("arq->state %d\n", arq->state);
-                       WARN_ON(1);
-                       goto out;
-               }
-               as_remove_dispatched_request(q, rq);
-       }
-out:
-       arq->state = AS_RQ_REMOVED;
-}
-
 /*
  * as_fifo_expired returns 0 if there are no expired reads on the fifo,
  * 1 otherwise.  It is ratelimited so that we only perform the check once per
@@ -1165,7 +1088,6 @@ static inline int as_batch_expired(struct as_data *ad)
 static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
 {
        struct request *rq = arq->request;
-       struct list_head *insert;
        const int data_dir = arq->is_sync;
 
        BUG_ON(!ON_RB(&arq->rb_node));
@@ -1198,13 +1120,13 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
        /*
         * take it off the sort and fifo list, add to dispatch queue
         */
-       insert = ad->dispatch->prev;
-
        while (!list_empty(&rq->queuelist)) {
                struct request *__rq = list_entry_rq(rq->queuelist.next);
                struct as_rq *__arq = RQ_DATA(__rq);
 
-               list_move_tail(&__rq->queuelist, ad->dispatch);
+               list_del(&__rq->queuelist);
+
+               elv_dispatch_add_tail(ad->q, __rq);
 
                if (__arq->io_context && __arq->io_context->aic)
                        atomic_inc(&__arq->io_context->aic->nr_dispatched);
@@ -1218,7 +1140,8 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
        as_remove_queued_request(ad->q, rq);
        WARN_ON(arq->state != AS_RQ_QUEUED);
 
-       list_add(&rq->queuelist, insert);
+       elv_dispatch_sort(ad->q, rq);
+
        arq->state = AS_RQ_DISPATCHED;
        if (arq->io_context && arq->io_context->aic)
                atomic_inc(&arq->io_context->aic->nr_dispatched);
@@ -1230,12 +1153,42 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
  * read/write expire, batch expire, etc, and moves it to the dispatch
  * queue. Returns 1 if a request was found, 0 otherwise.
  */
-static int as_dispatch_request(struct as_data *ad)
+static int as_dispatch_request(request_queue_t *q, int force)
 {
+       struct as_data *ad = q->elevator->elevator_data;
        struct as_rq *arq;
        const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]);
        const int writes = !list_empty(&ad->fifo_list[REQ_ASYNC]);
 
+       if (unlikely(force)) {
+               /*
+                * Forced dispatch, accounting is useless.  Reset
+                * accounting states and dump fifo_lists.  Note that
+                * batch_data_dir is reset to REQ_SYNC to avoid
+                * screwing write batch accounting as write batch
+                * accounting occurs on W->R transition.
+                */
+               int dispatched = 0;
+
+               ad->batch_data_dir = REQ_SYNC;
+               ad->changed_batch = 0;
+               ad->new_batch = 0;
+
+               while (ad->next_arq[REQ_SYNC]) {
+                       as_move_to_dispatch(ad, ad->next_arq[REQ_SYNC]);
+                       dispatched++;
+               }
+               ad->last_check_fifo[REQ_SYNC] = jiffies;
+
+               while (ad->next_arq[REQ_ASYNC]) {
+                       as_move_to_dispatch(ad, ad->next_arq[REQ_ASYNC]);
+                       dispatched++;
+               }
+               ad->last_check_fifo[REQ_ASYNC] = jiffies;
+
+               return dispatched;
+       }
+
        /* Signal that the write batch was uncontended, so we can't time it */
        if (ad->batch_data_dir == REQ_ASYNC && !reads) {
                if (ad->current_write_count == 0 || !writes)
@@ -1359,20 +1312,6 @@ fifo_expired:
        return 1;
 }
 
-static struct request *as_next_request(request_queue_t *q)
-{
-       struct as_data *ad = q->elevator->elevator_data;
-       struct request *rq = NULL;
-
-       /*
-        * if there are still requests on the dispatch queue, grab the first
-        */
-       if (!list_empty(ad->dispatch) || as_dispatch_request(ad))
-               rq = list_entry_rq(ad->dispatch->next);
-
-       return rq;
-}
-
 /*
  * Add arq to a list behind alias
  */
@@ -1404,17 +1343,25 @@ as_add_aliased_request(struct as_data *ad, struct as_rq *arq, struct as_rq *alia
        /*
         * Don't want to have to handle merges.
         */
-       as_remove_merge_hints(ad->q, arq);
+       as_del_arq_hash(arq);
 }
 
 /*
  * add arq to rbtree and fifo
  */
-static void as_add_request(struct as_data *ad, struct as_rq *arq)
+static void as_add_request(request_queue_t *q, struct request *rq)
 {
+       struct as_data *ad = q->elevator->elevator_data;
+       struct as_rq *arq = RQ_DATA(rq);
        struct as_rq *alias;
        int data_dir;
 
+       if (arq->state != AS_RQ_PRESCHED) {
+               printk("arq->state: %d\n", arq->state);
+               WARN_ON(1);
+       }
+       arq->state = AS_RQ_NEW;
+
        if (rq_data_dir(arq->request) == READ
                        || current->flags&PF_SYNCWRITE)
                arq->is_sync = 1;
@@ -1437,12 +1384,8 @@ static void as_add_request(struct as_data *ad, struct as_rq *arq)
                arq->expires = jiffies + ad->fifo_expire[data_dir];
                list_add_tail(&arq->fifo, &ad->fifo_list[data_dir]);
 
-               if (rq_mergeable(arq->request)) {
+               if (rq_mergeable(arq->request))
                        as_add_arq_hash(ad, arq);
-
-                       if (!ad->q->last_merge)
-                               ad->q->last_merge = arq->request;
-               }
                as_update_arq(ad, arq); /* keep state machine up to date */
 
        } else {
@@ -1463,96 +1406,24 @@ static void as_add_request(struct as_data *ad, struct as_rq *arq)
        arq->state = AS_RQ_QUEUED;
 }
 
-static void as_deactivate_request(request_queue_t *q, struct request *rq)
+static void as_activate_request(request_queue_t *q, struct request *rq)
 {
-       struct as_data *ad = q->elevator->elevator_data;
        struct as_rq *arq = RQ_DATA(rq);
 
-       if (arq) {
-               if (arq->state == AS_RQ_REMOVED) {
-                       arq->state = AS_RQ_DISPATCHED;
-                       if (arq->io_context && arq->io_context->aic)
-                               atomic_inc(&arq->io_context->aic->nr_dispatched);
-               }
-       } else
-               WARN_ON(blk_fs_request(rq)
-                       && (!(rq->flags & (REQ_HARDBARRIER|REQ_SOFTBARRIER))) );
-
-       /* Stop anticipating - let this request get through */
-       as_antic_stop(ad);
-}
-
-/*
- * requeue the request. The request has not been completed, nor is it a
- * new request, so don't touch accounting.
- */
-static void as_requeue_request(request_queue_t *q, struct request *rq)
-{
-       as_deactivate_request(q, rq);
-       list_add(&rq->queuelist, &q->queue_head);
-}
-
-/*
- * Account a request that is inserted directly onto the dispatch queue.
- * arq->io_context->aic->nr_dispatched should not need to be incremented
- * because only new requests should come through here: requeues go through
- * our explicit requeue handler.
- */
-static void as_account_queued_request(struct as_data *ad, struct request *rq)
-{
-       if (blk_fs_request(rq)) {
-               struct as_rq *arq = RQ_DATA(rq);
-               arq->state = AS_RQ_DISPATCHED;
-               ad->nr_dispatched++;
-       }
+       WARN_ON(arq->state != AS_RQ_DISPATCHED);
+       arq->state = AS_RQ_REMOVED;
+       if (arq->io_context && arq->io_context->aic)
+               atomic_dec(&arq->io_context->aic->nr_dispatched);
 }
 
-static void
-as_insert_request(request_queue_t *q, struct request *rq, int where)
+static void as_deactivate_request(request_queue_t *q, struct request *rq)
 {
-       struct as_data *ad = q->elevator->elevator_data;
        struct as_rq *arq = RQ_DATA(rq);
 
-       if (arq) {
-               if (arq->state != AS_RQ_PRESCHED) {
-                       printk("arq->state: %d\n", arq->state);
-                       WARN_ON(1);
-               }
-               arq->state = AS_RQ_NEW;
-       }
-
-       /* barriers must flush the reorder queue */
-       if (unlikely(rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)
-                       && where == ELEVATOR_INSERT_SORT)) {
-               WARN_ON(1);
-               where = ELEVATOR_INSERT_BACK;
-       }
-
-       switch (where) {
-               case ELEVATOR_INSERT_BACK:
-                       while (ad->next_arq[REQ_SYNC])
-                               as_move_to_dispatch(ad, ad->next_arq[REQ_SYNC]);
-
-                       while (ad->next_arq[REQ_ASYNC])
-                               as_move_to_dispatch(ad, ad->next_arq[REQ_ASYNC]);
-
-                       list_add_tail(&rq->queuelist, ad->dispatch);
-                       as_account_queued_request(ad, rq);
-                       as_antic_stop(ad);
-                       break;
-               case ELEVATOR_INSERT_FRONT:
-                       list_add(&rq->queuelist, ad->dispatch);
-                       as_account_queued_request(ad, rq);
-                       as_antic_stop(ad);
-                       break;
-               case ELEVATOR_INSERT_SORT:
-                       BUG_ON(!blk_fs_request(rq));
-                       as_add_request(ad, arq);
-                       break;
-               default:
-                       BUG();
-                       return;
-       }
+       WARN_ON(arq->state != AS_RQ_REMOVED);
+       arq->state = AS_RQ_DISPATCHED;
+       if (arq->io_context && arq->io_context->aic)
+               atomic_inc(&arq->io_context->aic->nr_dispatched);
 }
 
 /*
@@ -1565,12 +1436,8 @@ static int as_queue_empty(request_queue_t *q)
 {
        struct as_data *ad = q->elevator->elevator_data;
 
-       if (!list_empty(&ad->fifo_list[REQ_ASYNC])
-               || !list_empty(&ad->fifo_list[REQ_SYNC])
-               || !list_empty(ad->dispatch))
-                       return 0;
-
-       return 1;
+       return list_empty(&ad->fifo_list[REQ_ASYNC])
+               && list_empty(&ad->fifo_list[REQ_SYNC]);
 }
 
 static struct request *
@@ -1607,15 +1474,6 @@ as_merge(request_queue_t *q, struct request **req, struct bio *bio)
        struct request *__rq;
        int ret;
 
-       /*
-        * try last_merge to avoid going to hash
-        */
-       ret = elv_try_last_merge(q, bio);
-       if (ret != ELEVATOR_NO_MERGE) {
-               __rq = q->last_merge;
-               goto out_insert;
-       }
-
        /*
         * see if the merge hash can satisfy a back merge
         */
@@ -1644,9 +1502,6 @@ as_merge(request_queue_t *q, struct request **req, struct bio *bio)
 
        return ELEVATOR_NO_MERGE;
 out:
-       if (rq_mergeable(__rq))
-               q->last_merge = __rq;
-out_insert:
        if (ret) {
                if (rq_mergeable(__rq))
                        as_hot_arq_hash(ad, RQ_DATA(__rq));
@@ -1693,9 +1548,6 @@ static void as_merged_request(request_queue_t *q, struct request *req)
                 * behind the disk head. We currently don't bother adjusting.
                 */
        }
-
-       if (arq->on_hash)
-               q->last_merge = req;
 }
 
 static void
@@ -1763,6 +1615,7 @@ as_merged_requests(request_queue_t *q, struct request *req,
         * kill knowledge of next, this one is a goner
         */
        as_remove_queued_request(q, next);
+       as_put_io_context(anext);
 
        anext->state = AS_RQ_MERGED;
 }
@@ -1782,7 +1635,7 @@ static void as_work_handler(void *data)
        unsigned long flags;
 
        spin_lock_irqsave(q->queue_lock, flags);
-       if (as_next_request(q))
+       if (!as_queue_empty(q))
                q->request_fn(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
@@ -1797,7 +1650,9 @@ static void as_put_request(request_queue_t *q, struct request *rq)
                return;
        }
 
-       if (arq->state != AS_RQ_POSTSCHED && arq->state != AS_RQ_PRESCHED) {
+       if (unlikely(arq->state != AS_RQ_POSTSCHED &&
+                    arq->state != AS_RQ_PRESCHED &&
+                    arq->state != AS_RQ_MERGED)) {
                printk("arq->state %d\n", arq->state);
                WARN_ON(1);
        }
@@ -1807,7 +1662,7 @@ static void as_put_request(request_queue_t *q, struct request *rq)
 }
 
 static int as_set_request(request_queue_t *q, struct request *rq,
-                         struct bio *bio, int gfp_mask)
+                         struct bio *bio, gfp_t gfp_mask)
 {
        struct as_data *ad = q->elevator->elevator_data;
        struct as_rq *arq = mempool_alloc(ad->arq_pool, gfp_mask);
@@ -1907,7 +1762,6 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
        INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
        ad->sort_list[REQ_SYNC] = RB_ROOT;
        ad->sort_list[REQ_ASYNC] = RB_ROOT;
-       ad->dispatch = &q->queue_head;
        ad->fifo_expire[REQ_SYNC] = default_read_expire;
        ad->fifo_expire[REQ_ASYNC] = default_write_expire;
        ad->antic_expire = default_antic_expire;
@@ -2072,10 +1926,9 @@ static struct elevator_type iosched_as = {
                .elevator_merge_fn =            as_merge,
                .elevator_merged_fn =           as_merged_request,
                .elevator_merge_req_fn =        as_merged_requests,
-               .elevator_next_req_fn =         as_next_request,
-               .elevator_add_req_fn =          as_insert_request,
-               .elevator_remove_req_fn =       as_remove_request,
-               .elevator_requeue_req_fn =      as_requeue_request,
+               .elevator_dispatch_fn =         as_dispatch_request,
+               .elevator_add_req_fn =          as_add_request,
+               .elevator_activate_req_fn =     as_activate_request,
                .elevator_deactivate_req_fn =   as_deactivate_request,
                .elevator_queue_empty_fn =      as_queue_empty,
                .elevator_completed_req_fn =    as_completed_request,
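
The elevator_type changes above reflect the new dispatch model: rather than the block layer pulling requests through next_req/remove_req/requeue_req hooks and a scheduler-private dispatch list, the scheduler's elevator_dispatch_fn pushes requests onto the queue with the elv_dispatch_* helpers. A hedged sketch of the shape of such a hook; the request-selection helper is hypothetical:

	static int example_dispatch(request_queue_t *q, int force)
	{
		/* pick_next_rq() stands in for the scheduler's own selection logic */
		struct request *rq = pick_next_rq(q);

		if (!rq)
			return 0;

		elv_dispatch_sort(q, rq);	/* sector-sorted insert into q->queue_head */
		return 1;			/* one request moved to the dispatch queue */
	}
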
index cd056e7e64ec15d5cf81f16068623e29e07ad187..94690e4d41e096b4a988eb5a27f76d8d1925fc72 100644 (file)
@@ -84,7 +84,6 @@ static int cfq_max_depth = 2;
        (node)->rb_left = NULL;         \
 } while (0)
 #define RB_CLEAR_ROOT(root)    ((root)->rb_node = NULL)
-#define ON_RB(node)            ((node)->rb_color != RB_NONE)
 #define rb_entry_crq(node)     rb_entry((node), struct cfq_rq, rb_node)
 #define rq_rb_key(rq)          (rq)->sector
 
@@ -271,10 +270,7 @@ CFQ_CFQQ_FNS(expired);
 #undef CFQ_CFQQ_FNS
 
 enum cfq_rq_state_flags {
-       CFQ_CRQ_FLAG_in_flight = 0,
-       CFQ_CRQ_FLAG_in_driver,
-       CFQ_CRQ_FLAG_is_sync,
-       CFQ_CRQ_FLAG_requeued,
+       CFQ_CRQ_FLAG_is_sync = 0,
 };
 
 #define CFQ_CRQ_FNS(name)                                              \
@@ -291,14 +287,11 @@ static inline int cfq_crq_##name(const struct cfq_rq *crq)                \
        return (crq->crq_flags & (1 << CFQ_CRQ_FLAG_##name)) != 0;      \
 }
 
-CFQ_CRQ_FNS(in_flight);
-CFQ_CRQ_FNS(in_driver);
 CFQ_CRQ_FNS(is_sync);
-CFQ_CRQ_FNS(requeued);
 #undef CFQ_CRQ_FNS
 
 static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
-static void cfq_dispatch_sort(request_queue_t *, struct cfq_rq *);
+static void cfq_dispatch_insert(request_queue_t *, struct cfq_rq *);
 static void cfq_put_cfqd(struct cfq_data *cfqd);
 
 #define process_sync(tsk)      ((tsk)->flags & PF_SYNCWRITE)
@@ -311,14 +304,6 @@ static inline void cfq_del_crq_hash(struct cfq_rq *crq)
        hlist_del_init(&crq->hash);
 }
 
-static void cfq_remove_merge_hints(request_queue_t *q, struct cfq_rq *crq)
-{
-       cfq_del_crq_hash(crq);
-
-       if (q->last_merge == crq->request)
-               q->last_merge = NULL;
-}
-
 static inline void cfq_add_crq_hash(struct cfq_data *cfqd, struct cfq_rq *crq)
 {
        const int hash_idx = CFQ_MHASH_FN(rq_hash_key(crq->request));
@@ -347,18 +332,13 @@ static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
        return NULL;
 }
 
-static inline int cfq_pending_requests(struct cfq_data *cfqd)
-{
-       return !list_empty(&cfqd->queue->queue_head) || cfqd->busy_queues;
-}
-
 /*
  * scheduler run of queue, if there are requests pending and no one in the
  * driver that will restart queueing
  */
 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
 {
-       if (!cfqd->rq_in_driver && cfq_pending_requests(cfqd))
+       if (!cfqd->rq_in_driver && cfqd->busy_queues)
                kblockd_schedule_work(&cfqd->unplug_work);
 }
 
@@ -366,7 +346,7 @@ static int cfq_queue_empty(request_queue_t *q)
 {
        struct cfq_data *cfqd = q->elevator->elevator_data;
 
-       return !cfq_pending_requests(cfqd);
+       return !cfqd->busy_queues;
 }
 
 /*
@@ -386,11 +366,6 @@ cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2)
        if (crq2 == NULL)
                return crq1;
 
-       if (cfq_crq_requeued(crq1) && !cfq_crq_requeued(crq2))
-               return crq1;
-       else if (cfq_crq_requeued(crq2) && !cfq_crq_requeued(crq1))
-               return crq2;
-
        if (cfq_crq_is_sync(crq1) && !cfq_crq_is_sync(crq2))
                return crq1;
        else if (cfq_crq_is_sync(crq2) && !cfq_crq_is_sync(crq1))
@@ -461,10 +436,7 @@ cfq_find_next_crq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
        struct cfq_rq *crq_next = NULL, *crq_prev = NULL;
        struct rb_node *rbnext, *rbprev;
 
-       rbnext = NULL;
-       if (ON_RB(&last->rb_node))
-               rbnext = rb_next(&last->rb_node);
-       if (!rbnext) {
+       if (!(rbnext = rb_next(&last->rb_node))) {
                rbnext = rb_first(&cfqq->sort_list);
                if (rbnext == &last->rb_node)
                        rbnext = NULL;
@@ -545,13 +517,13 @@ static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
  * the pending list according to last request service
  */
 static inline void
-cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq, int requeue)
+cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
        BUG_ON(cfq_cfqq_on_rr(cfqq));
        cfq_mark_cfqq_on_rr(cfqq);
        cfqd->busy_queues++;
 
-       cfq_resort_rr_list(cfqq, requeue);
+       cfq_resort_rr_list(cfqq, 0);
 }
 
 static inline void
@@ -571,22 +543,19 @@ cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 static inline void cfq_del_crq_rb(struct cfq_rq *crq)
 {
        struct cfq_queue *cfqq = crq->cfq_queue;
+       struct cfq_data *cfqd = cfqq->cfqd;
+       const int sync = cfq_crq_is_sync(crq);
 
-       if (ON_RB(&crq->rb_node)) {
-               struct cfq_data *cfqd = cfqq->cfqd;
-               const int sync = cfq_crq_is_sync(crq);
+       BUG_ON(!cfqq->queued[sync]);
+       cfqq->queued[sync]--;
 
-               BUG_ON(!cfqq->queued[sync]);
-               cfqq->queued[sync]--;
+       cfq_update_next_crq(crq);
 
-               cfq_update_next_crq(crq);
+       rb_erase(&crq->rb_node, &cfqq->sort_list);
+       RB_CLEAR_COLOR(&crq->rb_node);
 
-               rb_erase(&crq->rb_node, &cfqq->sort_list);
-               RB_CLEAR_COLOR(&crq->rb_node);
-
-               if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY(&cfqq->sort_list))
-                       cfq_del_cfqq_rr(cfqd, cfqq);
-       }
+       if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY(&cfqq->sort_list))
+               cfq_del_cfqq_rr(cfqd, cfqq);
 }
 
 static struct cfq_rq *
@@ -627,12 +596,12 @@ static void cfq_add_crq_rb(struct cfq_rq *crq)
         * if that happens, put the alias on the dispatch list
         */
        while ((__alias = __cfq_add_crq_rb(crq)) != NULL)
-               cfq_dispatch_sort(cfqd->queue, __alias);
+               cfq_dispatch_insert(cfqd->queue, __alias);
 
        rb_insert_color(&crq->rb_node, &cfqq->sort_list);
 
        if (!cfq_cfqq_on_rr(cfqq))
-               cfq_add_cfqq_rr(cfqd, cfqq, cfq_crq_requeued(crq));
+               cfq_add_cfqq_rr(cfqd, cfqq);
 
        /*
         * check if this request is a better next-serve candidate
@@ -643,10 +612,8 @@ static void cfq_add_crq_rb(struct cfq_rq *crq)
 static inline void
 cfq_reposition_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
 {
-       if (ON_RB(&crq->rb_node)) {
-               rb_erase(&crq->rb_node, &cfqq->sort_list);
-               cfqq->queued[cfq_crq_is_sync(crq)]--;
-       }
+       rb_erase(&crq->rb_node, &cfqq->sort_list);
+       cfqq->queued[cfq_crq_is_sync(crq)]--;
 
        cfq_add_crq_rb(crq);
 }
@@ -676,49 +643,28 @@ out:
        return NULL;
 }
 
-static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
+static void cfq_activate_request(request_queue_t *q, struct request *rq)
 {
        struct cfq_data *cfqd = q->elevator->elevator_data;
-       struct cfq_rq *crq = RQ_DATA(rq);
-
-       if (crq) {
-               struct cfq_queue *cfqq = crq->cfq_queue;
-
-               if (cfq_crq_in_driver(crq)) {
-                       cfq_clear_crq_in_driver(crq);
-                       WARN_ON(!cfqd->rq_in_driver);
-                       cfqd->rq_in_driver--;
-               }
-               if (cfq_crq_in_flight(crq)) {
-                       const int sync = cfq_crq_is_sync(crq);
 
-                       cfq_clear_crq_in_flight(crq);
-                       WARN_ON(!cfqq->on_dispatch[sync]);
-                       cfqq->on_dispatch[sync]--;
-               }
-               cfq_mark_crq_requeued(crq);
-       }
+       cfqd->rq_in_driver++;
 }
 
-/*
- * make sure the service time gets corrected on reissue of this request
- */
-static void cfq_requeue_request(request_queue_t *q, struct request *rq)
+static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
 {
-       cfq_deactivate_request(q, rq);
-       list_add(&rq->queuelist, &q->queue_head);
+       struct cfq_data *cfqd = q->elevator->elevator_data;
+
+       WARN_ON(!cfqd->rq_in_driver);
+       cfqd->rq_in_driver--;
 }
 
-static void cfq_remove_request(request_queue_t *q, struct request *rq)
+static void cfq_remove_request(struct request *rq)
 {
        struct cfq_rq *crq = RQ_DATA(rq);
 
-       if (crq) {
-               list_del_init(&rq->queuelist);
-               cfq_del_crq_rb(crq);
-               cfq_remove_merge_hints(q, crq);
-
-       }
+       list_del_init(&rq->queuelist);
+       cfq_del_crq_rb(crq);
+       cfq_del_crq_hash(crq);
 }
 
 static int
@@ -728,12 +674,6 @@ cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
        struct request *__rq;
        int ret;
 
-       ret = elv_try_last_merge(q, bio);
-       if (ret != ELEVATOR_NO_MERGE) {
-               __rq = q->last_merge;
-               goto out_insert;
-       }
-
        __rq = cfq_find_rq_hash(cfqd, bio->bi_sector);
        if (__rq && elv_rq_merge_ok(__rq, bio)) {
                ret = ELEVATOR_BACK_MERGE;
@@ -748,8 +688,6 @@ cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
 
        return ELEVATOR_NO_MERGE;
 out:
-       q->last_merge = __rq;
-out_insert:
        *req = __rq;
        return ret;
 }
@@ -762,14 +700,12 @@ static void cfq_merged_request(request_queue_t *q, struct request *req)
        cfq_del_crq_hash(crq);
        cfq_add_crq_hash(cfqd, crq);
 
-       if (ON_RB(&crq->rb_node) && (rq_rb_key(req) != crq->rb_key)) {
+       if (rq_rb_key(req) != crq->rb_key) {
                struct cfq_queue *cfqq = crq->cfq_queue;
 
                cfq_update_next_crq(crq);
                cfq_reposition_crq_rb(cfqq, crq);
        }
-
-       q->last_merge = req;
 }
 
 static void
@@ -785,7 +721,7 @@ cfq_merged_requests(request_queue_t *q, struct request *rq,
            time_before(next->start_time, rq->start_time))
                list_move(&rq->queuelist, &next->queuelist);
 
-       cfq_remove_request(q, next);
+       cfq_remove_request(next);
 }
 
 static inline void
@@ -992,53 +928,15 @@ static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
        return 1;
 }
 
-/*
- * we dispatch cfqd->cfq_quantum requests in total from the rr_list queues,
- * this function sector sorts the selected request to minimize seeks. we start
- * at cfqd->last_sector, not 0.
- */
-static void cfq_dispatch_sort(request_queue_t *q, struct cfq_rq *crq)
+static void cfq_dispatch_insert(request_queue_t *q, struct cfq_rq *crq)
 {
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct cfq_queue *cfqq = crq->cfq_queue;
-       struct list_head *head = &q->queue_head, *entry = head;
-       struct request *__rq;
-       sector_t last;
-
-       list_del(&crq->request->queuelist);
-
-       last = cfqd->last_sector;
-       list_for_each_entry_reverse(__rq, head, queuelist) {
-               struct cfq_rq *__crq = RQ_DATA(__rq);
-
-               if (blk_barrier_rq(__rq))
-                       break;
-               if (!blk_fs_request(__rq))
-                       break;
-               if (cfq_crq_requeued(__crq))
-                       break;
-
-               if (__rq->sector <= crq->request->sector)
-                       break;
-               if (__rq->sector > last && crq->request->sector < last) {
-                       last = crq->request->sector + crq->request->nr_sectors;
-                       break;
-               }
-               entry = &__rq->queuelist;
-       }
-
-       cfqd->last_sector = last;
 
        cfqq->next_crq = cfq_find_next_crq(cfqd, cfqq, crq);
-
-       cfq_del_crq_rb(crq);
-       cfq_remove_merge_hints(q, crq);
-
-       cfq_mark_crq_in_flight(crq);
-       cfq_clear_crq_requeued(crq);
-
+       cfq_remove_request(crq->request);
        cfqq->on_dispatch[cfq_crq_is_sync(crq)]++;
-       list_add_tail(&crq->request->queuelist, entry);
+       elv_dispatch_sort(q, crq->request);
 }
 
 /*
@@ -1159,7 +1057,7 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                /*
                 * finally, insert request into driver dispatch list
                 */
-               cfq_dispatch_sort(cfqd->queue, crq);
+               cfq_dispatch_insert(cfqd->queue, crq);
 
                cfqd->dispatch_slice++;
                dispatched++;
@@ -1194,7 +1092,7 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 }
 
 static int
-cfq_dispatch_requests(request_queue_t *q, int max_dispatch, int force)
+cfq_dispatch_requests(request_queue_t *q, int force)
 {
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct cfq_queue *cfqq;
@@ -1204,12 +1102,25 @@ cfq_dispatch_requests(request_queue_t *q, int max_dispatch, int force)
 
        cfqq = cfq_select_queue(cfqd, force);
        if (cfqq) {
+               int max_dispatch;
+
+               /*
+                * if idle window is disabled, allow queue buildup
+                */
+               if (!cfq_cfqq_idle_window(cfqq) &&
+                   cfqd->rq_in_driver >= cfqd->cfq_max_depth)
+                       return 0;
+
                cfq_clear_cfqq_must_dispatch(cfqq);
                cfq_clear_cfqq_wait_request(cfqq);
                del_timer(&cfqd->idle_slice_timer);
 
-               if (cfq_class_idle(cfqq))
-                       max_dispatch = 1;
+               if (!force) {
+                       max_dispatch = cfqd->cfq_quantum;
+                       if (cfq_class_idle(cfqq))
+                               max_dispatch = 1;
+               } else
+                       max_dispatch = INT_MAX;
 
                return __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
        }
@@ -1217,93 +1128,6 @@ cfq_dispatch_requests(request_queue_t *q, int max_dispatch, int force)
        return 0;
 }
 
-static inline void cfq_account_dispatch(struct cfq_rq *crq)
-{
-       struct cfq_queue *cfqq = crq->cfq_queue;
-       struct cfq_data *cfqd = cfqq->cfqd;
-
-       if (unlikely(!blk_fs_request(crq->request)))
-               return;
-
-       /*
-        * accounted bit is necessary since some drivers will call
-        * elv_next_request() many times for the same request (eg ide)
-        */
-       if (cfq_crq_in_driver(crq))
-               return;
-
-       cfq_mark_crq_in_driver(crq);
-       cfqd->rq_in_driver++;
-}
-
-static inline void
-cfq_account_completion(struct cfq_queue *cfqq, struct cfq_rq *crq)
-{
-       struct cfq_data *cfqd = cfqq->cfqd;
-       unsigned long now;
-
-       if (!cfq_crq_in_driver(crq))
-               return;
-
-       now = jiffies;
-
-       WARN_ON(!cfqd->rq_in_driver);
-       cfqd->rq_in_driver--;
-
-       if (!cfq_class_idle(cfqq))
-               cfqd->last_end_request = now;
-
-       if (!cfq_cfqq_dispatched(cfqq)) {
-               if (cfq_cfqq_on_rr(cfqq)) {
-                       cfqq->service_last = now;
-                       cfq_resort_rr_list(cfqq, 0);
-               }
-               if (cfq_cfqq_expired(cfqq)) {
-                       __cfq_slice_expired(cfqd, cfqq, 0);
-                       cfq_schedule_dispatch(cfqd);
-               }
-       }
-
-       if (cfq_crq_is_sync(crq))
-               crq->io_context->last_end_request = now;
-}
-
-static struct request *cfq_next_request(request_queue_t *q)
-{
-       struct cfq_data *cfqd = q->elevator->elevator_data;
-       struct request *rq;
-
-       if (!list_empty(&q->queue_head)) {
-               struct cfq_rq *crq;
-dispatch:
-               rq = list_entry_rq(q->queue_head.next);
-
-               crq = RQ_DATA(rq);
-               if (crq) {
-                       struct cfq_queue *cfqq = crq->cfq_queue;
-
-                       /*
-                        * if idle window is disabled, allow queue buildup
-                        */
-                       if (!cfq_crq_in_driver(crq) &&
-                           !cfq_cfqq_idle_window(cfqq) &&
-                           !blk_barrier_rq(rq) &&
-                           cfqd->rq_in_driver >= cfqd->cfq_max_depth)
-                               return NULL;
-
-                       cfq_remove_merge_hints(q, crq);
-                       cfq_account_dispatch(crq);
-               }
-
-               return rq;
-       }
-
-       if (cfq_dispatch_requests(q, cfqd->cfq_quantum, 0))
-               goto dispatch;
-
-       return NULL;
-}
-
 /*
  * task holds one reference to the queue, dropped when task exits. each crq
  * in-flight on this queue also holds a reference, dropped when crq is freed.
@@ -1422,7 +1246,7 @@ static void cfq_exit_io_context(struct cfq_io_context *cic)
 }
 
 static struct cfq_io_context *
-cfq_alloc_io_context(struct cfq_data *cfqd, int gfp_mask)
+cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 {
        struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask);
 
@@ -1517,7 +1341,7 @@ static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio)
 
 static struct cfq_queue *
 cfq_get_queue(struct cfq_data *cfqd, unsigned int key, unsigned short ioprio,
-             int gfp_mask)
+             gfp_t gfp_mask)
 {
        const int hashval = hash_long(key, CFQ_QHASH_SHIFT);
        struct cfq_queue *cfqq, *new_cfqq = NULL;
@@ -1578,7 +1402,7 @@ out:
  * cfqq, so we don't need to worry about it disappearing
  */
 static struct cfq_io_context *
-cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, int gfp_mask)
+cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask)
 {
        struct io_context *ioc = NULL;
        struct cfq_io_context *cic;
@@ -1816,8 +1640,9 @@ cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
        }
 }
 
-static void cfq_enqueue(struct cfq_data *cfqd, struct request *rq)
+static void cfq_insert_request(request_queue_t *q, struct request *rq)
 {
+       struct cfq_data *cfqd = q->elevator->elevator_data;
        struct cfq_rq *crq = RQ_DATA(rq);
        struct cfq_queue *cfqq = crq->cfq_queue;
 
@@ -1827,66 +1652,43 @@ static void cfq_enqueue(struct cfq_data *cfqd, struct request *rq)
 
        list_add_tail(&rq->queuelist, &cfqq->fifo);
 
-       if (rq_mergeable(rq)) {
+       if (rq_mergeable(rq))
                cfq_add_crq_hash(cfqd, crq);
 
-               if (!cfqd->queue->last_merge)
-                       cfqd->queue->last_merge = rq;
-       }
-
        cfq_crq_enqueued(cfqd, cfqq, crq);
 }
 
-static void
-cfq_insert_request(request_queue_t *q, struct request *rq, int where)
-{
-       struct cfq_data *cfqd = q->elevator->elevator_data;
-
-       switch (where) {
-               case ELEVATOR_INSERT_BACK:
-                       while (cfq_dispatch_requests(q, INT_MAX, 1))
-                               ;
-                       list_add_tail(&rq->queuelist, &q->queue_head);
-                       /*
-                        * If we were idling with pending requests on
-                        * inactive cfqqs, force dispatching will
-                        * remove the idle timer and the queue won't
-                        * be kicked by __make_request() afterward.
-                        * Kick it here.
-                        */
-                       cfq_schedule_dispatch(cfqd);
-                       break;
-               case ELEVATOR_INSERT_FRONT:
-                       list_add(&rq->queuelist, &q->queue_head);
-                       break;
-               case ELEVATOR_INSERT_SORT:
-                       BUG_ON(!blk_fs_request(rq));
-                       cfq_enqueue(cfqd, rq);
-                       break;
-               default:
-                       printk("%s: bad insert point %d\n", __FUNCTION__,where);
-                       return;
-       }
-}
-
 static void cfq_completed_request(request_queue_t *q, struct request *rq)
 {
        struct cfq_rq *crq = RQ_DATA(rq);
-       struct cfq_queue *cfqq;
+       struct cfq_queue *cfqq = crq->cfq_queue;
+       struct cfq_data *cfqd = cfqq->cfqd;
+       const int sync = cfq_crq_is_sync(crq);
+       unsigned long now;
 
-       if (unlikely(!blk_fs_request(rq)))
-               return;
+       now = jiffies;
 
-       cfqq = crq->cfq_queue;
+       WARN_ON(!cfqd->rq_in_driver);
+       WARN_ON(!cfqq->on_dispatch[sync]);
+       cfqd->rq_in_driver--;
+       cfqq->on_dispatch[sync]--;
 
-       if (cfq_crq_in_flight(crq)) {
-               const int sync = cfq_crq_is_sync(crq);
+       if (!cfq_class_idle(cfqq))
+               cfqd->last_end_request = now;
 
-               WARN_ON(!cfqq->on_dispatch[sync]);
-               cfqq->on_dispatch[sync]--;
+       if (!cfq_cfqq_dispatched(cfqq)) {
+               if (cfq_cfqq_on_rr(cfqq)) {
+                       cfqq->service_last = now;
+                       cfq_resort_rr_list(cfqq, 0);
+               }
+               if (cfq_cfqq_expired(cfqq)) {
+                       __cfq_slice_expired(cfqd, cfqq, 0);
+                       cfq_schedule_dispatch(cfqd);
+               }
        }
 
-       cfq_account_completion(cfqq, crq);
+       if (cfq_crq_is_sync(crq))
+               crq->io_context->last_end_request = now;
 }
 
 static struct request *
@@ -2075,7 +1877,7 @@ static void cfq_put_request(request_queue_t *q, struct request *rq)
  */
 static int
 cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
-               int gfp_mask)
+               gfp_t gfp_mask)
 {
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct task_struct *tsk = current;
@@ -2118,9 +1920,6 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
                INIT_HLIST_NODE(&crq->hash);
                crq->cfq_queue = cfqq;
                crq->io_context = cic;
-               cfq_clear_crq_in_flight(crq);
-               cfq_clear_crq_in_driver(crq);
-               cfq_clear_crq_requeued(crq);
 
                if (rw == READ || process_sync(tsk))
                        cfq_mark_crq_is_sync(crq);
@@ -2201,7 +2000,7 @@ static void cfq_idle_slice_timer(unsigned long data)
                 * only expire and reinvoke request handler, if there are
                 * other queues with pending requests
                 */
-               if (!cfq_pending_requests(cfqd)) {
+               if (!cfqd->busy_queues) {
                        cfqd->idle_slice_timer.expires = min(now + cfqd->cfq_slice_idle, cfqq->slice_end);
                        add_timer(&cfqd->idle_slice_timer);
                        goto out_cont;
@@ -2576,10 +2375,9 @@ static struct elevator_type iosched_cfq = {
                .elevator_merge_fn =            cfq_merge,
                .elevator_merged_fn =           cfq_merged_request,
                .elevator_merge_req_fn =        cfq_merged_requests,
-               .elevator_next_req_fn =         cfq_next_request,
+               .elevator_dispatch_fn =         cfq_dispatch_requests,
                .elevator_add_req_fn =          cfq_insert_request,
-               .elevator_remove_req_fn =       cfq_remove_request,
-               .elevator_requeue_req_fn =      cfq_requeue_request,
+               .elevator_activate_req_fn =     cfq_activate_request,
                .elevator_deactivate_req_fn =   cfq_deactivate_request,
                .elevator_queue_empty_fn =      cfq_queue_empty,
                .elevator_completed_req_fn =    cfq_completed_request,
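
This ops table is where the interface change is most visible: elevator_next_req_fn, elevator_remove_req_fn and elevator_requeue_req_fn are gone, replaced by elevator_dispatch_fn and an activate hook (deactivate is now driven from elv_requeue_request()), and the dispatch list q->queue_head is owned by the block layer rather than by each scheduler. The contract, in rough outline: the scheduler holds requests privately, dispatch(force) moves some of them onto the dispatch list and returns nonzero if it moved anything, and force means "move everything", which is how ELEVATOR_INSERT_BACK and the elevator switch drain a queue. A toy user-space model of just that contract (not kernel code; QUANTUM loosely stands in for cfq_quantum):

/* Toy model of the new elevator_dispatch_fn contract. */
#include <stdio.h>

#define QUANTUM 4

static int sched_list[16];	/* requests still owned by the scheduler */
static int nr_sched;
static int dispatch_list[16];	/* requests visible to the driver */
static int nr_dispatch;

static int dispatch(int force)
{
	int max = force ? nr_sched : QUANTUM;
	int moved = 0;

	while (nr_sched && moved < max) {
		dispatch_list[nr_dispatch++] = sched_list[--nr_sched];
		moved++;
	}
	return moved;
}

int main(void)
{
	int i;

	for (i = 0; i < 10; i++)
		sched_list[nr_sched++] = i;

	dispatch(0);			/* ordinary dispatch: at most QUANTUM */
	printf("after dispatch(0): %d on dispatch list\n", nr_dispatch);

	while (dispatch(1))		/* drain, as __elv_add_request() does */
		;
	printf("after drain: %d on dispatch list\n", nr_dispatch);
	return 0;
}

The real cfq_dispatch_requests() additionally refuses to dispatch at all when the queue's idle window is disabled and cfqd->rq_in_driver has already reached cfq_max_depth, as the hunk above shows.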
index 52a3ae5289a09a7af79081df5de2ec7e4c40ff26..7929471d7df7efb6a6f885f698da89c8f4c76cf7 100644 (file)
@@ -50,7 +50,6 @@ struct deadline_data {
         * next in sort order. read, write or both are NULL
         */
        struct deadline_rq *next_drq[2];
-       struct list_head *dispatch;     /* driver dispatch queue */
        struct list_head *hash;         /* request hash */
        unsigned int batching;          /* number of sequential requests made */
        sector_t last_sector;           /* head position */
@@ -113,15 +112,6 @@ static inline void deadline_del_drq_hash(struct deadline_rq *drq)
                __deadline_del_drq_hash(drq);
 }
 
-static void
-deadline_remove_merge_hints(request_queue_t *q, struct deadline_rq *drq)
-{
-       deadline_del_drq_hash(drq);
-
-       if (q->last_merge == drq->request)
-               q->last_merge = NULL;
-}
-
 static inline void
 deadline_add_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
 {
@@ -239,10 +229,9 @@ deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
                        dd->next_drq[data_dir] = rb_entry_drq(rbnext);
        }
 
-       if (ON_RB(&drq->rb_node)) {
-               rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
-               RB_CLEAR(&drq->rb_node);
-       }
+       BUG_ON(!ON_RB(&drq->rb_node));
+       rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
+       RB_CLEAR(&drq->rb_node);
 }
 
 static struct request *
@@ -286,7 +275,7 @@ deadline_find_first_drq(struct deadline_data *dd, int data_dir)
 /*
  * add drq to rbtree and fifo
  */
-static inline void
+static void
 deadline_add_request(struct request_queue *q, struct request *rq)
 {
        struct deadline_data *dd = q->elevator->elevator_data;
@@ -301,12 +290,8 @@ deadline_add_request(struct request_queue *q, struct request *rq)
        drq->expires = jiffies + dd->fifo_expire[data_dir];
        list_add_tail(&drq->fifo, &dd->fifo_list[data_dir]);
 
-       if (rq_mergeable(rq)) {
+       if (rq_mergeable(rq))
                deadline_add_drq_hash(dd, drq);
-
-               if (!q->last_merge)
-                       q->last_merge = rq;
-       }
 }
 
 /*
@@ -315,14 +300,11 @@ deadline_add_request(struct request_queue *q, struct request *rq)
 static void deadline_remove_request(request_queue_t *q, struct request *rq)
 {
        struct deadline_rq *drq = RQ_DATA(rq);
+       struct deadline_data *dd = q->elevator->elevator_data;
 
-       if (drq) {
-               struct deadline_data *dd = q->elevator->elevator_data;
-
-               list_del_init(&drq->fifo);
-               deadline_remove_merge_hints(q, drq);
-               deadline_del_drq_rb(dd, drq);
-       }
+       list_del_init(&drq->fifo);
+       deadline_del_drq_rb(dd, drq);
+       deadline_del_drq_hash(drq);
 }
 
 static int
@@ -332,15 +314,6 @@ deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
        struct request *__rq;
        int ret;
 
-       /*
-        * try last_merge to avoid going to hash
-        */
-       ret = elv_try_last_merge(q, bio);
-       if (ret != ELEVATOR_NO_MERGE) {
-               __rq = q->last_merge;
-               goto out_insert;
-       }
-
        /*
         * see if the merge hash can satisfy a back merge
         */
@@ -373,8 +346,6 @@ deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
 
        return ELEVATOR_NO_MERGE;
 out:
-       q->last_merge = __rq;
-out_insert:
        if (ret)
                deadline_hot_drq_hash(dd, RQ_DATA(__rq));
        *req = __rq;
@@ -399,8 +370,6 @@ static void deadline_merged_request(request_queue_t *q, struct request *req)
                deadline_del_drq_rb(dd, drq);
                deadline_add_drq_rb(dd, drq);
        }
-
-       q->last_merge = req;
 }
 
 static void
@@ -452,7 +421,7 @@ deadline_move_to_dispatch(struct deadline_data *dd, struct deadline_rq *drq)
        request_queue_t *q = drq->request->q;
 
        deadline_remove_request(q, drq->request);
-       list_add_tail(&drq->request->queuelist, dd->dispatch);
+       elv_dispatch_add_tail(q, drq->request);
 }
 
 /*
@@ -502,8 +471,9 @@ static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
  * deadline_dispatch_requests selects the best request according to
  * read/write expire, fifo_batch, etc
  */
-static int deadline_dispatch_requests(struct deadline_data *dd)
+static int deadline_dispatch_requests(request_queue_t *q, int force)
 {
+       struct deadline_data *dd = q->elevator->elevator_data;
        const int reads = !list_empty(&dd->fifo_list[READ]);
        const int writes = !list_empty(&dd->fifo_list[WRITE]);
        struct deadline_rq *drq;
@@ -597,65 +567,12 @@ dispatch_request:
        return 1;
 }
 
-static struct request *deadline_next_request(request_queue_t *q)
-{
-       struct deadline_data *dd = q->elevator->elevator_data;
-       struct request *rq;
-
-       /*
-        * if there are still requests on the dispatch queue, grab the first one
-        */
-       if (!list_empty(dd->dispatch)) {
-dispatch:
-               rq = list_entry_rq(dd->dispatch->next);
-               return rq;
-       }
-
-       if (deadline_dispatch_requests(dd))
-               goto dispatch;
-
-       return NULL;
-}
-
-static void
-deadline_insert_request(request_queue_t *q, struct request *rq, int where)
-{
-       struct deadline_data *dd = q->elevator->elevator_data;
-
-       /* barriers must flush the reorder queue */
-       if (unlikely(rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)
-                       && where == ELEVATOR_INSERT_SORT))
-               where = ELEVATOR_INSERT_BACK;
-
-       switch (where) {
-               case ELEVATOR_INSERT_BACK:
-                       while (deadline_dispatch_requests(dd))
-                               ;
-                       list_add_tail(&rq->queuelist, dd->dispatch);
-                       break;
-               case ELEVATOR_INSERT_FRONT:
-                       list_add(&rq->queuelist, dd->dispatch);
-                       break;
-               case ELEVATOR_INSERT_SORT:
-                       BUG_ON(!blk_fs_request(rq));
-                       deadline_add_request(q, rq);
-                       break;
-               default:
-                       printk("%s: bad insert point %d\n", __FUNCTION__,where);
-                       return;
-       }
-}
-
 static int deadline_queue_empty(request_queue_t *q)
 {
        struct deadline_data *dd = q->elevator->elevator_data;
 
-       if (!list_empty(&dd->fifo_list[WRITE])
-           || !list_empty(&dd->fifo_list[READ])
-           || !list_empty(dd->dispatch))
-               return 0;
-
-       return 1;
+       return list_empty(&dd->fifo_list[WRITE])
+               && list_empty(&dd->fifo_list[READ]);
 }
 
 static struct request *
@@ -733,7 +650,6 @@ static int deadline_init_queue(request_queue_t *q, elevator_t *e)
        INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
        dd->sort_list[READ] = RB_ROOT;
        dd->sort_list[WRITE] = RB_ROOT;
-       dd->dispatch = &q->queue_head;
        dd->fifo_expire[READ] = read_expire;
        dd->fifo_expire[WRITE] = write_expire;
        dd->writes_starved = writes_starved;
@@ -748,15 +664,13 @@ static void deadline_put_request(request_queue_t *q, struct request *rq)
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq = RQ_DATA(rq);
 
-       if (drq) {
-               mempool_free(drq, dd->drq_pool);
-               rq->elevator_private = NULL;
-       }
+       mempool_free(drq, dd->drq_pool);
+       rq->elevator_private = NULL;
 }
 
 static int
 deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
-                    int gfp_mask)
+                    gfp_t gfp_mask)
 {
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq;
@@ -917,9 +831,8 @@ static struct elevator_type iosched_deadline = {
                .elevator_merge_fn =            deadline_merge,
                .elevator_merged_fn =           deadline_merged_request,
                .elevator_merge_req_fn =        deadline_merged_requests,
-               .elevator_next_req_fn =         deadline_next_request,
-               .elevator_add_req_fn =          deadline_insert_request,
-               .elevator_remove_req_fn =       deadline_remove_request,
+               .elevator_dispatch_fn =         deadline_dispatch_requests,
+               .elevator_add_req_fn =          deadline_add_request,
                .elevator_queue_empty_fn =      deadline_queue_empty,
                .elevator_former_req_fn =       deadline_former_request,
                .elevator_latter_req_fn =       deadline_latter_request,
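
deadline_move_to_dispatch() above, and the noop scheduler further down, hand requests to the block layer through elv_dispatch_add_tail(), whose body does not appear in these hunks. A plausible sketch, consistent with how elv_dispatch_sort() and ELEVATOR_INSERT_BACK maintain the dispatch boundary; treat the exact implementation as an assumption:

/* Assumed shape of elv_dispatch_add_tail() (not shown in this diff):
 * append rq to the dispatch list and make it the new scheduling
 * boundary that elv_dispatch_sort() orders against. */
static inline void elv_dispatch_add_tail(request_queue_t *q, struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;

	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
}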
index 98f0126a2deb3228e0866abcb44bf8b2c1af5859..55621d5c577403e3024a6dac9b580185c6c6b9cf 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/compiler.h>
+#include <linux/delay.h>
 
 #include <asm/uaccess.h>
 
@@ -83,21 +84,11 @@ inline int elv_try_merge(struct request *__rq, struct bio *bio)
 }
 EXPORT_SYMBOL(elv_try_merge);
 
-inline int elv_try_last_merge(request_queue_t *q, struct bio *bio)
-{
-       if (q->last_merge)
-               return elv_try_merge(q->last_merge, bio);
-
-       return ELEVATOR_NO_MERGE;
-}
-EXPORT_SYMBOL(elv_try_last_merge);
-
 static struct elevator_type *elevator_find(const char *name)
 {
        struct elevator_type *e = NULL;
        struct list_head *entry;
 
-       spin_lock_irq(&elv_list_lock);
        list_for_each(entry, &elv_list) {
                struct elevator_type *__e;
 
@@ -108,7 +99,6 @@ static struct elevator_type *elevator_find(const char *name)
                        break;
                }
        }
-       spin_unlock_irq(&elv_list_lock);
 
        return e;
 }
@@ -120,12 +110,15 @@ static void elevator_put(struct elevator_type *e)
 
 static struct elevator_type *elevator_get(const char *name)
 {
-       struct elevator_type *e = elevator_find(name);
+       struct elevator_type *e;
 
-       if (!e)
-               return NULL;
-       if (!try_module_get(e->elevator_owner))
-               return NULL;
+       spin_lock_irq(&elv_list_lock);
+
+       e = elevator_find(name);
+       if (e && !try_module_get(e->elevator_owner))
+               e = NULL;
+
+       spin_unlock_irq(&elv_list_lock);
 
        return e;
 }
@@ -139,8 +132,6 @@ static int elevator_attach(request_queue_t *q, struct elevator_type *e,
        eq->ops = &e->ops;
        eq->elevator_type = e;
 
-       INIT_LIST_HEAD(&q->queue_head);
-       q->last_merge = NULL;
        q->elevator = eq;
 
        if (eq->ops->elevator_init_fn)
@@ -153,11 +144,15 @@ static char chosen_elevator[16];
 
 static void elevator_setup_default(void)
 {
+       struct elevator_type *e;
+
        /*
         * check if default is set and exists
         */
-       if (chosen_elevator[0] && elevator_find(chosen_elevator))
+       if (chosen_elevator[0] && (e = elevator_get(chosen_elevator))) {
+               elevator_put(e);
                return;
+       }
 
 #if defined(CONFIG_IOSCHED_AS)
        strcpy(chosen_elevator, "anticipatory");
@@ -186,6 +181,11 @@ int elevator_init(request_queue_t *q, char *name)
        struct elevator_queue *eq;
        int ret = 0;
 
+       INIT_LIST_HEAD(&q->queue_head);
+       q->last_merge = NULL;
+       q->end_sector = 0;
+       q->boundary_rq = NULL;
+
        elevator_setup_default();
 
        if (!name)
@@ -220,9 +220,52 @@ void elevator_exit(elevator_t *e)
        kfree(e);
 }
 
+/*
+ * Insert rq into the dispatch queue of q, sorted by sector relative to
+ * the current dispatch boundary (q->end_sector).  Queue lock must be
+ * held on entry.  To be used by specific elevators.
+ */
+void elv_dispatch_sort(request_queue_t *q, struct request *rq)
+{
+       sector_t boundary;
+       struct list_head *entry;
+
+       if (q->last_merge == rq)
+               q->last_merge = NULL;
+
+       boundary = q->end_sector;
+
+       list_for_each_prev(entry, &q->queue_head) {
+               struct request *pos = list_entry_rq(entry);
+
+               if (pos->flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
+                       break;
+               if (rq->sector >= boundary) {
+                       if (pos->sector < boundary)
+                               continue;
+               } else {
+                       if (pos->sector >= boundary)
+                               break;
+               }
+               if (rq->sector >= pos->sector)
+                       break;
+       }
+
+       list_add(&rq->queuelist, entry);
+}
+
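
elv_dispatch_sort() above keeps the dispatch list ordered by sector, but only relative to the current boundary (q->end_sector): requests at or past the boundary come first in ascending order, requests that wrapped back below it follow, and a new request is never placed ahead of a barrier or an already started (REQ_STARTED) request. A small runnable user-space demonstration of just the ordering rule (toy types, barrier handling omitted, not kernel code):

/* Toy model of the boundary-relative sorted insert. */
#include <stdio.h>

#define MAXN 16

static unsigned long list[MAXN];	/* dispatch list, head at index 0 */
static int n;

static void dispatch_sort(unsigned long sector, unsigned long boundary)
{
	int i, j;

	for (i = n - 1; i >= 0; i--) {
		unsigned long pos = list[i];

		if (sector >= boundary) {
			if (pos < boundary)
				continue;
		} else if (pos >= boundary)
			break;

		if (sector >= pos)
			break;
	}

	/* insert after index i; i == -1 means insert at the head */
	for (j = n; j > i + 1; j--)
		list[j] = list[j - 1];
	list[i + 1] = sector;
	n++;
}

int main(void)
{
	unsigned long incoming[] = { 520, 900, 100, 600, 40 };
	unsigned long boundary = 500;	/* models q->end_sector */
	int i;

	for (i = 0; i < 5; i++)
		dispatch_sort(incoming[i], boundary);

	for (i = 0; i < n; i++)
		printf("%lu ", list[i]);
	printf("\n");			/* prints: 520 600 900 40 100 */
	return 0;
}

With a boundary of 500 the five insertions end up as 520 600 900 40 100, i.e. one sweep upward from the boundary followed by the wrapped-around requests.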
 int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
 {
        elevator_t *e = q->elevator;
+       int ret;
+
+       if (q->last_merge) {
+               ret = elv_try_merge(q->last_merge, bio);
+               if (ret != ELEVATOR_NO_MERGE) {
+                       *req = q->last_merge;
+                       return ret;
+               }
+       }
 
        if (e->ops->elevator_merge_fn)
                return e->ops->elevator_merge_fn(q, req, bio);
@@ -236,6 +279,8 @@ void elv_merged_request(request_queue_t *q, struct request *rq)
 
        if (e->ops->elevator_merged_fn)
                e->ops->elevator_merged_fn(q, rq);
+
+       q->last_merge = rq;
 }
 
 void elv_merge_requests(request_queue_t *q, struct request *rq,
@@ -243,20 +288,13 @@ void elv_merge_requests(request_queue_t *q, struct request *rq,
 {
        elevator_t *e = q->elevator;
 
-       if (q->last_merge == next)
-               q->last_merge = NULL;
-
        if (e->ops->elevator_merge_req_fn)
                e->ops->elevator_merge_req_fn(q, rq, next);
+
+       q->last_merge = rq;
 }
 
-/*
- * For careful internal use by the block layer. Essentially the same as
- * a requeue in that it tells the io scheduler that this request is not
- * active in the driver or hardware anymore, but we don't want the request
- * added back to the scheduler. Function is not exported.
- */
-void elv_deactivate_request(request_queue_t *q, struct request *rq)
+void elv_requeue_request(request_queue_t *q, struct request *rq)
 {
        elevator_t *e = q->elevator;
 
@@ -264,19 +302,14 @@ void elv_deactivate_request(request_queue_t *q, struct request *rq)
         * it already went through dequeue, we need to decrement the
         * in_flight count again
         */
-       if (blk_account_rq(rq))
+       if (blk_account_rq(rq)) {
                q->in_flight--;
+               if (blk_sorted_rq(rq) && e->ops->elevator_deactivate_req_fn)
+                       e->ops->elevator_deactivate_req_fn(q, rq);
+       }
 
        rq->flags &= ~REQ_STARTED;
 
-       if (e->ops->elevator_deactivate_req_fn)
-               e->ops->elevator_deactivate_req_fn(q, rq);
-}
-
-void elv_requeue_request(request_queue_t *q, struct request *rq)
-{
-       elv_deactivate_request(q, rq);
-
        /*
         * if this is the flush, requeue the original instead and drop the flush
         */
@@ -285,31 +318,27 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
                rq = rq->end_io_data;
        }
 
-       /*
-        * the request is prepped and may have some resources allocated.
-        * allowing unprepped requests to pass this one may cause resource
-        * deadlock.  turn on softbarrier.
-        */
-       rq->flags |= REQ_SOFTBARRIER;
-
-       /*
-        * if iosched has an explicit requeue hook, then use that. otherwise
-        * just put the request at the front of the queue
-        */
-       if (q->elevator->ops->elevator_requeue_req_fn)
-               q->elevator->ops->elevator_requeue_req_fn(q, rq);
-       else
-               __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+       __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
 }
 
 void __elv_add_request(request_queue_t *q, struct request *rq, int where,
                       int plug)
 {
-       /*
-        * barriers implicitly indicate back insertion
-        */
-       if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER) &&
-           where == ELEVATOR_INSERT_SORT)
+       if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
+               /*
+                * barriers implicitly indicate back insertion
+                */
+               if (where == ELEVATOR_INSERT_SORT)
+                       where = ELEVATOR_INSERT_BACK;
+
+               /*
+                * this request is scheduling boundary, update end_sector
+                */
+               if (blk_fs_request(rq)) {
+                       q->end_sector = rq_end_sector(rq);
+                       q->boundary_rq = rq;
+               }
+       } else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
                where = ELEVATOR_INSERT_BACK;
 
        if (plug)
@@ -317,23 +346,54 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
 
        rq->q = q;
 
-       if (!test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)) {
-               q->elevator->ops->elevator_add_req_fn(q, rq, where);
+       switch (where) {
+       case ELEVATOR_INSERT_FRONT:
+               rq->flags |= REQ_SOFTBARRIER;
 
-               if (blk_queue_plugged(q)) {
-                       int nrq = q->rq.count[READ] + q->rq.count[WRITE]
-                                 - q->in_flight;
+               list_add(&rq->queuelist, &q->queue_head);
+               break;
 
-                       if (nrq >= q->unplug_thresh)
-                               __generic_unplug_device(q);
-               }
-       } else
+       case ELEVATOR_INSERT_BACK:
+               rq->flags |= REQ_SOFTBARRIER;
+
+               while (q->elevator->ops->elevator_dispatch_fn(q, 1))
+                       ;
+               list_add_tail(&rq->queuelist, &q->queue_head);
                /*
-                * if drain is set, store the request "locally". when the drain
-                * is finished, the requests will be handed ordered to the io
-                * scheduler
+                * We kick the queue here for the following reasons.
+                * - The elevator might have returned NULL previously
+                *   to delay requests and returned them now.  As the
+                *   queue wasn't empty before this request, ll_rw_blk
+                *   won't run the queue on return, resulting in a hang.
+                * - Usually, back-inserted requests won't be merged
+                *   with anything.  There's no point in delaying queue
+                *   processing.
                 */
-               list_add_tail(&rq->queuelist, &q->drain_list);
+               blk_remove_plug(q);
+               q->request_fn(q);
+               break;
+
+       case ELEVATOR_INSERT_SORT:
+               BUG_ON(!blk_fs_request(rq));
+               rq->flags |= REQ_SORTED;
+               q->elevator->ops->elevator_add_req_fn(q, rq);
+               if (q->last_merge == NULL && rq_mergeable(rq))
+                       q->last_merge = rq;
+               break;
+
+       default:
+               printk(KERN_ERR "%s: bad insertion point %d\n",
+                      __FUNCTION__, where);
+               BUG();
+       }
+
+       if (blk_queue_plugged(q)) {
+               int nrq = q->rq.count[READ] + q->rq.count[WRITE]
+                       - q->in_flight;
+
+               if (nrq >= q->unplug_thresh)
+                       __generic_unplug_device(q);
+       }
 }
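
Two details of the rewritten __elv_add_request() are easy to miss: a request without io scheduler private data (REQ_ELVPRIV clear, e.g. one allocated while an elevator switch is in progress) is demoted from ELEVATOR_INSERT_SORT to ELEVATOR_INSERT_BACK, so it bypasses the scheduler entirely; and a barrier both forces back insertion and, if it is a filesystem request, becomes the new scheduling boundary (q->end_sector / q->boundary_rq) that elv_dispatch_sort() orders against.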
 
 void elv_add_request(request_queue_t *q, struct request *rq, int where,
@@ -348,13 +408,19 @@ void elv_add_request(request_queue_t *q, struct request *rq, int where,
 
 static inline struct request *__elv_next_request(request_queue_t *q)
 {
-       struct request *rq = q->elevator->ops->elevator_next_req_fn(q);
+       struct request *rq;
+
+       if (unlikely(list_empty(&q->queue_head) &&
+                    !q->elevator->ops->elevator_dispatch_fn(q, 0)))
+               return NULL;
+
+       rq = list_entry_rq(q->queue_head.next);
 
        /*
         * if this is a barrier write and the device has to issue a
         * flush sequence to support it, check how far we are
         */
-       if (rq && blk_fs_request(rq) && blk_barrier_rq(rq)) {
+       if (blk_fs_request(rq) && blk_barrier_rq(rq)) {
                BUG_ON(q->ordered == QUEUE_ORDERED_NONE);
 
                if (q->ordered == QUEUE_ORDERED_FLUSH &&
@@ -371,15 +437,30 @@ struct request *elv_next_request(request_queue_t *q)
        int ret;
 
        while ((rq = __elv_next_request(q)) != NULL) {
-               /*
-                * just mark as started even if we don't start it, a request
-                * that has been delayed should not be passed by new incoming
-                * requests
-                */
-               rq->flags |= REQ_STARTED;
+               if (!(rq->flags & REQ_STARTED)) {
+                       elevator_t *e = q->elevator;
 
-               if (rq == q->last_merge)
-                       q->last_merge = NULL;
+                       /*
+                        * This is the first time the device driver
+                        * sees this request (possibly after
+                        * requeueing).  Notify IO scheduler.
+                        */
+                       if (blk_sorted_rq(rq) &&
+                           e->ops->elevator_activate_req_fn)
+                               e->ops->elevator_activate_req_fn(q, rq);
+
+                       /*
+                        * just mark as started even if we don't start
+                        * it, a request that has been delayed should
+                        * not be passed by new incoming requests
+                        */
+                       rq->flags |= REQ_STARTED;
+               }
+
+               if (!q->boundary_rq || q->boundary_rq == rq) {
+                       q->end_sector = rq_end_sector(rq);
+                       q->boundary_rq = NULL;
+               }
 
                if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
                        break;
@@ -391,9 +472,9 @@ struct request *elv_next_request(request_queue_t *q)
                        /*
                         * the request may have been (partially) prepped.
                         * we need to keep this request in the front to
-                        * avoid resource deadlock.  turn on softbarrier.
+                        * avoid resource deadlock.  REQ_STARTED will
+                        * prevent other fs requests from passing this one.
                         */
-                       rq->flags |= REQ_SOFTBARRIER;
                        rq = NULL;
                        break;
                } else if (ret == BLKPREP_KILL) {
@@ -416,42 +497,32 @@ struct request *elv_next_request(request_queue_t *q)
        return rq;
 }
 
-void elv_remove_request(request_queue_t *q, struct request *rq)
+void elv_dequeue_request(request_queue_t *q, struct request *rq)
 {
-       elevator_t *e = q->elevator;
+       BUG_ON(list_empty(&rq->queuelist));
+
+       list_del_init(&rq->queuelist);
 
        /*
         * the time frame between a request being removed from the lists
        * and when it is freed is accounted as io that is in progress at
-        * the driver side. note that we only account requests that the
-        * driver has seen (REQ_STARTED set), to avoid false accounting
-        * for request-request merges
+        * the driver side.
         */
        if (blk_account_rq(rq))
                q->in_flight++;
-
-       /*
-        * the main clearing point for q->last_merge is on retrieval of
-        * request by driver (it calls elv_next_request()), but it _can_
-        * also happen here if a request is added to the queue but later
-        * deleted without ever being given to driver (merged with another
-        * request).
-        */
-       if (rq == q->last_merge)
-               q->last_merge = NULL;
-
-       if (e->ops->elevator_remove_req_fn)
-               e->ops->elevator_remove_req_fn(q, rq);
 }
 
 int elv_queue_empty(request_queue_t *q)
 {
        elevator_t *e = q->elevator;
 
+       if (!list_empty(&q->queue_head))
+               return 0;
+
        if (e->ops->elevator_queue_empty_fn)
                return e->ops->elevator_queue_empty_fn(q);
 
-       return list_empty(&q->queue_head);
+       return 1;
 }
 
 struct request *elv_latter_request(request_queue_t *q, struct request *rq)
@@ -487,7 +558,7 @@ struct request *elv_former_request(request_queue_t *q, struct request *rq)
 }
 
 int elv_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
-                   int gfp_mask)
+                   gfp_t gfp_mask)
 {
        elevator_t *e = q->elevator;
 
@@ -523,11 +594,11 @@ void elv_completed_request(request_queue_t *q, struct request *rq)
        /*
         * request is released from the driver, io must be done
         */
-       if (blk_account_rq(rq))
+       if (blk_account_rq(rq)) {
                q->in_flight--;
-
-       if (e->ops->elevator_completed_req_fn)
-               e->ops->elevator_completed_req_fn(q, rq);
+               if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
+                       e->ops->elevator_completed_req_fn(q, rq);
+       }
 }
 
 int elv_register_queue(struct request_queue *q)
@@ -555,10 +626,9 @@ void elv_unregister_queue(struct request_queue *q)
 
 int elv_register(struct elevator_type *e)
 {
+       spin_lock_irq(&elv_list_lock);
        if (elevator_find(e->elevator_name))
                BUG();
-
-       spin_lock_irq(&elv_list_lock);
        list_add_tail(&e->list, &elv_list);
        spin_unlock_irq(&elv_list_lock);
 
@@ -582,25 +652,36 @@ EXPORT_SYMBOL_GPL(elv_unregister);
  * switch to new_e io scheduler. be careful not to introduce deadlocks -
  * we don't free the old io scheduler, before we have allocated what we
  * need for the new one. this way we have a chance of going back to the old
- * one, if the new one fails init for some reason. we also do an intermediate
- * switch to noop to ensure safety with stack-allocated requests, since they
- * don't originate from the block layer allocator. noop is safe here, because
- * it never needs to touch the elevator itself for completion events. DRAIN
- * flags will make sure we don't touch it for additions either.
+ * one, if the new one fails init for some reason.
  */
 static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
 {
-       elevator_t *e = kmalloc(sizeof(elevator_t), GFP_KERNEL);
-       struct elevator_type *noop_elevator = NULL;
-       elevator_t *old_elevator;
+       elevator_t *old_elevator, *e;
 
+       /*
+        * Allocate new elevator
+        */
+       e = kmalloc(sizeof(elevator_t), GFP_KERNEL);
        if (!e)
                goto error;
 
        /*
-        * first step, drain requests from the block freelist
+        * Turn on BYPASS and drain all requests w/ elevator private data
         */
-       blk_wait_queue_drained(q, 0);
+       spin_lock_irq(q->queue_lock);
+
+       set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+
+       while (q->elevator->ops->elevator_dispatch_fn(q, 1))
+               ;
+
+       while (q->rq.elvpriv) {
+               spin_unlock_irq(q->queue_lock);
+               msleep(10);
+               spin_lock_irq(q->queue_lock);
+       }
+
+       spin_unlock_irq(q->queue_lock);
 
        /*
         * unregister old elevator data
@@ -608,18 +689,6 @@ static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
        elv_unregister_queue(q);
        old_elevator = q->elevator;
 
-       /*
-        * next step, switch to noop since it uses no private rq structures
-        * and doesn't allocate any memory for anything. then wait for any
-        * non-fs requests in-flight
-        */
-       noop_elevator = elevator_get("noop");
-       spin_lock_irq(q->queue_lock);
-       elevator_attach(q, noop_elevator, e);
-       spin_unlock_irq(q->queue_lock);
-
-       blk_wait_queue_drained(q, 1);
-
        /*
         * attach and start new elevator
         */
@@ -630,11 +699,10 @@ static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
                goto fail_register;
 
        /*
-        * finally exit old elevator and start queue again
+        * finally exit old elevator and turn off BYPASS.
         */
        elevator_exit(old_elevator);
-       blk_finish_queue_drain(q);
-       elevator_put(noop_elevator);
+       clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
        return;
 
 fail_register:
@@ -643,13 +711,13 @@ fail_register:
         * one again (along with re-adding the sysfs dir)
         */
        elevator_exit(e);
+       e = NULL;
 fail:
        q->elevator = old_elevator;
        elv_register_queue(q);
-       blk_finish_queue_drain(q);
+       clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+       kfree(e);
 error:
-       if (noop_elevator)
-               elevator_put(noop_elevator);
        elevator_put(new_e);
        printk(KERN_ERR "elevator: switch to %s failed\n",new_e->elevator_name);
 }
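
The switch path no longer detours through the noop scheduler. Instead it sets QUEUE_FLAG_ELVSWITCH so that requests allocated from now on carry no elevator private data, force-drains the old scheduler's dispatch function, and sleeps in 10 ms steps until q->rq.elvpriv, the count of in-flight requests that still reference the old scheduler, reaches zero before attaching the new one. This is the path taken when a scheduler is selected at runtime through the queue's sysfs attribute, e.g. by writing the name to /sys/block/<disk>/queue/scheduler.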
@@ -701,11 +769,12 @@ ssize_t elv_iosched_show(request_queue_t *q, char *name)
        return len;
 }
 
+EXPORT_SYMBOL(elv_dispatch_sort);
 EXPORT_SYMBOL(elv_add_request);
 EXPORT_SYMBOL(__elv_add_request);
 EXPORT_SYMBOL(elv_requeue_request);
 EXPORT_SYMBOL(elv_next_request);
-EXPORT_SYMBOL(elv_remove_request);
+EXPORT_SYMBOL(elv_dequeue_request);
 EXPORT_SYMBOL(elv_queue_empty);
 EXPORT_SYMBOL(elv_completed_request);
 EXPORT_SYMBOL(elevator_exit);
index baedac522945a45ca7c886de93c6f1f9d6739765..0af73512b9a8c358e0bf4ae07922590c4700011d 100644 (file)
@@ -263,8 +263,6 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
        blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 
        blk_queue_activity_fn(q, NULL, NULL);
-
-       INIT_LIST_HEAD(&q->drain_list);
 }
 
 EXPORT_SYMBOL(blk_queue_make_request);
@@ -353,6 +351,8 @@ static void blk_pre_flush_end_io(struct request *flush_rq)
        struct request *rq = flush_rq->end_io_data;
        request_queue_t *q = rq->q;
 
+       elv_completed_request(q, flush_rq);
+
        rq->flags |= REQ_BAR_PREFLUSH;
 
        if (!flush_rq->errors)
@@ -369,6 +369,8 @@ static void blk_post_flush_end_io(struct request *flush_rq)
        struct request *rq = flush_rq->end_io_data;
        request_queue_t *q = rq->q;
 
+       elv_completed_request(q, flush_rq);
+
        rq->flags |= REQ_BAR_POSTFLUSH;
 
        q->end_flush_fn(q, flush_rq);
@@ -408,8 +410,6 @@ struct request *blk_start_pre_flush(request_queue_t *q, struct request *rq)
        if (!list_empty(&rq->queuelist))
                blkdev_dequeue_request(rq);
 
-       elv_deactivate_request(q, rq);
-
        flush_rq->end_io_data = rq;
        flush_rq->end_io = blk_pre_flush_end_io;
 
@@ -1040,6 +1040,7 @@ EXPORT_SYMBOL(blk_queue_invalidate_tags);
 static char *rq_flags[] = {
        "REQ_RW",
        "REQ_FAILFAST",
+       "REQ_SORTED",
        "REQ_SOFTBARRIER",
        "REQ_HARDBARRIER",
        "REQ_CMD",
@@ -1047,6 +1048,7 @@ static char *rq_flags[] = {
        "REQ_STARTED",
        "REQ_DONTPREP",
        "REQ_QUEUED",
+       "REQ_ELVPRIV",
        "REQ_PC",
        "REQ_BLOCK_PC",
        "REQ_SENSE",
@@ -1637,9 +1639,9 @@ static int blk_init_free_list(request_queue_t *q)
 
        rl->count[READ] = rl->count[WRITE] = 0;
        rl->starved[READ] = rl->starved[WRITE] = 0;
+       rl->elvpriv = 0;
        init_waitqueue_head(&rl->wait[READ]);
        init_waitqueue_head(&rl->wait[WRITE]);
-       init_waitqueue_head(&rl->drain);
 
        rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
                                mempool_free_slab, request_cachep, q->node);
@@ -1652,13 +1654,13 @@ static int blk_init_free_list(request_queue_t *q)
 
 static int __make_request(request_queue_t *, struct bio *);
 
-request_queue_t *blk_alloc_queue(int gfp_mask)
+request_queue_t *blk_alloc_queue(gfp_t gfp_mask)
 {
        return blk_alloc_queue_node(gfp_mask, -1);
 }
 EXPORT_SYMBOL(blk_alloc_queue);
 
-request_queue_t *blk_alloc_queue_node(int gfp_mask, int node_id)
+request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 {
        request_queue_t *q;
 
@@ -1782,12 +1784,14 @@ EXPORT_SYMBOL(blk_get_queue);
 
 static inline void blk_free_request(request_queue_t *q, struct request *rq)
 {
-       elv_put_request(q, rq);
+       if (rq->flags & REQ_ELVPRIV)
+               elv_put_request(q, rq);
        mempool_free(rq, q->rq.rq_pool);
 }
 
 static inline struct request *
-blk_alloc_request(request_queue_t *q, int rw, struct bio *bio, int gfp_mask)
+blk_alloc_request(request_queue_t *q, int rw, struct bio *bio,
+                 int priv, gfp_t gfp_mask)
 {
        struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
 
@@ -1800,11 +1804,15 @@ blk_alloc_request(request_queue_t *q, int rw, struct bio *bio, int gfp_mask)
         */
        rq->flags = rw;
 
-       if (!elv_set_request(q, rq, bio, gfp_mask))
-               return rq;
+       if (priv) {
+               if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) {
+                       mempool_free(rq, q->rq.rq_pool);
+                       return NULL;
+               }
+               rq->flags |= REQ_ELVPRIV;
+       }
 
-       mempool_free(rq, q->rq.rq_pool);
-       return NULL;
+       return rq;
 }
 
 /*
@@ -1860,22 +1868,18 @@ static void __freed_request(request_queue_t *q, int rw)
  * A request has just been released.  Account for it, update the full and
  * congestion status, wake up any waiters.   Called under q->queue_lock.
  */
-static void freed_request(request_queue_t *q, int rw)
+static void freed_request(request_queue_t *q, int rw, int priv)
 {
        struct request_list *rl = &q->rq;
 
        rl->count[rw]--;
+       if (priv)
+               rl->elvpriv--;
 
        __freed_request(q, rw);
 
        if (unlikely(rl->starved[rw ^ 1]))
                __freed_request(q, rw ^ 1);
-
-       if (!rl->count[READ] && !rl->count[WRITE]) {
-               smp_mb();
-               if (unlikely(waitqueue_active(&rl->drain)))
-                       wake_up(&rl->drain);
-       }
 }
 
 #define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
@@ -1885,14 +1889,12 @@ static void freed_request(request_queue_t *q, int rw)
  * Returns !NULL on success, with queue_lock *not held*.
  */
 static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
-                                  int gfp_mask)
+                                  gfp_t gfp_mask)
 {
        struct request *rq = NULL;
        struct request_list *rl = &q->rq;
        struct io_context *ioc = current_io_context(GFP_ATOMIC);
-
-       if (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)))
-               goto out;
+       int priv;
 
        if (rl->count[rw]+1 >= q->nr_requests) {
                /*
@@ -1937,9 +1939,14 @@ get_rq:
        rl->starved[rw] = 0;
        if (rl->count[rw] >= queue_congestion_on_threshold(q))
                set_queue_congested(q, rw);
+
+       priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+       if (priv)
+               rl->elvpriv++;
+
        spin_unlock_irq(q->queue_lock);
 
-       rq = blk_alloc_request(q, rw, bio, gfp_mask);
+       rq = blk_alloc_request(q, rw, bio, priv, gfp_mask);
        if (!rq) {
                /*
                 * Allocation failed presumably due to memory. Undo anything
@@ -1949,7 +1956,7 @@ get_rq:
                 * wait queue, but this is pretty rare.
                 */
                spin_lock_irq(q->queue_lock);
-               freed_request(q, rw);
+               freed_request(q, rw, priv);
 
                /*
                 * in the very unlikely event that allocation failed and no
@@ -2019,7 +2026,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
        return rq;
 }
 
-struct request *blk_get_request(request_queue_t *q, int rw, int gfp_mask)
+struct request *blk_get_request(request_queue_t *q, int rw, gfp_t gfp_mask)
 {
        struct request *rq;
 
@@ -2251,7 +2258,7 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
  * @gfp_mask:  memory allocation flags
  */
 int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
-                   unsigned int len, unsigned int gfp_mask)
+                   unsigned int len, gfp_t gfp_mask)
 {
        struct bio *bio;
 
@@ -2433,13 +2440,15 @@ void disk_round_stats(struct gendisk *disk)
 {
        unsigned long now = jiffies;
 
-       __disk_stat_add(disk, time_in_queue,
-                       disk->in_flight * (now - disk->stamp));
-       disk->stamp = now;
+       if (now == disk->stamp)
+               return;
 
-       if (disk->in_flight)
-               __disk_stat_add(disk, io_ticks, (now - disk->stamp_idle));
-       disk->stamp_idle = now;
+       if (disk->in_flight) {
+               __disk_stat_add(disk, time_in_queue,
+                               disk->in_flight * (now - disk->stamp));
+               __disk_stat_add(disk, io_ticks, (now - disk->stamp));
+       }
+       disk->stamp = now;
 }
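
The disk_round_stats() change folds the old stamp/stamp_idle pair into a single stamp, bails out when called twice within the same jiffy, and advances io_ticks and time_in_queue only while requests are actually in flight. A worked example of the accounting (toy user-space model, not kernel code):

/* Toy model of the fixed accounting in disk_round_stats(). */
#include <stdio.h>

static unsigned long stamp, time_in_queue, io_ticks;

static void round_stats(unsigned long now, unsigned int in_flight)
{
	if (now == stamp)
		return;
	if (in_flight) {
		time_in_queue += in_flight * (now - stamp);
		io_ticks += now - stamp;
	}
	stamp = now;
}

int main(void)
{
	stamp = 1000;
	round_stats(1004, 3);	/* 3 requests in flight for 4 jiffies */
	round_stats(1004, 3);	/* same jiffy: no double counting */
	printf("time_in_queue=%lu io_ticks=%lu\n", time_in_queue, io_ticks);
	/* prints time_in_queue=12 io_ticks=4 */
	return 0;
}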
 
 /*
@@ -2454,6 +2463,8 @@ static void __blk_put_request(request_queue_t *q, struct request *req)
        if (unlikely(--req->ref_count))
                return;
 
+       elv_completed_request(q, req);
+
        req->rq_status = RQ_INACTIVE;
        req->rl = NULL;
 
@@ -2463,26 +2474,25 @@ static void __blk_put_request(request_queue_t *q, struct request *req)
         */
        if (rl) {
                int rw = rq_data_dir(req);
-
-               elv_completed_request(q, req);
+               int priv = req->flags & REQ_ELVPRIV;
 
                BUG_ON(!list_empty(&req->queuelist));
 
                blk_free_request(q, req);
-               freed_request(q, rw);
+               freed_request(q, rw, priv);
        }
 }
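
Together with the allocation side shown above (get_request() bumps rl->elvpriv and blk_alloc_request() tags the request REQ_ELVPRIV only when the queue is not in the middle of an elevator switch), this release path gives elevator_switch() something reliable to wait on: freed_request() drops rl->elvpriv only for requests that actually carried private data, and elv_completed_request() is now called for every request put back, including the barrier flush requests handled in blk_pre_flush_end_io()/blk_post_flush_end_io(). A toy model of the counter handshake (user-space sketch, not kernel code):

/* Toy model of the rl->elvpriv handshake used by the elevator switch. */
#include <stdio.h>

static int bypass;	/* models QUEUE_FLAG_ELVSWITCH */
static int elvpriv;	/* models q->rq.elvpriv */

static int alloc_request(void)
{
	int priv = !bypass;

	if (priv)
		elvpriv++;	/* request would get REQ_ELVPRIV */
	return priv;
}

static void free_request(int priv)
{
	if (priv)
		elvpriv--;
}

int main(void)
{
	int a = alloc_request();	/* normal request, counted */
	int b;

	bypass = 1;			/* elevator switch begins */
	b = alloc_request();		/* allocated during the switch, not counted */
	free_request(b);

	printf("still to drain: %d\n", elvpriv);	/* 1 */
	free_request(a);
	printf("still to drain: %d\n", elvpriv);	/* 0, switch may proceed */
	return 0;
}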
 
 void blk_put_request(struct request *req)
 {
+       unsigned long flags;
+       request_queue_t *q = req->q;
+
        /*
-        * if req->rl isn't set, this request didnt originate from the
-        * block layer, so it's safe to just disregard it
+        * Gee, IDE calls in w/ NULL q.  Fix IDE and remove the
+        * following if (q) test.
         */
-       if (req->rl) {
-               unsigned long flags;
-               request_queue_t *q = req->q;
-
+       if (q) {
                spin_lock_irqsave(q->queue_lock, flags);
                __blk_put_request(q, req);
                spin_unlock_irqrestore(q->queue_lock, flags);
@@ -2797,97 +2807,6 @@ static inline void blk_partition_remap(struct bio *bio)
        }
 }
 
-void blk_finish_queue_drain(request_queue_t *q)
-{
-       struct request_list *rl = &q->rq;
-       struct request *rq;
-       int requeued = 0;
-
-       spin_lock_irq(q->queue_lock);
-       clear_bit(QUEUE_FLAG_DRAIN, &q->queue_flags);
-
-       while (!list_empty(&q->drain_list)) {
-               rq = list_entry_rq(q->drain_list.next);
-
-               list_del_init(&rq->queuelist);
-               elv_requeue_request(q, rq);
-               requeued++;
-       }
-
-       if (requeued)
-               q->request_fn(q);
-
-       spin_unlock_irq(q->queue_lock);
-
-       wake_up(&rl->wait[0]);
-       wake_up(&rl->wait[1]);
-       wake_up(&rl->drain);
-}
-
-static int wait_drain(request_queue_t *q, struct request_list *rl, int dispatch)
-{
-       int wait = rl->count[READ] + rl->count[WRITE];
-
-       if (dispatch)
-               wait += !list_empty(&q->queue_head);
-
-       return wait;
-}
-
-/*
- * We rely on the fact that only requests allocated through blk_alloc_request()
- * have io scheduler private data structures associated with them. Any other
- * type of request (allocated on stack or through kmalloc()) should not go
- * to the io scheduler core, but be attached to the queue head instead.
- */
-void blk_wait_queue_drained(request_queue_t *q, int wait_dispatch)
-{
-       struct request_list *rl = &q->rq;
-       DEFINE_WAIT(wait);
-
-       spin_lock_irq(q->queue_lock);
-       set_bit(QUEUE_FLAG_DRAIN, &q->queue_flags);
-
-       while (wait_drain(q, rl, wait_dispatch)) {
-               prepare_to_wait(&rl->drain, &wait, TASK_UNINTERRUPTIBLE);
-
-               if (wait_drain(q, rl, wait_dispatch)) {
-                       __generic_unplug_device(q);
-                       spin_unlock_irq(q->queue_lock);
-                       io_schedule();
-                       spin_lock_irq(q->queue_lock);
-               }
-
-               finish_wait(&rl->drain, &wait);
-       }
-
-       spin_unlock_irq(q->queue_lock);
-}
-
-/*
- * block waiting for the io scheduler being started again.
- */
-static inline void block_wait_queue_running(request_queue_t *q)
-{
-       DEFINE_WAIT(wait);
-
-       while (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags))) {
-               struct request_list *rl = &q->rq;
-
-               prepare_to_wait_exclusive(&rl->drain, &wait,
-                               TASK_UNINTERRUPTIBLE);
-
-               /*
-                * re-check the condition. avoids using prepare_to_wait()
-                * in the fast path (queue is running)
-                */
-               if (test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags))
-                       io_schedule();
-
-               finish_wait(&rl->drain, &wait);
-       }
-}
-
 static void handle_bad_sector(struct bio *bio)
 {
        char b[BDEVNAME_SIZE];
@@ -2983,8 +2902,6 @@ end_io:
                if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
                        goto end_io;
 
-               block_wait_queue_running(q);
-
                /*
                 * If this device has partitions, remap block n
                 * of partition p to block n+start(p) of the disk.
@@ -3393,7 +3310,7 @@ void exit_io_context(void)
  * but since the current task itself holds a reference, the context can be
  * used in general code, so long as it stays within `current` context.
  */
-struct io_context *current_io_context(int gfp_flags)
+struct io_context *current_io_context(gfp_t gfp_flags)
 {
        struct task_struct *tsk = current;
        struct io_context *ret;
@@ -3424,7 +3341,7 @@ EXPORT_SYMBOL(current_io_context);
  *
  * This is always called in the context of the task which submitted the I/O.
  */
-struct io_context *get_io_context(int gfp_flags)
+struct io_context *get_io_context(gfp_t gfp_flags)
 {
        struct io_context *ret;
        ret = current_io_context(gfp_flags);
index b35e08876dd4b77cb1b661571c204d51e610fcd9..96c664af8d069f9f5d96d77efea7b293f2549fe6 100644 (file)
@@ -881,7 +881,7 @@ loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer,
 static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
 {
        struct file *filp = lo->lo_backing_file;
-       int gfp = lo->old_gfp_mask;
+       gfp_t gfp = lo->old_gfp_mask;
 
        if (lo->lo_state != Lo_bound)
                return -ENXIO;
index b1730b62c37e4793a2401df975680c1231903d16..f56b8edb06e42b217b46c9556c23ca30ef6a6063 100644 (file)
@@ -7,57 +7,19 @@
 #include <linux/module.h>
 #include <linux/init.h>
 
-/*
- * See if we can find a request that this buffer can be coalesced with.
- */
-static int elevator_noop_merge(request_queue_t *q, struct request **req,
-                              struct bio *bio)
-{
-       int ret;
-
-       ret = elv_try_last_merge(q, bio);
-       if (ret != ELEVATOR_NO_MERGE)
-               *req = q->last_merge;
-
-       return ret;
-}
-
-static void elevator_noop_merge_requests(request_queue_t *q, struct request *req,
-                                        struct request *next)
-{
-       list_del_init(&next->queuelist);
-}
-
-static void elevator_noop_add_request(request_queue_t *q, struct request *rq,
-                                     int where)
+static void elevator_noop_add_request(request_queue_t *q, struct request *rq)
 {
-       if (where == ELEVATOR_INSERT_FRONT)
-               list_add(&rq->queuelist, &q->queue_head);
-       else
-               list_add_tail(&rq->queuelist, &q->queue_head);
-
-       /*
-        * new merges must not precede this barrier
-        */
-       if (rq->flags & REQ_HARDBARRIER)
-               q->last_merge = NULL;
-       else if (!q->last_merge)
-               q->last_merge = rq;
+       elv_dispatch_add_tail(q, rq);
 }
 
-static struct request *elevator_noop_next_request(request_queue_t *q)
+static int elevator_noop_dispatch(request_queue_t *q, int force)
 {
-       if (!list_empty(&q->queue_head))
-               return list_entry_rq(q->queue_head.next);
-
-       return NULL;
+       return 0;
 }
 
 static struct elevator_type elevator_noop = {
        .ops = {
-               .elevator_merge_fn              = elevator_noop_merge,
-               .elevator_merge_req_fn          = elevator_noop_merge_requests,
-               .elevator_next_req_fn           = elevator_noop_next_request,
+               .elevator_dispatch_fn           = elevator_noop_dispatch,
                .elevator_add_req_fn            = elevator_noop_add_request,
        },
        .elevator_name = "noop",
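With the elevator rework merged here, the noop scheduler needs only two hooks: the add_req hook puts the request straight onto the dispatch list via elv_dispatch_add_tail(), and the dispatch hook returns 0 because nothing is ever held back for later. For orientation, a sketch of how an elevator_type of this shape is registered in this kernel generation; the registration code sits outside the hunk shown and the body below is assumed, not quoted from the file:

    /* Sketch, assuming the 2.6.14-era elv_register()/elv_unregister() API. */
    static int __init noop_init(void)
    {
            return elv_register(&elevator_noop);
    }

    static void __exit noop_exit(void)
    {
            elv_unregister(&elevator_noop);
    }

    module_init(noop_init);
    module_exit(noop_exit);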
index 145c1fbffe0135648992806cf3166fd36fa89832..68c60a5bcdabaf4f172854bbd1df959ecbe80689 100644 (file)
@@ -348,7 +348,7 @@ static int rd_open(struct inode *inode, struct file *filp)
                struct block_device *bdev = inode->i_bdev;
                struct address_space *mapping;
                unsigned bsize;
-               int gfp_mask;
+               gfp_t gfp_mask;
 
                inode = igrab(bdev->bd_inode);
                rd_bdev[unit] = bdev;
index d57007b92f77baf17521806fca3f408e70a7729c..1ded3b433459238df7fe9453c493f0a876905905 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  sx8.c: Driver for Promise SATA SX8 looks-like-I2O hardware
  *
- *  Copyright 2004 Red Hat, Inc.
+ *  Copyright 2004-2005 Red Hat, Inc.
  *
  *  Author/maintainer:  Jeff Garzik <jgarzik@pobox.com>
  *
 #include <asm/semaphore.h>
 #include <asm/uaccess.h>
 
-MODULE_AUTHOR("Jeff Garzik");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Promise SATA SX8 block driver");
-
 #if 0
 #define CARM_DEBUG
 #define CARM_VERBOSE_DEBUG
@@ -45,9 +41,35 @@ MODULE_DESCRIPTION("Promise SATA SX8 block driver");
 #undef CARM_NDEBUG
 
 #define DRV_NAME "sx8"
-#define DRV_VERSION "0.8"
+#define DRV_VERSION "1.0"
 #define PFX DRV_NAME ": "
 
+MODULE_AUTHOR("Jeff Garzik");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Promise SATA SX8 block driver");
+MODULE_VERSION(DRV_VERSION);
+
+/*
+ * SX8 hardware has a single message queue for all ATA ports.
+ * When this driver was written, the hardware (firmware?) would
+ * corrupt data eventually, if more than one request was outstanding.
+ * As one can imagine, having 8 ports bottlenecking on a single
+ * command hurts performance.
+ *
+ * Based on user reports, later versions of the hardware (firmware?)
+ * seem to be able to survive with more than one command queued.
+ *
+ * Therefore, we default to the safe option -- 1 command -- but
+ * allow the user to increase this.
+ *
+ * SX8 should be able to support up to ~60 queued commands (CARM_MAX_REQ),
+ * but problems seem to occur when you exceed ~30, even on newer hardware.
+ */
+static int max_queue = 1;
+module_param(max_queue, int, 0444);
+MODULE_PARM_DESC(max_queue, "Maximum number of queued commands. (min==1, max==30, safe==1)");
+
+
 #define NEXT_RESP(idx) ((idx + 1) % RMSG_Q_LEN)
 
 /* 0xf is just arbitrary, non-zero noise; this is sorta like poisoning */
@@ -90,12 +112,10 @@ enum {
 
        /* command message queue limits */
        CARM_MAX_REQ            = 64,          /* max command msgs per host */
-       CARM_MAX_Q              = 1,               /* one command at a time */
        CARM_MSG_LOW_WATER      = (CARM_MAX_REQ / 4),        /* refill mark */
 
        /* S/G limits, host-wide and per-request */
        CARM_MAX_REQ_SG         = 32,        /* max s/g entries per request */
-       CARM_SG_BOUNDARY        = 0xffffUL,         /* s/g segment boundary */
        CARM_MAX_HOST_SG        = 600,          /* max s/g entries per host */
        CARM_SG_LOW_WATER       = (CARM_MAX_HOST_SG / 4),   /* re-fill mark */
 
@@ -181,6 +201,10 @@ enum {
        FL_DYN_MAJOR            = (1 << 17),
 };
 
+enum {
+       CARM_SG_BOUNDARY        = 0xffffUL,         /* s/g segment boundary */
+};
+
 enum scatter_gather_types {
        SGT_32BIT               = 0,
        SGT_64BIT               = 1,
@@ -218,7 +242,6 @@ static const char *state_name[] = {
 
 struct carm_port {
        unsigned int                    port_no;
-       unsigned int                    n_queued;
        struct gendisk                  *disk;
        struct carm_host                *host;
 
@@ -448,7 +471,7 @@ static inline int carm_lookup_bucket(u32 msg_size)
        for (i = 0; i < ARRAY_SIZE(msg_sizes); i++)
                if (msg_size <= msg_sizes[i])
                        return i;
-       
+
        return -ENOENT;
 }
 
@@ -509,7 +532,7 @@ static struct carm_request *carm_get_request(struct carm_host *host)
        if (host->hw_sg_used >= (CARM_MAX_HOST_SG - CARM_MAX_REQ_SG))
                return NULL;
 
-       for (i = 0; i < CARM_MAX_Q; i++)
+       for (i = 0; i < max_queue; i++)
                if ((host->msg_alloc & (1ULL << i)) == 0) {
                        struct carm_request *crq = &host->req[i];
                        crq->port = NULL;
@@ -521,14 +544,14 @@ static struct carm_request *carm_get_request(struct carm_host *host)
                        assert(host->n_msgs <= CARM_MAX_REQ);
                        return crq;
                }
-       
+
        DPRINTK("no request available, returning NULL\n");
        return NULL;
 }
 
 static int carm_put_request(struct carm_host *host, struct carm_request *crq)
 {
-       assert(crq->tag < CARM_MAX_Q);
+       assert(crq->tag < max_queue);
 
        if (unlikely((host->msg_alloc & (1ULL << crq->tag)) == 0))
                return -EINVAL; /* tried to clear a tag that was not active */
@@ -791,7 +814,7 @@ static inline void carm_end_rq(struct carm_host *host, struct carm_request *crq,
                        int is_ok)
 {
        carm_end_request_queued(host, crq, is_ok);
-       if (CARM_MAX_Q == 1)
+       if (max_queue == 1)
                carm_round_robin(host);
        else if ((host->n_msgs <= CARM_MSG_LOW_WATER) &&
                 (host->hw_sg_used <= CARM_SG_LOW_WATER)) {
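The max_queue module parameter replaces the old compile-time CARM_MAX_Q constant, so the queue depth can now be raised at load time (for example, modprobe sx8 max_queue=8) while the default stays at the safe single outstanding command. A hypothetical sketch of clamping an out-of-range value back to the default at init time; carm_init/carm_driver are assumed to be the driver's usual module entry points, and the validation itself is illustrative rather than part of this patch:

    /* Illustrative only: keep a bogus max_queue inside the documented range. */
    static int __init carm_init(void)
    {
            if (max_queue < 1 || max_queue > 30)
                    max_queue = 1;
            return pci_module_init(&carm_driver);
    }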
index c9bdf544ed2cd1126f6d67840c082ee6bb53d42e..c556f4d3ccd7cda2122fa91098cce56491d3387d 100644 (file)
@@ -62,7 +62,7 @@
 
 static inline unsigned char *alloc_buf(void)
 {
-       unsigned int prio = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
+       gfp_t prio = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
 
        if (PAGE_SIZE != N_TTY_BUF_SIZE)
                return kmalloc(N_TTY_BUF_SIZE, prio);
index 1af733d07321a8f5de4c5412745fd46428ce7332..9e24bbd4090cc71ed13b1a7f54dbe2b77909bdd1 100644 (file)
  *             added changelog
  *     1.2     Erik Gilling: Cobalt Networks support
  *             Tim Hockin: general cleanup, Cobalt support
+ *     1.3     Jon Ringle: Comdial MP1000 support
+ *
  */
 
-#define NVRAM_VERSION  "1.2"
+#define NVRAM_VERSION  "1.3"
 
 #include <linux/module.h>
 #include <linux/config.h>
@@ -45,6 +47,7 @@
 #define PC             1
 #define ATARI          2
 #define COBALT         3
+#define MP1000         4
 
 /* select machine configuration */
 #if defined(CONFIG_ATARI)
@@ -54,6 +57,9 @@
 #  if defined(CONFIG_COBALT)
 #    include <linux/cobalt-nvram.h>
 #    define MACH COBALT
+#  elif defined(CONFIG_MACH_MP1000)
+#    undef MACH
+#    define MACH MP1000
 #  else
 #    define MACH PC
 #  endif
 
 #endif
 
+#if MACH == MP1000
+
+/* RTC in a MP1000 */
+#define CHECK_DRIVER_INIT()    1
+
+#define MP1000_CKS_RANGE_START 0
+#define MP1000_CKS_RANGE_END   111
+#define MP1000_CKS_LOC         112
+
+#define NVRAM_BYTES            (128-NVRAM_FIRST_BYTE)
+
+#define mach_check_checksum    mp1000_check_checksum
+#define mach_set_checksum      mp1000_set_checksum
+#define mach_proc_infos                mp1000_proc_infos
+
+#endif
+
 /* Note that *all* calls to CMOS_READ and CMOS_WRITE must be done with
  * rtc_lock held. Due to the index-port/data-port design of the RTC, we
  * don't want two different things trying to get to it at once. (e.g. the
@@ -915,6 +938,91 @@ atari_proc_infos(unsigned char *nvram, char *buffer, int *len,
 
 #endif /* MACH == ATARI */
 
+#if MACH == MP1000
+
+static int
+mp1000_check_checksum(void)
+{
+       int i;
+       unsigned short sum = 0;
+       unsigned short expect;
+
+       for (i = MP1000_CKS_RANGE_START; i <= MP1000_CKS_RANGE_END; ++i)
+               sum += __nvram_read_byte(i);
+
+        expect = __nvram_read_byte(MP1000_CKS_LOC+1)<<8 |
+           __nvram_read_byte(MP1000_CKS_LOC);
+       return ((sum & 0xffff) == expect);
+}
+
+static void
+mp1000_set_checksum(void)
+{
+       int i;
+       unsigned short sum = 0;
+
+       for (i = MP1000_CKS_RANGE_START; i <= MP1000_CKS_RANGE_END; ++i)
+               sum += __nvram_read_byte(i);
+       __nvram_write_byte(sum >> 8, MP1000_CKS_LOC + 1);
+       __nvram_write_byte(sum & 0xff, MP1000_CKS_LOC);
+}
+
+#ifdef CONFIG_PROC_FS
+
+#define         SERVER_N_LEN         32
+#define         PATH_N_LEN           32
+#define         FILE_N_LEN           32
+#define         NVRAM_MAGIC_SIG      0xdead
+
+typedef struct NvRamImage
+{
+       unsigned short int    magic;
+       unsigned short int    mode;
+       char                  fname[FILE_N_LEN];
+       char                  path[PATH_N_LEN];
+       char                  server[SERVER_N_LEN];
+       char                  pad[12];
+} NvRam;
+
+static int
+mp1000_proc_infos(unsigned char *nvram, char *buffer, int *len,
+    off_t *begin, off_t offset, int size)
+{
+       int checksum;
+        NvRam* nv = (NvRam*)nvram;
+
+       spin_lock_irq(&rtc_lock);
+       checksum = __nvram_check_checksum();
+       spin_unlock_irq(&rtc_lock);
+
+       PRINT_PROC("Checksum status: %svalid\n", checksum ? "" : "not ");
+
+        switch( nv->mode )
+        {
+           case 0 :
+                    PRINT_PROC( "\tMode 0, tftp prompt\n" );
+                    break;
+           case 1 :
+                    PRINT_PROC( "\tMode 1, booting from disk\n" );
+                    break;
+           case 2 :
+                    PRINT_PROC( "\tMode 2, Alternate boot from disk /boot/%s\n", nv->fname );
+                    break;
+           case 3 :
+                    PRINT_PROC( "\tMode 3, Booting from net:\n" );
+                    PRINT_PROC( "\t\t%s:%s%s\n",nv->server, nv->path, nv->fname );
+                    break;
+           default:
+                    PRINT_PROC( "\tInconsistent nvram?\n" );
+                    break;
+        }
+
+       return 1;
+}
+#endif
+
+#endif /* MACH == MP1000 */
+
 MODULE_LICENSE("GPL");
 
 EXPORT_SYMBOL(__nvram_read_byte);
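The MP1000 checksum added above is a plain 16-bit sum over NVRAM bytes 0..111, stored little-endian at offsets 112/113. A hedged userspace sketch of re-checking it through /dev/nvram, assuming the standard nvram character device exposes the same byte offsets that __nvram_read_byte() uses:

    /* Illustrative userspace check of the MP1000 NVRAM checksum. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            unsigned char buf[114];
            unsigned short sum = 0, expect;
            int i, fd = open("/dev/nvram", O_RDONLY);

            if (fd < 0 || read(fd, buf, sizeof(buf)) != sizeof(buf))
                    return 1;
            for (i = 0; i <= 111; i++)
                    sum += buf[i];
            expect = buf[113] << 8 | buf[112];
            printf("checksum %s\n", sum == expect ? "valid" : "not valid");
            close(fd);
            return 0;
    }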
index 4802bbbb6dc9817c9b4f49b08bcad65268f8f84a..c9e92d85c8931e62516a642d74d3a3b016382bc4 100644 (file)
@@ -1630,7 +1630,7 @@ static void ether1394_complete_cb(void *__ptask)
 /* Transmit a packet (called by kernel) */
 static int ether1394_tx (struct sk_buff *skb, struct net_device *dev)
 {
-       int kmflags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
+       gfp_t kmflags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
        struct eth1394hdr *eth;
        struct eth1394_priv *priv = netdev_priv(dev);
        int proto;
index f6a8ac026557db5639d446bf1188cf9d73b22739..378646b5a1b8389339e31569dc3ee15b0e619bd4 100644 (file)
@@ -524,7 +524,7 @@ void mthca_cmd_use_polling(struct mthca_dev *dev)
 }
 
 struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev,
-                                         unsigned int gfp_mask)
+                                         gfp_t gfp_mask)
 {
        struct mthca_mailbox *mailbox;
 
index 65f976a13e02065469c99001d41d2a09201853e7..18175bec84c27619a69839f1b3e84c527c03a9aa 100644 (file)
@@ -248,7 +248,7 @@ void mthca_cmd_event(struct mthca_dev *dev, u16 token,
                     u8  status, u64 out_param);
 
 struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev,
-                                         unsigned int gfp_mask);
+                                         gfp_t gfp_mask);
 void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox);
 
 int mthca_SYS_EN(struct mthca_dev *dev, u8 *status);
index 7bd7a4bec7b433f2e533602681a75d7e06553940..9ad8b3b6cfef9e9a5a01615dd07507a15bdbed0c 100644 (file)
@@ -82,7 +82,7 @@ void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm)
 }
 
 struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
-                                 unsigned int gfp_mask)
+                                 gfp_t gfp_mask)
 {
        struct mthca_icm *icm;
        struct mthca_icm_chunk *chunk = NULL;
index bafa51544aa39db8db19cf2626361550fec7ea89..29433f295253afc530169a10cbe4c7e5f96a66b3 100644 (file)
@@ -77,7 +77,7 @@ struct mthca_icm_iter {
 struct mthca_dev;
 
 struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
-                                 unsigned int gfp_mask);
+                                 gfp_t gfp_mask);
 void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm);
 
 struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
index 2fba2bbe72d8f627136d5983b40c421c55220f6d..01654fcabc52128ce866728b24ddaa73c1437a1f 100644 (file)
@@ -91,7 +91,7 @@ int bitmap_active(struct bitmap *bitmap)
 
 #define WRITE_POOL_SIZE 256
 /* mempool for queueing pending writes on the bitmap file */
-static void *write_pool_alloc(unsigned int gfp_flags, void *data)
+static void *write_pool_alloc(gfp_t gfp_flags, void *data)
 {
        return kmalloc(sizeof(struct page_list), gfp_flags);
 }
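write_pool_alloc() above is a mempool allocator callback, which is why its first argument follows the same int-to-gfp_t conversion as the rest of this merge. A small hedged sketch of the surrounding idiom, with made-up names, using the standard mempool_create() interface of this era:

    /* Illustrative: a kmalloc-backed mempool whose callbacks take gfp_t. */
    #include <linux/mempool.h>
    #include <linux/slab.h>

    struct pl_item {
            struct pl_item *next;
            unsigned long data;
    };

    static void *pl_alloc(gfp_t gfp_mask, void *pool_data)
    {
            return kmalloc(sizeof(struct pl_item), gfp_mask);
    }

    static void pl_free(void *element, void *pool_data)
    {
            kfree(element);
    }

    /* e.g. at init:  mempool_t *pool = mempool_create(256, pl_alloc, pl_free, NULL); */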
index b6148f6f78367230accacaf0770b74a5b0bc30df..28c1a628621fa21a7c37f1a9534ee090e834621c 100644 (file)
@@ -331,7 +331,7 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
 {
        struct bio *bio;
        unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-       int gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
+       gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
        unsigned int i;
 
        /*
index 91c74843dc0d8c54c88bec1973a44eda18b0866b..1e6bdba2675639568443cc1afb7ca5e603ae151a 100644 (file)
@@ -24,6 +24,7 @@
 #include <asm/io.h>
 #include <asm/irq.h>
 #include <asm/scatterlist.h>
+#include <asm/sizes.h>
 #include <asm/hardware/amba.h>
 #include <asm/hardware/clock.h>
 #include <asm/mach/mmc.h>
index 8dcaa357b4bb13bbaba9e543f1b45dc49ce880b3..9133351ba4831859b11951f1938f1a91f02ef838 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/concat.h>
 
+#include <asm/hardware.h>
 #include <asm/io.h>
 #include <asm/sizes.h>
 #include <asm/mach/flash.h>
index bc537440ca025931124c278d81f66a3d742cabf6..f822cd3025ff07b154e48e7cd2d226e21f4f6898 100644 (file)
@@ -1027,8 +1027,7 @@ static void cp_reset_hw (struct cp_private *cp)
                if (!(cpr8(Cmd) & CmdReset))
                        return;
 
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(10);
+               schedule_timeout_uninterruptible(10);
        }
 
        printk(KERN_ERR "%s: hardware reset timeout\n", cp->dev->name);
@@ -1575,6 +1574,7 @@ static struct ethtool_ops cp_ethtool_ops = {
        .set_wol                = cp_set_wol,
        .get_strings            = cp_get_strings,
        .get_ethtool_stats      = cp_get_ethtool_stats,
+       .get_perm_addr          = ethtool_op_get_perm_addr,
 };
 
 static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
@@ -1773,6 +1773,7 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
        for (i = 0; i < 3; i++)
                ((u16 *) (dev->dev_addr))[i] =
                    le16_to_cpu (read_eeprom (regs, i + 7, addr_len));
+       memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 
        dev->open = cp_open;
        dev->stop = cp_close;
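This is the first of several identical additions in this merge (8139too, b44, e100 and e1000 follow the same pattern below): the probe path records the factory MAC in dev->perm_addr next to dev_addr, and the generic ethtool_op_get_perm_addr helper reports it for ETHTOOL_GPERMADDR. A compact sketch of the pattern with made-up driver names:

    /* Sketch of the permanent-address support pattern added across drivers here. */
    #include <linux/etherdevice.h>
    #include <linux/ethtool.h>
    #include <linux/netdevice.h>
    #include <linux/string.h>

    static struct ethtool_ops example_ethtool_ops = {
            /* ... driver-specific ops ... */
            .get_perm_addr  = ethtool_op_get_perm_addr,
    };

    static void example_record_mac(struct net_device *dev, const u8 *eeprom_mac)
    {
            memcpy(dev->dev_addr, eeprom_mac, ETH_ALEN);
            /* keep an untouched copy of the factory address */
            memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
    }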
index 4c2cf7bbd252927c40a018bb8d69b0e3c26b900e..30bee11c48bd2d8364ac8f8e93abe47c5467593e 100644 (file)
@@ -552,7 +552,8 @@ const static struct {
 
        { "RTL-8100B/8139D",
          HW_REVID(1, 1, 1, 0, 1, 0, 1),
-         HasLWake,
+         HasHltClk /* XXX undocumented? */
+       | HasLWake,
        },
 
        { "RTL-8101",
@@ -970,6 +971,7 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
        for (i = 0; i < 3; i++)
                ((u16 *) (dev->dev_addr))[i] =
                    le16_to_cpu (read_eeprom (ioaddr, i + 7, addr_len));
+       memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 
        /* The Rtl8139-specific entries in the device structure. */
        dev->open = rtl8139_open;
@@ -2465,6 +2467,7 @@ static struct ethtool_ops rtl8139_ethtool_ops = {
        .get_strings            = rtl8139_get_strings,
        .get_stats_count        = rtl8139_get_stats_count,
        .get_ethtool_stats      = rtl8139_get_ethtool_stats,
+       .get_perm_addr          = ethtool_op_get_perm_addr,
 };
 
 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
index c748b0e1641921b195d5df144ae0addba10eb3b3..fee8c5cf1f3a8e9ab1c25becaafaba3d4d0c4a22 100644 (file)
@@ -475,6 +475,14 @@ config SGI_IOC3_ETH_HW_TX_CSUM
          the moment only acceleration of IPv4 is supported.  This option
          enables offloading for checksums on transmit.  If unsure, say Y.
 
+config MIPS_SIM_NET
+       tristate "MIPS simulator Network device (EXPERIMENTAL)"
+       depends on NETDEVICES && MIPS_SIM && EXPERIMENTAL
+       help
+         The MIPSNET device is a simple Ethernet network device which is
+         emulated by the MIPS Simulator.
+         If you are not using a MIPSsim or are unsure, say N.
+
 config SGI_O2MACE_ETH
        tristate "SGI O2 MACE Fast Ethernet support"
        depends on NET_ETHERNET && SGI_IP32=y
@@ -1330,7 +1338,7 @@ config FORCEDETH
 
 config CS89x0
        tristate "CS89x0 support"
-       depends on (NET_PCI && (ISA || ARCH_IXDP2X01)) || ARCH_PNX0105
+       depends on (NET_PCI && (ISA || ARCH_IXDP2X01)) || ARCH_PNX0105 || MACH_MP1000
        ---help---
          Support for CS89x0 chipset based Ethernet cards. If you have a
          network (Ethernet) card of this type, say Y and read the
@@ -2083,6 +2091,7 @@ config SPIDER_NET
 config GIANFAR
        tristate "Gianfar Ethernet"
        depends on 85xx || 83xx
+       select PHYLIB
        help
          This driver supports the Gigabit TSEC on the MPC85xx 
          family of chips, and the FEC on the 8540
@@ -2243,6 +2252,20 @@ config ISERIES_VETH
        tristate "iSeries Virtual Ethernet driver support"
        depends on PPC_ISERIES
 
+config RIONET
+       tristate "RapidIO Ethernet over messaging driver support"
+       depends on NETDEVICES && RAPIDIO
+
+config RIONET_TX_SIZE
+       int "Number of outbound queue entries"
+       depends on RIONET
+       default "128"
+
+config RIONET_RX_SIZE
+       int "Number of inbound queue entries"
+       depends on RIONET
+       default "128"
+
 config FDDI
        bool "FDDI driver support"
        depends on (PCI || EISA)
index 8aeec9f2495b5e712879b7735e95bf141ce49e05..1a84e0435f64b52d022aaa3f3ebd4efca0f62f4a 100644 (file)
@@ -13,7 +13,7 @@ obj-$(CONFIG_CHELSIO_T1) += chelsio/
 obj-$(CONFIG_BONDING) += bonding/
 obj-$(CONFIG_GIANFAR) += gianfar_driver.o
 
-gianfar_driver-objs := gianfar.o gianfar_ethtool.o gianfar_phy.o
+gianfar_driver-objs := gianfar.o gianfar_ethtool.o gianfar_mii.o
 
 #
 # link order important here
@@ -64,6 +64,7 @@ obj-$(CONFIG_SKFP) += skfp/
 obj-$(CONFIG_VIA_RHINE) += via-rhine.o
 obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o
 obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
+obj-$(CONFIG_RIONET) += rionet.o
 
 #
 # end link order section
@@ -166,6 +167,7 @@ obj-$(CONFIG_EQUALIZER) += eql.o
 obj-$(CONFIG_MIPS_JAZZ_SONIC) += jazzsonic.o
 obj-$(CONFIG_MIPS_GT96100ETH) += gt96100eth.o
 obj-$(CONFIG_MIPS_AU1X00_ENET) += au1000_eth.o
+obj-$(CONFIG_MIPS_SIM_NET) += mipsnet.o
 obj-$(CONFIG_SGI_IOC3_ETH) += ioc3-eth.o
 obj-$(CONFIG_DECLANCE) += declance.o
 obj-$(CONFIG_ATARILANCE) += atarilance.o
index c56d86d371a9c6d15eac175a7770c80f96d26b60..3d50e953faaabafd06bea37698e38924b7cb1978 100644 (file)
@@ -29,6 +29,7 @@
 
 #include <asm/system.h>
 #include <asm/irq.h>
+#include <asm/hardware.h>
 #include <asm/io.h>
 
 #define TX_BUFFERS 15
index c82b9cd1c9246c1b1660a059757fd0764912ee5b..78506911d6566d96efdadf6464b828ebd4d3d6cb 100644 (file)
@@ -151,13 +151,6 @@ struct au1000_private *au_macs[NUM_ETH_INTERFACES];
        SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
        SUPPORTED_Autoneg
 
-static char *phy_link[] = 
-{      "unknown", 
-       "10Base2", "10BaseT", 
-       "AUI",
-       "100BaseT", "100BaseTX", "100BaseFX"
-};
-
 int bcm_5201_init(struct net_device *dev, int phy_addr)
 {
        s16 data;
@@ -785,6 +778,7 @@ static struct mii_chip_info {
        {"Broadcom BCM5201 10/100 BaseT PHY",0x0040,0x6212, &bcm_5201_ops,0},
        {"Broadcom BCM5221 10/100 BaseT PHY",0x0040,0x61e4, &bcm_5201_ops,0},
        {"Broadcom BCM5222 10/100 BaseT PHY",0x0040,0x6322, &bcm_5201_ops,1},
+       {"NS DP83847 PHY", 0x2000, 0x5c30, &bcm_5201_ops ,0},
        {"AMD 79C901 HomePNA PHY",0x0000,0x35c8, &am79c901_ops,0},
        {"AMD 79C874 10/100 BaseT PHY",0x0022,0x561b, &am79c874_ops,0},
        {"LSI 80227 10/100 BaseT PHY",0x0016,0xf840, &lsi_80227_ops,0},
@@ -1045,7 +1039,7 @@ found:
 #endif
 
        if (aup->mii->chip_info == NULL) {
-               printk(KERN_ERR "%s: Au1x No MII transceivers found!\n",
+               printk(KERN_ERR "%s: Au1x No known MII transceivers found!\n",
                                dev->name);
                return -1;
        }
@@ -1546,6 +1540,9 @@ au1000_probe(u32 ioaddr, int irq, int port_num)
                printk(KERN_ERR "%s: out of memory\n", dev->name);
                goto err_out;
        }
+       aup->mii->next = NULL;
+       aup->mii->chip_info = NULL;
+       aup->mii->status = 0;
        aup->mii->mii_control_reg = 0;
        aup->mii->mii_data_reg = 0;
 
index 94939f570f78988e7c54c9dbcab685ca59385253..282ebd15f0115ae659b74a7b9e902631fa1640e6 100644 (file)
@@ -106,6 +106,29 @@ static int b44_poll(struct net_device *dev, int *budget);
 static void b44_poll_controller(struct net_device *dev);
 #endif
 
+static int dma_desc_align_mask;
+static int dma_desc_sync_size;
+
+static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
+                                                dma_addr_t dma_base,
+                                                unsigned long offset,
+                                                enum dma_data_direction dir)
+{
+       dma_sync_single_range_for_device(&pdev->dev, dma_base,
+                                        offset & dma_desc_align_mask,
+                                        dma_desc_sync_size, dir);
+}
+
+static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
+                                             dma_addr_t dma_base,
+                                             unsigned long offset,
+                                             enum dma_data_direction dir)
+{
+       dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
+                                     offset & dma_desc_align_mask,
+                                     dma_desc_sync_size, dir);
+}
+
 static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
 {
        return readl(bp->regs + reg);
@@ -668,6 +691,11 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
        dp->ctrl = cpu_to_le32(ctrl);
        dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);
 
+       if (bp->flags & B44_FLAG_RX_RING_HACK)
+               b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
+                                            dest_idx * sizeof(dp),
+                                            DMA_BIDIRECTIONAL);
+
        return RX_PKT_BUF_SZ;
 }
 
@@ -692,6 +720,11 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
        pci_unmap_addr_set(dest_map, mapping,
                           pci_unmap_addr(src_map, mapping));
 
+       if (bp->flags & B44_FLAG_RX_RING_HACK)
+               b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
+                                         src_idx * sizeof(src_desc),
+                                         DMA_BIDIRECTIONAL);
+
        ctrl = src_desc->ctrl;
        if (dest_idx == (B44_RX_RING_SIZE - 1))
                ctrl |= cpu_to_le32(DESC_CTRL_EOT);
@@ -700,8 +733,14 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 
        dest_desc->ctrl = ctrl;
        dest_desc->addr = src_desc->addr;
+
        src_map->skb = NULL;
 
+       if (bp->flags & B44_FLAG_RX_RING_HACK)
+               b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
+                                            dest_idx * sizeof(dest_desc),
+                                            DMA_BIDIRECTIONAL);
+
        pci_dma_sync_single_for_device(bp->pdev, src_desc->addr,
                                       RX_PKT_BUF_SZ,
                                       PCI_DMA_FROMDEVICE);
@@ -959,6 +998,11 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
        bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
        bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
 
+       if (bp->flags & B44_FLAG_TX_RING_HACK)
+               b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
+                                            entry * sizeof(bp->tx_ring[0]),
+                                            DMA_TO_DEVICE);
+
        entry = NEXT_TX(entry);
 
        bp->tx_prod = entry;
@@ -1064,6 +1108,16 @@ static void b44_init_rings(struct b44 *bp)
        memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
        memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
 
+       if (bp->flags & B44_FLAG_RX_RING_HACK)
+               dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
+                                          DMA_TABLE_BYTES,
+                                          PCI_DMA_BIDIRECTIONAL);
+
+       if (bp->flags & B44_FLAG_TX_RING_HACK)
+               dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
+                                          DMA_TABLE_BYTES,
+                                          PCI_DMA_TODEVICE);
+
        for (i = 0; i < bp->rx_pending; i++) {
                if (b44_alloc_rx_skb(bp, -1, i) < 0)
                        break;
@@ -1085,14 +1139,28 @@ static void b44_free_consistent(struct b44 *bp)
                bp->tx_buffers = NULL;
        }
        if (bp->rx_ring) {
-               pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
-                                   bp->rx_ring, bp->rx_ring_dma);
+               if (bp->flags & B44_FLAG_RX_RING_HACK) {
+                       dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
+                                        DMA_TABLE_BYTES,
+                                        DMA_BIDIRECTIONAL);
+                       kfree(bp->rx_ring);
+               } else
+                       pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
+                                           bp->rx_ring, bp->rx_ring_dma);
                bp->rx_ring = NULL;
+               bp->flags &= ~B44_FLAG_RX_RING_HACK;
        }
        if (bp->tx_ring) {
-               pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
-                                   bp->tx_ring, bp->tx_ring_dma);
+               if (bp->flags & B44_FLAG_TX_RING_HACK) {
+                       dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
+                                        DMA_TABLE_BYTES,
+                                        DMA_TO_DEVICE);
+                       kfree(bp->tx_ring);
+               } else
+                       pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
+                                           bp->tx_ring, bp->tx_ring_dma);
                bp->tx_ring = NULL;
+               bp->flags &= ~B44_FLAG_TX_RING_HACK;
        }
 }
 
@@ -1118,12 +1186,56 @@ static int b44_alloc_consistent(struct b44 *bp)
 
        size = DMA_TABLE_BYTES;
        bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
-       if (!bp->rx_ring)
-               goto out_err;
+       if (!bp->rx_ring) {
+               /* Allocation may have failed due to pci_alloc_consistent
+                  insisting on use of GFP_DMA, which is more restrictive
+                  than necessary...  */
+               struct dma_desc *rx_ring;
+               dma_addr_t rx_ring_dma;
+
+               if (!(rx_ring = (struct dma_desc *)kmalloc(size, GFP_KERNEL)))
+                       goto out_err;
+
+               memset(rx_ring, 0, size);
+               rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
+                                            DMA_TABLE_BYTES,
+                                            DMA_BIDIRECTIONAL);
+
+               if (rx_ring_dma + size > B44_DMA_MASK) {
+                       kfree(rx_ring);
+                       goto out_err;
+               }
+
+               bp->rx_ring = rx_ring;
+               bp->rx_ring_dma = rx_ring_dma;
+               bp->flags |= B44_FLAG_RX_RING_HACK;
+       }
 
        bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
-       if (!bp->tx_ring)
-               goto out_err;
+       if (!bp->tx_ring) {
+               /* Allocation may have failed due to pci_alloc_consistent
+                  insisting on use of GFP_DMA, which is more restrictive
+                  than necessary...  */
+               struct dma_desc *tx_ring;
+               dma_addr_t tx_ring_dma;
+
+               if (!(tx_ring = (struct dma_desc *)kmalloc(size, GFP_KERNEL)))
+                       goto out_err;
+
+               memset(tx_ring, 0, size);
+               tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
+                                            DMA_TABLE_BYTES,
+                                            DMA_TO_DEVICE);
+
+               if (tx_ring_dma + size > B44_DMA_MASK) {
+                       kfree(tx_ring);
+                       goto out_err;
+               }
+
+               bp->tx_ring = tx_ring;
+               bp->tx_ring_dma = tx_ring_dma;
+               bp->flags |= B44_FLAG_TX_RING_HACK;
+       }
 
        return 0;
 
@@ -1676,6 +1788,7 @@ static struct ethtool_ops b44_ethtool_ops = {
        .set_pauseparam         = b44_set_pauseparam,
        .get_msglevel           = b44_get_msglevel,
        .set_msglevel           = b44_set_msglevel,
+       .get_perm_addr          = ethtool_op_get_perm_addr,
 };
 
 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -1718,6 +1831,7 @@ static int __devinit b44_get_invariants(struct b44 *bp)
        bp->dev->dev_addr[3] = eeprom[80];
        bp->dev->dev_addr[4] = eeprom[83];
        bp->dev->dev_addr[5] = eeprom[82];
+       memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
 
        bp->phy_addr = eeprom[90] & 0x1f;
 
@@ -1971,6 +2085,12 @@ static struct pci_driver b44_driver = {
 
 static int __init b44_init(void)
 {
+       unsigned int dma_desc_align_size = dma_get_cache_alignment();
+
+       /* Set up parameters for syncing RX/TX DMA descriptors */
+       dma_desc_align_mask = ~(dma_desc_align_size - 1);
+       dma_desc_sync_size = max(dma_desc_align_size, sizeof(struct dma_desc));
+
        return pci_module_init(&b44_driver);
 }
 
index 11c40a2e71c70a416e75a6d9c4f3c69579c50ddc..593cb0ad4100c6c68d43346f17922061dc0d0759 100644 (file)
@@ -400,6 +400,8 @@ struct b44 {
 #define B44_FLAG_ADV_100HALF   0x04000000
 #define B44_FLAG_ADV_100FULL   0x08000000
 #define B44_FLAG_INTERNAL_PHY  0x10000000
+#define B44_FLAG_RX_RING_HACK  0x20000000
+#define B44_FLAG_TX_RING_HACK  0x40000000
 
        u32                     rx_offset;
 
index f264ff162979f5eec2efecae262f3a21fe399588..8032126fd5891845ecee2ba19de06a28f312ddbb 100644 (file)
@@ -4241,6 +4241,43 @@ out:
        return 0;
 }
 
+static void bond_activebackup_xmit_copy(struct sk_buff *skb,
+                                        struct bonding *bond,
+                                        struct slave *slave)
+{
+       struct sk_buff *skb2 = skb_copy(skb, GFP_ATOMIC);
+       struct ethhdr *eth_data;
+       u8 *hwaddr;
+       int res;
+
+       if (!skb2) {
+               printk(KERN_ERR DRV_NAME ": Error: "
+                      "bond_activebackup_xmit_copy(): skb_copy() failed\n");
+               return;
+       }
+
+       skb2->mac.raw = (unsigned char *)skb2->data;
+       eth_data = eth_hdr(skb2);
+
+       /* Pick an appropriate source MAC address
+        *      -- use slave's perm MAC addr, unless used by bond
+        *      -- otherwise, borrow active slave's perm MAC addr
+        *         since that will not be used
+        */
+       hwaddr = slave->perm_hwaddr;
+       if (!memcmp(eth_data->h_source, hwaddr, ETH_ALEN))
+               hwaddr = bond->curr_active_slave->perm_hwaddr;
+
+       /* Set source MAC address appropriately */
+       memcpy(eth_data->h_source, hwaddr, ETH_ALEN);
+
+       res = bond_dev_queue_xmit(bond, skb2, slave->dev);
+       if (res)
+               dev_kfree_skb(skb2);
+
+       return;
+}
+
 /*
  * in active-backup mode, we know that bond->curr_active_slave is always valid if
  * the bond has a usable interface.
@@ -4257,10 +4294,26 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
                goto out;
        }
 
-       if (bond->curr_active_slave) { /* one usable interface */
-               res = bond_dev_queue_xmit(bond, skb, bond->curr_active_slave->dev);
+       if (!bond->curr_active_slave)
+               goto out;
+
+       /* Xmit IGMP frames on all slaves to ensure rapid fail-over
+          for multicast traffic on snooping switches */
+       if (skb->protocol == __constant_htons(ETH_P_IP) &&
+           skb->nh.iph->protocol == IPPROTO_IGMP) {
+               struct slave *slave, *active_slave;
+               int i;
+
+               active_slave = bond->curr_active_slave;
+               bond_for_each_slave_from_to(bond, slave, i, active_slave->next,
+                                           active_slave->prev)
+                       if (IS_UP(slave->dev) &&
+                           (slave->link == BOND_LINK_UP))
+                               bond_activebackup_xmit_copy(skb, bond, slave);
        }
 
+       res = bond_dev_queue_xmit(bond, skb, bond->curr_active_slave->dev);
+
 out:
        if (res) {
                /* no suitable interface, frame not sent */
index 2e617424d3fbf631237d29b592a5c811e26976b3..50f43dbf31aed4c8313e72c3ef08729407be1a95 100644 (file)
@@ -489,7 +489,7 @@ static int cas_page_free(struct cas *cp, cas_page_t *page)
 /* local page allocation routines for the receive buffers. jumbo pages
  * require at least 8K contiguous and 8K aligned buffers.
  */
-static cas_page_t *cas_page_alloc(struct cas *cp, const int flags)
+static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
 {
        cas_page_t *page;
 
@@ -561,7 +561,7 @@ static void cas_spare_free(struct cas *cp)
 }
 
 /* replenish spares if needed */
-static void cas_spare_recover(struct cas *cp, const int flags)
+static void cas_spare_recover(struct cas *cp, const gfp_t flags)
 {
        struct list_head list, *elem, *tmp;
        int needed, i;
index a6078ad9b654ff0295901a885a72c730a4188c8b..bfdae10036ed869eb8dae346cba4ffc317b39045 100644 (file)
@@ -182,6 +182,10 @@ static unsigned int cs8900_irq_map[] = {IRQ_IXDP2X01_CS8900, 0, 0, 0};
 #define CIRRUS_DEFAULT_IRQ     VH_INTC_INT_NUM_CASCADED_INTERRUPT_1 /* Event inputs bank 1 - ID 35/bit 3 */
 static unsigned int netcard_portlist[] __initdata = {CIRRUS_DEFAULT_BASE, 0};
 static unsigned int cs8900_irq_map[] = {CIRRUS_DEFAULT_IRQ, 0, 0, 0};
+#elif defined(CONFIG_MACH_MP1000)
+#include <asm/arch/mp1000-seprom.h>
+static unsigned int netcard_portlist[] __initdata = {MP1000_EIO_BASE+0x300, 0};
+static unsigned int cs8900_irq_map[] = {IRQ_EINT3,0,0,0};
 #else
 static unsigned int netcard_portlist[] __initdata =
    { 0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0};
@@ -590,6 +594,10 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
                        cnt -= j;
                }
        } else
+#elif defined(CONFIG_MACH_MP1000)
+       if (1) {
+               memcpy(dev->dev_addr, get_eeprom_mac_address(), ETH_ALEN);
+       } else
 #endif
 
         if ((readreg(dev, PP_SelfST) & (EEPROM_OK | EEPROM_PRESENT)) == 
@@ -649,6 +657,10 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
        if (1) {
                printk(KERN_NOTICE "cs89x0: No EEPROM on HiCO.SH4\n");
        } else
+#elif defined(CONFIG_MACH_MP1000)
+       if (1) {
+               lp->force |= FORCE_RJ45;
+       } else
 #endif
        if ((readreg(dev, PP_SelfST) & EEPROM_PRESENT) == 0)
                printk(KERN_WARNING "cs89x0: No EEPROM, relying on command line....\n");
@@ -1231,7 +1243,7 @@ net_open(struct net_device *dev)
        else
 #endif
        {
-#if !defined(CONFIG_ARCH_IXDP2X01) && !defined(CONFIG_ARCH_PNX0105)
+#if !defined(CONFIG_ARCH_IXDP2X01) && !defined(CONFIG_ARCH_PNX0105) && !defined(CONFIG_MACH_MP1000)
                if (((1 << dev->irq) & lp->irq_map) == 0) {
                        printk(KERN_ERR "%s: IRQ %d is not in our map of allowable IRQs, which is %x\n",
                                dev->name, dev->irq, lp->irq_map);
index decea264f1214c615b557e2678f34ce6225b8643..f19d1ebe01837479a148d33463832953980c92c7 100644 (file)
@@ -16,7 +16,7 @@
 
 #include <linux/config.h>
 
-#if defined(CONFIG_ARCH_IXDP2X01) || defined(CONFIG_ARCH_PNX0105)
+#if defined(CONFIG_ARCH_IXDP2X01) || defined(CONFIG_ARCH_PNX0105) || defined (CONFIG_MACH_MP1000)
 /* IXDP2401/IXDP2801 uses dword-aligned register addressing */
 #define CS89x0_PORT(reg) ((reg) * 2)
 #else
index 521c83137bf67f61e275cacdd0553cc1af9feaa2..f130bdab3fd319f06a1fac0101737d5149b381f2 100644 (file)
@@ -5,7 +5,7 @@
  *
  *      adopted from sunlance.c by Richard van den Berg
  *
- *      Copyright (C) 2002, 2003  Maciej W. Rozycki
+ *      Copyright (C) 2002, 2003, 2005  Maciej W. Rozycki
  *
  *      additional sources:
  *      - PMAD-AA TURBOchannel Ethernet Module Functional Specification,
 #include <linux/string.h>
 
 #include <asm/addrspace.h>
+#include <asm/system.h>
+
 #include <asm/dec/interrupts.h>
 #include <asm/dec/ioasic.h>
 #include <asm/dec/ioasic_addrs.h>
 #include <asm/dec/kn01.h>
 #include <asm/dec/machtype.h>
+#include <asm/dec/system.h>
 #include <asm/dec/tc.h>
-#include <asm/system.h>
 
 static char version[] __devinitdata =
 "declance.c: v0.009 by Linux MIPS DECstation task force\n";
@@ -79,10 +81,6 @@ MODULE_LICENSE("GPL");
 #define PMAD_LANCE 2
 #define PMAX_LANCE 3
 
-#ifndef CONFIG_TC
-unsigned long system_base;
-unsigned long dmaptr;
-#endif
 
 #define LE_CSR0 0
 #define LE_CSR1 1
@@ -237,7 +235,7 @@ struct lance_init_block {
 /*
  * This works *only* for the ring descriptors
  */
-#define LANCE_ADDR(x) (PHYSADDR(x) >> 1)
+#define LANCE_ADDR(x) (CPHYSADDR(x) >> 1)
 
 struct lance_private {
        struct net_device *next;
@@ -697,12 +695,13 @@ out:
        spin_unlock(&lp->lock);
 }
 
-static void lance_dma_merr_int(const int irq, void *dev_id,
-                               struct pt_regs *regs)
+static irqreturn_t lance_dma_merr_int(const int irq, void *dev_id,
+                                     struct pt_regs *regs)
 {
        struct net_device *dev = (struct net_device *) dev_id;
 
        printk("%s: DMA error\n", dev->name);
+       return IRQ_HANDLED;
 }
 
 static irqreturn_t
@@ -1026,10 +1025,6 @@ static int __init dec_lance_init(const int type, const int slot)
        unsigned long esar_base;
        unsigned char *esar;
 
-#ifndef CONFIG_TC
-       system_base = KN01_LANCE_BASE;
-#endif
-
        if (dec_lance_debug && version_printed++ == 0)
                printk(version);
 
@@ -1062,16 +1057,16 @@ static int __init dec_lance_init(const int type, const int slot)
        switch (type) {
 #ifdef CONFIG_TC
        case ASIC_LANCE:
-               dev->base_addr = system_base + IOASIC_LANCE;
+               dev->base_addr = CKSEG1ADDR(dec_kn_slot_base + IOASIC_LANCE);
 
                /* buffer space for the on-board LANCE shared memory */
                /*
                 * FIXME: ugly hack!
                 */
-               dev->mem_start = KSEG1ADDR(0x00020000);
+               dev->mem_start = CKSEG1ADDR(0x00020000);
                dev->mem_end = dev->mem_start + 0x00020000;
                dev->irq = dec_interrupt[DEC_IRQ_LANCE];
-               esar_base = system_base + IOASIC_ESAR;
+               esar_base = CKSEG1ADDR(dec_kn_slot_base + IOASIC_ESAR);
 
                /* Workaround crash with booting KN04 2.1k from Disk */
                memset((void *)dev->mem_start, 0,
@@ -1101,14 +1096,14 @@ static int __init dec_lance_init(const int type, const int slot)
                /* Setup I/O ASIC LANCE DMA.  */
                lp->dma_irq = dec_interrupt[DEC_IRQ_LANCE_MERR];
                ioasic_write(IO_REG_LANCE_DMA_P,
-                            PHYSADDR(dev->mem_start) << 3);
+                            CPHYSADDR(dev->mem_start) << 3);
 
                break;
 
        case PMAD_LANCE:
                claim_tc_card(slot);
 
-               dev->mem_start = get_tc_base_addr(slot);
+               dev->mem_start = CKSEG1ADDR(get_tc_base_addr(slot));
                dev->base_addr = dev->mem_start + 0x100000;
                dev->irq = get_tc_irq_nr(slot);
                esar_base = dev->mem_start + 0x1c0002;
@@ -1137,9 +1132,9 @@ static int __init dec_lance_init(const int type, const int slot)
 
        case PMAX_LANCE:
                dev->irq = dec_interrupt[DEC_IRQ_LANCE];
-               dev->base_addr = KN01_LANCE_BASE;
-               dev->mem_start = KN01_LANCE_BASE + 0x01000000;
-               esar_base = KN01_RTC_BASE + 1;
+               dev->base_addr = CKSEG1ADDR(KN01_SLOT_BASE + KN01_LANCE);
+               dev->mem_start = CKSEG1ADDR(KN01_SLOT_BASE + KN01_LANCE_MEM);
+               esar_base = CKSEG1ADDR(KN01_SLOT_BASE + KN01_ESAR + 1);
                lp->dma_irq = -1;
 
                /*
index 40887f09b681ac8253e9faaa54c2c41aadb5c480..eb169a8e877316022c272bee1972bd3f02d92901 100644 (file)
@@ -2201,6 +2201,7 @@ static struct ethtool_ops e100_ethtool_ops = {
        .phys_id                = e100_phys_id,
        .get_stats_count        = e100_get_stats_count,
        .get_ethtool_stats      = e100_get_ethtool_stats,
+       .get_perm_addr          = ethtool_op_get_perm_addr,
 };
 
 static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
@@ -2351,7 +2352,8 @@ static int __devinit e100_probe(struct pci_dev *pdev,
        e100_phy_init(nic);
 
        memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
-       if(!is_valid_ether_addr(netdev->dev_addr)) {
+       memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
+       if(!is_valid_ether_addr(netdev->perm_addr)) {
                DPRINTK(PROBE, ERR, "Invalid MAC address from "
                        "EEPROM, aborting.\n");
                err = -EAGAIN;
index 092757bc721f0ade1821465d73bedba39348ee89..3f653a93e1bc78dc9ab7e730bdc9490cac4f5650 100644 (file)
 #include <linux/mii.h>
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>
+#ifdef CONFIG_E1000_MQ
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#endif
 
 #define BAR_0          0
 #define BAR_1          1
@@ -165,10 +169,33 @@ struct e1000_buffer {
        uint16_t next_to_watch;
 };
 
-struct e1000_ps_page { struct page *ps_page[MAX_PS_BUFFERS]; };
-struct e1000_ps_page_dma { uint64_t ps_page_dma[MAX_PS_BUFFERS]; };
+struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; };
+struct e1000_ps_page_dma { uint64_t ps_page_dma[PS_PAGE_BUFFERS]; };
+
+struct e1000_tx_ring {
+       /* pointer to the descriptor ring memory */
+       void *desc;
+       /* physical address of the descriptor ring */
+       dma_addr_t dma;
+       /* length of descriptor ring in bytes */
+       unsigned int size;
+       /* number of descriptors in the ring */
+       unsigned int count;
+       /* next descriptor to associate a buffer with */
+       unsigned int next_to_use;
+       /* next descriptor to check for DD status bit */
+       unsigned int next_to_clean;
+       /* array of buffer information structs */
+       struct e1000_buffer *buffer_info;
+
+       struct e1000_buffer previous_buffer_info;
+       spinlock_t tx_lock;
+       uint16_t tdh;
+       uint16_t tdt;
+       uint64_t pkt;
+};
 
-struct e1000_desc_ring {
+struct e1000_rx_ring {
        /* pointer to the descriptor ring memory */
        void *desc;
        /* physical address of the descriptor ring */
@@ -186,6 +213,10 @@ struct e1000_desc_ring {
        /* arrays of page information for packet split */
        struct e1000_ps_page *ps_page;
        struct e1000_ps_page_dma *ps_page_dma;
+
+       uint16_t rdh;
+       uint16_t rdt;
+       uint64_t pkt;
 };
 
 #define E1000_DESC_UNUSED(R) \
@@ -227,9 +258,10 @@ struct e1000_adapter {
        unsigned long led_status;
 
        /* TX */
-       struct e1000_desc_ring tx_ring;
-       struct e1000_buffer previous_buffer_info;
-       spinlock_t tx_lock;
+       struct e1000_tx_ring *tx_ring;      /* One per active queue */
+#ifdef CONFIG_E1000_MQ
+       struct e1000_tx_ring **cpu_tx_ring; /* per-cpu */
+#endif
        uint32_t txd_cmd;
        uint32_t tx_int_delay;
        uint32_t tx_abs_int_delay;
@@ -246,19 +278,33 @@ struct e1000_adapter {
 
        /* RX */
 #ifdef CONFIG_E1000_NAPI
-       boolean_t (*clean_rx) (struct e1000_adapter *adapter, int *work_done,
-                         int work_to_do);
+       boolean_t (*clean_rx) (struct e1000_adapter *adapter,
+                              struct e1000_rx_ring *rx_ring,
+                              int *work_done, int work_to_do);
 #else
-       boolean_t (*clean_rx) (struct e1000_adapter *adapter);
+       boolean_t (*clean_rx) (struct e1000_adapter *adapter,
+                              struct e1000_rx_ring *rx_ring);
 #endif
-       void (*alloc_rx_buf) (struct e1000_adapter *adapter);
-       struct e1000_desc_ring rx_ring;
+       void (*alloc_rx_buf) (struct e1000_adapter *adapter,
+                             struct e1000_rx_ring *rx_ring);
+       struct e1000_rx_ring *rx_ring;      /* One per active queue */
+#ifdef CONFIG_E1000_NAPI
+       struct net_device *polling_netdev;  /* One per active queue */
+#endif
+#ifdef CONFIG_E1000_MQ
+       struct net_device **cpu_netdev;     /* per-cpu */
+       struct call_async_data_struct rx_sched_call_data;
+       int cpu_for_queue[4];
+#endif
+       int num_queues;
+
        uint64_t hw_csum_err;
        uint64_t hw_csum_good;
+       uint64_t rx_hdr_split;
        uint32_t rx_int_delay;
        uint32_t rx_abs_int_delay;
        boolean_t rx_csum;
-       boolean_t rx_ps;
+       unsigned int rx_ps_pages;
        uint32_t gorcl;
        uint64_t gorcl_old;
        uint16_t rx_ps_bsize0;
@@ -278,8 +324,8 @@ struct e1000_adapter {
        struct e1000_phy_stats phy_stats;
 
        uint32_t test_icr;
-       struct e1000_desc_ring test_tx_ring;
-       struct e1000_desc_ring test_rx_ring;
+       struct e1000_tx_ring test_tx_ring;
+       struct e1000_rx_ring test_rx_ring;
 
 
        int msg_enable;
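The header changes above split the old shared e1000_desc_ring into separate per-queue TX and RX ring structures and turn adapter->tx_ring/rx_ring into arrays of num_queues entries, with optional per-CPU TX ring pointers under CONFIG_E1000_MQ. A hedged sketch of how such an array is allocated, mirroring the kmalloc/memset idiom visible in the e1000_ethtool.c hunk below; the function name is made up:

    /* Illustrative allocation of the per-queue TX ring array. */
    static int example_alloc_tx_queues(struct e1000_adapter *adapter)
    {
            size_t size = sizeof(struct e1000_tx_ring) * adapter->num_queues;

            adapter->tx_ring = kmalloc(size, GFP_KERNEL);
            if (!adapter->tx_ring)
                    return -ENOMEM;
            memset(adapter->tx_ring, 0, size);
            return 0;
    }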
index f133ff0b0b947c6ad6a76635ff5f159f6cad83bd..6b9acc7f94a32e12770fc4a3946d0876c8a5af11 100644 (file)
@@ -39,10 +39,10 @@ extern int e1000_up(struct e1000_adapter *adapter);
 extern void e1000_down(struct e1000_adapter *adapter);
 extern void e1000_reset(struct e1000_adapter *adapter);
 extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
-extern int e1000_setup_rx_resources(struct e1000_adapter *adapter);
-extern int e1000_setup_tx_resources(struct e1000_adapter *adapter);
-extern void e1000_free_rx_resources(struct e1000_adapter *adapter);
-extern void e1000_free_tx_resources(struct e1000_adapter *adapter);
+extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
+extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
+extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
+extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
 extern void e1000_update_stats(struct e1000_adapter *adapter);
 
 struct e1000_stats {
@@ -91,7 +91,8 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
        { "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) },
        { "rx_long_byte_count", E1000_STAT(stats.gorcl) },
        { "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
-       { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) }
+       { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
+       { "rx_header_split", E1000_STAT(rx_hdr_split) },
 };
 #define E1000_STATS_LEN        \
        sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats)
@@ -546,8 +547,10 @@ e1000_set_eeprom(struct net_device *netdev,
        ret_val = e1000_write_eeprom(hw, first_word,
                                     last_word - first_word + 1, eeprom_buff);
 
-       /* Update the checksum over the first part of the EEPROM if needed */
-       if((ret_val == 0) && first_word <= EEPROM_CHECKSUM_REG)
+       /* Update the checksum over the first part of the EEPROM if needed 
+        * and flush shadow RAM for 82573 controllers */
+       if((ret_val == 0) && ((first_word <= EEPROM_CHECKSUM_REG) || 
+                               (hw->mac_type == e1000_82573)))
                e1000_update_eeprom_checksum(hw);
 
        kfree(eeprom_buff);
@@ -576,8 +579,8 @@ e1000_get_ringparam(struct net_device *netdev,
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        e1000_mac_type mac_type = adapter->hw.mac_type;
-       struct e1000_desc_ring *txdr = &adapter->tx_ring;
-       struct e1000_desc_ring *rxdr = &adapter->rx_ring;
+       struct e1000_tx_ring *txdr = adapter->tx_ring;
+       struct e1000_rx_ring *rxdr = adapter->rx_ring;
 
        ring->rx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_RXD :
                E1000_MAX_82544_RXD;
@@ -597,20 +600,40 @@ e1000_set_ringparam(struct net_device *netdev,
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        e1000_mac_type mac_type = adapter->hw.mac_type;
-       struct e1000_desc_ring *txdr = &adapter->tx_ring;
-       struct e1000_desc_ring *rxdr = &adapter->rx_ring;
-       struct e1000_desc_ring tx_old, tx_new, rx_old, rx_new;
-       int err;
+       struct e1000_tx_ring *txdr, *tx_old, *tx_new;
+       struct e1000_rx_ring *rxdr, *rx_old, *rx_new;
+       int i, err, tx_ring_size, rx_ring_size;
+
+       tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_queues;
+       rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_queues;
+
+       if (netif_running(adapter->netdev))
+               e1000_down(adapter);
 
        tx_old = adapter->tx_ring;
        rx_old = adapter->rx_ring;
 
+       adapter->tx_ring = kmalloc(tx_ring_size, GFP_KERNEL);
+       if (!adapter->tx_ring) {
+               err = -ENOMEM;
+               goto err_setup_rx;
+       }
+       memset(adapter->tx_ring, 0, tx_ring_size);
+
+       adapter->rx_ring = kmalloc(rx_ring_size, GFP_KERNEL);
+       if (!adapter->rx_ring) {
+               kfree(adapter->tx_ring);
+               err = -ENOMEM;
+               goto err_setup_rx;
+       }
+       memset(adapter->rx_ring, 0, rx_ring_size);
+
+       txdr = adapter->tx_ring;
+       rxdr = adapter->rx_ring;
+
        if((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
                return -EINVAL;
 
-       if(netif_running(adapter->netdev))
-               e1000_down(adapter);
-
        rxdr->count = max(ring->rx_pending,(uint32_t)E1000_MIN_RXD);
        rxdr->count = min(rxdr->count,(uint32_t)(mac_type < e1000_82544 ?
                E1000_MAX_RXD : E1000_MAX_82544_RXD));
@@ -621,11 +644,16 @@ e1000_set_ringparam(struct net_device *netdev,
                E1000_MAX_TXD : E1000_MAX_82544_TXD));
        E1000_ROUNDUP(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE); 
 
+       for (i = 0; i < adapter->num_queues; i++) {
+               txdr[i].count = txdr->count;
+               rxdr[i].count = rxdr->count;
+       }
+
        if(netif_running(adapter->netdev)) {
                /* Try to get new resources before deleting old */
-               if((err = e1000_setup_rx_resources(adapter)))
+               if ((err = e1000_setup_all_rx_resources(adapter)))
                        goto err_setup_rx;
-               if((err = e1000_setup_tx_resources(adapter)))
+               if ((err = e1000_setup_all_tx_resources(adapter)))
                        goto err_setup_tx;
 
                /* save the new, restore the old in order to free it,
@@ -635,8 +663,10 @@ e1000_set_ringparam(struct net_device *netdev,
                tx_new = adapter->tx_ring;
                adapter->rx_ring = rx_old;
                adapter->tx_ring = tx_old;
-               e1000_free_rx_resources(adapter);
-               e1000_free_tx_resources(adapter);
+               e1000_free_all_rx_resources(adapter);
+               e1000_free_all_tx_resources(adapter);
+               kfree(tx_old);
+               kfree(rx_old);
                adapter->rx_ring = rx_new;
                adapter->tx_ring = tx_new;
                if((err = e1000_up(adapter)))
@@ -645,7 +675,7 @@ e1000_set_ringparam(struct net_device *netdev,
 
        return 0;
 err_setup_tx:
-       e1000_free_rx_resources(adapter);
+       e1000_free_all_rx_resources(adapter);
 err_setup_rx:
        adapter->rx_ring = rx_old;
        adapter->tx_ring = tx_old;
@@ -696,6 +726,11 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
         * Some bits that get toggled are ignored.
         */
         switch (adapter->hw.mac_type) {
+       /* there are several bits on newer hardware that are r/w */
+       case e1000_82571:
+       case e1000_82572:
+               toggle = 0x7FFFF3FF;
+               break;
        case e1000_82573:
                toggle = 0x7FFFF033;
                break;
@@ -898,8 +933,8 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
 static void
 e1000_free_desc_rings(struct e1000_adapter *adapter)
 {
-       struct e1000_desc_ring *txdr = &adapter->test_tx_ring;
-       struct e1000_desc_ring *rxdr = &adapter->test_rx_ring;
+       struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
+       struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
        struct pci_dev *pdev = adapter->pdev;
        int i;
 
@@ -941,8 +976,8 @@ e1000_free_desc_rings(struct e1000_adapter *adapter)
 static int
 e1000_setup_desc_rings(struct e1000_adapter *adapter)
 {
-       struct e1000_desc_ring *txdr = &adapter->test_tx_ring;
-       struct e1000_desc_ring *rxdr = &adapter->test_rx_ring;
+       struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
+       struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
        struct pci_dev *pdev = adapter->pdev;
        uint32_t rctl;
        int size, i, ret_val;
@@ -1245,6 +1280,8 @@ e1000_set_phy_loopback(struct e1000_adapter *adapter)
        case e1000_82541_rev_2:
        case e1000_82547:
        case e1000_82547_rev_2:
+       case e1000_82571:
+       case e1000_82572:
        case e1000_82573:
                return e1000_integrated_phy_loopback(adapter);
                break;
@@ -1340,8 +1377,8 @@ e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
 static int
 e1000_run_loopback_test(struct e1000_adapter *adapter)
 {
-       struct e1000_desc_ring *txdr = &adapter->test_tx_ring;
-       struct e1000_desc_ring *rxdr = &adapter->test_rx_ring;
+       struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
+       struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
        struct pci_dev *pdev = adapter->pdev;
        int i, j, k, l, lc, good_cnt, ret_val=0;
        unsigned long time;
@@ -1509,6 +1546,7 @@ e1000_diag_test(struct net_device *netdev,
                data[2] = 0;
                data[3] = 0;
        }
+       msleep_interruptible(4 * 1000);
 }
 
 static void
@@ -1625,7 +1663,7 @@ e1000_phys_id(struct net_device *netdev, uint32_t data)
        if(!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ))
                data = (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ);
 
-       if(adapter->hw.mac_type < e1000_82573) {
+       if(adapter->hw.mac_type < e1000_82571) {
                if(!adapter->blink_timer.function) {
                        init_timer(&adapter->blink_timer);
                        adapter->blink_timer.function = e1000_led_blink_callback;
@@ -1739,6 +1777,7 @@ struct ethtool_ops e1000_ethtool_ops = {
        .phys_id                = e1000_phys_id,
        .get_stats_count        = e1000_get_stats_count,
        .get_ethtool_stats      = e1000_get_ethtool_stats,
+       .get_perm_addr          = ethtool_op_get_perm_addr,
 };
 
 void e1000_set_ethtool_ops(struct net_device *netdev)
index 045f5426ab9a68c6af41690f450839d77dca0110..8fc876da43b43fafb5c0f6f7455856da013b68f0 100644 (file)
@@ -83,14 +83,14 @@ uint16_t e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] =
 
 static const
 uint16_t e1000_igp_2_cable_length_table[IGP02E1000_AGC_LENGTH_TABLE_SIZE] =
-    { 8, 13, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43,
-      22, 24, 27, 30, 32, 35, 37, 40, 42, 44, 47, 49, 51, 54, 56, 58,
-      32, 35, 38, 41, 44, 47, 50, 53, 55, 58, 61, 63, 66, 69, 71, 74,
-      43, 47, 51, 54, 58, 61, 64, 67, 71, 74, 77, 80, 82, 85, 88, 90,
-      57, 62, 66, 70, 74, 77, 81, 85, 88, 91, 94, 97, 100, 103, 106, 108,
-      73, 78, 82, 87, 91, 95, 98, 102, 105, 109, 112, 114, 117, 119, 122, 124,
-      91, 96, 101, 105, 109, 113, 116, 119, 122, 125, 127, 128, 128, 128, 128, 128,
-      108, 113, 117, 121, 124, 127, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128};
+    { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
+      0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41,
+      6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61,
+      21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82,
+      40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104,
+      60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
+      83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
+      104, 109, 114, 118, 121, 124};
 
 
 /******************************************************************************
@@ -286,7 +286,6 @@ e1000_set_mac_type(struct e1000_hw *hw)
     case E1000_DEV_ID_82546GB_FIBER:
     case E1000_DEV_ID_82546GB_SERDES:
     case E1000_DEV_ID_82546GB_PCIE:
-    case E1000_DEV_ID_82546GB_QUAD_COPPER:
         hw->mac_type = e1000_82546_rev_3;
         break;
     case E1000_DEV_ID_82541EI:
@@ -305,8 +304,19 @@ e1000_set_mac_type(struct e1000_hw *hw)
     case E1000_DEV_ID_82547GI:
         hw->mac_type = e1000_82547_rev_2;
         break;
+    case E1000_DEV_ID_82571EB_COPPER:
+    case E1000_DEV_ID_82571EB_FIBER:
+    case E1000_DEV_ID_82571EB_SERDES:
+            hw->mac_type = e1000_82571;
+        break;
+    case E1000_DEV_ID_82572EI_COPPER:
+    case E1000_DEV_ID_82572EI_FIBER:
+    case E1000_DEV_ID_82572EI_SERDES:
+        hw->mac_type = e1000_82572;
+        break;
     case E1000_DEV_ID_82573E:
     case E1000_DEV_ID_82573E_IAMT:
+    case E1000_DEV_ID_82573L:
         hw->mac_type = e1000_82573;
         break;
     default:
@@ -315,6 +325,8 @@ e1000_set_mac_type(struct e1000_hw *hw)
     }
 
     switch(hw->mac_type) {
+    case e1000_82571:
+    case e1000_82572:
     case e1000_82573:
         hw->eeprom_semaphore_present = TRUE;
         /* fall through */
@@ -351,6 +363,8 @@ e1000_set_media_type(struct e1000_hw *hw)
     switch (hw->device_id) {
     case E1000_DEV_ID_82545GM_SERDES:
     case E1000_DEV_ID_82546GB_SERDES:
+    case E1000_DEV_ID_82571EB_SERDES:
+    case E1000_DEV_ID_82572EI_SERDES:
         hw->media_type = e1000_media_type_internal_serdes;
         break;
     default:
@@ -523,6 +537,8 @@ e1000_reset_hw(struct e1000_hw *hw)
             E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
             E1000_WRITE_FLUSH(hw);
             /* fall through */
+        case e1000_82571:
+        case e1000_82572:
             ret_val = e1000_get_auto_rd_done(hw);
             if(ret_val)
                 /* We don't want to continue accessing MAC registers. */
@@ -683,6 +699,9 @@ e1000_init_hw(struct e1000_hw *hw)
         switch (hw->mac_type) {
         default:
             break;
+        case e1000_82571:
+        case e1000_82572:
+            ctrl |= (1 << 22);
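+            /* fall through: 82571/82572 also want COUNT_DESC set */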
         case e1000_82573:
             ctrl |= E1000_TXDCTL_COUNT_DESC;
             break;
@@ -694,6 +713,26 @@ e1000_init_hw(struct e1000_hw *hw)
         e1000_enable_tx_pkt_filtering(hw); 
     }
 
+    switch (hw->mac_type) {
+    default:
+        break;
+    case e1000_82571:
+    case e1000_82572:
+        ctrl = E1000_READ_REG(hw, TXDCTL1);
+        ctrl &= ~E1000_TXDCTL_WTHRESH;
+        ctrl |= E1000_TXDCTL_COUNT_DESC | E1000_TXDCTL_FULL_TX_DESC_WB;
+        ctrl |= (1 << 22);
+        E1000_WRITE_REG(hw, TXDCTL1, ctrl);
+        break;
+    }
+
+    if (hw->mac_type == e1000_82573) {
+        uint32_t gcr = E1000_READ_REG(hw, GCR);
+        gcr |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
+        E1000_WRITE_REG(hw, GCR, gcr);
+    }
 
     /* Clear all of the statistics registers (clear on read).  It is
      * important that we do this after we have tried to establish link
@@ -878,6 +917,14 @@ e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
 
     DEBUGFUNC("e1000_setup_fiber_serdes_link");
 
+    /* On 82571 and 82572 Fiber connections, SerDes loopback mode persists
+     * until explicitly turned off or a power cycle is performed.  A read to
+     * the register does not indicate its status.  Therefore, we ensure
+     * loopback mode is disabled during initialization.
+     */
+    if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572)
+        E1000_WRITE_REG(hw, SCTL, E1000_DISABLE_SERDES_LOOPBACK);
+
     /* On adapters with a MAC newer than 82544, SW Defineable pin 1 will be
      * set when the optics detect a signal. On older adapters, it will be
      * cleared when there is a signal.  This applies to fiber media only.
@@ -2943,6 +2990,8 @@ e1000_phy_reset(struct e1000_hw *hw)
 
     switch (hw->mac_type) {
     case e1000_82541_rev_2:
+    case e1000_82571:
+    case e1000_82572:
         ret_val = e1000_phy_hw_reset(hw);
         if(ret_val)
             return ret_val;
@@ -2981,6 +3030,16 @@ e1000_detect_gig_phy(struct e1000_hw *hw)
 
     DEBUGFUNC("e1000_detect_gig_phy");
 
+    /* The 82571 firmware may still be configuring the PHY.  In this
+     * case, we cannot access the PHY until the configuration is done.  So
+     * we explicitly set the PHY values. */
+    if(hw->mac_type == e1000_82571 ||
+       hw->mac_type == e1000_82572) {
+        hw->phy_id = IGP01E1000_I_PHY_ID;
+        hw->phy_type = e1000_phy_igp_2;
+        return E1000_SUCCESS;
+    }
+
     /* Read the PHY ID Registers to identify which PHY is onboard. */
     ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high);
     if(ret_val)
@@ -3334,6 +3393,21 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
         eeprom->use_eerd = FALSE;
         eeprom->use_eewr = FALSE;
         break;
+    case e1000_82571:
+    case e1000_82572:
+        eeprom->type = e1000_eeprom_spi;
+        eeprom->opcode_bits = 8;
+        eeprom->delay_usec = 1;
+        if (eecd & E1000_EECD_ADDR_BITS) {
+            eeprom->page_size = 32;
+            eeprom->address_bits = 16;
+        } else {
+            eeprom->page_size = 8;
+            eeprom->address_bits = 8;
+        }
+        eeprom->use_eerd = FALSE;
+        eeprom->use_eewr = FALSE;
+        break;
     case e1000_82573:
         eeprom->type = e1000_eeprom_spi;
         eeprom->opcode_bits = 8;
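
The SPI geometry for the 82571/82572 EEPROM is picked from the EECD size strap: with the address-bits strap set the part is treated as a 16-bit-address, 32-byte-page device, otherwise as 8/8. A standalone sketch of the same selection (the 0x0400 strap mask is assumed here, standing in for E1000_EECD_ADDR_BITS):

#include <stdint.h>
#include <stdio.h>

#define EECD_ADDR_BITS 0x00000400u  /* assumed stand-in for E1000_EECD_ADDR_BITS */

struct spi_geometry { uint16_t page_size, address_bits; };

/* Mirrors the patch's SPI parameter selection for 82571/82572. */
static struct spi_geometry spi_params(uint32_t eecd)
{
    struct spi_geometry g;
    if (eecd & EECD_ADDR_BITS) {
        g.page_size = 32;
        g.address_bits = 16;
    } else {
        g.page_size = 8;
        g.address_bits = 8;
    }
    return g;
}

int main(void)
{
    struct spi_geometry g = spi_params(EECD_ADDR_BITS);  /* strap set */
    printf("page=%u addr_bits=%u\n", g.page_size, g.address_bits);
    return 0;
}
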
@@ -3543,25 +3617,26 @@ e1000_acquire_eeprom(struct e1000_hw *hw)
     eecd = E1000_READ_REG(hw, EECD);
 
     if (hw->mac_type != e1000_82573) {
-    /* Request EEPROM Access */
-    if(hw->mac_type > e1000_82544) {
-        eecd |= E1000_EECD_REQ;
-        E1000_WRITE_REG(hw, EECD, eecd);
-        eecd = E1000_READ_REG(hw, EECD);
-        while((!(eecd & E1000_EECD_GNT)) &&
-              (i < E1000_EEPROM_GRANT_ATTEMPTS)) {
-            i++;
-            udelay(5);
-            eecd = E1000_READ_REG(hw, EECD);
-        }
-        if(!(eecd & E1000_EECD_GNT)) {
-            eecd &= ~E1000_EECD_REQ;
+        /* Request EEPROM Access */
+        if(hw->mac_type > e1000_82544) {
+            eecd |= E1000_EECD_REQ;
             E1000_WRITE_REG(hw, EECD, eecd);
-            DEBUGOUT("Could not acquire EEPROM grant\n");
-            return -E1000_ERR_EEPROM;
+            eecd = E1000_READ_REG(hw, EECD);
+            while((!(eecd & E1000_EECD_GNT)) &&
+                  (i < E1000_EEPROM_GRANT_ATTEMPTS)) {
+                i++;
+                udelay(5);
+                eecd = E1000_READ_REG(hw, EECD);
+            }
+            if(!(eecd & E1000_EECD_GNT)) {
+                eecd &= ~E1000_EECD_REQ;
+                E1000_WRITE_REG(hw, EECD, eecd);
+                DEBUGOUT("Could not acquire EEPROM grant\n");
+                e1000_put_hw_eeprom_semaphore(hw);
+                return -E1000_ERR_EEPROM;
+            }
         }
     }
-    }
 
     /* Setup EEPROM for Read/Write */
 
@@ -4064,7 +4139,7 @@ e1000_write_eeprom(struct e1000_hw *hw,
         return -E1000_ERR_EEPROM;
     }
 
-    /* 82573 reads only through eerd */
+    /* 82573 writes only through eewr */
     if(eeprom->use_eewr == TRUE)
         return e1000_write_eeprom_eewr(hw, offset, words, data);
 
@@ -4353,9 +4428,16 @@ e1000_read_mac_addr(struct e1000_hw * hw)
         hw->perm_mac_addr[i] = (uint8_t) (eeprom_data & 0x00FF);
         hw->perm_mac_addr[i+1] = (uint8_t) (eeprom_data >> 8);
     }
-    if(((hw->mac_type == e1000_82546) || (hw->mac_type == e1000_82546_rev_3)) &&
-       (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1))
+    switch (hw->mac_type) {
+    default:
+        break;
+    case e1000_82546:
+    case e1000_82546_rev_3:
+    case e1000_82571:
+        if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
             hw->perm_mac_addr[5] ^= 0x01;
+        break;
+    }
 
     for(i = 0; i < NODE_ADDRESS_SIZE; i++)
         hw->mac_addr[i] = hw->perm_mac_addr[i];
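
Dual-port parts carry only the port-0 address in the EEPROM; when STATUS reports function 1 the driver derives the second port's MAC by flipping bit 0 of the last octet, and the hunk above extends that list to the 82571. A small standalone sketch of the derivation (the address is an arbitrary example):

#include <stdint.h>
#include <stdio.h>

/* Derive the second port's MAC from the port-0 (EEPROM) address by
 * toggling bit 0 of the last octet, as done for FUNC_1 above. */
static void second_port_mac(const uint8_t base[6], uint8_t out[6])
{
    int i;

    for (i = 0; i < 6; i++)
        out[i] = base[i];
    out[5] ^= 0x01;
}

int main(void)
{
    uint8_t port0[6] = { 0x00, 0x1B, 0x21, 0x12, 0x34, 0x56 };  /* example */
    uint8_t port1[6];

    second_port_mac(port0, port1);
    printf("port 1 address ends in %02X\n", (unsigned)port1[5]);  /* 57 */
    return 0;
}
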
@@ -4385,6 +4467,12 @@ e1000_init_rx_addrs(struct e1000_hw *hw)
     e1000_rar_set(hw, hw->mac_addr, 0);
 
     rar_num = E1000_RAR_ENTRIES;
+
+    /* Reserve a spot for the Locally Administered Address to work around
+     * an 82571 issue in which a reset on one port will reload the MAC on
+     * the other port. */
+    if ((hw->mac_type == e1000_82571) && (hw->laa_is_present == TRUE))
+        rar_num -= 1;
     /* Zero out the other 15 receive addresses. */
     DEBUGOUT("Clearing RAR[1-15]\n");
     for(i = 1; i < rar_num; i++) {
@@ -4427,6 +4515,12 @@ e1000_mc_addr_list_update(struct e1000_hw *hw,
     /* Clear RAR[1-15] */
     DEBUGOUT(" Clearing RAR[1-15]\n");
     num_rar_entry = E1000_RAR_ENTRIES;
+    /* Reserve a spot for the Locally Administered Address to work around
+     * an 82571 issue in which a reset on one port will reload the MAC on
+     * the other port. */
+    if ((hw->mac_type == e1000_82571) && (hw->laa_is_present == TRUE))
+        num_rar_entry -= 1;
+
     for(i = rar_used_count; i < num_rar_entry; i++) {
         E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
         E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
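
Both RAR loops above now leave one receive-address slot untouched when a Locally Administered Address is in use, so a reset of the other 82571 port cannot overwrite it. A tiny sketch of the resulting slot accounting (the entry count is a stand-in, not necessarily E1000_RAR_ENTRIES):

#include <stdio.h>

#define RAR_ENTRIES 16  /* stand-in value, see E1000_RAR_ENTRIES */

/* Number of RAR slots the driver may freely clear or reuse: on 82571 the
 * last slot is reserved for the Locally Administered Address. */
static int usable_rar_entries(int is_82571, int laa_is_present)
{
    int n = RAR_ENTRIES;

    if (is_82571 && laa_is_present)
        n -= 1;
    return n;
}

int main(void)
{
    printf("%d usable entries\n", usable_rar_entries(1, 1));  /* 15 */
    return 0;
}
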
@@ -4984,7 +5078,6 @@ e1000_clear_hw_cntrs(struct e1000_hw *hw)
     temp = E1000_READ_REG(hw, ICTXQEC);
     temp = E1000_READ_REG(hw, ICTXQMTC);
     temp = E1000_READ_REG(hw, ICRXDMTC);
-
 }
 
 /******************************************************************************
@@ -5151,6 +5244,8 @@ e1000_get_bus_info(struct e1000_hw *hw)
         hw->bus_speed = e1000_bus_speed_unknown;
         hw->bus_width = e1000_bus_width_unknown;
         break;
+    case e1000_82571:
+    case e1000_82572:
     case e1000_82573:
         hw->bus_type = e1000_bus_type_pci_express;
         hw->bus_speed = e1000_bus_speed_2500;
@@ -5250,6 +5345,7 @@ e1000_get_cable_length(struct e1000_hw *hw,
     int32_t ret_val;
     uint16_t agc_value = 0;
     uint16_t cur_agc, min_agc = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
+    uint16_t max_agc = 0;
     uint16_t i, phy_data;
     uint16_t cable_length;
 
@@ -5338,6 +5434,40 @@ e1000_get_cable_length(struct e1000_hw *hw,
                        IGP01E1000_AGC_RANGE) : 0;
         *max_length = e1000_igp_cable_length_table[agc_value] +
                       IGP01E1000_AGC_RANGE;
+    } else if (hw->phy_type == e1000_phy_igp_2) {
+        uint16_t agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] =
+                                                         {IGP02E1000_PHY_AGC_A,
+                                                          IGP02E1000_PHY_AGC_B,
+                                                          IGP02E1000_PHY_AGC_C,
+                                                          IGP02E1000_PHY_AGC_D};
+        /* Read the AGC registers for all channels */
+        for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
+            ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            /* Getting bits 15:9, which represent the combination of coarse and
+             * fine gain values.  The result is a number that can be put into
+             * the lookup table to obtain the approximate cable length. */
+            cur_agc = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
+                      IGP02E1000_AGC_LENGTH_MASK;
+
+            /* Remove min & max AGC values from calculation. */
+            if (e1000_igp_2_cable_length_table[min_agc] > e1000_igp_2_cable_length_table[cur_agc])
+                min_agc = cur_agc;
+            if (e1000_igp_2_cable_length_table[max_agc] < e1000_igp_2_cable_length_table[cur_agc])
+                max_agc = cur_agc;
+
+            agc_value += e1000_igp_2_cable_length_table[cur_agc];
+        }
+
+        agc_value -= (e1000_igp_2_cable_length_table[min_agc] + e1000_igp_2_cable_length_table[max_agc]);
+        agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
+
+        /* Calculate cable length with the error range of +/- 10 meters. */
+        *min_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
+                       (agc_value - IGP02E1000_AGC_RANGE) : 0;
+        *max_length = agc_value + IGP02E1000_AGC_RANGE;
     }
 
     return E1000_SUCCESS;
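
For igp_2 PHYs the estimate above reads one AGC value per channel, discards the channels that map to the smallest and largest table entries, averages the remaining two, and reports a +/- 15 m window. A standalone sketch of that arithmetic with a toy lookup table and made-up AGC indices:

#include <stdint.h>
#include <stdio.h>

#define CHANNELS 4
#define RANGE    15  /* metres, per IGP02E1000_AGC_RANGE */

/* Average the per-channel lookups after dropping the channel with the
 * smallest and the channel with the largest table value. */
static void cable_length(const uint16_t table[], const uint16_t agc[CHANNELS],
                         uint16_t *min_len, uint16_t *max_len)
{
    uint16_t min_i = 0, max_i = 0, sum = 0;
    int i;

    for (i = 0; i < CHANNELS; i++) {
        if (table[agc[i]] < table[agc[min_i]]) min_i = i;
        if (table[agc[i]] > table[agc[max_i]]) max_i = i;
        sum += table[agc[i]];
    }
    sum -= table[agc[min_i]] + table[agc[max_i]];
    sum /= CHANNELS - 2;

    *min_len = (sum > RANGE) ? sum - RANGE : 0;
    *max_len = sum + RANGE;
}

int main(void)
{
    /* Toy table and AGC readings, not the driver's real data. */
    uint16_t table[8] = { 0, 10, 20, 30, 40, 50, 60, 70 };
    uint16_t agc[CHANNELS] = { 2, 3, 3, 5 };
    uint16_t lo, hi;

    cable_length(table, agc, &lo, &hi);
    printf("estimate %u..%u m\n", lo, hi);  /* 15..45 m around 30 m */
    return 0;
}
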
@@ -6465,6 +6595,8 @@ e1000_get_auto_rd_done(struct e1000_hw *hw)
     default:
         msec_delay(5);
         break;
+    case e1000_82571:
+    case e1000_82572:
     case e1000_82573:
         while(timeout) {
             if (E1000_READ_REG(hw, EECD) & E1000_EECD_AUTO_RD) break;
@@ -6494,10 +6626,31 @@ e1000_get_auto_rd_done(struct e1000_hw *hw)
 int32_t
 e1000_get_phy_cfg_done(struct e1000_hw *hw)
 {
+    int32_t timeout = PHY_CFG_TIMEOUT;
+    uint32_t cfg_mask = E1000_EEPROM_CFG_DONE;
+
     DEBUGFUNC("e1000_get_phy_cfg_done");
 
-    /* Simply wait for 10ms */
-    msec_delay(10);
+    switch (hw->mac_type) {
+    default:
+        msec_delay(10);
+        break;
+    case e1000_82571:
+    case e1000_82572:
+        while (timeout) {
+            if (E1000_READ_REG(hw, EEMNGCTL) & cfg_mask)
+                break;
+            else
+                msec_delay(1);
+            timeout--;
+        }
+
+        if (!timeout) {
+            DEBUGOUT("MNG configuration cycle has not completed.\n");
+            return -E1000_ERR_RESET;
+        }
+        break;
+    }
 
     return E1000_SUCCESS;
 }
@@ -6569,8 +6722,7 @@ e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw)
         return;
 
     swsm = E1000_READ_REG(hw, SWSM);
-    /* Release both semaphores. */
-    swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
+    swsm &= ~(E1000_SWSM_SWESMBI);
     E1000_WRITE_REG(hw, SWSM, swsm);
 }
 
@@ -6606,6 +6758,8 @@ e1000_arc_subsystem_valid(struct e1000_hw *hw)
      * if this is the case.  We read FWSM to determine the manageability mode.
      */
     switch (hw->mac_type) {
+    case e1000_82571:
+    case e1000_82572:
     case e1000_82573:
         fwsm = E1000_READ_REG(hw, FWSM);
         if((fwsm & E1000_FWSM_MODE_MASK) != 0)
index 51c2b3a18b6f385f9c86698b7840cf70838025a0..4f2c196dc314cd68d79b9297e0b352c1118fa786 100644 (file)
@@ -57,6 +57,8 @@ typedef enum {
     e1000_82541_rev_2,
     e1000_82547,
     e1000_82547_rev_2,
+    e1000_82571,
+    e1000_82572,
     e1000_82573,
     e1000_num_macs
 } e1000_mac_type;
@@ -478,10 +480,16 @@ uint8_t e1000_arc_subsystem_valid(struct e1000_hw *hw);
 #define E1000_DEV_ID_82546GB_SERDES      0x107B
 #define E1000_DEV_ID_82546GB_PCIE        0x108A
 #define E1000_DEV_ID_82547EI             0x1019
+#define E1000_DEV_ID_82571EB_COPPER      0x105E
+#define E1000_DEV_ID_82571EB_FIBER       0x105F
+#define E1000_DEV_ID_82571EB_SERDES      0x1060
+#define E1000_DEV_ID_82572EI_COPPER      0x107D
+#define E1000_DEV_ID_82572EI_FIBER       0x107E
+#define E1000_DEV_ID_82572EI_SERDES      0x107F
 #define E1000_DEV_ID_82573E              0x108B
 #define E1000_DEV_ID_82573E_IAMT         0x108C
+#define E1000_DEV_ID_82573L              0x109A
 
-#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099
 
 #define NODE_ADDRESS_SIZE 6
 #define ETH_LENGTH_OF_ADDRESS 6
@@ -833,6 +841,8 @@ struct e1000_ffvt_entry {
 #define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
 #define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
 
+#define E1000_DISABLE_SERDES_LOOPBACK   0x0400
+
 /* Register Set. (82543, 82544)
  *
  * Registers are defined to be 32 bits and  should be accessed as 32 bit values.
@@ -853,6 +863,7 @@ struct e1000_ffvt_entry {
 #define E1000_CTRL_EXT 0x00018  /* Extended Device Control - RW */
 #define E1000_FLA      0x0001C  /* Flash Access - RW */
 #define E1000_MDIC     0x00020  /* MDI Control - RW */
+#define E1000_SCTL     0x00024  /* SerDes Control - RW */
 #define E1000_FCAL     0x00028  /* Flow Control Address Low - RW */
 #define E1000_FCAH     0x0002C  /* Flow Control Address High -RW */
 #define E1000_FCT      0x00030  /* Flow Control Type - RW */
@@ -864,6 +875,12 @@ struct e1000_ffvt_entry {
 #define E1000_IMC      0x000D8  /* Interrupt Mask Clear - WO */
 #define E1000_IAM      0x000E0  /* Interrupt Acknowledge Auto Mask */
 #define E1000_RCTL     0x00100  /* RX Control - RW */
+#define E1000_RDTR1    0x02820  /* RX Delay Timer (1) - RW */
+#define E1000_RDBAL1   0x02900  /* RX Descriptor Base Address Low (1) - RW */
+#define E1000_RDBAH1   0x02904  /* RX Descriptor Base Address High (1) - RW */
+#define E1000_RDLEN1   0x02908  /* RX Descriptor Length (1) - RW */
+#define E1000_RDH1     0x02910  /* RX Descriptor Head (1) - RW */
+#define E1000_RDT1     0x02918  /* RX Descriptor Tail (1) - RW */
 #define E1000_FCTTV    0x00170  /* Flow Control Transmit Timer Value - RW */
 #define E1000_TXCW     0x00178  /* TX Configuration Word - RW */
 #define E1000_RXCW     0x00180  /* RX Configuration Word - RO */
@@ -895,6 +912,12 @@ struct e1000_ffvt_entry {
 #define E1000_RDH      0x02810  /* RX Descriptor Head - RW */
 #define E1000_RDT      0x02818  /* RX Descriptor Tail - RW */
 #define E1000_RDTR     0x02820  /* RX Delay Timer - RW */
+#define E1000_RDBAL0   E1000_RDBAL /* RX Desc Base Address Low (0) - RW */
+#define E1000_RDBAH0   E1000_RDBAH /* RX Desc Base Address High (0) - RW */
+#define E1000_RDLEN0   E1000_RDLEN /* RX Desc Length (0) - RW */
+#define E1000_RDH0     E1000_RDH   /* RX Desc Head (0) - RW */
+#define E1000_RDT0     E1000_RDT   /* RX Desc Tail (0) - RW */
+#define E1000_RDTR0    E1000_RDTR  /* RX Delay Timer (0) - RW */
 #define E1000_RXDCTL   0x02828  /* RX Descriptor Control - RW */
 #define E1000_RADV     0x0282C  /* RX Interrupt Absolute Delay Timer - RW */
 #define E1000_RSRPD    0x02C00  /* RX Small Packet Detect - RW */
@@ -980,15 +1003,15 @@ struct e1000_ffvt_entry {
 #define E1000_BPTC     0x040F4  /* Broadcast Packets TX Count - R/clr */
 #define E1000_TSCTC    0x040F8  /* TCP Segmentation Context TX - R/clr */
 #define E1000_TSCTFC   0x040FC  /* TCP Segmentation Context TX Fail - R/clr */
-#define E1000_IAC       0x4100  /* Interrupt Assertion Count */
-#define E1000_ICRXPTC   0x4104  /* Interrupt Cause Rx Packet Timer Expire Count */
-#define E1000_ICRXATC   0x4108  /* Interrupt Cause Rx Absolute Timer Expire Count */
-#define E1000_ICTXPTC   0x410C  /* Interrupt Cause Tx Packet Timer Expire Count */
-#define E1000_ICTXATC   0x4110  /* Interrupt Cause Tx Absolute Timer Expire Count */
-#define E1000_ICTXQEC   0x4118  /* Interrupt Cause Tx Queue Empty Count */
-#define E1000_ICTXQMTC  0x411C  /* Interrupt Cause Tx Queue Minimum Threshold Count */
-#define E1000_ICRXDMTC  0x4120  /* Interrupt Cause Rx Descriptor Minimum Threshold Count */
-#define E1000_ICRXOC    0x4124  /* Interrupt Cause Receiver Overrun Count */
+#define E1000_IAC      0x04100  /* Interrupt Assertion Count */
+#define E1000_ICRXPTC  0x04104  /* Interrupt Cause Rx Packet Timer Expire Count */
+#define E1000_ICRXATC  0x04108  /* Interrupt Cause Rx Absolute Timer Expire Count */
+#define E1000_ICTXPTC  0x0410C  /* Interrupt Cause Tx Packet Timer Expire Count */
+#define E1000_ICTXATC  0x04110  /* Interrupt Cause Tx Absolute Timer Expire Count */
+#define E1000_ICTXQEC  0x04118  /* Interrupt Cause Tx Queue Empty Count */
+#define E1000_ICTXQMTC 0x0411C  /* Interrupt Cause Tx Queue Minimum Threshold Count */
+#define E1000_ICRXDMTC 0x04120  /* Interrupt Cause Rx Descriptor Minimum Threshold Count */
+#define E1000_ICRXOC   0x04124  /* Interrupt Cause Receiver Overrun Count */
 #define E1000_RXCSUM   0x05000  /* RX Checksum Control - RW */
 #define E1000_RFCTL    0x05008  /* Receive Filter Control*/
 #define E1000_MTA      0x05200  /* Multicast Table Array - RW Array */
@@ -1018,6 +1041,14 @@ struct e1000_ffvt_entry {
 #define E1000_FWSM      0x05B54 /* FW Semaphore */
 #define E1000_FFLT_DBG  0x05F04 /* Debug Register */
 #define E1000_HICR      0x08F00 /* Host Interface Control */
+
+/* RSS registers */
+#define E1000_CPUVEC    0x02C10 /* CPU Vector Register - RW */
+#define E1000_MRQC      0x05818 /* Multiple Receive Control - RW */
+#define E1000_RETA      0x05C00 /* Redirection Table - RW Array */
+#define E1000_RSSRK     0x05C80 /* RSS Random Key - RW Array */
+#define E1000_RSSIM     0x05864 /* RSS Interrupt Mask */
+#define E1000_RSSIR     0x05868 /* RSS Interrupt Request */
 /* Register Set (82542)
  *
  * Some of the 82542 registers are located at different offsets than they are
@@ -1032,6 +1063,7 @@ struct e1000_ffvt_entry {
 #define E1000_82542_CTRL_EXT E1000_CTRL_EXT
 #define E1000_82542_FLA      E1000_FLA
 #define E1000_82542_MDIC     E1000_MDIC
+#define E1000_82542_SCTL     E1000_SCTL
 #define E1000_82542_FCAL     E1000_FCAL
 #define E1000_82542_FCAH     E1000_FCAH
 #define E1000_82542_FCT      E1000_FCT
@@ -1049,6 +1081,18 @@ struct e1000_ffvt_entry {
 #define E1000_82542_RDLEN    0x00118
 #define E1000_82542_RDH      0x00120
 #define E1000_82542_RDT      0x00128
+#define E1000_82542_RDTR0    E1000_82542_RDTR
+#define E1000_82542_RDBAL0   E1000_82542_RDBAL
+#define E1000_82542_RDBAH0   E1000_82542_RDBAH
+#define E1000_82542_RDLEN0   E1000_82542_RDLEN
+#define E1000_82542_RDH0     E1000_82542_RDH
+#define E1000_82542_RDT0     E1000_82542_RDT
+#define E1000_82542_RDTR1    0x00130
+#define E1000_82542_RDBAL1   0x00138
+#define E1000_82542_RDBAH1   0x0013C
+#define E1000_82542_RDLEN1   0x00140
+#define E1000_82542_RDH1     0x00148
+#define E1000_82542_RDT1     0x00150
 #define E1000_82542_FCRTH    0x00160
 #define E1000_82542_FCRTL    0x00168
 #define E1000_82542_FCTTV    E1000_FCTTV
@@ -1197,6 +1241,13 @@ struct e1000_ffvt_entry {
 #define E1000_82542_ICRXOC      E1000_ICRXOC
 #define E1000_82542_HICR        E1000_HICR
 
+#define E1000_82542_CPUVEC      E1000_CPUVEC
+#define E1000_82542_MRQC        E1000_MRQC
+#define E1000_82542_RETA        E1000_RETA
+#define E1000_82542_RSSRK       E1000_RSSRK
+#define E1000_82542_RSSIM       E1000_RSSIM
+#define E1000_82542_RSSIR       E1000_RSSIR
+
 /* Statistics counters collected by the MAC */
 struct e1000_hw_stats {
     uint64_t crcerrs;
@@ -1336,6 +1387,7 @@ struct e1000_hw {
     boolean_t serdes_link_down;
     boolean_t tbi_compatibility_en;
     boolean_t tbi_compatibility_on;
+    boolean_t laa_is_present;
     boolean_t phy_reset_disable;
     boolean_t fc_send_xon;
     boolean_t fc_strict_ieee;
@@ -1374,6 +1426,7 @@ struct e1000_hw {
 #define E1000_CTRL_BEM32    0x00000400  /* Big Endian 32 mode */
 #define E1000_CTRL_FRCSPD   0x00000800  /* Force Speed */
 #define E1000_CTRL_FRCDPX   0x00001000  /* Force Duplex */
+#define E1000_CTRL_D_UD_EN  0x00002000  /* Dock/Undock enable */
 #define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */
 #define E1000_CTRL_SWDPIN0  0x00040000  /* SWDPIN 0 value */
 #define E1000_CTRL_SWDPIN1  0x00080000  /* SWDPIN 1 value */
@@ -1491,6 +1544,8 @@ struct e1000_hw {
 #define E1000_CTRL_EXT_WR_WMARK_320   0x01000000
 #define E1000_CTRL_EXT_WR_WMARK_384   0x02000000
 #define E1000_CTRL_EXT_WR_WMARK_448   0x03000000
+#define E1000_CTRL_EXT_CANC           0x04000000  /* Interrupt delay cancellation */
+#define E1000_CTRL_EXT_DRV_LOAD       0x10000000  /* Driver loaded bit for FW */
 #define E1000_CTRL_EXT_IAME           0x08000000  /* Interrupt acknowledge Auto-mask */
 #define E1000_CTRL_EXT_INT_TIMER_CLR  0x20000000  /* Clear Interrupt timers after IMS clear */
 
@@ -1524,6 +1579,7 @@ struct e1000_hw {
 #define E1000_LEDCTL_LED2_BLINK           0x00800000
 #define E1000_LEDCTL_LED3_MODE_MASK       0x0F000000
 #define E1000_LEDCTL_LED3_MODE_SHIFT      24
+#define E1000_LEDCTL_LED3_BLINK_RATE      0x20000000
 #define E1000_LEDCTL_LED3_IVRT            0x40000000
 #define E1000_LEDCTL_LED3_BLINK           0x80000000
 
@@ -1784,6 +1840,16 @@ struct e1000_hw {
 #define E1000_RXCSUM_IPPCSE    0x00001000   /* IP payload checksum enable */
 #define E1000_RXCSUM_PCSD      0x00002000   /* packet checksum disabled */
 
+/* Multiple Receive Queue Control */
+#define E1000_MRQC_ENABLE_MASK              0x00000003
+#define E1000_MRQC_ENABLE_RSS_2Q            0x00000001
+#define E1000_MRQC_ENABLE_RSS_INT           0x00000004
+#define E1000_MRQC_RSS_FIELD_MASK           0xFFFF0000
+#define E1000_MRQC_RSS_FIELD_IPV4_TCP       0x00010000
+#define E1000_MRQC_RSS_FIELD_IPV4           0x00020000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP       0x00040000
+#define E1000_MRQC_RSS_FIELD_IPV6_EX        0x00080000
+#define E1000_MRQC_RSS_FIELD_IPV6           0x00100000
 
 /* Definitions for power management and wakeup registers */
 /* Wake Up Control */
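
The MRQC bits introduced just above split into an enable field (two-queue RSS) and a field-select mask that picks which header tuples feed the RSS hash. A minimal sketch composing one plausible value from those defines (illustrative only; the driver's actual RSS programming happens in e1000_configure_rx):

#include <stdint.h>
#include <stdio.h>

#define MRQC_ENABLE_RSS_2Q      0x00000001u
#define MRQC_RSS_FIELD_IPV4_TCP 0x00010000u
#define MRQC_RSS_FIELD_IPV4     0x00020000u

int main(void)
{
    /* Two-queue RSS, hashing on the IPv4 header and the IPv4/TCP 4-tuple. */
    uint32_t mrqc = MRQC_ENABLE_RSS_2Q
                  | MRQC_RSS_FIELD_IPV4
                  | MRQC_RSS_FIELD_IPV4_TCP;

    printf("MRQC = 0x%08X\n", (unsigned)mrqc);  /* 0x00030001 */
    return 0;
}
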
@@ -1928,6 +1994,7 @@ struct e1000_host_command_info {
 #define E1000_MDALIGN          4096
 
 #define E1000_GCR_BEM32                 0x00400000
+#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
 /* Function Active and Power State to MNG */
 #define E1000_FACTPS_FUNC0_POWER_STATE_MASK         0x00000003
 #define E1000_FACTPS_LAN0_VALID                     0x00000004
@@ -1980,6 +2047,7 @@ struct e1000_host_command_info {
 /* EEPROM Word Offsets */
 #define EEPROM_COMPAT                 0x0003
 #define EEPROM_ID_LED_SETTINGS        0x0004
+#define EEPROM_VERSION                0x0005
 #define EEPROM_SERDES_AMPLITUDE       0x0006 /* For SERDES output amplitude adjustment. */
 #define EEPROM_PHY_CLASS_WORD         0x0007
 #define EEPROM_INIT_CONTROL1_REG      0x000A
@@ -1990,6 +2058,8 @@ struct e1000_host_command_info {
 #define EEPROM_FLASH_VERSION          0x0032
 #define EEPROM_CHECKSUM_REG           0x003F
 
+#define E1000_EEPROM_CFG_DONE         0x00040000   /* MNG config cycle done */
+
 /* Word definitions for ID LED Settings */
 #define ID_LED_RESERVED_0000 0x0000
 #define ID_LED_RESERVED_FFFF 0xFFFF
@@ -2108,6 +2178,8 @@ struct e1000_host_command_info {
 #define E1000_PBA_22K 0x0016
 #define E1000_PBA_24K 0x0018
 #define E1000_PBA_30K 0x001E
+#define E1000_PBA_32K 0x0020
+#define E1000_PBA_38K 0x0026
 #define E1000_PBA_40K 0x0028
 #define E1000_PBA_48K 0x0030    /* 48KB, default RX allocation */
 
@@ -2592,11 +2664,11 @@ struct e1000_host_command_info {
 
 /* 7 bits (3 Coarse + 4 Fine) --> 128 optional values */
 #define IGP01E1000_AGC_LENGTH_TABLE_SIZE 128
-#define IGP02E1000_AGC_LENGTH_TABLE_SIZE 128
+#define IGP02E1000_AGC_LENGTH_TABLE_SIZE 113
 
 /* The precision error of the cable length is +/- 10 meters */
 #define IGP01E1000_AGC_RANGE    10
-#define IGP02E1000_AGC_RANGE    10
+#define IGP02E1000_AGC_RANGE    15
 
 /* IGP01E1000 PCS Initialization register */
 /* bits 3:6 in the PCS registers stores the channels polarity */
index ee687c902a20be52d0bdf95ef4fc58b03f5317a3..6b72f6acdd54eca353552cba53c111bff4efb66d 100644 (file)
@@ -43,7 +43,7 @@ char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
 #else
 #define DRIVERNAPI "-NAPI"
 #endif
-#define DRV_VERSION            "6.0.60-k2"DRIVERNAPI
+#define DRV_VERSION "6.1.16-k2"DRIVERNAPI
 char e1000_driver_version[] = DRV_VERSION;
 char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
 
@@ -80,6 +80,9 @@ static struct pci_device_id e1000_pci_tbl[] = {
        INTEL_E1000_ETHERNET_DEVICE(0x1026),
        INTEL_E1000_ETHERNET_DEVICE(0x1027),
        INTEL_E1000_ETHERNET_DEVICE(0x1028),
+       INTEL_E1000_ETHERNET_DEVICE(0x105E),
+       INTEL_E1000_ETHERNET_DEVICE(0x105F),
+       INTEL_E1000_ETHERNET_DEVICE(0x1060),
        INTEL_E1000_ETHERNET_DEVICE(0x1075),
        INTEL_E1000_ETHERNET_DEVICE(0x1076),
        INTEL_E1000_ETHERNET_DEVICE(0x1077),
@@ -88,10 +91,13 @@ static struct pci_device_id e1000_pci_tbl[] = {
        INTEL_E1000_ETHERNET_DEVICE(0x107A),
        INTEL_E1000_ETHERNET_DEVICE(0x107B),
        INTEL_E1000_ETHERNET_DEVICE(0x107C),
+       INTEL_E1000_ETHERNET_DEVICE(0x107D),
+       INTEL_E1000_ETHERNET_DEVICE(0x107E),
+       INTEL_E1000_ETHERNET_DEVICE(0x107F),
        INTEL_E1000_ETHERNET_DEVICE(0x108A),
        INTEL_E1000_ETHERNET_DEVICE(0x108B),
        INTEL_E1000_ETHERNET_DEVICE(0x108C),
-       INTEL_E1000_ETHERNET_DEVICE(0x1099),
+       INTEL_E1000_ETHERNET_DEVICE(0x109A),
        /* required last entry */
        {0,}
 };
@@ -102,10 +108,18 @@ int e1000_up(struct e1000_adapter *adapter);
 void e1000_down(struct e1000_adapter *adapter);
 void e1000_reset(struct e1000_adapter *adapter);
 int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
-int e1000_setup_tx_resources(struct e1000_adapter *adapter);
-int e1000_setup_rx_resources(struct e1000_adapter *adapter);
-void e1000_free_tx_resources(struct e1000_adapter *adapter);
-void e1000_free_rx_resources(struct e1000_adapter *adapter);
+int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
+int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
+void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
+void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
+int e1000_setup_tx_resources(struct e1000_adapter *adapter,
+                             struct e1000_tx_ring *txdr);
+int e1000_setup_rx_resources(struct e1000_adapter *adapter,
+                             struct e1000_rx_ring *rxdr);
+void e1000_free_tx_resources(struct e1000_adapter *adapter,
+                             struct e1000_tx_ring *tx_ring);
+void e1000_free_rx_resources(struct e1000_adapter *adapter,
+                             struct e1000_rx_ring *rx_ring);
 void e1000_update_stats(struct e1000_adapter *adapter);
 
 /* Local Function Prototypes */
@@ -114,14 +128,22 @@ static int e1000_init_module(void);
 static void e1000_exit_module(void);
 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
 static void __devexit e1000_remove(struct pci_dev *pdev);
+static int e1000_alloc_queues(struct e1000_adapter *adapter);
+#ifdef CONFIG_E1000_MQ
+static void e1000_setup_queue_mapping(struct e1000_adapter *adapter);
+#endif
 static int e1000_sw_init(struct e1000_adapter *adapter);
 static int e1000_open(struct net_device *netdev);
 static int e1000_close(struct net_device *netdev);
 static void e1000_configure_tx(struct e1000_adapter *adapter);
 static void e1000_configure_rx(struct e1000_adapter *adapter);
 static void e1000_setup_rctl(struct e1000_adapter *adapter);
-static void e1000_clean_tx_ring(struct e1000_adapter *adapter);
-static void e1000_clean_rx_ring(struct e1000_adapter *adapter);
+static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
+static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
+static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
+                                struct e1000_tx_ring *tx_ring);
+static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
+                                struct e1000_rx_ring *rx_ring);
 static void e1000_set_multi(struct net_device *netdev);
 static void e1000_update_phy_info(unsigned long data);
 static void e1000_watchdog(unsigned long data);
@@ -132,19 +154,26 @@ static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
 static int e1000_set_mac(struct net_device *netdev, void *p);
 static irqreturn_t e1000_intr(int irq, void *data, struct pt_regs *regs);
-static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter);
+static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter,
+                                    struct e1000_tx_ring *tx_ring);
 #ifdef CONFIG_E1000_NAPI
-static int e1000_clean(struct net_device *netdev, int *budget);
+static int e1000_clean(struct net_device *poll_dev, int *budget);
 static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
+                                    struct e1000_rx_ring *rx_ring,
                                     int *work_done, int work_to_do);
 static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
+                                       struct e1000_rx_ring *rx_ring,
                                        int *work_done, int work_to_do);
 #else
-static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter);
-static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter);
+static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
+                                    struct e1000_rx_ring *rx_ring);
+static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
+                                       struct e1000_rx_ring *rx_ring);
 #endif
-static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter);
-static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter);
+static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
+                                   struct e1000_rx_ring *rx_ring);
+static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
+                                      struct e1000_rx_ring *rx_ring);
 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
                           int cmd);
@@ -172,6 +201,11 @@ static int e1000_resume(struct pci_dev *pdev);
 static void e1000_netpoll (struct net_device *netdev);
 #endif
 
+#ifdef CONFIG_E1000_MQ
+/* for multiple Rx queues */
+void e1000_rx_schedule(void *data);
+#endif
+
 /* Exported from other modules */
 
 extern void e1000_check_options(struct e1000_adapter *adapter);
@@ -289,7 +323,7 @@ int
 e1000_up(struct e1000_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
-       int err;
+       int i, err;
 
        /* hardware has been reset, we need to reload some things */
 
@@ -308,7 +342,8 @@ e1000_up(struct e1000_adapter *adapter)
        e1000_configure_tx(adapter);
        e1000_setup_rctl(adapter);
        e1000_configure_rx(adapter);
-       adapter->alloc_rx_buf(adapter);
+       for (i = 0; i < adapter->num_queues; i++)
+               adapter->alloc_rx_buf(adapter, &adapter->rx_ring[i]);
 
 #ifdef CONFIG_PCI_MSI
        if(adapter->hw.mac_type > e1000_82547_rev_2) {
@@ -344,6 +379,9 @@ e1000_down(struct e1000_adapter *adapter)
        struct net_device *netdev = adapter->netdev;
 
        e1000_irq_disable(adapter);
+#ifdef CONFIG_E1000_MQ
+       while (atomic_read(&adapter->rx_sched_call_data.count) != 0);
+#endif
        free_irq(adapter->pdev->irq, netdev);
 #ifdef CONFIG_PCI_MSI
        if(adapter->hw.mac_type > e1000_82547_rev_2 &&
@@ -363,11 +401,10 @@ e1000_down(struct e1000_adapter *adapter)
        netif_stop_queue(netdev);
 
        e1000_reset(adapter);
-       e1000_clean_tx_ring(adapter);
-       e1000_clean_rx_ring(adapter);
+       e1000_clean_all_tx_rings(adapter);
+       e1000_clean_all_rx_rings(adapter);
 
-       /* If WoL is not enabled
-        * and management mode is not IAMT
+       /* If WoL is not enabled and management mode is not IAMT
         * Power down the PHY so no link is implied when interface is down */
        if(!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
           adapter->hw.media_type == e1000_media_type_copper &&
@@ -398,6 +435,10 @@ e1000_reset(struct e1000_adapter *adapter)
        case e1000_82547_rev_2:
                pba = E1000_PBA_30K;
                break;
+       case e1000_82571:
+       case e1000_82572:
+               pba = E1000_PBA_38K;
+               break;
        case e1000_82573:
                pba = E1000_PBA_12K;
                break;
@@ -475,6 +516,7 @@ e1000_probe(struct pci_dev *pdev,
        struct net_device *netdev;
        struct e1000_adapter *adapter;
        unsigned long mmio_start, mmio_len;
+       uint32_t ctrl_ext;
        uint32_t swsm;
 
        static int cards_found = 0;
@@ -614,8 +656,9 @@ e1000_probe(struct pci_dev *pdev,
        if(e1000_read_mac_addr(&adapter->hw))
                DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
        memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
+       memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
 
-       if(!is_valid_ether_addr(netdev->dev_addr)) {
+       if(!is_valid_ether_addr(netdev->perm_addr)) {
                DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
                err = -EIO;
                goto err_eeprom;
@@ -687,6 +730,12 @@ e1000_probe(struct pci_dev *pdev,
 
        /* Let firmware know the driver has taken over */
        switch(adapter->hw.mac_type) {
+       case e1000_82571:
+       case e1000_82572:
+               ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+               E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
+                               ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+               break;
        case e1000_82573:
                swsm = E1000_READ_REG(&adapter->hw, SWSM);
                E1000_WRITE_REG(&adapter->hw, SWSM,
@@ -731,7 +780,11 @@ e1000_remove(struct pci_dev *pdev)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
+       uint32_t ctrl_ext;
        uint32_t manc, swsm;
+#ifdef CONFIG_E1000_NAPI
+       int i;
+#endif
 
        flush_scheduled_work();
 
@@ -745,6 +798,12 @@ e1000_remove(struct pci_dev *pdev)
        }
 
        switch(adapter->hw.mac_type) {
+       case e1000_82571:
+       case e1000_82572:
+               ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+               E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
+                               ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+               break;
        case e1000_82573:
                swsm = E1000_READ_REG(&adapter->hw, SWSM);
                E1000_WRITE_REG(&adapter->hw, SWSM,
@@ -756,13 +815,27 @@ e1000_remove(struct pci_dev *pdev)
        }
 
        unregister_netdev(netdev);
+#ifdef CONFIG_E1000_NAPI
+       for (i = 0; i < adapter->num_queues; i++)
+               __dev_put(&adapter->polling_netdev[i]);
+#endif
 
        if(!e1000_check_phy_reset_block(&adapter->hw))
                e1000_phy_hw_reset(&adapter->hw);
 
+       kfree(adapter->tx_ring);
+       kfree(adapter->rx_ring);
+#ifdef CONFIG_E1000_NAPI
+       kfree(adapter->polling_netdev);
+#endif
+
        iounmap(adapter->hw.hw_addr);
        pci_release_regions(pdev);
 
+#ifdef CONFIG_E1000_MQ
+       free_percpu(adapter->cpu_netdev);
+       free_percpu(adapter->cpu_tx_ring);
+#endif
        free_netdev(netdev);
 
        pci_disable_device(pdev);
@@ -783,6 +856,9 @@ e1000_sw_init(struct e1000_adapter *adapter)
        struct e1000_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
+#ifdef CONFIG_E1000_NAPI
+       int i;
+#endif
 
        /* PCI config space info */
 
@@ -840,13 +916,122 @@ e1000_sw_init(struct e1000_adapter *adapter)
                hw->master_slave = E1000_MASTER_SLAVE;
        }
 
+#ifdef CONFIG_E1000_MQ
+       /* Number of supported queues */
+       switch (hw->mac_type) {
+       case e1000_82571:
+       case e1000_82572:
+               adapter->num_queues = 2;
+               break;
+       default:
+               adapter->num_queues = 1;
+               break;
+       }
+       adapter->num_queues = min(adapter->num_queues, num_online_cpus());
+#else
+       adapter->num_queues = 1;
+#endif
+
+       if (e1000_alloc_queues(adapter)) {
+               DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
+               return -ENOMEM;
+       }
+
+#ifdef CONFIG_E1000_NAPI
+       for (i = 0; i < adapter->num_queues; i++) {
+               adapter->polling_netdev[i].priv = adapter;
+               adapter->polling_netdev[i].poll = &e1000_clean;
+               adapter->polling_netdev[i].weight = 64;
+               dev_hold(&adapter->polling_netdev[i]);
+               set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
+       }
+#endif
+
+#ifdef CONFIG_E1000_MQ
+       e1000_setup_queue_mapping(adapter);
+#endif
+
        atomic_set(&adapter->irq_sem, 1);
        spin_lock_init(&adapter->stats_lock);
-       spin_lock_init(&adapter->tx_lock);
 
        return 0;
 }
 
+/**
+ * e1000_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one ring per queue at run-time since we don't know the
+ * number of queues at compile-time.  The polling_netdev array is
+ * intended for Multiqueue, but should work fine with a single queue.
+ **/
+
+static int __devinit
+e1000_alloc_queues(struct e1000_adapter *adapter)
+{
+       int size;
+
+       size = sizeof(struct e1000_tx_ring) * adapter->num_queues;
+       adapter->tx_ring = kmalloc(size, GFP_KERNEL);
+       if (!adapter->tx_ring)
+               return -ENOMEM;
+       memset(adapter->tx_ring, 0, size);
+
+       size = sizeof(struct e1000_rx_ring) * adapter->num_queues;
+       adapter->rx_ring = kmalloc(size, GFP_KERNEL);
+       if (!adapter->rx_ring) {
+               kfree(adapter->tx_ring);
+               return -ENOMEM;
+       }
+       memset(adapter->rx_ring, 0, size);
+
+#ifdef CONFIG_E1000_NAPI
+       size = sizeof(struct net_device) * adapter->num_queues;
+       adapter->polling_netdev = kmalloc(size, GFP_KERNEL);
+       if (!adapter->polling_netdev) {
+               kfree(adapter->tx_ring);
+               kfree(adapter->rx_ring);
+               return -ENOMEM;
+       }
+       memset(adapter->polling_netdev, 0, size);
+#endif
+
+       return E1000_SUCCESS;
+}
+
+#ifdef CONFIG_E1000_MQ
+static void __devinit
+e1000_setup_queue_mapping(struct e1000_adapter *adapter)
+{
+       int i, cpu;
+
+       adapter->rx_sched_call_data.func = e1000_rx_schedule;
+       adapter->rx_sched_call_data.info = adapter->netdev;
+       cpus_clear(adapter->rx_sched_call_data.cpumask);
+
+       adapter->cpu_netdev = alloc_percpu(struct net_device *);
+       adapter->cpu_tx_ring = alloc_percpu(struct e1000_tx_ring *);
+
+       lock_cpu_hotplug();
+       i = 0;
+       for_each_online_cpu(cpu) {
+               *per_cpu_ptr(adapter->cpu_tx_ring, cpu) = &adapter->tx_ring[i % adapter->num_queues];
+               /* This is incomplete because we'd like to assign separate
+                * physical cpus to these netdev polling structures and
+                * avoid saturating a subset of cpus.
+                */
+               if (i < adapter->num_queues) {
+                       *per_cpu_ptr(adapter->cpu_netdev, cpu) = &adapter->polling_netdev[i];
+                       adapter->cpu_for_queue[i] = cpu;
+               } else
+                       *per_cpu_ptr(adapter->cpu_netdev, cpu) = NULL;
+
+               i++;
+       }
+       unlock_cpu_hotplug();
+}
+#endif
+
 /**
  * e1000_open - Called when a network interface is made active
  * @netdev: network interface device structure
@@ -868,12 +1053,12 @@ e1000_open(struct net_device *netdev)
 
        /* allocate transmit descriptors */
 
-       if((err = e1000_setup_tx_resources(adapter)))
+       if ((err = e1000_setup_all_tx_resources(adapter)))
                goto err_setup_tx;
 
        /* allocate receive descriptors */
 
-       if((err = e1000_setup_rx_resources(adapter)))
+       if ((err = e1000_setup_all_rx_resources(adapter)))
                goto err_setup_rx;
 
        if((err = e1000_up(adapter)))
@@ -887,9 +1072,9 @@ e1000_open(struct net_device *netdev)
        return E1000_SUCCESS;
 
 err_up:
-       e1000_free_rx_resources(adapter);
+       e1000_free_all_rx_resources(adapter);
 err_setup_rx:
-       e1000_free_tx_resources(adapter);
+       e1000_free_all_tx_resources(adapter);
 err_setup_tx:
        e1000_reset(adapter);
 
@@ -915,8 +1100,8 @@ e1000_close(struct net_device *netdev)
 
        e1000_down(adapter);
 
-       e1000_free_tx_resources(adapter);
-       e1000_free_rx_resources(adapter);
+       e1000_free_all_tx_resources(adapter);
+       e1000_free_all_rx_resources(adapter);
 
        if((adapter->hw.mng_cookie.status &
                          E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
@@ -951,14 +1136,15 @@ e1000_check_64k_bound(struct e1000_adapter *adapter,
 /**
  * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
  * @adapter: board private structure
+ * @txdr:    tx descriptor ring (for a specific queue) to setup
  *
  * Return 0 on success, negative on failure
  **/
 
 int
-e1000_setup_tx_resources(struct e1000_adapter *adapter)
+e1000_setup_tx_resources(struct e1000_adapter *adapter,
+                         struct e1000_tx_ring *txdr)
 {
-       struct e1000_desc_ring *txdr = &adapter->tx_ring;
        struct pci_dev *pdev = adapter->pdev;
        int size;
 
@@ -970,6 +1156,7 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter)
                return -ENOMEM;
        }
        memset(txdr->buffer_info, 0, size);
+       memset(&txdr->previous_buffer_info, 0, sizeof(struct e1000_buffer));
 
        /* round up to nearest 4K */
 
@@ -1018,10 +1205,40 @@ setup_tx_desc_die:
 
        txdr->next_to_use = 0;
        txdr->next_to_clean = 0;
+       spin_lock_init(&txdr->tx_lock);
 
        return 0;
 }
 
+/**
+ * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
+ *                               (Descriptors) for all queues
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not).  It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+
+int
+e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
+{
+       int i, err = 0;
+
+       for (i = 0; i < adapter->num_queues; i++) {
+               err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+               if (err) {
+                       DPRINTK(PROBE, ERR,
+                               "Allocation for Tx Queue %u failed\n", i);
+                       break;
+               }
+       }
+
+       return err;
+}
+
 /**
  * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
  * @adapter: board private structure
@@ -1032,23 +1249,43 @@ setup_tx_desc_die:
 static void
 e1000_configure_tx(struct e1000_adapter *adapter)
 {
-       uint64_t tdba = adapter->tx_ring.dma;
-       uint32_t tdlen = adapter->tx_ring.count * sizeof(struct e1000_tx_desc);
-       uint32_t tctl, tipg;
-
-       E1000_WRITE_REG(&adapter->hw, TDBAL, (tdba & 0x00000000ffffffffULL));
-       E1000_WRITE_REG(&adapter->hw, TDBAH, (tdba >> 32));
-
-       E1000_WRITE_REG(&adapter->hw, TDLEN, tdlen);
+       uint64_t tdba;
+       struct e1000_hw *hw = &adapter->hw;
+       uint32_t tdlen, tctl, tipg, tarc;
 
        /* Setup the HW Tx Head and Tail descriptor pointers */
 
-       E1000_WRITE_REG(&adapter->hw, TDH, 0);
-       E1000_WRITE_REG(&adapter->hw, TDT, 0);
+       switch (adapter->num_queues) {
+       case 2:
+               tdba = adapter->tx_ring[1].dma;
+               tdlen = adapter->tx_ring[1].count *
+                       sizeof(struct e1000_tx_desc);
+               E1000_WRITE_REG(hw, TDBAL1, (tdba & 0x00000000ffffffffULL));
+               E1000_WRITE_REG(hw, TDBAH1, (tdba >> 32));
+               E1000_WRITE_REG(hw, TDLEN1, tdlen);
+               E1000_WRITE_REG(hw, TDH1, 0);
+               E1000_WRITE_REG(hw, TDT1, 0);
+               adapter->tx_ring[1].tdh = E1000_TDH1;
+               adapter->tx_ring[1].tdt = E1000_TDT1;
+               /* Fall Through */
+       case 1:
+       default:
+               tdba = adapter->tx_ring[0].dma;
+               tdlen = adapter->tx_ring[0].count *
+                       sizeof(struct e1000_tx_desc);
+               E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
+               E1000_WRITE_REG(hw, TDBAH, (tdba >> 32));
+               E1000_WRITE_REG(hw, TDLEN, tdlen);
+               E1000_WRITE_REG(hw, TDH, 0);
+               E1000_WRITE_REG(hw, TDT, 0);
+               adapter->tx_ring[0].tdh = E1000_TDH;
+               adapter->tx_ring[0].tdt = E1000_TDT;
+               break;
+       }
 
        /* Set the default values for the Tx Inter Packet Gap timer */
 
-       switch (adapter->hw.mac_type) {
+       switch (hw->mac_type) {
        case e1000_82542_rev2_0:
        case e1000_82542_rev2_1:
                tipg = DEFAULT_82542_TIPG_IPGT;
@@ -1056,67 +1293,81 @@ e1000_configure_tx(struct e1000_adapter *adapter)
                tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
                break;
        default:
-               if(adapter->hw.media_type == e1000_media_type_fiber ||
-                  adapter->hw.media_type == e1000_media_type_internal_serdes)
+               if (hw->media_type == e1000_media_type_fiber ||
+                   hw->media_type == e1000_media_type_internal_serdes)
                        tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
                else
                        tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
                tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
                tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
        }
-       E1000_WRITE_REG(&adapter->hw, TIPG, tipg);
+       E1000_WRITE_REG(hw, TIPG, tipg);
 
        /* Set the Tx Interrupt Delay register */
 
-       E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay);
-       if(adapter->hw.mac_type >= e1000_82540)
-               E1000_WRITE_REG(&adapter->hw, TADV, adapter->tx_abs_int_delay);
+       E1000_WRITE_REG(hw, TIDV, adapter->tx_int_delay);
+       if (hw->mac_type >= e1000_82540)
+               E1000_WRITE_REG(hw, TADV, adapter->tx_abs_int_delay);
 
        /* Program the Transmit Control Register */
 
-       tctl = E1000_READ_REG(&adapter->hw, TCTL);
+       tctl = E1000_READ_REG(hw, TCTL);
 
        tctl &= ~E1000_TCTL_CT;
-       tctl |= E1000_TCTL_EN | E1000_TCTL_PSP |
+       tctl |= E1000_TCTL_EN | E1000_TCTL_PSP | E1000_TCTL_RTLC |
                (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
 
-       E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
+       E1000_WRITE_REG(hw, TCTL, tctl);
 
-       e1000_config_collision_dist(&adapter->hw);
+       if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) {
+               tarc = E1000_READ_REG(hw, TARC0);
+               tarc |= ((1 << 25) | (1 << 21));
+               E1000_WRITE_REG(hw, TARC0, tarc);
+               tarc = E1000_READ_REG(hw, TARC1);
+               tarc |= (1 << 25);
+               if (tctl & E1000_TCTL_MULR)
+                       tarc &= ~(1 << 28);
+               else
+                       tarc |= (1 << 28);
+               E1000_WRITE_REG(hw, TARC1, tarc);
+       }
+
+       e1000_config_collision_dist(hw);
 
        /* Setup Transmit Descriptor Settings for eop descriptor */
        adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP |
                E1000_TXD_CMD_IFCS;
 
-       if(adapter->hw.mac_type < e1000_82543)
+       if (hw->mac_type < e1000_82543)
                adapter->txd_cmd |= E1000_TXD_CMD_RPS;
        else
                adapter->txd_cmd |= E1000_TXD_CMD_RS;
 
        /* Cache if we're 82544 running in PCI-X because we'll
         * need this to apply a workaround later in the send path. */
-       if(adapter->hw.mac_type == e1000_82544 &&
-          adapter->hw.bus_type == e1000_bus_type_pcix)
+       if (hw->mac_type == e1000_82544 &&
+           hw->bus_type == e1000_bus_type_pcix)
                adapter->pcix_82544 = 1;
 }
 
 /**
  * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
  * @adapter: board private structure
+ * @rxdr:    rx descriptor ring (for a specific queue) to setup
  *
  * Returns 0 on success, negative on failure
  **/
 
 int
-e1000_setup_rx_resources(struct e1000_adapter *adapter)
+e1000_setup_rx_resources(struct e1000_adapter *adapter,
+                         struct e1000_rx_ring *rxdr)
 {
-       struct e1000_desc_ring *rxdr = &adapter->rx_ring;
        struct pci_dev *pdev = adapter->pdev;
        int size, desc_len;
 
        size = sizeof(struct e1000_buffer) * rxdr->count;
        rxdr->buffer_info = vmalloc(size);
-       if(!rxdr->buffer_info) {
+       if (!rxdr->buffer_info) {
                DPRINTK(PROBE, ERR,
                "Unable to allocate memory for the receive descriptor ring\n");
                return -ENOMEM;
@@ -1156,13 +1407,13 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter)
 
        rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
 
-       if(!rxdr->desc) {
+       if (!rxdr->desc) {
+               DPRINTK(PROBE, ERR,
+               "Unable to allocate memory for the receive descriptor ring\n");
 setup_rx_desc_die:
                vfree(rxdr->buffer_info);
                kfree(rxdr->ps_page);
                kfree(rxdr->ps_page_dma);
-               DPRINTK(PROBE, ERR,
-               "Unable to allocate memory for the receive descriptor ring\n");
                return -ENOMEM;
        }
 
@@ -1174,9 +1425,12 @@ setup_rx_desc_die:
                                     "at %p\n", rxdr->size, rxdr->desc);
                /* Try again, without freeing the previous */
                rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
-               if(!rxdr->desc) {
                /* Failed allocation, critical failure */
+               if (!rxdr->desc) {
                        pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
+                       DPRINTK(PROBE, ERR,
+                               "Unable to allocate memory "
+                               "for the receive descriptor ring\n");
                        goto setup_rx_desc_die;
                }
 
@@ -1188,10 +1442,7 @@ setup_rx_desc_die:
                        DPRINTK(PROBE, ERR,
                                "Unable to allocate aligned memory "
                                "for the receive descriptor ring\n");
-                       vfree(rxdr->buffer_info);
-                       kfree(rxdr->ps_page);
-                       kfree(rxdr->ps_page_dma);
-                       return -ENOMEM;
+                       goto setup_rx_desc_die;
                } else {
                        /* Free old allocation, new allocation was successful */
                        pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
@@ -1205,16 +1456,49 @@ setup_rx_desc_die:
        return 0;
 }
 
+/**
+ * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
+ *                               (Descriptors) for all queues
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not).  It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+
+int
+e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
+{
+       int i, err = 0;
+
+       for (i = 0; i < adapter->num_queues; i++) {
+               err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+               if (err) {
+                       DPRINTK(PROBE, ERR,
+                               "Allocation for Rx Queue %u failed\n", i);
+                       break;
+               }
+       }
+
+       return err;
+}
+
 /**
  * e1000_setup_rctl - configure the receive control registers
  * @adapter: Board private structure
  **/
-
+#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
+                       (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
 static void
 e1000_setup_rctl(struct e1000_adapter *adapter)
 {
        uint32_t rctl, rfctl;
        uint32_t psrctl = 0;
+#ifdef CONFIG_E1000_PACKET_SPLIT
+       uint32_t pages = 0;
+#endif
 
        rctl = E1000_READ_REG(&adapter->hw, RCTL);
 
@@ -1235,7 +1519,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
                rctl |= E1000_RCTL_LPE;
 
        /* Setup buffer sizes */
-       if(adapter->hw.mac_type == e1000_82573) {
+       if(adapter->hw.mac_type >= e1000_82571) {
                /* We can now specify buffers in 1K increments.
                 * BSIZE and BSEX are ignored in this case. */
                rctl |= adapter->rx_buffer_len << 0x11;
@@ -1268,11 +1552,14 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
         * followed by the page buffers.  Therefore, skb->data is
         * sized to hold the largest protocol header.
         */
-       adapter->rx_ps = (adapter->hw.mac_type > e1000_82547_rev_2) 
-                         && (adapter->netdev->mtu 
-                             < ((3 * PAGE_SIZE) + adapter->rx_ps_bsize0));
+       pages = PAGE_USE_COUNT(adapter->netdev->mtu);
+       if ((adapter->hw.mac_type > e1000_82547_rev_2) && (pages <= 3) &&
+           PAGE_SIZE <= 16384)
+               adapter->rx_ps_pages = pages;
+       else
+               adapter->rx_ps_pages = 0;
 #endif
-       if(adapter->rx_ps) {
+       if (adapter->rx_ps_pages) {
                /* Configure extra packet-split registers */
                rfctl = E1000_READ_REG(&adapter->hw, RFCTL);
                rfctl |= E1000_RFCTL_EXTEN;
@@ -1284,12 +1571,19 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
                
                psrctl |= adapter->rx_ps_bsize0 >>
                        E1000_PSRCTL_BSIZE0_SHIFT;
-               psrctl |= PAGE_SIZE >>
-                       E1000_PSRCTL_BSIZE1_SHIFT;
-               psrctl |= PAGE_SIZE <<
-                       E1000_PSRCTL_BSIZE2_SHIFT;
-               psrctl |= PAGE_SIZE <<
-                       E1000_PSRCTL_BSIZE3_SHIFT;
+
+               switch (adapter->rx_ps_pages) {
+               case 3:
+                       psrctl |= PAGE_SIZE <<
+                               E1000_PSRCTL_BSIZE3_SHIFT;
+               case 2:
+                       psrctl |= PAGE_SIZE <<
+                               E1000_PSRCTL_BSIZE2_SHIFT;
+               case 1:
+                       psrctl |= PAGE_SIZE >>
+                               E1000_PSRCTL_BSIZE1_SHIFT;
+                       break;
+               }
 
                E1000_WRITE_REG(&adapter->hw, PSRCTL, psrctl);
        }
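
Two details in the hunk above deserve a gloss. PAGE_USE_COUNT rounds the MTU up to whole pages to decide how many page buffers a packet-split receive needs, and the switch on rx_ps_pages deliberately omits break between case 3, case 2 and case 1 so that enabling three page buffers also programs the sizes of the lower ones. A stand-alone sketch of the page arithmetic, reusing the macro exactly as defined above (PAGE_SHIFT is assumed to be 12 here only to make the example concrete):

    #include <stdio.h>

    #define PAGE_SHIFT 12                      /* assumption for the example */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* same rounding as the driver macro: pages needed to cover S bytes */
    #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
                               (((S) & (PAGE_SIZE - 1)) ? 1 : 0))

    int main(void)
    {
        unsigned int mtus[] = { 1500, 4096, 9000, 16128 };

        for (unsigned int k = 0; k < 4; k++) {
            unsigned int pages = PAGE_USE_COUNT(mtus[k]);
            /* mirror the policy above: packet split only when 1..3 pages do */
            unsigned int rx_ps_pages = (pages <= 3) ? pages : 0;
            printf("MTU %5u -> %u page(s), rx_ps_pages = %u\n",
                   mtus[k], pages, rx_ps_pages);
        }
        return 0;
    }
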
@@ -1307,91 +1601,181 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
 static void
 e1000_configure_rx(struct e1000_adapter *adapter)
 {
-       uint64_t rdba = adapter->rx_ring.dma;
-       uint32_t rdlen, rctl, rxcsum;
+       uint64_t rdba;
+       struct e1000_hw *hw = &adapter->hw;
+       uint32_t rdlen, rctl, rxcsum, ctrl_ext;
+#ifdef CONFIG_E1000_MQ
+       uint32_t reta, mrqc;
+       int i;
+#endif
 
-       if(adapter->rx_ps) {
-               rdlen = adapter->rx_ring.count *
+       if (adapter->rx_ps_pages) {
+               rdlen = adapter->rx_ring[0].count *
                        sizeof(union e1000_rx_desc_packet_split);
                adapter->clean_rx = e1000_clean_rx_irq_ps;
                adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
        } else {
-               rdlen = adapter->rx_ring.count * sizeof(struct e1000_rx_desc);
+               rdlen = adapter->rx_ring[0].count *
+                       sizeof(struct e1000_rx_desc);
                adapter->clean_rx = e1000_clean_rx_irq;
                adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
        }
 
        /* disable receives while setting up the descriptors */
-       rctl = E1000_READ_REG(&adapter->hw, RCTL);
-       E1000_WRITE_REG(&adapter->hw, RCTL, rctl & ~E1000_RCTL_EN);
+       rctl = E1000_READ_REG(hw, RCTL);
+       E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
 
        /* set the Receive Delay Timer Register */
-       E1000_WRITE_REG(&adapter->hw, RDTR, adapter->rx_int_delay);
+       E1000_WRITE_REG(hw, RDTR, adapter->rx_int_delay);
 
-       if(adapter->hw.mac_type >= e1000_82540) {
-               E1000_WRITE_REG(&adapter->hw, RADV, adapter->rx_abs_int_delay);
+       if (hw->mac_type >= e1000_82540) {
+               E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
                if(adapter->itr > 1)
-                       E1000_WRITE_REG(&adapter->hw, ITR,
+                       E1000_WRITE_REG(hw, ITR,
                                1000000000 / (adapter->itr * 256));
        }
 
-       /* Setup the Base and Length of the Rx Descriptor Ring */
-       E1000_WRITE_REG(&adapter->hw, RDBAL, (rdba & 0x00000000ffffffffULL));
-       E1000_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32));
+       if (hw->mac_type >= e1000_82571) {
+               /* Reset delay timers after every interrupt */
+               ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+               ctrl_ext |= E1000_CTRL_EXT_CANC;
+               E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+               E1000_WRITE_FLUSH(hw);
+       }
+
+       /* Setup the HW Rx Head and Tail Descriptor Pointers and
+        * the Base and Length of the Rx Descriptor Ring */
+       switch (adapter->num_queues) {
+#ifdef CONFIG_E1000_MQ
+       case 2:
+               rdba = adapter->rx_ring[1].dma;
+               E1000_WRITE_REG(hw, RDBAL1, (rdba & 0x00000000ffffffffULL));
+               E1000_WRITE_REG(hw, RDBAH1, (rdba >> 32));
+               E1000_WRITE_REG(hw, RDLEN1, rdlen);
+               E1000_WRITE_REG(hw, RDH1, 0);
+               E1000_WRITE_REG(hw, RDT1, 0);
+               adapter->rx_ring[1].rdh = E1000_RDH1;
+               adapter->rx_ring[1].rdt = E1000_RDT1;
+               /* Fall Through */
+#endif
+       case 1:
+       default:
+               rdba = adapter->rx_ring[0].dma;
+               E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
+               E1000_WRITE_REG(hw, RDBAH, (rdba >> 32));
+               E1000_WRITE_REG(hw, RDLEN, rdlen);
+               E1000_WRITE_REG(hw, RDH, 0);
+               E1000_WRITE_REG(hw, RDT, 0);
+               adapter->rx_ring[0].rdh = E1000_RDH;
+               adapter->rx_ring[0].rdt = E1000_RDT;
+               break;
+       }
+
+#ifdef CONFIG_E1000_MQ
+       if (adapter->num_queues > 1) {
+               uint32_t random[10];
+
+               get_random_bytes(&random[0], 40);
+
+               if (hw->mac_type <= e1000_82572) {
+                       E1000_WRITE_REG(hw, RSSIR, 0);
+                       E1000_WRITE_REG(hw, RSSIM, 0);
+               }
+
+               switch (adapter->num_queues) {
+               case 2:
+               default:
+                       reta = 0x00800080;
+                       mrqc = E1000_MRQC_ENABLE_RSS_2Q;
+                       break;
+               }
+
+               /* Fill out redirection table */
+               for (i = 0; i < 32; i++)
+                       E1000_WRITE_REG_ARRAY(hw, RETA, i, reta);
+               /* Fill out hash function seeds */
+               for (i = 0; i < 10; i++)
+                       E1000_WRITE_REG_ARRAY(hw, RSSRK, i, random[i]);
+
+               mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
+                        E1000_MRQC_RSS_FIELD_IPV4_TCP);
+               E1000_WRITE_REG(hw, MRQC, mrqc);
+       }
 
-       E1000_WRITE_REG(&adapter->hw, RDLEN, rdlen);
+       /* Multiqueue and packet checksumming are mutually exclusive. */
+       if (hw->mac_type >= e1000_82571) {
+               rxcsum = E1000_READ_REG(hw, RXCSUM);
+               rxcsum |= E1000_RXCSUM_PCSD;
+               E1000_WRITE_REG(hw, RXCSUM, rxcsum);
+       }
 
-       /* Setup the HW Rx Head and Tail Descriptor Pointers */
-       E1000_WRITE_REG(&adapter->hw, RDH, 0);
-       E1000_WRITE_REG(&adapter->hw, RDT, 0);
+#else
 
        /* Enable 82543 Receive Checksum Offload for TCP and UDP */
-       if(adapter->hw.mac_type >= e1000_82543) {
-               rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
+       if (hw->mac_type >= e1000_82543) {
+               rxcsum = E1000_READ_REG(hw, RXCSUM);
                if(adapter->rx_csum == TRUE) {
                        rxcsum |= E1000_RXCSUM_TUOFL;
 
-                       /* Enable 82573 IPv4 payload checksum for UDP fragments
+                       /* Enable 82571 IPv4 payload checksum for UDP fragments
                         * Must be used in conjunction with packet-split. */
-                       if((adapter->hw.mac_type > e1000_82547_rev_2) && 
-                          (adapter->rx_ps)) {
+                       if ((hw->mac_type >= e1000_82571) && 
+                          (adapter->rx_ps_pages)) {
                                rxcsum |= E1000_RXCSUM_IPPCSE;
                        }
                } else {
                        rxcsum &= ~E1000_RXCSUM_TUOFL;
                        /* don't need to clear IPPCSE as it defaults to 0 */
                }
-               E1000_WRITE_REG(&adapter->hw, RXCSUM, rxcsum);
+               E1000_WRITE_REG(hw, RXCSUM, rxcsum);
        }
+#endif /* CONFIG_E1000_MQ */
 
-       if (adapter->hw.mac_type == e1000_82573)
-               E1000_WRITE_REG(&adapter->hw, ERT, 0x0100);
+       if (hw->mac_type == e1000_82573)
+               E1000_WRITE_REG(hw, ERT, 0x0100);
 
        /* Enable Receives */
-       E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+       E1000_WRITE_REG(hw, RCTL, rctl);
 }
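
For the CONFIG_E1000_MQ path above, the redirection table is what spreads receive flows across the two rings. Each RETA register holds four one-byte entries, so writing 0x00800080 to all 32 registers alternates the 128 entries between 0x80 and 0x00. A toy model of the lookup, under the assumption (suggested by that pattern, not stated in this hunk) that the low 7 bits of the RSS hash index the table and bit 7 of the selected entry picks queue 1 over queue 0:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t reta[128];

        /* replicate the 0x00800080 pattern: with little-endian register
         * layout the bytes land as 0x80, 0x00, 0x80, 0x00, ... */
        for (int i = 0; i < 128; i++)
            reta[i] = (i & 1) ? 0x00 : 0x80;

        uint32_t sample_hashes[] = { 0x12345678, 0x0000007f, 0xdeadbeef };
        for (int k = 0; k < 3; k++) {
            uint32_t h = sample_hashes[k];
            int queue = (reta[h & 0x7f] & 0x80) ? 1 : 0;   /* assumed decode */
            printf("hash 0x%08x -> queue %d\n", h, queue);
        }
        return 0;
    }
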
 
 /**
- * e1000_free_tx_resources - Free Tx Resources
+ * e1000_free_tx_resources - Free Tx Resources per Queue
  * @adapter: board private structure
+ * @tx_ring: Tx descriptor ring for a specific queue
  *
  * Free all transmit software resources
  **/
 
 void
-e1000_free_tx_resources(struct e1000_adapter *adapter)
+e1000_free_tx_resources(struct e1000_adapter *adapter,
+                        struct e1000_tx_ring *tx_ring)
 {
        struct pci_dev *pdev = adapter->pdev;
 
-       e1000_clean_tx_ring(adapter);
+       e1000_clean_tx_ring(adapter, tx_ring);
+
+       vfree(tx_ring->buffer_info);
+       tx_ring->buffer_info = NULL;
 
-       vfree(adapter->tx_ring.buffer_info);
-       adapter->tx_ring.buffer_info = NULL;
+       pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+
+       tx_ring->desc = NULL;
+}
 
-       pci_free_consistent(pdev, adapter->tx_ring.size,
-                           adapter->tx_ring.desc, adapter->tx_ring.dma);
+/**
+ * e1000_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+
+void
+e1000_free_all_tx_resources(struct e1000_adapter *adapter)
+{
+       int i;
 
-       adapter->tx_ring.desc = NULL;
+       for (i = 0; i < adapter->num_queues; i++)
+               e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
 }
 
 static inline void
@@ -1414,21 +1798,22 @@ e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
 /**
  * e1000_clean_tx_ring - Free Tx Buffers
  * @adapter: board private structure
+ * @tx_ring: ring to be cleaned
  **/
 
 static void
-e1000_clean_tx_ring(struct e1000_adapter *adapter)
+e1000_clean_tx_ring(struct e1000_adapter *adapter,
+                    struct e1000_tx_ring *tx_ring)
 {
-       struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
        struct e1000_buffer *buffer_info;
        unsigned long size;
        unsigned int i;
 
        /* Free all the Tx ring sk_buffs */
 
-       if (likely(adapter->previous_buffer_info.skb != NULL)) {
+       if (likely(tx_ring->previous_buffer_info.skb != NULL)) {
                e1000_unmap_and_free_tx_resource(adapter,
-                               &adapter->previous_buffer_info);
+                               &tx_ring->previous_buffer_info);
        }
 
        for(i = 0; i < tx_ring->count; i++) {
@@ -1446,24 +1831,39 @@ e1000_clean_tx_ring(struct e1000_adapter *adapter)
        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
 
-       E1000_WRITE_REG(&adapter->hw, TDH, 0);
-       E1000_WRITE_REG(&adapter->hw, TDT, 0);
+       writel(0, adapter->hw.hw_addr + tx_ring->tdh);
+       writel(0, adapter->hw.hw_addr + tx_ring->tdt);
+}
+
+/**
+ * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ **/
+
+static void
+e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
+{
+       int i;
+
+       for (i = 0; i < adapter->num_queues; i++)
+               e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
 }
 
 /**
  * e1000_free_rx_resources - Free Rx Resources
  * @adapter: board private structure
+ * @rx_ring: ring to clean the resources from
  *
  * Free all receive software resources
  **/
 
 void
-e1000_free_rx_resources(struct e1000_adapter *adapter)
+e1000_free_rx_resources(struct e1000_adapter *adapter,
+                        struct e1000_rx_ring *rx_ring)
 {
-       struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
        struct pci_dev *pdev = adapter->pdev;
 
-       e1000_clean_rx_ring(adapter);
+       e1000_clean_rx_ring(adapter, rx_ring);
 
        vfree(rx_ring->buffer_info);
        rx_ring->buffer_info = NULL;
@@ -1478,14 +1878,31 @@ e1000_free_rx_resources(struct e1000_adapter *adapter)
 }
 
 /**
- * e1000_clean_rx_ring - Free Rx Buffers
+ * e1000_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+
+void
+e1000_free_all_rx_resources(struct e1000_adapter *adapter)
+{
+       int i;
+
+       for (i = 0; i < adapter->num_queues; i++)
+               e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
+}
+
+/**
+ * e1000_clean_rx_ring - Free Rx Buffers per Queue
  * @adapter: board private structure
+ * @rx_ring: ring to free buffers from
  **/
 
 static void
-e1000_clean_rx_ring(struct e1000_adapter *adapter)
+e1000_clean_rx_ring(struct e1000_adapter *adapter,
+                    struct e1000_rx_ring *rx_ring)
 {
-       struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
        struct e1000_buffer *buffer_info;
        struct e1000_ps_page *ps_page;
        struct e1000_ps_page_dma *ps_page_dma;
@@ -1508,7 +1925,7 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter)
                        dev_kfree_skb(buffer_info->skb);
                        buffer_info->skb = NULL;
 
-                       for(j = 0; j < PS_PAGE_BUFFERS; j++) {
+                       for(j = 0; j < adapter->rx_ps_pages; j++) {
                                if(!ps_page->ps_page[j]) break;
                                pci_unmap_single(pdev,
                                                 ps_page_dma->ps_page_dma[j],
@@ -1534,8 +1951,22 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter)
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
 
-       E1000_WRITE_REG(&adapter->hw, RDH, 0);
-       E1000_WRITE_REG(&adapter->hw, RDT, 0);
+       writel(0, adapter->hw.hw_addr + rx_ring->rdh);
+       writel(0, adapter->hw.hw_addr + rx_ring->rdt);
+}
+
+/**
+ * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ **/
+
+static void
+e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
+{
+       int i;
+
+       for (i = 0; i < adapter->num_queues; i++)
+               e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
 }
 
 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
@@ -1556,7 +1987,7 @@ e1000_enter_82542_rst(struct e1000_adapter *adapter)
        mdelay(5);
 
        if(netif_running(netdev))
-               e1000_clean_rx_ring(adapter);
+               e1000_clean_all_rx_rings(adapter);
 }
 
 static void
@@ -1576,7 +2007,7 @@ e1000_leave_82542_rst(struct e1000_adapter *adapter)
 
        if(netif_running(netdev)) {
                e1000_configure_rx(adapter);
-               e1000_alloc_rx_buffers(adapter);
+               e1000_alloc_rx_buffers(adapter, &adapter->rx_ring[0]);
        }
 }
 
@@ -1607,6 +2038,22 @@ e1000_set_mac(struct net_device *netdev, void *p)
 
        e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
 
+       /* With 82571 controllers, LAA may be overwritten (with the default)
+        * due to controller reset from the other port. */
+       if (adapter->hw.mac_type == e1000_82571) {
+               /* activate the work around */
+               adapter->hw.laa_is_present = 1;
+
+               /* Hold a copy of the LAA in RAR[14]. This is done so that
+                * between the time RAR[0] gets clobbered and the time it
+                * gets fixed (in e1000_watchdog), the actual LAA is in one
+                * of the RARs and no incoming packets directed to this port
+                * are dropped. Eventually the LAA will be in RAR[0] and
+                * RAR[14]. */
+               e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 
+                                       E1000_RAR_ENTRIES - 1);
+       }
+
        if(adapter->hw.mac_type == e1000_82542_rev2_0)
                e1000_leave_82542_rst(adapter);
 
@@ -1629,12 +2076,13 @@ e1000_set_multi(struct net_device *netdev)
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        struct dev_mc_list *mc_ptr;
-       unsigned long flags;
        uint32_t rctl;
        uint32_t hash_value;
-       int i;
+       int i, rar_entries = E1000_RAR_ENTRIES;
 
-       spin_lock_irqsave(&adapter->tx_lock, flags);
+       /* reserve RAR[14] for LAA over-write work-around */
+       if (adapter->hw.mac_type == e1000_82571)
+               rar_entries--;
 
        /* Check for Promiscuous and All Multicast modes */
 
@@ -1659,11 +2107,12 @@ e1000_set_multi(struct net_device *netdev)
        /* load the first 14 multicast addresses into the exact filters 1-14
         * RAR 0 is used for the station MAC address
         * if there are not 14 addresses, go ahead and clear the filters
+        * -- with 82571 controllers only 0-13 entries are filled here
         */
        mc_ptr = netdev->mc_list;
 
-       for(i = 1; i < E1000_RAR_ENTRIES; i++) {
-               if(mc_ptr) {
+       for(i = 1; i < rar_entries; i++) {
+               if (mc_ptr) {
                        e1000_rar_set(hw, mc_ptr->dmi_addr, i);
                        mc_ptr = mc_ptr->next;
                } else {
@@ -1686,8 +2135,6 @@ e1000_set_multi(struct net_device *netdev)
 
        if(hw->mac_type == e1000_82542_rev2_0)
                e1000_leave_82542_rst(adapter);
-
-       spin_unlock_irqrestore(&adapter->tx_lock, flags);
 }
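
The rar_entries adjustment above shrinks the pool of exact-match receive address registers by one on 82571, keeping the last RAR free for the LAA copy set up in e1000_set_mac(); multicast addresses that do not fit in the remaining entries fall back to the hash table. A compact sketch of that partitioning (rar_set, mta_set and the 15-entry count are written out here for illustration and stand in for the driver's e1000_rar_set() and hash path):

    #include <stdio.h>

    #define RAR_ENTRIES 15                /* RAR[0] holds the station address */

    struct mc_addr { unsigned char addr[6]; struct mc_addr *next; };

    static void rar_set(const unsigned char *a, int idx)
    { printf("exact filter RAR[%d] <- %02x:...:%02x\n", idx, a[0], a[5]); }
    static void mta_set(const unsigned char *a)
    { printf("hash table   MTA    <- %02x:...:%02x\n", a[0], a[5]); }

    static void load_filters(struct mc_addr *list, int reserve_last_rar)
    {
        int rar_entries = RAR_ENTRIES;

        if (reserve_last_rar)             /* the 82571 LAA work-around */
            rar_entries--;

        for (int i = 1; i < rar_entries; i++) {
            if (list) {
                rar_set(list->addr, i);
                list = list->next;
            }
            /* the real driver clears unused entries here */
        }
        for (; list; list = list->next)   /* overflow goes to the hash table */
            mta_set(list->addr);
    }

    int main(void)
    {
        struct mc_addr a = { { 1, 2, 3, 4, 5, 6 }, NULL };
        struct mc_addr b = { { 7, 8, 9, 10, 11, 12 }, &a };

        load_filters(&b, 1);
        return 0;
    }
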
 
 /* Need to wait a few seconds after link up to get diagnostic information from
@@ -1759,7 +2206,7 @@ static void
 e1000_watchdog_task(struct e1000_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
-       struct e1000_desc_ring *txdr = &adapter->tx_ring;
+       struct e1000_tx_ring *txdr = &adapter->tx_ring[0];
        uint32_t link;
 
        e1000_check_for_link(&adapter->hw);
@@ -1818,8 +2265,8 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
 
        e1000_update_adaptive(&adapter->hw);
 
-       if(!netif_carrier_ok(netdev)) {
-               if(E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
+       if (adapter->num_queues == 1 && !netif_carrier_ok(netdev)) {
+               if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
                        /* We've lost link, so the controller stops DMA,
                         * but we've got queued Tx work that's never going
                         * to get done, so reset controller to flush Tx.
@@ -1847,6 +2294,11 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
        /* Force detection of hung controller every watchdog period */
        adapter->detect_tx_hung = TRUE;
 
+       /* With 82571 controllers, LAA may be overwritten due to controller 
+        * reset from the other port. Set the appropriate LAA in RAR[0] */
+       if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present)
+               e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
+
        /* Reset the timer */
        mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
 }
@@ -1859,7 +2311,8 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
 #define E1000_TX_FLAGS_VLAN_SHIFT      16
 
 static inline int
-e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
+e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
+          struct sk_buff *skb)
 {
 #ifdef NETIF_F_TSO
        struct e1000_context_desc *context_desc;
@@ -1910,8 +2363,8 @@ e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
                cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
                               E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
 
-               i = adapter->tx_ring.next_to_use;
-               context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i);
+               i = tx_ring->next_to_use;
+               context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
 
                context_desc->lower_setup.ip_fields.ipcss  = ipcss;
                context_desc->lower_setup.ip_fields.ipcso  = ipcso;
@@ -1923,8 +2376,8 @@ e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
                context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
                context_desc->cmd_and_length = cpu_to_le32(cmd_length);
 
-               if(++i == adapter->tx_ring.count) i = 0;
-               adapter->tx_ring.next_to_use = i;
+               if (++i == tx_ring->count) i = 0;
+               tx_ring->next_to_use = i;
 
                return 1;
        }
@@ -1934,7 +2387,8 @@ e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
 }
 
 static inline boolean_t
-e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
+e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
+              struct sk_buff *skb)
 {
        struct e1000_context_desc *context_desc;
        unsigned int i;
@@ -1943,8 +2397,8 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
        if(likely(skb->ip_summed == CHECKSUM_HW)) {
                css = skb->h.raw - skb->data;
 
-               i = adapter->tx_ring.next_to_use;
-               context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i);
+               i = tx_ring->next_to_use;
+               context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
 
                context_desc->upper_setup.tcp_fields.tucss = css;
                context_desc->upper_setup.tcp_fields.tucso = css + skb->csum;
@@ -1952,8 +2406,8 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
                context_desc->tcp_seg_setup.data = 0;
                context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
 
-               if(unlikely(++i == adapter->tx_ring.count)) i = 0;
-               adapter->tx_ring.next_to_use = i;
+               if (unlikely(++i == tx_ring->count)) i = 0;
+               tx_ring->next_to_use = i;
 
                return TRUE;
        }
@@ -1965,11 +2419,10 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
 #define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
 
 static inline int
-e1000_tx_map(struct e1000_adapter *adapter, struct sk_buff *skb,
-       unsigned int first, unsigned int max_per_txd,
-       unsigned int nr_frags, unsigned int mss)
+e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
+             struct sk_buff *skb, unsigned int first, unsigned int max_per_txd,
+             unsigned int nr_frags, unsigned int mss)
 {
-       struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
        struct e1000_buffer *buffer_info;
        unsigned int len = skb->len;
        unsigned int offset = 0, size, count = 0, i;
@@ -2065,9 +2518,9 @@ e1000_tx_map(struct e1000_adapter *adapter, struct sk_buff *skb,
 }
 
 static inline void
-e1000_tx_queue(struct e1000_adapter *adapter, int count, int tx_flags)
+e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
+               int tx_flags, int count)
 {
-       struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
        struct e1000_tx_desc *tx_desc = NULL;
        struct e1000_buffer *buffer_info;
        uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
@@ -2113,7 +2566,7 @@ e1000_tx_queue(struct e1000_adapter *adapter, int count, int tx_flags)
        wmb();
 
        tx_ring->next_to_use = i;
-       E1000_WRITE_REG(&adapter->hw, TDT, i);
+       writel(i, adapter->hw.hw_addr + tx_ring->tdt);
 }
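
The tail update above moves from the TDT register macro to a writel() on the per-ring tdt offset, and the wmb() just before it is what keeps the descriptor writes ordered ahead of the doorbell. A stripped-down, user-space model of the producer side of such a ring, with plain variables standing in for the MMIO tail register and the barrier, and ignoring the free-space check that the caller performs:

    #include <stdio.h>

    #define RING_COUNT 8u                 /* descriptor count; a toy value */

    struct desc { unsigned long addr; unsigned int len; };

    static struct desc ring[RING_COUNT];
    static unsigned int next_to_use;      /* producer index */
    static unsigned int hw_tail;          /* stands in for the TDT register */

    static void queue_buffer(unsigned long addr, unsigned int len)
    {
        unsigned int i = next_to_use;

        ring[i].addr = addr;              /* 1. fill the descriptor */
        ring[i].len  = len;

        if (++i == RING_COUNT)            /* 2. advance and wrap the index */
            i = 0;
        next_to_use = i;

        /* 3. in the driver a wmb() sits here so the descriptor is visible
         *    before the doorbell; then writel(i, ...tdt) moves the tail */
        hw_tail = i;
    }

    int main(void)
    {
        for (unsigned int n = 0; n < 10; n++)
            queue_buffer(0x1000ul * n, 64);
        printf("next_to_use = %u, tail = %u\n", next_to_use, hw_tail);
        return 0;
    }
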
 
 /**
@@ -2206,6 +2659,7 @@ static int
 e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
+       struct e1000_tx_ring *tx_ring;
        unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
        unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
        unsigned int tx_flags = 0;
@@ -2218,7 +2672,13 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        unsigned int f;
        len -= skb->data_len;
 
-       if(unlikely(skb->len <= 0)) {
+#ifdef CONFIG_E1000_MQ
+       tx_ring = *per_cpu_ptr(adapter->cpu_tx_ring, smp_processor_id());
+#else
+       tx_ring = adapter->tx_ring;
+#endif
+
+       if (unlikely(skb->len <= 0)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
@@ -2262,21 +2722,42 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        if(adapter->pcix_82544)
                count += nr_frags;
 
-       local_irq_save(flags); 
-       if (!spin_trylock(&adapter->tx_lock)) { 
-               /* Collision - tell upper layer to requeue */ 
-               local_irq_restore(flags); 
-               return NETDEV_TX_LOCKED; 
-       } 
+#ifdef NETIF_F_TSO
+       /* TSO Workaround for 82571/2 Controllers -- if skb->data
+        * points to just header, pull a few bytes of payload from 
+        * frags into skb->data */
+       if (skb_shinfo(skb)->tso_size) {
+               uint8_t hdr_len;
+               hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
+               if (skb->data_len && (hdr_len < (skb->len - skb->data_len)) && 
+                       (adapter->hw.mac_type == e1000_82571 ||
+                       adapter->hw.mac_type == e1000_82572)) {
+                       unsigned int pull_size;
+                       pull_size = min((unsigned int)4, skb->data_len);
+                       if (!__pskb_pull_tail(skb, pull_size)) {
+                               printk(KERN_ERR "__pskb_pull_tail failed.\n");
+                               dev_kfree_skb_any(skb);
+                               return -EFAULT;
+                       }
+               }
+       }
+#endif
+
        if(adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) )
                e1000_transfer_dhcp_info(adapter, skb);
 
+       local_irq_save(flags);
+       if (!spin_trylock(&tx_ring->tx_lock)) {
+               /* Collision - tell upper layer to requeue */
+               local_irq_restore(flags);
+               return NETDEV_TX_LOCKED;
+       }
 
        /* need: count + 2 desc gap to keep tail from touching
         * head, otherwise try next time */
-       if(unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < count + 2)) {
+       if (unlikely(E1000_DESC_UNUSED(tx_ring) < count + 2)) {
                netif_stop_queue(netdev);
-               spin_unlock_irqrestore(&adapter->tx_lock, flags);
+               spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
                return NETDEV_TX_BUSY;
        }
 
@@ -2284,7 +2765,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
                if(unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
                        netif_stop_queue(netdev);
                        mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
-                       spin_unlock_irqrestore(&adapter->tx_lock, flags);
+                       spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
                        return NETDEV_TX_BUSY;
                }
        }
@@ -2294,37 +2775,37 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
                tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
        }
 
-       first = adapter->tx_ring.next_to_use;
+       first = tx_ring->next_to_use;
        
-       tso = e1000_tso(adapter, skb);
+       tso = e1000_tso(adapter, tx_ring, skb);
        if (tso < 0) {
                dev_kfree_skb_any(skb);
-               spin_unlock_irqrestore(&adapter->tx_lock, flags);
+               spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
                return NETDEV_TX_OK;
        }
 
        if (likely(tso))
                tx_flags |= E1000_TX_FLAGS_TSO;
-       else if(likely(e1000_tx_csum(adapter, skb)))
+       else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
                tx_flags |= E1000_TX_FLAGS_CSUM;
 
        /* Old method was to assume IPv4 packet by default if TSO was enabled.
-        * 82573 hardware supports TSO capabilities for IPv6 as well...
+        * 82571 hardware supports TSO capabilities for IPv6 as well...
         * no longer assume, we must. */
-       if(likely(skb->protocol == ntohs(ETH_P_IP)))
+       if (likely(skb->protocol == ntohs(ETH_P_IP)))
                tx_flags |= E1000_TX_FLAGS_IPV4;
 
-       e1000_tx_queue(adapter,
-               e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss),
-               tx_flags);
+       e1000_tx_queue(adapter, tx_ring, tx_flags,
+                      e1000_tx_map(adapter, tx_ring, skb, first,
+                                   max_per_txd, nr_frags, mss));
 
        netdev->trans_start = jiffies;
 
        /* Make sure there is space in the ring for the next send. */
-       if(unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < MAX_SKB_FRAGS + 2))
+       if (unlikely(E1000_DESC_UNUSED(tx_ring) < MAX_SKB_FRAGS + 2))
                netif_stop_queue(netdev);
 
-       spin_unlock_irqrestore(&adapter->tx_lock, flags);
+       spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
        return NETDEV_TX_OK;
 }
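
The "count + 2" headroom test earlier in this function relies on E1000_DESC_UNUSED(), which derives the number of free descriptors from the ring's clean and use indices. The macro itself is not part of this hunk, so the version below is a reconstruction consistent with how it is used here: one slot is always left empty so that a full ring can be told apart from an empty one.

    #include <stdio.h>

    struct ring_idx { unsigned int count, next_to_use, next_to_clean; };

    /* free slots between the producer (next_to_use) and the consumer
     * (next_to_clean), always keeping one slot unused */
    static unsigned int desc_unused(const struct ring_idx *r)
    {
        return ((r->next_to_clean > r->next_to_use) ? 0 : r->count)
               + r->next_to_clean - r->next_to_use - 1;
    }

    int main(void)
    {
        struct ring_idx r = { .count = 256, .next_to_use = 10, .next_to_clean = 4 };
        printf("unused = %u\n", desc_unused(&r));   /* 256 + 4 - 10 - 1 = 249 */

        r.next_to_use = 4;
        r.next_to_clean = 10;
        printf("unused = %u\n", desc_unused(&r));   /* 10 - 4 - 1 = 5 */
        return 0;
    }
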
 
@@ -2388,9 +2869,18 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
                        return -EINVAL;
        }
 
-#define MAX_STD_JUMBO_FRAME_SIZE 9216
+#define MAX_STD_JUMBO_FRAME_SIZE 9234
        /* might want this to be bigger enum check... */
-       if (adapter->hw.mac_type == e1000_82573 &&
+       /* 82571 controllers limit jumbo frame size to 10500 bytes */
+       if ((adapter->hw.mac_type == e1000_82571 || 
+            adapter->hw.mac_type == e1000_82572) &&
+           max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
+               DPRINTK(PROBE, ERR, "MTU > 9216 bytes not supported "
+                                   "on 82571 and 82572 controllers.\n");
+               return -EINVAL;
+       }
+
+       if(adapter->hw.mac_type == e1000_82573 &&
            max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
                DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
                                    "on 82573\n");
@@ -2578,6 +3068,29 @@ e1000_update_stats(struct e1000_adapter *adapter)
        spin_unlock_irqrestore(&adapter->stats_lock, flags);
 }
 
+#ifdef CONFIG_E1000_MQ
+void
+e1000_rx_schedule(void *data)
+{
+       struct net_device *poll_dev, *netdev = data;
+       struct e1000_adapter *adapter = netdev->priv;
+       int this_cpu = get_cpu();
+
+       poll_dev = *per_cpu_ptr(adapter->cpu_netdev, this_cpu);
+       if (poll_dev == NULL) {
+               put_cpu();
+               return;
+       }
+
+       if (likely(netif_rx_schedule_prep(poll_dev)))
+               __netif_rx_schedule(poll_dev);
+       else
+               e1000_irq_enable(adapter);
+
+       put_cpu();
+}
+#endif
+
 /**
  * e1000_intr - Interrupt Handler
  * @irq: interrupt number
@@ -2592,8 +3105,8 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        uint32_t icr = E1000_READ_REG(hw, ICR);
-#ifndef CONFIG_E1000_NAPI
-       unsigned int i;
+#if defined(CONFIG_E1000_NAPI) && defined(CONFIG_E1000_MQ) || !defined(CONFIG_E1000_NAPI)
+       int i;
 #endif
 
        if(unlikely(!icr))
@@ -2605,17 +3118,31 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
        }
 
 #ifdef CONFIG_E1000_NAPI
-       if(likely(netif_rx_schedule_prep(netdev))) {
-
-               /* Disable interrupts and register for poll. The flush 
-                 of the posted write is intentionally left out.
-               */
-
-               atomic_inc(&adapter->irq_sem);
-               E1000_WRITE_REG(hw, IMC, ~0);
-               __netif_rx_schedule(netdev);
+       atomic_inc(&adapter->irq_sem);
+       E1000_WRITE_REG(hw, IMC, ~0);
+       E1000_WRITE_FLUSH(hw);
+#ifdef CONFIG_E1000_MQ
+       if (atomic_read(&adapter->rx_sched_call_data.count) == 0) {
+               cpu_set(adapter->cpu_for_queue[0],
+                       adapter->rx_sched_call_data.cpumask);
+               for (i = 1; i < adapter->num_queues; i++) {
+                       cpu_set(adapter->cpu_for_queue[i],
+                               adapter->rx_sched_call_data.cpumask);
+                       atomic_inc(&adapter->irq_sem);
+               }
+               atomic_set(&adapter->rx_sched_call_data.count, i);
+               smp_call_async_mask(&adapter->rx_sched_call_data);
+       } else {
+               printk("call_data.count == %u\n", atomic_read(&adapter->rx_sched_call_data.count));
        }
-#else
+#else /* if !CONFIG_E1000_MQ */
+       if (likely(netif_rx_schedule_prep(&adapter->polling_netdev[0])))
+               __netif_rx_schedule(&adapter->polling_netdev[0]);
+       else
+               e1000_irq_enable(adapter);
+#endif /* CONFIG_E1000_MQ */
+
+#else /* if !CONFIG_E1000_NAPI */
        /* Writing IMC and IMS is needed for 82547.
           Due to Hub Link bus being occupied, an interrupt
           de-assertion message is not able to be sent.
@@ -2632,13 +3159,14 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
        }
 
        for(i = 0; i < E1000_MAX_INTR; i++)
-               if(unlikely(!adapter->clean_rx(adapter) &
-                  !e1000_clean_tx_irq(adapter)))
+               if(unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
+                  !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
                        break;
 
        if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
                e1000_irq_enable(adapter);
-#endif
+
+#endif /* CONFIG_E1000_NAPI */
 
        return IRQ_HANDLED;
 }
@@ -2650,22 +3178,37 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
  **/
 
 static int
-e1000_clean(struct net_device *netdev, int *budget)
+e1000_clean(struct net_device *poll_dev, int *budget)
 {
-       struct e1000_adapter *adapter = netdev_priv(netdev);
-       int work_to_do = min(*budget, netdev->quota);
-       int tx_cleaned;
-       int work_done = 0;
+       struct e1000_adapter *adapter;
+       int work_to_do = min(*budget, poll_dev->quota);
+       int tx_cleaned, i = 0, work_done = 0;
 
-       tx_cleaned = e1000_clean_tx_irq(adapter);
-       adapter->clean_rx(adapter, &work_done, work_to_do);
+       /* Must NOT use netdev_priv macro here. */
+       adapter = poll_dev->priv;
+
+       /* Keep link state information with original netdev */
+       if (!netif_carrier_ok(adapter->netdev))
+               goto quit_polling;
+
+       while (poll_dev != &adapter->polling_netdev[i]) {
+               i++;
+               if (unlikely(i == adapter->num_queues))
+                       BUG();
+       }
+
+       tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]);
+       adapter->clean_rx(adapter, &adapter->rx_ring[i],
+                         &work_done, work_to_do);
 
        *budget -= work_done;
-       netdev->quota -= work_done;
+       poll_dev->quota -= work_done;
        
-       if ((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) {
        /* If no Tx and not enough Rx work done, exit the polling mode */
-               netif_rx_complete(netdev);
+       if((!tx_cleaned && (work_done == 0)) ||
+          !netif_running(adapter->netdev)) {
+quit_polling:
+               netif_rx_complete(poll_dev);
                e1000_irq_enable(adapter);
                return 0;
        }
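
The reworked poll routine above charges its progress to both the global *budget and the per-device quota, and leaves polling mode only when a pass finds no Tx completions and no Rx work. A small stand-alone model of that bookkeeping; rx_service() is a made-up stand-in for the clean_rx handler, and the quota refresh is simplified compared to what the networking core really does:

    #include <stdio.h>

    static int backlog = 130;                 /* pretend packets waiting */

    /* process up to 'limit' packets, return how many were handled */
    static int rx_service(int limit)
    {
        int done = backlog < limit ? backlog : limit;
        backlog -= done;
        return done;
    }

    int main(void)
    {
        int budget = 300, quota = 64;

        for (;;) {
            int work_to_do = budget < quota ? budget : quota;
            int work_done  = rx_service(work_to_do);

            budget -= work_done;              /* charge the global budget */
            quota  -= work_done;              /* and this device's quota  */

            if (work_done == 0) {             /* no progress: leave poll mode */
                printf("done, budget left %d\n", budget);
                return 0;
            }
            if (budget <= 0) {
                printf("budget exhausted, stay in poll mode\n");
                return 0;
            }
            quota = 64;                       /* simplification: refilled here */
        }
    }
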
@@ -2680,9 +3223,9 @@ e1000_clean(struct net_device *netdev, int *budget)
  **/
 
 static boolean_t
-e1000_clean_tx_irq(struct e1000_adapter *adapter)
+e1000_clean_tx_irq(struct e1000_adapter *adapter,
+                   struct e1000_tx_ring *tx_ring)
 {
-       struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
        struct net_device *netdev = adapter->netdev;
        struct e1000_tx_desc *tx_desc, *eop_desc;
        struct e1000_buffer *buffer_info;
@@ -2693,12 +3236,12 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
        eop = tx_ring->buffer_info[i].next_to_watch;
        eop_desc = E1000_TX_DESC(*tx_ring, eop);
 
-       while(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
+       while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
                /* Premature writeback of Tx descriptors clear (free buffers
                 * and unmap pci_mapping) previous_buffer_info */
-               if (likely(adapter->previous_buffer_info.skb != NULL)) {
+               if (likely(tx_ring->previous_buffer_info.skb != NULL)) {
                        e1000_unmap_and_free_tx_resource(adapter,
-                                       &adapter->previous_buffer_info);
+                                       &tx_ring->previous_buffer_info);
                }
 
                for(cleaned = FALSE; !cleaned; ) {
@@ -2714,7 +3257,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
 #ifdef NETIF_F_TSO
                        } else {
                                if (cleaned) {
-                                       memcpy(&adapter->previous_buffer_info,
+                                       memcpy(&tx_ring->previous_buffer_info,
                                               buffer_info,
                                               sizeof(struct e1000_buffer));
                                        memset(buffer_info, 0,
@@ -2732,6 +3275,8 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
 
                        if(unlikely(++i == tx_ring->count)) i = 0;
                }
+
+               tx_ring->pkt++;
                
                eop = tx_ring->buffer_info[i].next_to_watch;
                eop_desc = E1000_TX_DESC(*tx_ring, eop);
@@ -2739,15 +3284,15 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
 
        tx_ring->next_to_clean = i;
 
-       spin_lock(&adapter->tx_lock);
+       spin_lock(&tx_ring->tx_lock);
 
        if(unlikely(cleaned && netif_queue_stopped(netdev) &&
                    netif_carrier_ok(netdev)))
                netif_wake_queue(netdev);
 
-       spin_unlock(&adapter->tx_lock);
-       if(adapter->detect_tx_hung) {
+       spin_unlock(&tx_ring->tx_lock);
 
+       if (adapter->detect_tx_hung) {
                /* Detect a transmit hang in hardware, this serializes the
                 * check with the clearing of time_stamp and movement of i */
                adapter->detect_tx_hung = FALSE;
@@ -2771,8 +3316,8 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
                                        "  next_to_watch        <%x>\n"
                                        "  jiffies              <%lx>\n"
                                        "  next_to_watch.status <%x>\n",
-                               E1000_READ_REG(&adapter->hw, TDH),
-                               E1000_READ_REG(&adapter->hw, TDT),
+                               readl(adapter->hw.hw_addr + tx_ring->tdh),
+                               readl(adapter->hw.hw_addr + tx_ring->tdt),
                                tx_ring->next_to_use,
                                i,
                                (unsigned long long)tx_ring->buffer_info[i].dma,
@@ -2784,12 +3329,10 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
                }
        }
 #ifdef NETIF_F_TSO
-
-       if( unlikely(!(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
-           time_after(jiffies, adapter->previous_buffer_info.time_stamp + HZ)))
+       if (unlikely(!(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
+           time_after(jiffies, tx_ring->previous_buffer_info.time_stamp + HZ)))
                e1000_unmap_and_free_tx_resource(
-                   adapter, &adapter->previous_buffer_info);
-
+                   adapter, &tx_ring->previous_buffer_info);
 #endif
        return cleaned;
 }
@@ -2852,13 +3395,14 @@ e1000_rx_checksum(struct e1000_adapter *adapter,
 
 static boolean_t
 #ifdef CONFIG_E1000_NAPI
-e1000_clean_rx_irq(struct e1000_adapter *adapter, int *work_done,
-                   int work_to_do)
+e1000_clean_rx_irq(struct e1000_adapter *adapter,
+                   struct e1000_rx_ring *rx_ring,
+                   int *work_done, int work_to_do)
 #else
-e1000_clean_rx_irq(struct e1000_adapter *adapter)
+e1000_clean_rx_irq(struct e1000_adapter *adapter,
+                   struct e1000_rx_ring *rx_ring)
 #endif
 {
-       struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        struct e1000_rx_desc *rx_desc;
@@ -2944,6 +3488,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter)
                }
 #endif /* CONFIG_E1000_NAPI */
                netdev->last_rx = jiffies;
+               rx_ring->pkt++;
 
 next_desc:
                rx_desc->status = 0;
@@ -2953,7 +3498,7 @@ next_desc:
                rx_desc = E1000_RX_DESC(*rx_ring, i);
        }
        rx_ring->next_to_clean = i;
-       adapter->alloc_rx_buf(adapter);
+       adapter->alloc_rx_buf(adapter, rx_ring);
 
        return cleaned;
 }
@@ -2965,13 +3510,14 @@ next_desc:
 
 static boolean_t
 #ifdef CONFIG_E1000_NAPI
-e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, int *work_done,
-                      int work_to_do)
+e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
+                      struct e1000_rx_ring *rx_ring,
+                      int *work_done, int work_to_do)
 #else
-e1000_clean_rx_irq_ps(struct e1000_adapter *adapter)
+e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
+                      struct e1000_rx_ring *rx_ring)
 #endif
 {
-       struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
        union e1000_rx_desc_packet_split *rx_desc;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
@@ -3027,7 +3573,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter)
                /* Good Receive */
                skb_put(skb, length);
 
-               for(j = 0; j < PS_PAGE_BUFFERS; j++) {
+               for(j = 0; j < adapter->rx_ps_pages; j++) {
                        if(!(length = le16_to_cpu(rx_desc->wb.upper.length[j])))
                                break;
 
@@ -3048,11 +3594,13 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter)
                                  rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
                skb->protocol = eth_type_trans(skb, netdev);
 
-#ifdef HAVE_RX_ZERO_COPY
                if(likely(rx_desc->wb.upper.header_status &
-                         E1000_RXDPS_HDRSTAT_HDRSP))
+                         E1000_RXDPS_HDRSTAT_HDRSP)) {
+                       adapter->rx_hdr_split++;
+#ifdef HAVE_RX_ZERO_COPY
                        skb_shinfo(skb)->zero_copy = TRUE;
 #endif
+               }
 #ifdef CONFIG_E1000_NAPI
                if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
                        vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
@@ -3071,6 +3619,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter)
                }
 #endif /* CONFIG_E1000_NAPI */
                netdev->last_rx = jiffies;
+               rx_ring->pkt++;
 
 next_desc:
                rx_desc->wb.middle.status_error &= ~0xFF;
@@ -3081,7 +3630,7 @@ next_desc:
                staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
        }
        rx_ring->next_to_clean = i;
-       adapter->alloc_rx_buf(adapter);
+       adapter->alloc_rx_buf(adapter, rx_ring);
 
        return cleaned;
 }
@@ -3092,9 +3641,9 @@ next_desc:
  **/
 
 static void
-e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
+e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
+                       struct e1000_rx_ring *rx_ring)
 {
-       struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        struct e1000_rx_desc *rx_desc;
@@ -3178,7 +3727,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
                         * applicable for weak-ordered memory model archs,
                         * such as IA-64). */
                        wmb();
-                       E1000_WRITE_REG(&adapter->hw, RDT, i);
+                       writel(i, adapter->hw.hw_addr + rx_ring->rdt);
                }
 
                if(unlikely(++i == rx_ring->count)) i = 0;
@@ -3194,9 +3743,9 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
  **/
 
 static void
-e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter)
+e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
+                          struct e1000_rx_ring *rx_ring)
 {
-       struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        union e1000_rx_desc_packet_split *rx_desc;
@@ -3215,22 +3764,26 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter)
                rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
 
                for(j = 0; j < PS_PAGE_BUFFERS; j++) {
-                       if(unlikely(!ps_page->ps_page[j])) {
-                               ps_page->ps_page[j] =
-                                       alloc_page(GFP_ATOMIC);
-                               if(unlikely(!ps_page->ps_page[j]))
-                                       goto no_buffers;
-                               ps_page_dma->ps_page_dma[j] =
-                                       pci_map_page(pdev,
-                                                    ps_page->ps_page[j],
-                                                    0, PAGE_SIZE,
-                                                    PCI_DMA_FROMDEVICE);
-                       }
-                       /* Refresh the desc even if buffer_addrs didn't
-                        * change because each write-back erases this info.
-                        */
-                       rx_desc->read.buffer_addr[j+1] =
-                               cpu_to_le64(ps_page_dma->ps_page_dma[j]);
+                       if (j < adapter->rx_ps_pages) {
+                               if (likely(!ps_page->ps_page[j])) {
+                                       ps_page->ps_page[j] =
+                                               alloc_page(GFP_ATOMIC);
+                                       if (unlikely(!ps_page->ps_page[j]))
+                                               goto no_buffers;
+                                       ps_page_dma->ps_page_dma[j] =
+                                               pci_map_page(pdev,
+                                                           ps_page->ps_page[j],
+                                                           0, PAGE_SIZE,
+                                                           PCI_DMA_FROMDEVICE);
+                               }
+                               /* Refresh the desc even if buffer_addrs didn't
+                                * change because each write-back erases 
+                                * this info.
+                                */
+                               rx_desc->read.buffer_addr[j+1] =
+                                    cpu_to_le64(ps_page_dma->ps_page_dma[j]);
+                       } else
+                               rx_desc->read.buffer_addr[j+1] = ~0;
                }
 
                skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN);
@@ -3264,7 +3817,7 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter)
                         * descriptors are 32 bytes...so we increment tail
                         * twice as much.
                         */
-                       E1000_WRITE_REG(&adapter->hw, RDT, i<<1);
+                       writel(i<<1, adapter->hw.hw_addr + rx_ring->rdt);
                }
 
                if(unlikely(++i == rx_ring->count)) i = 0;
@@ -3715,6 +4268,12 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
        }
 
        switch(adapter->hw.mac_type) {
+       case e1000_82571:
+       case e1000_82572:
+               ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+               E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
+                               ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+               break;
        case e1000_82573:
                swsm = E1000_READ_REG(&adapter->hw, SWSM);
                E1000_WRITE_REG(&adapter->hw, SWSM,
@@ -3737,6 +4296,7 @@ e1000_resume(struct pci_dev *pdev)
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
        uint32_t manc, ret_val, swsm;
+       uint32_t ctrl_ext;
 
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
@@ -3762,6 +4322,12 @@ e1000_resume(struct pci_dev *pdev)
        }
 
        switch(adapter->hw.mac_type) {
+       case e1000_82571:
+       case e1000_82572:
+               ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+               E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
+                               ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+               break;
        case e1000_82573:
                swsm = E1000_READ_REG(&adapter->hw, SWSM);
                E1000_WRITE_REG(&adapter->hw, SWSM,
@@ -3786,7 +4352,7 @@ e1000_netpoll(struct net_device *netdev)
        struct e1000_adapter *adapter = netdev_priv(netdev);
        disable_irq(adapter->pdev->irq);
        e1000_intr(adapter->pdev->irq, netdev, NULL);
-       e1000_clean_tx_irq(adapter);
+       e1000_clean_tx_irq(adapter, adapter->tx_ring);
        enable_irq(adapter->pdev->irq);
 }
 #endif
index 676247f9f1cca054967d604547f005d1340b5eee..38695d5b46377ca035cff87e47fd32c9d5f69015 100644
@@ -306,7 +306,8 @@ e1000_check_options(struct e1000_adapter *adapter)
                        .def  = E1000_DEFAULT_TXD,
                        .arg  = { .r = { .min = E1000_MIN_TXD }}
                };
-               struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
+               struct e1000_tx_ring *tx_ring = adapter->tx_ring;
+               int i;
                e1000_mac_type mac_type = adapter->hw.mac_type;
                opt.arg.r.max = mac_type < e1000_82544 ?
                        E1000_MAX_TXD : E1000_MAX_82544_TXD;
@@ -319,6 +320,8 @@ e1000_check_options(struct e1000_adapter *adapter)
                } else {
                        tx_ring->count = opt.def;
                }
+               for (i = 0; i < adapter->num_queues; i++)
+                       tx_ring[i].count = tx_ring->count;
        }
        { /* Receive Descriptor Count */
                struct e1000_option opt = {
@@ -329,7 +332,8 @@ e1000_check_options(struct e1000_adapter *adapter)
                        .def  = E1000_DEFAULT_RXD,
                        .arg  = { .r = { .min = E1000_MIN_RXD }}
                };
-               struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
+               struct e1000_rx_ring *rx_ring = adapter->rx_ring;
+               int i;
                e1000_mac_type mac_type = adapter->hw.mac_type;
                opt.arg.r.max = mac_type < e1000_82544 ? E1000_MAX_RXD :
                        E1000_MAX_82544_RXD;
@@ -342,6 +346,8 @@ e1000_check_options(struct e1000_adapter *adapter)
                } else {
                        rx_ring->count = opt.def;
                }
+               for (i = 0; i < adapter->num_queues; i++)
+                       rx_ring[i].count = rx_ring->count;
        }
        { /* Checksum Offload Enable/Disable */
                struct e1000_option opt = {
index 87f522738bfcb718b63526450844f696bd750181..f119ec4e89ea3702e22f15f3dfda948e61f9bad3 100644
@@ -1334,7 +1334,7 @@ static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
 static int epic_poll(struct net_device *dev, int *budget)
 {
        struct epic_private *ep = dev->priv;
-       int work_done, orig_budget;
+       int work_done = 0, orig_budget;
        long ioaddr = dev->base_addr;
 
        orig_budget = (*budget > dev->quota) ? dev->quota : *budget;
@@ -1343,7 +1343,7 @@ rx_action:
 
        epic_tx(dev, ep);
 
-       work_done = epic_rx(dev, *budget);
+       work_done += epic_rx(dev, *budget);
 
        epic_rx_err(dev, ep);
 
index d6eefdb71c174889b6491635121fe33f9b789594..22aec6ed80f56b7dcfd547cf5d5f149fc8310fb2 100644
@@ -95,6 +95,8 @@
  *                        of nv_remove
  *      0.42: 06 Aug 2005: Fix lack of link speed initialization
  *                        in the second (and later) nv_open call
+ *      0.43: 10 Aug 2005: Add support for tx checksum.
+ *      0.44: 20 Aug 2005: Add support for scatter gather and segmentation.
  *
  * Known bugs:
  * We suspect that on some hardware no TX done interrupts are generated.
  * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
  * superfluous timer interrupts from the nic.
  */
-#define FORCEDETH_VERSION              "0.41"
+#define FORCEDETH_VERSION              "0.44"
 #define DRV_NAME                       "forcedeth"
 
 #include <linux/module.h>
 #define DEV_NEED_LINKTIMER     0x0002  /* poll link settings. Relies on the timer irq */
 #define DEV_HAS_LARGEDESC      0x0004  /* device supports jumbo frames and needs packet format 2 */
 #define DEV_HAS_HIGH_DMA        0x0008  /* device supports 64bit dma */
+#define DEV_HAS_CHECKSUM        0x0010  /* device supports tx and rx checksum offloads */
 
 enum {
        NvRegIrqStatus = 0x000,
@@ -241,6 +244,9 @@ enum {
 #define NVREG_TXRXCTL_IDLE     0x0008
 #define NVREG_TXRXCTL_RESET    0x0010
 #define NVREG_TXRXCTL_RXCHECK  0x0400
+#define NVREG_TXRXCTL_DESC_1   0
+#define NVREG_TXRXCTL_DESC_2   0x02100
+#define NVREG_TXRXCTL_DESC_3   0x02200
        NvRegMIIStatus = 0x180,
 #define NVREG_MIISTAT_ERROR            0x0001
 #define NVREG_MIISTAT_LINKCHANGE       0x0008
@@ -335,6 +341,10 @@ typedef union _ring_type {
 /* error and valid are the same for both */
 #define NV_TX2_ERROR           (1<<30)
 #define NV_TX2_VALID           (1<<31)
+#define NV_TX2_TSO             (1<<28)
+#define NV_TX2_TSO_SHIFT       14
+#define NV_TX2_CHECKSUM_L3     (1<<27)
+#define NV_TX2_CHECKSUM_L4     (1<<26)
 
 #define NV_RX_DESCRIPTORVALID  (1<<16)
 #define NV_RX_MISSEDFRAME      (1<<17)
@@ -417,14 +427,14 @@ typedef union _ring_type {
 
 /* 
  * desc_ver values:
- * This field has two purposes:
- * - Newer nics uses a different ring layout. The layout is selected by
- *   comparing np->desc_ver with DESC_VER_xy.
- * - It contains bits that are forced on when writing to NvRegTxRxControl.
+ * The nic supports three different descriptor types:
+ * - DESC_VER_1: original format.
+ * - DESC_VER_2: support for jumbo frames.
+ * - DESC_VER_3: 64-bit format.
  */
-#define DESC_VER_1     0x0
-#define DESC_VER_2     (0x02100|NVREG_TXRXCTL_RXCHECK)
-#define DESC_VER_3      (0x02200|NVREG_TXRXCTL_RXCHECK)
+#define DESC_VER_1     1
+#define DESC_VER_2     2
+#define DESC_VER_3     3
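
With this change the descriptor version becomes a plain enumeration, and the bits that used to be folded into it move to a separate np->txrxctl_bits field built from the NVREG_TXRXCTL_DESC_* values. A sketch of how such a field can be derived, assuming (as the new DEV_HAS_CHECKSUM flag suggests) that RXCHECK is OR-ed in only when the device supports checksum offload; this mirrors the intent rather than quoting the probe code:

    #include <stdio.h>
    #include <stdint.h>

    #define DESC_VER_1 1
    #define DESC_VER_2 2
    #define DESC_VER_3 3

    #define NVREG_TXRXCTL_RXCHECK 0x0400
    #define NVREG_TXRXCTL_DESC_1  0
    #define NVREG_TXRXCTL_DESC_2  0x02100
    #define NVREG_TXRXCTL_DESC_3  0x02200

    /* pick the control bits for the negotiated descriptor format */
    static uint32_t txrxctl_bits(int desc_ver, int has_checksum)
    {
        uint32_t bits;

        switch (desc_ver) {
        case DESC_VER_3: bits = NVREG_TXRXCTL_DESC_3; break;
        case DESC_VER_2: bits = NVREG_TXRXCTL_DESC_2; break;
        default:         bits = NVREG_TXRXCTL_DESC_1; break;
        }
        if (has_checksum)
            bits |= NVREG_TXRXCTL_RXCHECK;
        return bits;
    }

    int main(void)
    {
        printf("ver 3 + csum: 0x%05x\n", (unsigned)txrxctl_bits(DESC_VER_3, 1)); /* 0x02600 */
        printf("ver 1, plain: 0x%05x\n", (unsigned)txrxctl_bits(DESC_VER_1, 0)); /* 0x00000 */
        return 0;
    }
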
 
 /* PHY defines */
 #define PHY_OUI_MARVELL        0x5043
@@ -491,6 +501,7 @@ struct fe_priv {
        u32 orig_mac[2];
        u32 irqmask;
        u32 desc_ver;
+       u32 txrxctl_bits;
 
        void __iomem *base;
 
@@ -534,7 +545,7 @@ static inline struct fe_priv *get_nvpriv(struct net_device *dev)
 
 static inline u8 __iomem *get_hwbase(struct net_device *dev)
 {
-       return get_nvpriv(dev)->base;
+       return ((struct fe_priv *)netdev_priv(dev))->base;
 }
 
 static inline void pci_push(u8 __iomem *base)
@@ -623,7 +634,7 @@ static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
 
 static int phy_reset(struct net_device *dev)
 {
-       struct fe_priv *np = get_nvpriv(dev);
+       struct fe_priv *np = netdev_priv(dev);
        u32 miicontrol;
        unsigned int tries = 0;
 
@@ -726,7 +737,7 @@ static int phy_init(struct net_device *dev)
 
 static void nv_start_rx(struct net_device *dev)
 {
-       struct fe_priv *np = get_nvpriv(dev);
+       struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base = get_hwbase(dev);
 
        dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
@@ -782,14 +793,14 @@ static void nv_stop_tx(struct net_device *dev)
 
 static void nv_txrx_reset(struct net_device *dev)
 {
-       struct fe_priv *np = get_nvpriv(dev);
+       struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base = get_hwbase(dev);
 
        dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
-       writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->desc_ver, base + NvRegTxRxControl);
+       writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
        pci_push(base);
        udelay(NV_TXRX_RESET_DELAY);
-       writel(NVREG_TXRXCTL_BIT2 | np->desc_ver, base + NvRegTxRxControl);
+       writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
        pci_push(base);
 }
 
@@ -801,7 +812,7 @@ static void nv_txrx_reset(struct net_device *dev)
  */
 static struct net_device_stats *nv_get_stats(struct net_device *dev)
 {
-       struct fe_priv *np = get_nvpriv(dev);
+       struct fe_priv *np = netdev_priv(dev);
 
        /* It seems that the nic always generates interrupts and doesn't
         * accumulate errors internally. Thus the current values in np->stats
@@ -817,7 +828,7 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev)
  */
 static int nv_alloc_rx(struct net_device *dev)
 {
-       struct fe_priv *np = get_nvpriv(dev);
+       struct fe_priv *np = netdev_priv(dev);
        unsigned int refill_rx = np->refill_rx;
        int nr;
 
@@ -861,7 +872,7 @@ static int nv_alloc_rx(struct net_device *dev)
 static void nv_do_rx_refill(unsigned long data)
 {
        struct net_device *dev = (struct net_device *) data;
-       struct fe_priv *np = get_nvpriv(dev);
+       struct fe_priv *np = netdev_priv(dev);
 
        disable_irq(dev->irq);
        if (nv_alloc_rx(dev)) {
@@ -875,7 +886,7 @@ static void nv_do_rx_refill(unsigned long data)
 
 static void nv_init_rx(struct net_device *dev) 
 {
-       struct fe_priv *np = get_nvpriv(dev);
+       struct fe_priv *np = netdev_priv(dev);
        int i;
 
        np->cur_rx = RX_RING;
@@ -889,15 +900,17 @@ static void nv_init_rx(struct net_device *dev)
 
 static void nv_init_tx(struct net_device *dev)
 {
-       struct fe_priv *np = get_nvpriv(dev);
+       struct fe_priv *np = netdev_priv(dev);
        int i;
 
        np->next_tx = np->nic_tx = 0;
-       for (i = 0; i < TX_RING; i++)
+       for (i = 0; i < TX_RING; i++) {
                if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
                        np->tx_ring.orig[i].FlagLen = 0;
                else
                        np->tx_ring.ex[i].FlagLen = 0;
+               np->tx_skbuff[i] = NULL;
+       }
 }
 
 static int nv_init_ring(struct net_device *dev)
@@ -907,21 +920,44 @@ static int nv_init_ring(struct net_device *dev)
        return nv_alloc_rx(dev);
 }
 
+static void nv_release_txskb(struct net_device *dev, unsigned int skbnr)
+{
+       struct fe_priv *np = netdev_priv(dev);
+       struct sk_buff *skb = np->tx_skbuff[skbnr];
+       unsigned int j, entry, fragments;
+                       
+       dprintk(KERN_INFO "%s: nv_release_txskb for skbnr %d, skb %p\n",
+               dev->name, skbnr, np->tx_skbuff[skbnr]);
+       
+       entry = skbnr;
+       if ((fragments = skb_shinfo(skb)->nr_frags) != 0) {
+               for (j = fragments; j >= 1; j--) {
+                       skb_frag_t *frag = &skb_shinfo(skb)->frags[j-1];
+                       pci_unmap_page(np->pci_dev, np->tx_dma[entry],
+                                      frag->size,
+                                      PCI_DMA_TODEVICE);
+                       entry = (entry - 1) % TX_RING;
+               }
+       }
+       pci_unmap_single(np->pci_dev, np->tx_dma[entry],
+                        skb->len - skb->data_len,
+                        PCI_DMA_TODEVICE);
+       dev_kfree_skb_irq(skb);
+       np->tx_skbuff[skbnr] = NULL;
+}
+
 static void nv_drain_tx(struct net_device *dev)
 {
-       struct fe_priv *np = get_nvpriv(dev);
-       int i;
+       struct fe_priv *np = netdev_priv(dev);
+       unsigned int i;
+       
        for (i = 0; i < TX_RING; i++) {
                if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
                        np->tx_ring.orig[i].FlagLen = 0;
                else
                        np->tx_ring.ex[i].FlagLen = 0;
                if (np->tx_skbuff[i]) {
-                       pci_unmap_single(np->pci_dev, np->tx_dma[i],
-                                               np->tx_skbuff[i]->len,
-                                               PCI_DMA_TODEVICE);
-                       dev_kfree_skb(np->tx_skbuff[i]);
-                       np->tx_skbuff[i] = NULL;
+                       nv_release_txskb(dev, i);
                        np->stats.tx_dropped++;
                }
        }
@@ -929,7 +965,7 @@ static void nv_drain_tx(struct net_device *dev)
 
 static void nv_drain_rx(struct net_device *dev)
 {
-       struct fe_priv *np = get_nvpriv(dev);
+       struct fe_priv *np = netdev_priv(dev);
        int i;
        for (i = 0; i < RX_RING; i++) {
                if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
@@ -959,28 +995,69 @@ static void drain_ring(struct net_device *dev)
  */
 static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-       struct fe_priv *np = get_nvpriv(dev);
-       int nr = np->next_tx % TX_RING;
+       struct fe_priv *np = netdev_priv(dev);
+       u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
+       unsigned int fragments = skb_shinfo(skb)->nr_frags;
+       unsigned int nr = (np->next_tx + fragments) % TX_RING;
+       unsigned int i;
+
+       spin_lock_irq(&np->lock);
+
+       if ((np->next_tx - np->nic_tx + fragments) > TX_LIMIT_STOP) {
+               spin_unlock_irq(&np->lock);
+               netif_stop_queue(dev);
+               return NETDEV_TX_BUSY;
+       }
 
        np->tx_skbuff[nr] = skb;
-       np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data,skb->len,
-                                       PCI_DMA_TODEVICE);
+       
+       if (fragments) {
+               dprintk(KERN_DEBUG "%s: nv_start_xmit: buffer contains %d fragments\n", dev->name, fragments);
+               /* setup descriptors in reverse order */
+               for (i = fragments; i >= 1; i--) {
+                       skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
+                       np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset, frag->size,
+                                                       PCI_DMA_TODEVICE);
 
-       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+                       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+                               np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
+                               np->tx_ring.orig[nr].FlagLen = cpu_to_le32( (frag->size-1) | np->tx_flags | tx_flags_extra);
+                       } else {
+                               np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
+                               np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
+                               np->tx_ring.ex[nr].FlagLen = cpu_to_le32( (frag->size-1) | np->tx_flags | tx_flags_extra);
+                       }
+                       
+                       nr = (nr - 1) % TX_RING;
+
+                       if (np->desc_ver == DESC_VER_1)
+                               tx_flags_extra &= ~NV_TX_LASTPACKET;
+                       else
+                               tx_flags_extra &= ~NV_TX2_LASTPACKET;           
+               }
+       }
+
+#ifdef NETIF_F_TSO
+       if (skb_shinfo(skb)->tso_size)
+               tx_flags_extra |= NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT);
+       else
+#endif
+       tx_flags_extra |= (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
+
+       np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data, skb->len-skb->data_len,
+                                       PCI_DMA_TODEVICE);
+       
+       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
                np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
-       else {
+               np->tx_ring.orig[nr].FlagLen = cpu_to_le32( (skb->len-skb->data_len-1) | np->tx_flags | tx_flags_extra);
+       } else {
                np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
                np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
-       }
+               np->tx_ring.ex[nr].FlagLen = cpu_to_le32( (skb->len-skb->data_len-1) | np->tx_flags | tx_flags_extra);
+       }       
 
-       spin_lock_irq(&np->lock);
-       wmb();
-       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-               np->tx_ring.orig[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags );
-       else
-               np->tx_ring.ex[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags );
-       dprintk(KERN_DEBUG "%s: nv_start_xmit: packet packet %d queued for transmission.\n",
-                               dev->name, np->next_tx);
+       dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d queued for transmission. tx_flags_extra: %x\n",
+                               dev->name, np->next_tx, tx_flags_extra);
        {
                int j;
                for (j=0; j<64; j++) {
@@ -991,15 +1068,13 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
                dprintk("\n");
        }
 
-       np->next_tx++;
+       np->next_tx += 1 + fragments;
 
        dev->trans_start = jiffies;
-       if (np->next_tx - np->nic_tx >= TX_LIMIT_STOP)
-               netif_stop_queue(dev);
        spin_unlock_irq(&np->lock);
-       writel(NVREG_TXRXCTL_KICK|np->desc_ver, get_hwbase(dev) + NvRegTxRxControl);
+       writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
        pci_push(get_hwbase(dev));
-       return 0;
+       return NETDEV_TX_OK;
 }
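A worked sketch of the indexing used by the fragment path above, assuming next_tx == 5 and a two-fragment skb (the starting index is made up for illustration; the flag names are the ones defined earlier in this patch):

	nr = (5 + 2) % TX_RING;        /* == 7: the skb pointer is stored here         */
	/* the fragment loop runs in reverse:                                          */
	/*   frags[1] -> descriptor 7, flagged NV_TX2_LASTPACKET (NV_TX_LASTPACKET     */
	/*   on DESC_VER_1); frags[0] -> descriptor 6, with LASTPACKET cleared         */
	/* after the loop, the linear part (skb->len - skb->data_len bytes) lands in   */
	/* descriptor 5 and carries the TSO or checksum bits instead of LASTPACKET     */
	np->next_tx += 1 + 2;          /* nv_tx_done() later walks 5, 6, 7             */

nv_tx_done() only updates the statistics and calls nv_release_txskb() when it reaches the descriptor carrying LASTPACKET, which is also the slot holding the skb pointer, so a multi-descriptor packet is unmapped and freed exactly once.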
 
 /*
@@ -1009,9 +1084,10 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
  */
 static void nv_tx_done(struct net_device *dev)
 {
-       struct fe_priv *np = get_nvpriv(dev);
+       struct fe_priv *np = netdev_priv(dev);
        u32 Flags;
-       int i;
+       unsigned int i;
+       struct sk_buff *skb;
 
        while (np->nic_tx != np->next_tx) {
                i = np->nic_tx % TX_RING;
@@ -1026,35 +1102,38 @@ static void nv_tx_done(struct net_device *dev)
                if (Flags & NV_TX_VALID)
                        break;
                if (np->desc_ver == DESC_VER_1) {
-                       if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
-                                                       NV_TX_UNDERFLOW|NV_TX_ERROR)) {
-                               if (Flags & NV_TX_UNDERFLOW)
-                                       np->stats.tx_fifo_errors++;
-                               if (Flags & NV_TX_CARRIERLOST)
-                                       np->stats.tx_carrier_errors++;
-                               np->stats.tx_errors++;
-                       } else {
-                               np->stats.tx_packets++;
-                               np->stats.tx_bytes += np->tx_skbuff[i]->len;
+                       if (Flags & NV_TX_LASTPACKET) {
+                               skb = np->tx_skbuff[i];
+                               if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
+                                            NV_TX_UNDERFLOW|NV_TX_ERROR)) {
+                                       if (Flags & NV_TX_UNDERFLOW)
+                                               np->stats.tx_fifo_errors++;
+                                       if (Flags & NV_TX_CARRIERLOST)
+                                               np->stats.tx_carrier_errors++;
+                                       np->stats.tx_errors++;
+                               } else {
+                                       np->stats.tx_packets++;
+                                       np->stats.tx_bytes += skb->len;
+                               }
+                               nv_release_txskb(dev, i);
                        }
                } else {
-                       if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
-                                                       NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
-                               if (Flags & NV_TX2_UNDERFLOW)
-                                       np->stats.tx_fifo_errors++;
-                               if (Flags & NV_TX2_CARRIERLOST)
-                                       np->stats.tx_carrier_errors++;
-                               np->stats.tx_errors++;
-                       } else {
-                               np->stats.tx_packets++;
-                               np->stats.tx_bytes += np->tx_skbuff[i]->len;
+                       if (Flags & NV_TX2_LASTPACKET) {
+                               skb = np->tx_skbuff[i];
+                               if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
+                                            NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
+                                       if (Flags & NV_TX2_UNDERFLOW)
+                                               np->stats.tx_fifo_errors++;
+                                       if (Flags & NV_TX2_CARRIERLOST)
+                                               np->stats.tx_carrier_errors++;
+                                       np->stats.tx_errors++;
+                               } else {
+                                       np->stats.tx_packets++;
+                                       np->stats.tx_bytes += skb->len;
+                               }                               
+                               nv_release_txskb(dev, i);
                        }
                }
-               pci_unmap_single(np->pci_dev, np->tx_dma[i],
-                                       np->tx_skbuff[i]->len,
-                                       PCI_DMA_TODEVICE);
-               dev_kfree_skb_irq(np->tx_skbuff[i]);
-               np->tx_skbuff[i] = NULL;
                np->nic_tx++;
        }
        if (np->next_tx - np->nic_tx < TX_LIMIT_START)
@@ -1067,7 +1146,7 @@ static void nv_tx_done(struct net_device *dev)
  */
 static void nv_tx_timeout(struct net_device *dev)
 {
-       struct fe_priv *np = get_nvpriv(dev);
+       struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base = get_hwbase(dev);
 
        printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name,
@@ -1200,7 +1279,7 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
 
 static void nv_rx_process(struct net_device *dev)
 {
-       struct fe_priv *np = get_nvpriv(dev);
+       struct fe_priv *np = netdev_priv(dev);
        u32 Flags;
 
        for (;;) {
@@ -1355,7 +1434,7 @@ static void set_bufsize(struct net_device *dev)
  */
 static int nv_change_mtu(struct net_device *dev, int new_mtu)
 {
-       struct fe_priv *np = get_nvpriv(dev);
+       struct fe_priv *np = netdev_priv(dev);
        int old_mtu;
 
        if (new_mtu < 64 || new_mtu > np->pkt_limit)
@@ -1408,7 +1487,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
                writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
                        base + NvRegRingSizes);
                pci_push(base);
-               writel(NVREG_TXRXCTL_KICK|np->desc_ver, get_hwbase(dev) + NvRegTxRxControl);
+               writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
                pci_push(base);
 
                /* restart rx engine */
@@ -1440,7 +1519,7 @@ static void nv_copy_mac_to_hw(struct net_device *dev)
  */
 static int nv_set_mac_address(struct net_device *dev, void *addr)
 {
-       struct fe_priv *np = get_nvpriv(dev);
+       struct fe_priv *np = netdev_priv(dev);
        struct sockaddr *macaddr = (struct sockaddr*)addr;
 
        if(!is_valid_ether_addr(macaddr->sa_data))
@@ -1475,7 +1554,7 @@ static int nv_set_mac_address(struct net_device *dev, void *addr)
  */
 static void nv_set_multicast(struct net_device *dev)
 {
-       struct fe_priv *np = get_nvpriv(dev);
+       struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base = get_hwbase(dev);
        u32 addr[2];
        u32 mask[2];
@@ -1535,7 +1614,7 @@ static void nv_set_multicast(struct net_device *dev)
 
 static int nv_update_linkspeed(struct net_device *dev)
 {
-       struct fe_priv *np = get_nvpriv(dev);
+       struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base = get_hwbase(dev);
        int adv, lpa;
        int newls = np->linkspeed;
@@ -1705,7 +1784,7 @@ static void nv_link_irq(struct net_device *dev)
 static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
 {
        struct net_device *dev = (struct net_device *) data;
-       struct fe_priv *np = get_nvpriv(dev);
+       struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base = get_hwbase(dev);
        u32 events;
        int i;
@@ -1777,7 +1856,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
 static void nv_do_nic_poll(unsigned long data)
 {
        struct net_device *dev = (struct net_device *) data;
-       struct fe_priv *np = get_nvpriv(dev);
+       struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base = get_hwbase(dev);
 
        disable_irq(dev->irq);
@@ -1801,7 +1880,7 @@ static void nv_poll_controller(struct net_device *dev)
 
 static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
-       struct fe_priv *np = get_nvpriv(dev);
+       struct fe_priv *np = netdev_priv(dev);
        strcpy(info->driver, "forcedeth");
        strcpy(info->version, FORCEDETH_VERSION);
        strcpy(info->bus_info, pci_name(np->pci_dev));
@@ -1809,7 +1888,7 @@ static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 
 static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
 {
-       struct fe_priv *np = get_nvpriv(dev);
+       struct fe_priv *np = netdev_priv(dev);
        wolinfo->supported = WAKE_MAGIC;
 
        spin_lock_irq(&np->lock);
@@ -1820,7 +1899,7 @@ static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
 
 static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
 {
-       struct fe_priv *np = get_nvpriv(dev);
+       struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base = get_hwbase(dev);
 
        spin_lock_irq(&np->lock);
@@ -2021,7 +2100,7 @@ static int nv_get_regs_len(struct net_device *dev)
 
 static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
 {
-       struct fe_priv *np = get_nvpriv(dev);
+       struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base = get_hwbase(dev);
        u32 *rbuf = buf;
        int i;
@@ -2035,7 +2114,7 @@ static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void
 
 static int nv_nway_reset(struct net_device *dev)
 {
-       struct fe_priv *np = get_nvpriv(dev);
+       struct fe_priv *np = netdev_priv(dev);
        int ret;
 
        spin_lock_irq(&np->lock);
@@ -2065,11 +2144,12 @@ static struct ethtool_ops ops = {
        .get_regs_len = nv_get_regs_len,
        .get_regs = nv_get_regs,
        .nway_reset = nv_nway_reset,
+       .get_perm_addr = ethtool_op_get_perm_addr,
 };
 
 static int nv_open(struct net_device *dev)
 {
-       struct fe_priv *np = get_nvpriv(dev);
+       struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base = get_hwbase(dev);
        int ret, oom, i;
 
@@ -2114,9 +2194,9 @@ static int nv_open(struct net_device *dev)
        /* 5) continue setup */
        writel(np->linkspeed, base + NvRegLinkSpeed);
        writel(NVREG_UNKSETUP3_VAL1, base + NvRegUnknownSetupReg3);
-       writel(np->desc_ver, base + NvRegTxRxControl);
+       writel(np->txrxctl_bits, base + NvRegTxRxControl);
        pci_push(base);
-       writel(NVREG_TXRXCTL_BIT1|np->desc_ver, base + NvRegTxRxControl);
+       writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
        reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
                        NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
                        KERN_INFO "open: SetupReg5, Bit 31 remained off\n");
@@ -2205,7 +2285,7 @@ out_drain:
 
 static int nv_close(struct net_device *dev)
 {
-       struct fe_priv *np = get_nvpriv(dev);
+       struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base;
 
        spin_lock_irq(&np->lock);
@@ -2261,7 +2341,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
        if (!dev)
                goto out;
 
-       np = get_nvpriv(dev);
+       np = netdev_priv(dev);
        np->pci_dev = pci_dev;
        spin_lock_init(&np->lock);
        SET_MODULE_OWNER(dev);
@@ -2313,19 +2393,32 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
                if (pci_set_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
                        printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
                                        pci_name(pci_dev));
+               } else {
+                       dev->features |= NETIF_F_HIGHDMA;
                }
+               np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
        } else if (id->driver_data & DEV_HAS_LARGEDESC) {
                /* packet format 2: supports jumbo frames */
                np->desc_ver = DESC_VER_2;
+               np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
        } else {
                /* original packet format */
                np->desc_ver = DESC_VER_1;
+               np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
        }
 
        np->pkt_limit = NV_PKTLIMIT_1;
        if (id->driver_data & DEV_HAS_LARGEDESC)
                np->pkt_limit = NV_PKTLIMIT_2;
 
+       if (id->driver_data & DEV_HAS_CHECKSUM) {
+               np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
+               dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
+#ifdef NETIF_F_TSO
+               dev->features |= NETIF_F_TSO;
+#endif
+       }
+
        err = -ENOMEM;
        np->base = ioremap(addr, NV_PCI_REGSZ);
        if (!np->base)
@@ -2377,8 +2470,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
        dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
        dev->dev_addr[4] = (np->orig_mac[0] >>  8) & 0xff;
        dev->dev_addr[5] = (np->orig_mac[0] >>  0) & 0xff;
+       memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 
-       if (!is_valid_ether_addr(dev->dev_addr)) {
+       if (!is_valid_ether_addr(dev->perm_addr)) {
                /*
                 * Bad mac address. At least one bios sets the mac address
                 * to 01:23:45:67:89:ab
@@ -2403,9 +2497,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
        np->wolenabled = 0;
 
        if (np->desc_ver == DESC_VER_1) {
-               np->tx_flags = NV_TX_LASTPACKET|NV_TX_VALID;
+               np->tx_flags = NV_TX_VALID;
        } else {
-               np->tx_flags = NV_TX2_LASTPACKET|NV_TX2_VALID;
+               np->tx_flags = NV_TX2_VALID;
        }
        np->irqmask = NVREG_IRQMASK_WANTED;
        if (id->driver_data & DEV_NEED_TIMERIRQ)
@@ -2494,7 +2588,7 @@ out:
 static void __devexit nv_remove(struct pci_dev *pci_dev)
 {
        struct net_device *dev = pci_get_drvdata(pci_dev);
-       struct fe_priv *np = get_nvpriv(dev);
+       struct fe_priv *np = netdev_priv(dev);
 
        unregister_netdev(dev);
 
@@ -2525,35 +2619,35 @@ static struct pci_device_id pci_tbl[] = {
        },
        {       /* nForce3 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
        },
        {       /* nForce3 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
        },
        {       /* nForce3 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
        },
        {       /* nForce3 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
        },
        {       /* CK804 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
        },
        {       /* CK804 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
        },
        {       /* MCP04 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
        },
        {       /* MCP04 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
        },
        {       /* MCP51 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
@@ -2565,11 +2659,11 @@ static struct pci_device_id pci_tbl[] = {
        },
        {       /* MCP55 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
        },
        {       /* MCP55 Ethernet Controller */
                PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
-               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
+               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
        },
        {0,},
 };
index 6518334b92801a41745718b09a74805e3e7b8004..ae5a2ed3b2640336a656087596206d2e462ee73e 100644
  *  define the configuration needed by the board are defined in a
  *  board structure in arch/ppc/platforms (though I do not
  *  discount the possibility that other architectures could one
- *  day be supported.  One assumption the driver currently makes
- *  is that the PHY is configured in such a way to advertise all
- *  capabilities.  This is a sensible default, and on certain
- *  PHYs, changing this default encounters substantial errata
- *  issues.  Future versions may remove this requirement, but for
- *  now, it is best for the firmware to ensure this is the case.
+ *  day be supported.
  *
  *  The Gianfar Ethernet Controller uses a ring of buffer
  *  descriptors.  The beginning is indicated by a register
@@ -47,7 +42,7 @@
  *  corresponding bit in the IMASK register is also set (if
  *  interrupt coalescing is active, then the interrupt may not
  *  happen immediately, but will wait until either a set number
- *  of frames or amount of time have passed.).  In NAPI, the
+ *  of frames or amount of time have passed).  In NAPI, the
  *  interrupt handler will signal there is work to be done, and
  *  exit.  Without NAPI, the packet(s) will be handled
  *  immediately.  Both methods will start at the last known empty
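As a driver-agnostic illustration of the descriptor-ring scheme this comment describes (the names below are illustrative, not Gianfar's actual symbols), a receive pass boils down to:

	/* consume descriptors until one is still marked empty, i.e. owned by the NIC */
	while (!(bdp->status & BD_EMPTY) && work_done < budget) {
		process_frame(dev, priv->rx_skbuff[priv->skb_currx], bdp->length);
		bdp->status |= BD_EMPTY;      /* hand the descriptor back to the NIC     */
		bdp = next_bd(priv, bdp);     /* wraps at the end of the ring            */
		work_done++;
	}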
@@ -75,6 +70,7 @@
 #include <linux/sched.h>
 #include <linux/string.h>
 #include <linux/errno.h>
+#include <linux/unistd.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
 #include <linux/init.h>
 #include <linux/version.h>
 #include <linux/dma-mapping.h>
 #include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
 
 #include "gianfar.h"
-#include "gianfar_phy.h"
+#include "gianfar_mii.h"
 
 #define TX_TIMEOUT      (1*HZ)
 #define SKB_ALLOC_TIMEOUT 1000000
 #endif
 
 const char gfar_driver_name[] = "Gianfar Ethernet";
-const char gfar_driver_version[] = "1.1";
+const char gfar_driver_version[] = "1.2";
 
-int startup_gfar(struct net_device *dev);
 static int gfar_enet_open(struct net_device *dev);
 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
 static void gfar_timeout(struct net_device *dev);
@@ -126,17 +123,13 @@ static int gfar_set_mac_address(struct net_device *dev);
 static int gfar_change_mtu(struct net_device *dev, int new_mtu);
 static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs);
 static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs);
-static irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs);
 static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs);
-static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs);
-static void gfar_phy_change(void *data);
-static void gfar_phy_timer(unsigned long data);
 static void adjust_link(struct net_device *dev);
 static void init_registers(struct net_device *dev);
 static int init_phy(struct net_device *dev);
 static int gfar_probe(struct device *device);
 static int gfar_remove(struct device *device);
-void free_skb_resources(struct gfar_private *priv);
+static void free_skb_resources(struct gfar_private *priv);
 static void gfar_set_multi(struct net_device *dev);
 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
 #ifdef CONFIG_GFAR_NAPI
@@ -144,7 +137,6 @@ static int gfar_poll(struct net_device *dev, int *budget);
 #endif
 int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
 static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length);
-static void gfar_phy_startup_timer(unsigned long data);
 static void gfar_vlan_rx_register(struct net_device *netdev,
                                struct vlan_group *grp);
 static void gfar_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
@@ -162,6 +154,9 @@ int gfar_uses_fcb(struct gfar_private *priv)
        else
                return 0;
 }
+
+/* Set up the ethernet device structure, private data,
+ * and anything else we need before we start */
 static int gfar_probe(struct device *device)
 {
        u32 tempval;
@@ -175,7 +170,7 @@ static int gfar_probe(struct device *device)
 
        einfo = (struct gianfar_platform_data *) pdev->dev.platform_data;
 
-       if (einfo == NULL) {
+       if (NULL == einfo) {
                printk(KERN_ERR "gfar %d: Missing additional data!\n",
                       pdev->id);
 
@@ -185,7 +180,7 @@ static int gfar_probe(struct device *device)
        /* Create an ethernet device instance */
        dev = alloc_etherdev(sizeof (*priv));
 
-       if (dev == NULL)
+       if (NULL == dev)
                return -ENOMEM;
 
        priv = netdev_priv(dev);
@@ -207,20 +202,11 @@ static int gfar_probe(struct device *device)
        priv->regs = (struct gfar *)
                ioremap(r->start, sizeof (struct gfar));
 
-       if (priv->regs == NULL) {
+       if (NULL == priv->regs) {
                err = -ENOMEM;
                goto regs_fail;
        }
 
-       /* Set the PHY base address */
-       priv->phyregs = (struct gfar *)
-           ioremap(einfo->phy_reg_addr, sizeof (struct gfar));
-
-       if (priv->phyregs == NULL) {
-               err = -ENOMEM;
-               goto phy_regs_fail;
-       }
-
        spin_lock_init(&priv->lock);
 
        dev_set_drvdata(device, dev);
@@ -386,12 +372,10 @@ static int gfar_probe(struct device *device)
        return 0;
 
 register_fail:
-       iounmap((void *) priv->phyregs);
-phy_regs_fail:
        iounmap((void *) priv->regs);
 regs_fail:
        free_netdev(dev);
-       return -ENOMEM;
+       return err;
 }
 
 static int gfar_remove(struct device *device)
@@ -402,108 +386,41 @@ static int gfar_remove(struct device *device)
        dev_set_drvdata(device, NULL);
 
        iounmap((void *) priv->regs);
-       iounmap((void *) priv->phyregs);
        free_netdev(dev);
 
        return 0;
 }
 
 
-/* Configure the PHY for dev.
- * returns 0 if success.  -1 if failure
+/* Initializes driver's PHY state, and attaches to the PHY.
+ * Returns 0 on success.
  */
 static int init_phy(struct net_device *dev)
 {
        struct gfar_private *priv = netdev_priv(dev);
-       struct phy_info *curphy;
-       unsigned int timeout = PHY_INIT_TIMEOUT;
-       struct gfar *phyregs = priv->phyregs;
-       struct gfar_mii_info *mii_info;
-       int err;
+       uint gigabit_support =
+               priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
+               SUPPORTED_1000baseT_Full : 0;
+       struct phy_device *phydev;
 
        priv->oldlink = 0;
        priv->oldspeed = 0;
        priv->oldduplex = -1;
 
-       mii_info = kmalloc(sizeof(struct gfar_mii_info),
-                       GFP_KERNEL);
-
-       if(NULL == mii_info) {
-               if (netif_msg_ifup(priv))
-                       printk(KERN_ERR "%s: Could not allocate mii_info\n",
-                                       dev->name);
-               return -ENOMEM;
-       }
-
-       mii_info->speed = SPEED_1000;
-       mii_info->duplex = DUPLEX_FULL;
-       mii_info->pause = 0;
-       mii_info->link = 1;
-
-       mii_info->advertising = (ADVERTISED_10baseT_Half |
-                       ADVERTISED_10baseT_Full |
-                       ADVERTISED_100baseT_Half |
-                       ADVERTISED_100baseT_Full |
-                       ADVERTISED_1000baseT_Full);
-       mii_info->autoneg = 1;
+       phydev = phy_connect(dev, priv->einfo->bus_id, &adjust_link, 0);
 
-       spin_lock_init(&mii_info->mdio_lock);
-
-       mii_info->mii_id = priv->einfo->phyid;
-
-       mii_info->dev = dev;
-
-       mii_info->mdio_read = &read_phy_reg;
-       mii_info->mdio_write = &write_phy_reg;
-
-       priv->mii_info = mii_info;
-
-       /* Reset the management interface */
-       gfar_write(&phyregs->miimcfg, MIIMCFG_RESET);
-
-       /* Setup the MII Mgmt clock speed */
-       gfar_write(&phyregs->miimcfg, MIIMCFG_INIT_VALUE);
-
-       /* Wait until the bus is free */
-       while ((gfar_read(&phyregs->miimind) & MIIMIND_BUSY) &&
-                       timeout--)
-               cpu_relax();
-
-       if(timeout <= 0) {
-               printk(KERN_ERR "%s: The MII Bus is stuck!\n",
-                               dev->name);
-               err = -1;
-               goto bus_fail;
-       }
-
-       /* get info for this PHY */
-       curphy = get_phy_info(priv->mii_info);
-
-       if (curphy == NULL) {
-               if (netif_msg_ifup(priv))
-                       printk(KERN_ERR "%s: No PHY found\n", dev->name);
-               err = -1;
-               goto no_phy;
+       if (IS_ERR(phydev)) {
+               printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+               return PTR_ERR(phydev);
        }
 
-       mii_info->phyinfo = curphy;
+       /* Remove any features not supported by the controller */
+       phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
+       phydev->advertising = phydev->supported;
 
-       /* Run the commands which initialize the PHY */
-       if(curphy->init) {
-               err = curphy->init(priv->mii_info);
-
-               if (err)
-                       goto phy_init_fail;
-       }
+       priv->phydev = phydev;
 
        return 0;
-
-phy_init_fail:
-no_phy:
-bus_fail:
-       kfree(mii_info);
-
-       return err;
 }
 
 static void init_registers(struct net_device *dev)
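Taken together with the stop_gfar() and gfar_close() hunks further down, the PHY handling now reduces to the standard phylib lifecycle, roughly (error handling omitted):

	phydev = phy_connect(dev, priv->einfo->bus_id, &adjust_link, 0);
	phydev->supported &= (GFAR_SUPPORTED | gigabit_support);  /* trim to hw caps */
	phydev->advertising = phydev->supported;
	phy_start(phydev);        /* in startup_gfar(): start the PHY state machine  */
	phy_stop(phydev);         /* in stop_gfar(): quiesce the link                */
	phy_disconnect(phydev);   /* in gfar_close(): detach and clear priv->phydev  */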
@@ -603,24 +520,13 @@ void stop_gfar(struct net_device *dev)
        struct gfar *regs = priv->regs;
        unsigned long flags;
 
+       phy_stop(priv->phydev);
+
        /* Lock it down */
        spin_lock_irqsave(&priv->lock, flags);
 
-       /* Tell the kernel the link is down */
-       priv->mii_info->link = 0;
-       adjust_link(dev);
-
        gfar_halt(dev);
 
-       if (priv->einfo->board_flags & FSL_GIANFAR_BRD_HAS_PHY_INTR) {
-               /* Clear any pending interrupts */
-               mii_clear_phy_interrupt(priv->mii_info);
-
-               /* Disable PHY Interrupts */
-               mii_configure_phy_interrupt(priv->mii_info,
-                               MII_INTERRUPT_DISABLED);
-       }
-
        spin_unlock_irqrestore(&priv->lock, flags);
 
        /* Free the IRQs */
@@ -629,13 +535,7 @@ void stop_gfar(struct net_device *dev)
                free_irq(priv->interruptTransmit, dev);
                free_irq(priv->interruptReceive, dev);
        } else {
-               free_irq(priv->interruptTransmit, dev);
-       }
-
-       if (priv->einfo->board_flags & FSL_GIANFAR_BRD_HAS_PHY_INTR) {
-               free_irq(priv->einfo->interruptPHY, dev);
-       } else {
-               del_timer_sync(&priv->phy_info_timer);
+               free_irq(priv->interruptTransmit, dev);
        }
 
        free_skb_resources(priv);
@@ -649,7 +549,7 @@ void stop_gfar(struct net_device *dev)
 
 /* If there are any tx skbs or rx skbs still around, free them.
  * Then free tx_skbuff and rx_skbuff */
-void free_skb_resources(struct gfar_private *priv)
+static void free_skb_resources(struct gfar_private *priv)
 {
        struct rxbd8 *rxbdp;
        struct txbd8 *txbdp;
@@ -770,7 +670,7 @@ int startup_gfar(struct net_device *dev)
            (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
                                        priv->tx_ring_size, GFP_KERNEL);
 
-       if (priv->tx_skbuff == NULL) {
+       if (NULL == priv->tx_skbuff) {
                if (netif_msg_ifup(priv))
                        printk(KERN_ERR "%s: Could not allocate tx_skbuff\n",
                                        dev->name);
@@ -785,7 +685,7 @@ int startup_gfar(struct net_device *dev)
            (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
                                        priv->rx_ring_size, GFP_KERNEL);
 
-       if (priv->rx_skbuff == NULL) {
+       if (NULL == priv->rx_skbuff) {
                if (netif_msg_ifup(priv))
                        printk(KERN_ERR "%s: Could not allocate rx_skbuff\n",
                                        dev->name);
@@ -879,13 +779,7 @@ int startup_gfar(struct net_device *dev)
                }
        }
 
-       /* Set up the PHY change work queue */
-       INIT_WORK(&priv->tq, gfar_phy_change, dev);
-
-       init_timer(&priv->phy_info_timer);
-       priv->phy_info_timer.function = &gfar_phy_startup_timer;
-       priv->phy_info_timer.data = (unsigned long) priv->mii_info;
-       mod_timer(&priv->phy_info_timer, jiffies + HZ);
+       phy_start(priv->phydev);
 
        /* Configure the coalescing support */
        if (priv->txcoalescing)
@@ -933,11 +827,6 @@ tx_skb_fail:
                        priv->tx_bd_base,
                        gfar_read(&regs->tbase0));
 
-       if (priv->mii_info->phyinfo->close)
-               priv->mii_info->phyinfo->close(priv->mii_info);
-
-       kfree(priv->mii_info);
-
        return err;
 }
 
@@ -1035,7 +924,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
        txbdp->status &= TXBD_WRAP;
 
        /* Set up checksumming */
-       if ((dev->features & NETIF_F_IP_CSUM) 
+       if ((dev->features & NETIF_F_IP_CSUM)
                        && (CHECKSUM_HW == skb->ip_summed)) {
                fcb = gfar_add_fcb(skb, txbdp);
                gfar_tx_checksum(skb, fcb);
@@ -1103,11 +992,9 @@ static int gfar_close(struct net_device *dev)
        struct gfar_private *priv = netdev_priv(dev);
        stop_gfar(dev);
 
-       /* Shutdown the PHY */
-       if (priv->mii_info->phyinfo->close)
-               priv->mii_info->phyinfo->close(priv->mii_info);
-
-       kfree(priv->mii_info);
+       /* Disconnect from the PHY */
+       phy_disconnect(priv->phydev);
+       priv->phydev = NULL;
 
        netif_stop_queue(dev);
 
@@ -1343,7 +1230,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
        while ((!skb) && timeout--)
                skb = dev_alloc_skb(priv->rx_buffer_size + RXBUF_ALIGNMENT);
 
-       if (skb == NULL)
+       if (NULL == skb)
                return NULL;
 
        /* We need the data buffer to be aligned properly.  We will reserve
@@ -1490,7 +1377,7 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
        struct gfar_private *priv = netdev_priv(dev);
        struct rxfcb *fcb = NULL;
 
-       if (skb == NULL) {
+       if (NULL == skb) {
                if (netif_msg_rx_err(priv))
                        printk(KERN_WARNING "%s: Missing skb!!.\n", dev->name);
                priv->stats.rx_dropped++;
@@ -1718,131 +1605,9 @@ static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs)
        return IRQ_HANDLED;
 }
 
-static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs)
-{
-       struct net_device *dev = (struct net_device *) dev_id;
-       struct gfar_private *priv = netdev_priv(dev);
-
-       /* Clear the interrupt */
-       mii_clear_phy_interrupt(priv->mii_info);
-
-       /* Disable PHY interrupts */
-       mii_configure_phy_interrupt(priv->mii_info,
-                       MII_INTERRUPT_DISABLED);
-
-       /* Schedule the phy change */
-       schedule_work(&priv->tq);
-
-       return IRQ_HANDLED;
-}
-
-/* Scheduled by the phy_interrupt/timer to handle PHY changes */
-static void gfar_phy_change(void *data)
-{
-       struct net_device *dev = (struct net_device *) data;
-       struct gfar_private *priv = netdev_priv(dev);
-       int result = 0;
-
-       /* Delay to give the PHY a chance to change the
-        * register state */
-       msleep(1);
-
-       /* Update the link, speed, duplex */
-       result = priv->mii_info->phyinfo->read_status(priv->mii_info);
-
-       /* Adjust the known status as long as the link
-        * isn't still coming up */
-       if((0 == result) || (priv->mii_info->link == 0))
-               adjust_link(dev);
-
-       /* Reenable interrupts, if needed */
-       if (priv->einfo->board_flags & FSL_GIANFAR_BRD_HAS_PHY_INTR)
-               mii_configure_phy_interrupt(priv->mii_info,
-                               MII_INTERRUPT_ENABLED);
-}
-
-/* Called every so often on systems that don't interrupt
- * the core for PHY changes */
-static void gfar_phy_timer(unsigned long data)
-{
-       struct net_device *dev = (struct net_device *) data;
-       struct gfar_private *priv = netdev_priv(dev);
-
-       schedule_work(&priv->tq);
-
-       mod_timer(&priv->phy_info_timer, jiffies +
-                       GFAR_PHY_CHANGE_TIME * HZ);
-}
-
-/* Keep trying aneg for some time
- * If, after GFAR_AN_TIMEOUT seconds, it has not
- * finished, we switch to forced.
- * Either way, once the process has completed, we either
- * request the interrupt, or switch the timer over to
- * using gfar_phy_timer to check status */
-static void gfar_phy_startup_timer(unsigned long data)
-{
-       int result;
-       static int secondary = GFAR_AN_TIMEOUT;
-       struct gfar_mii_info *mii_info = (struct gfar_mii_info *)data;
-       struct gfar_private *priv = netdev_priv(mii_info->dev);
-
-       /* Configure the Auto-negotiation */
-       result = mii_info->phyinfo->config_aneg(mii_info);
-
-       /* If autonegotiation failed to start, and
-        * we haven't timed out, reset the timer, and return */
-       if (result && secondary--) {
-               mod_timer(&priv->phy_info_timer, jiffies + HZ);
-               return;
-       } else if (result) {
-               /* Couldn't start autonegotiation.
-                * Try switching to forced */
-               mii_info->autoneg = 0;
-               result = mii_info->phyinfo->config_aneg(mii_info);
-
-               /* Forcing failed!  Give up */
-               if(result) {
-                       if (netif_msg_link(priv))
-                               printk(KERN_ERR "%s: Forcing failed!\n",
-                                               mii_info->dev->name);
-                       return;
-               }
-       }
-
-       /* Kill the timer so it can be restarted */
-       del_timer_sync(&priv->phy_info_timer);
-
-       /* Grab the PHY interrupt, if necessary/possible */
-       if (priv->einfo->board_flags & FSL_GIANFAR_BRD_HAS_PHY_INTR) {
-               if (request_irq(priv->einfo->interruptPHY,
-                                       phy_interrupt,
-                                       SA_SHIRQ,
-                                       "phy_interrupt",
-                                       mii_info->dev) < 0) {
-                       if (netif_msg_intr(priv))
-                               printk(KERN_ERR "%s: Can't get IRQ %d (PHY)\n",
-                                               mii_info->dev->name,
-                                       priv->einfo->interruptPHY);
-               } else {
-                       mii_configure_phy_interrupt(priv->mii_info,
-                                       MII_INTERRUPT_ENABLED);
-                       return;
-               }
-       }
-
-       /* Start the timer again, this time in order to
-        * handle a change in status */
-       init_timer(&priv->phy_info_timer);
-       priv->phy_info_timer.function = &gfar_phy_timer;
-       priv->phy_info_timer.data = (unsigned long) mii_info->dev;
-       mod_timer(&priv->phy_info_timer, jiffies +
-                       GFAR_PHY_CHANGE_TIME * HZ);
-}
-
 /* Called every time the controller might need to be made
  * aware of new link state.  The PHY code conveys this
- * information through variables in the priv structure, and this
+ * information through variables in the phydev structure, and this
  * function converts those variables into the appropriate
  * register values, and can bring down the device if needed.
  */
@@ -1850,84 +1615,68 @@ static void adjust_link(struct net_device *dev)
 {
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar *regs = priv->regs;
-       u32 tempval;
-       struct gfar_mii_info *mii_info = priv->mii_info;
+       unsigned long flags;
+       struct phy_device *phydev = priv->phydev;
+       int new_state = 0;
+
+       spin_lock_irqsave(&priv->lock, flags);
+       if (phydev->link) {
+               u32 tempval = gfar_read(&regs->maccfg2);
 
-       if (mii_info->link) {
                /* Now we make sure that we can be in full duplex mode.
                 * If not, we operate in half-duplex mode. */
-               if (mii_info->duplex != priv->oldduplex) {
-                       if (!(mii_info->duplex)) {
-                               tempval = gfar_read(&regs->maccfg2);
+               if (phydev->duplex != priv->oldduplex) {
+                       new_state = 1;
+                       if (!(phydev->duplex))
                                tempval &= ~(MACCFG2_FULL_DUPLEX);
-                               gfar_write(&regs->maccfg2, tempval);
-
-                               if (netif_msg_link(priv))
-                                       printk(KERN_INFO "%s: Half Duplex\n",
-                                                       dev->name);
-                       } else {
-                               tempval = gfar_read(&regs->maccfg2);
+                       else
                                tempval |= MACCFG2_FULL_DUPLEX;
-                               gfar_write(&regs->maccfg2, tempval);
 
-                               if (netif_msg_link(priv))
-                                       printk(KERN_INFO "%s: Full Duplex\n",
-                                                       dev->name);
-                       }
-
-                       priv->oldduplex = mii_info->duplex;
+                       priv->oldduplex = phydev->duplex;
                }
 
-               if (mii_info->speed != priv->oldspeed) {
-                       switch (mii_info->speed) {
+               if (phydev->speed != priv->oldspeed) {
+                       new_state = 1;
+                       switch (phydev->speed) {
                        case 1000:
-                               tempval = gfar_read(&regs->maccfg2);
                                tempval =
                                    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
-                               gfar_write(&regs->maccfg2, tempval);
                                break;
                        case 100:
                        case 10:
-                               tempval = gfar_read(&regs->maccfg2);
                                tempval =
                                    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
-                               gfar_write(&regs->maccfg2, tempval);
                                break;
                        default:
                                if (netif_msg_link(priv))
                                        printk(KERN_WARNING
-                                                       "%s: Ack!  Speed (%d) is not 10/100/1000!\n",
-                                                       dev->name, mii_info->speed);
+                                               "%s: Ack!  Speed (%d) is not 10/100/1000!\n",
+                                               dev->name, phydev->speed);
                                break;
                        }
 
-                       if (netif_msg_link(priv))
-                               printk(KERN_INFO "%s: Speed %dBT\n", dev->name,
-                                               mii_info->speed);
-
-                       priv->oldspeed = mii_info->speed;
+                       priv->oldspeed = phydev->speed;
                }
 
+               gfar_write(&regs->maccfg2, tempval);
+
                if (!priv->oldlink) {
-                       if (netif_msg_link(priv))
-                               printk(KERN_INFO "%s: Link is up\n", dev->name);
+                       new_state = 1;
                        priv->oldlink = 1;
-                       netif_carrier_on(dev);
                        netif_schedule(dev);
                }
-       } else {
-               if (priv->oldlink) {
-                       if (netif_msg_link(priv))
-                               printk(KERN_INFO "%s: Link is down\n",
-                                               dev->name);
-                       priv->oldlink = 0;
-                       priv->oldspeed = 0;
-                       priv->oldduplex = -1;
-                       netif_carrier_off(dev);
-               }
+       } else if (priv->oldlink) {
+               new_state = 1;
+               priv->oldlink = 0;
+               priv->oldspeed = 0;
+               priv->oldduplex = -1;
        }
-}
 
+       if (new_state && netif_msg_link(priv))
+               phy_print_status(phydev);
+
+       spin_unlock_irqrestore(&priv->lock, flags);
+}
 
 /* Update the hash table based on the current list of multicast
  * addresses we subscribe to.  Also, change the promiscuity of
@@ -2122,12 +1871,23 @@ static struct device_driver gfar_driver = {
 
 static int __init gfar_init(void)
 {
-       return driver_register(&gfar_driver);
+       int err = gfar_mdio_init();
+
+       if (err)
+               return err;
+
+       err = driver_register(&gfar_driver);
+
+       if (err)
+               gfar_mdio_exit();
+       
+       return err;
 }
 
 static void __exit gfar_exit(void)
 {
        driver_unregister(&gfar_driver);
+       gfar_mdio_exit();
 }
 
 module_init(gfar_init);
index 28af087d9fbba24d0005fd66c34f765005565610..c77ca6c0d04a6d651c0e3d37ca749146b5e9bc5c 100644
@@ -17,7 +17,6 @@
  *
  *  Still left to do:
  *      -Add support for module parameters
- *     -Add support for ethtool -s
  *     -Add patch for ethtool phys id
  */
 #ifndef __GIANFAR_H
@@ -37,7 +36,8 @@
 #include <linux/skbuff.h>
 #include <linux/spinlock.h>
 #include <linux/mm.h>
-#include <linux/fsl_devices.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
 
 #include <asm/io.h>
 #include <asm/irq.h>
@@ -48,7 +48,8 @@
 #include <linux/workqueue.h>
 #include <linux/ethtool.h>
 #include <linux/netdevice.h>
-#include "gianfar_phy.h"
+#include <linux/fsl_devices.h>
+#include "gianfar_mii.h"
 
 /* The maximum number of packets to be handled in one call of gfar_poll */
 #define GFAR_DEV_WEIGHT 64
@@ -73,7 +74,7 @@
 #define PHY_INIT_TIMEOUT 100000
 #define GFAR_PHY_CHANGE_TIME 2
 
-#define DEVICE_NAME "%s: Gianfar Ethernet Controller Version 1.1, "
+#define DEVICE_NAME "%s: Gianfar Ethernet Controller Version 1.2, "
 #define DRV_NAME "gfar-enet"
 extern const char gfar_driver_name[];
 extern const char gfar_driver_version[];
@@ -578,12 +579,7 @@ struct gfar {
        u32     hafdup;         /* 0x.50c - Half Duplex Register */
        u32     maxfrm;         /* 0x.510 - Maximum Frame Length Register */
        u8      res18[12];
-       u32     miimcfg;        /* 0x.520 - MII Management Configuration Register */
-       u32     miimcom;        /* 0x.524 - MII Management Command Register */
-       u32     miimadd;        /* 0x.528 - MII Management Address Register */
-       u32     miimcon;        /* 0x.52c - MII Management Control Register */
-       u32     miimstat;       /* 0x.530 - MII Management Status Register */
-       u32     miimind;        /* 0x.534 - MII Management Indicator Register */
+       u8      gfar_mii_regs[24];      /* See gianfar_phy.h */
        u8      res19[4];
        u32     ifstat;         /* 0x.53c - Interface Status Register */
        u32     macstnaddr1;    /* 0x.540 - Station Address Part 1 Register */
@@ -688,9 +684,6 @@ struct gfar_private {
        struct gfar *regs;      /* Pointer to the GFAR memory mapped Registers */
        u32 *hash_regs[16];
        int hash_width;
-       struct gfar *phyregs;
-       struct work_struct tq;
-       struct timer_list phy_info_timer;
        struct net_device_stats stats; /* linux network statistics */
        struct gfar_extra_stats extra_stats;
        spinlock_t lock;
@@ -710,7 +703,8 @@ struct gfar_private {
        unsigned int interruptError;
        struct gianfar_platform_data *einfo;
 
-       struct gfar_mii_info *mii_info;
+       struct phy_device *phydev;
+       struct mii_bus *mii_bus;
        int oldspeed;
        int oldduplex;
        int oldlink;
@@ -732,4 +726,12 @@ extern inline void gfar_write(volatile unsigned *addr, u32 val)
 
 extern struct ethtool_ops *gfar_op_array[];
 
+extern irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs);
+extern int startup_gfar(struct net_device *dev);
+extern void stop_gfar(struct net_device *dev);
+extern void gfar_halt(struct net_device *dev);
+extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev,
+               int enable, u32 regnum, u32 read);
+void gfar_setup_stashing(struct net_device *dev);
+
 #endif /* __GIANFAR_H */
index a451de629197b13e1204f59b2707fef1d425e91d..68e3578e76133b8783b5dcfa1db03367c5e0d9ac 100644
 #include <asm/types.h>
 #include <asm/uaccess.h>
 #include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
 
 #include "gianfar.h"
 
 #define is_power_of_2(x)        ((x) != 0 && (((x) & ((x) - 1)) == 0))
 
-extern int startup_gfar(struct net_device *dev);
-extern void stop_gfar(struct net_device *dev);
-extern void gfar_halt(struct net_device *dev);
 extern void gfar_start(struct net_device *dev);
 extern int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
 
+#define GFAR_MAX_COAL_USECS 0xffff
+#define GFAR_MAX_COAL_FRAMES 0xff
 static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
                     u64 * buf);
 static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf);
@@ -182,38 +183,32 @@ static void gfar_gdrvinfo(struct net_device *dev, struct
        drvinfo->eedump_len = 0;
 }
 
+
+static int gfar_ssettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct gfar_private *priv = netdev_priv(dev);
+       struct phy_device *phydev = priv->phydev;
+
+       if (NULL == phydev)
+               return -ENODEV;
+
+       return phy_ethtool_sset(phydev, cmd);
+}
+
+
 /* Return the current settings in the ethtool_cmd structure */
 static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
        struct gfar_private *priv = netdev_priv(dev);
-       uint gigabit_support = 
-               priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
-                       SUPPORTED_1000baseT_Full : 0;
-       uint gigabit_advert = 
-               priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
-                       ADVERTISED_1000baseT_Full: 0;
-
-       cmd->supported = (SUPPORTED_10baseT_Half
-                         | SUPPORTED_100baseT_Half
-                         | SUPPORTED_100baseT_Full
-                         | gigabit_support | SUPPORTED_Autoneg);
-
-       /* For now, we always advertise everything */
-       cmd->advertising = (ADVERTISED_10baseT_Half
-                           | ADVERTISED_100baseT_Half
-                           | ADVERTISED_100baseT_Full
-                           | gigabit_advert | ADVERTISED_Autoneg);
-
-       cmd->speed = priv->mii_info->speed;
-       cmd->duplex = priv->mii_info->duplex;
-       cmd->port = PORT_MII;
-       cmd->phy_address = priv->mii_info->mii_id;
-       cmd->transceiver = XCVR_EXTERNAL;
-       cmd->autoneg = AUTONEG_ENABLE;
+       struct phy_device *phydev = priv->phydev;
+
+       if (NULL == phydev)
+               return -ENODEV;
+       
        cmd->maxtxpkt = priv->txcount;
        cmd->maxrxpkt = priv->rxcount;
 
-       return 0;
+       return phy_ethtool_gset(phydev, cmd);
 }
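
Both callbacks above are now thin wrappers: everything link-related is handed off to phylib, and the driver only contributes its own coalescing packet counts. For context, this is the userspace path that exercises the new set_settings hook -- a hedged sketch using the standard SIOCETHTOOL ioctl (the socket, interface name and the choice of 100/full are illustrative assumptions, not part of the patch):

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/ethtool.h>

/* Sketch: read the current settings, then force 100 Mb/s full duplex with
 * autonegotiation off.  'sock' is any open AF_INET datagram socket. */
static int example_force_100_full(int sock, const char *ifname)
{
        struct ifreq ifr;
        struct ethtool_cmd ecmd;

        memset(&ifr, 0, sizeof(ifr));
        memset(&ecmd, 0, sizeof(ecmd));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        ifr.ifr_data = (void *) &ecmd;

        ecmd.cmd = ETHTOOL_GSET;                /* routed to gfar_gsettings() */
        if (ioctl(sock, SIOCETHTOOL, &ifr) < 0)
                return -1;

        ecmd.cmd = ETHTOOL_SSET;                /* routed to gfar_ssettings() */
        ecmd.speed = SPEED_100;
        ecmd.duplex = DUPLEX_FULL;
        ecmd.autoneg = AUTONEG_DISABLE;
        return ioctl(sock, SIOCETHTOOL, &ifr);
}
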
 
 /* Return the length of the register structure */
@@ -241,14 +236,14 @@ static unsigned int gfar_usecs2ticks(struct gfar_private *priv, unsigned int use
        unsigned int count;
 
        /* The timer is different, depending on the interface speed */
-       switch (priv->mii_info->speed) {
-       case 1000:
+       switch (priv->phydev->speed) {
+       case SPEED_1000:
                count = GFAR_GBIT_TIME;
                break;
-       case 100:
+       case SPEED_100:
                count = GFAR_100_TIME;
                break;
-       case 10:
+       case SPEED_10:
        default:
                count = GFAR_10_TIME;
                break;
@@ -265,14 +260,14 @@ static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int tic
        unsigned int count;
 
        /* The timer is different, depending on the interface speed */
-       switch (priv->mii_info->speed) {
-       case 1000:
+       switch (priv->phydev->speed) {
+       case SPEED_1000:
                count = GFAR_GBIT_TIME;
                break;
-       case 100:
+       case SPEED_100:
                count = GFAR_100_TIME;
                break;
-       case 10:
+       case SPEED_10:
        default:
                count = GFAR_10_TIME;
                break;
@@ -292,6 +287,9 @@ static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
        if (!(priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
                return -EOPNOTSUPP;
 
+       if (NULL == priv->phydev)
+               return -ENODEV;
+
        cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, priv->rxtime);
        cvals->rx_max_coalesced_frames = priv->rxcount;
 
@@ -348,6 +346,22 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
        else
                priv->rxcoalescing = 1;
 
+       if (NULL == priv->phydev)
+               return -ENODEV;
+
+       /* Check the bounds of the values */
+       if (cvals->rx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
+               pr_info("Coalescing is limited to %d microseconds\n",
+                               GFAR_MAX_COAL_USECS);
+               return -EINVAL;
+       }
+
+       if (cvals->rx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
+               pr_info("Coalescing is limited to %d frames\n",
+                               GFAR_MAX_COAL_FRAMES);
+               return -EINVAL;
+       }
+
        priv->rxtime = gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs);
        priv->rxcount = cvals->rx_max_coalesced_frames;
 
@@ -358,6 +372,19 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
        else
                priv->txcoalescing = 1;
 
+       /* Check the bounds of the values */
+       if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
+               pr_info("Coalescing is limited to %d microseconds\n",
+                               GFAR_MAX_COAL_USECS);
+               return -EINVAL;
+       }
+
+       if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
+               pr_info("Coalescing is limited to %d frames\n",
+                               GFAR_MAX_COAL_FRAMES);
+               return -EINVAL;
+       }
+
        priv->txtime = gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs);
        priv->txcount = cvals->tx_max_coalesced_frames;
 
@@ -536,6 +563,7 @@ static void gfar_set_msglevel(struct net_device *dev, uint32_t data)
 
 struct ethtool_ops gfar_ethtool_ops = {
        .get_settings = gfar_gsettings,
+       .set_settings = gfar_ssettings,
        .get_drvinfo = gfar_gdrvinfo,
        .get_regs_len = gfar_reglen,
        .get_regs = gfar_get_regs,
diff --git a/drivers/net/gianfar_mii.c b/drivers/net/gianfar_mii.c
new file mode 100644
index 0000000..1eca1db
--- /dev/null
@@ -0,0 +1,219 @@
+/* 
+ * drivers/net/gianfar_mii.c
+ *
+ * Gianfar Ethernet Driver -- MIIM bus implementation
+ * Provides Bus interface for MIIM regs
+ *
+ * Author: Andy Fleming
+ * Maintainer: Kumar Gala (kumar.gala@freescale.com)
+ *
+ * Copyright (c) 2002-2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <asm/ocp.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#include "gianfar.h"
+#include "gianfar_mii.h"
+
+/* Write value to the PHY at mii_id at register regnum,
+ * on the bus, waiting until the write is done before returning.
+ * All PHY configuration is done through the TSEC1 MIIM regs */
+int gfar_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value)
+{
+       struct gfar_mii *regs = bus->priv;
+
+       /* Set the PHY address and the register address we want to write */
+       gfar_write(&regs->miimadd, (mii_id << 8) | regnum);
+
+       /* Write out the value we want */
+       gfar_write(&regs->miimcon, value);
+
+       /* Wait for the transaction to finish */
+       while (gfar_read(&regs->miimind) & MIIMIND_BUSY)
+               cpu_relax();
+
+       return 0;
+}
+
+/* Read the bus for PHY at addr mii_id, register regnum, and
+ * return the value.  Clears miimcom first.  All PHY
+ * configuration has to be done through the TSEC1 MIIM regs */
+int gfar_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+       struct gfar_mii *regs = bus->priv;
+       u16 value;
+
+       /* Set the PHY address and the register address we want to read */
+       gfar_write(&regs->miimadd, (mii_id << 8) | regnum);
+
+       /* Clear miimcom, and then initiate a read */
+       gfar_write(&regs->miimcom, 0);
+       gfar_write(&regs->miimcom, MII_READ_COMMAND);
+
+       /* Wait for the transaction to finish */
+       while (gfar_read(&regs->miimind) & (MIIMIND_NOTVALID | MIIMIND_BUSY))
+               cpu_relax();
+
+       /* Grab the value of the register from miimstat */
+       value = gfar_read(&regs->miimstat);
+
+       return value;
+}
+
+
+/* Reset the MIIM registers, and wait for the bus to free */
+int gfar_mdio_reset(struct mii_bus *bus)
+{
+       struct gfar_mii *regs = bus->priv;
+       unsigned int timeout = PHY_INIT_TIMEOUT;
+
+       spin_lock_bh(&bus->mdio_lock);
+
+       /* Reset the management interface */
+       gfar_write(&regs->miimcfg, MIIMCFG_RESET);
+
+       /* Setup the MII Mgmt clock speed */
+       gfar_write(&regs->miimcfg, MIIMCFG_INIT_VALUE);
+
+       /* Wait until the bus is free */
+       while ((gfar_read(&regs->miimind) & MIIMIND_BUSY) &&
+                       timeout--)
+               cpu_relax();
+
+       spin_unlock_bh(&bus->mdio_lock);
+
+       if(timeout <= 0) {
+               printk(KERN_ERR "%s: The MII Bus is stuck!\n",
+                               bus->name);
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
+
+int gfar_mdio_probe(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct gianfar_mdio_data *pdata;
+       struct gfar_mii *regs;
+       struct mii_bus *new_bus;
+       int err = 0;
+
+       if (NULL == dev)
+               return -EINVAL;
+
+       new_bus = kmalloc(sizeof(struct mii_bus), GFP_KERNEL);
+
+       if (NULL == new_bus)
+               return -ENOMEM;
+
+       new_bus->name = "Gianfar MII Bus";
+       new_bus->read = &gfar_mdio_read;
+       new_bus->write = &gfar_mdio_write;
+       new_bus->reset = &gfar_mdio_reset;
+       new_bus->id = pdev->id;
+
+       pdata = (struct gianfar_mdio_data *)pdev->dev.platform_data;
+
+       if (NULL == pdata) {
+               printk(KERN_ERR "gfar mdio %d: Missing platform data!\n", pdev->id);
+               return -ENODEV;
+       }
+
+       /* Set the PHY base address */
+       regs = (struct gfar_mii *) ioremap(pdata->paddr, 
+                       sizeof (struct gfar_mii));
+
+       if (NULL == regs) {
+               err = -ENOMEM;
+               goto reg_map_fail;
+       }
+
+       new_bus->priv = regs;
+
+       new_bus->irq = pdata->irq;
+
+       new_bus->dev = dev;
+       dev_set_drvdata(dev, new_bus);
+
+       err = mdiobus_register(new_bus);
+
+       if (0 != err) {
+               printk (KERN_ERR "%s: Cannot register as MDIO bus\n", 
+                               new_bus->name);
+               goto bus_register_fail;
+       }
+
+       return 0;
+
+bus_register_fail:
+       iounmap((void *) regs);
+reg_map_fail:
+       kfree(new_bus);
+
+       return err;
+}
+
+
+int gfar_mdio_remove(struct device *dev)
+{
+       struct mii_bus *bus = dev_get_drvdata(dev);
+
+       mdiobus_unregister(bus);
+
+       dev_set_drvdata(dev, NULL);
+
+       iounmap(bus->priv);
+       bus->priv = NULL;
+       kfree(bus);
+
+       return 0;
+}
+
+static struct device_driver gianfar_mdio_driver = {
+       .name = "fsl-gianfar_mdio",
+       .bus = &platform_bus_type,
+       .probe = gfar_mdio_probe,
+       .remove = gfar_mdio_remove,
+};
+
+int __init gfar_mdio_init(void)
+{
+       return driver_register(&gianfar_mdio_driver);
+}
+
+void __exit gfar_mdio_exit(void)
+{
+       driver_unregister(&gianfar_mdio_driver);
+}
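
The read/write/reset accessors above are what phylib calls once mdiobus_register() has scanned the bus. As a rough illustration of the calling convention (not code from the patch -- real users normally go through phylib's locked helpers, and the PHY address is a per-board assumption):

#include <linux/mii.h>
#include <linux/phy.h>

/* Sketch: poll the link-status bit of the PHY at 'phy_addr' directly
 * through the bus accessors installed by gfar_mdio_probe(). */
static int example_link_is_up(struct mii_bus *bus, int phy_addr)
{
        int bmsr = bus->read(bus, phy_addr, MII_BMSR);

        if (bmsr < 0)
                return bmsr;                    /* MDIO transaction failed */

        return (bmsr & BMSR_LSTATUS) ? 1 : 0;
}
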
diff --git a/drivers/net/gianfar_mii.h b/drivers/net/gianfar_mii.h
new file mode 100644
index 0000000..56e5665
--- /dev/null
@@ -0,0 +1,45 @@
+/* 
+ * drivers/net/gianfar_mii.h
+ *
+ * Gianfar Ethernet Driver -- MII Management Bus Implementation
+ * Driver for the MDIO bus controller in the Gianfar register space
+ *
+ * Author: Andy Fleming
+ * Maintainer: Kumar Gala (kumar.gala@freescale.com)
+ *
+ * Copyright (c) 2002-2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+#ifndef __GIANFAR_MII_H
+#define __GIANFAR_MII_H
+
+#define MIIMIND_BUSY            0x00000001
+#define MIIMIND_NOTVALID        0x00000004
+
+#define MII_READ_COMMAND       0x00000001
+
+#define GFAR_SUPPORTED (SUPPORTED_10baseT_Half \
+               | SUPPORTED_100baseT_Half \
+               | SUPPORTED_100baseT_Full \
+               | SUPPORTED_Autoneg \
+               | SUPPORTED_MII)
+
+struct gfar_mii {
+       u32     miimcfg;        /* 0x.520 - MII Management Config Register */
+       u32     miimcom;        /* 0x.524 - MII Management Command Register */
+       u32     miimadd;        /* 0x.528 - MII Management Address Register */
+       u32     miimcon;        /* 0x.52c - MII Management Control Register */
+       u32     miimstat;       /* 0x.530 - MII Management Status Register */
+       u32     miimind;        /* 0x.534 - MII Management Indicator Register */
+};
+
+int gfar_mdio_read(struct mii_bus *bus, int mii_id, int regnum);
+int gfar_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
+int __init gfar_mdio_init(void);
+void __exit gfar_mdio_exit(void);
+#endif /* __GIANFAR_MII_H */
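
struct gfar_mii above covers the same six 32-bit registers (0x520-0x537) that gianfar.h now hides behind the opaque 24-byte gfar_mii_regs[] placeholder. A build-time check of that size assumption could look like the sketch below (illustrative only; it is not part of the patch and assumes both driver headers are available):

#include <linux/kernel.h>
#include "gianfar.h"
#include "gianfar_mii.h"

/* Sketch: fail the build if the MII block ever drifts out of sync with
 * the placeholder that replaced it in struct gfar. */
static inline void example_check_mii_layout(void)
{
        BUILD_BUG_ON(sizeof(struct gfar_mii) !=
                     sizeof(((struct gfar *) 0)->gfar_mii_regs));
}
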
diff --git a/drivers/net/gianfar_phy.c b/drivers/net/gianfar_phy.c
deleted file mode 100644
index 7c965f2..0000000
+++ /dev/null
@@ -1,661 +0,0 @@
-/* 
- * drivers/net/gianfar_phy.c
- *
- * Gianfar Ethernet Driver -- PHY handling
- * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560
- * Based on 8260_io/fcc_enet.c
- *
- * Author: Andy Fleming
- * Maintainer: Kumar Gala (kumar.gala@freescale.com)
- *
- * Copyright (c) 2002-2004 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- *
- */
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/spinlock.h>
-#include <linux/mm.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/uaccess.h>
-#include <linux/module.h>
-#include <linux/version.h>
-#include <linux/crc32.h>
-#include <linux/mii.h>
-
-#include "gianfar.h"
-#include "gianfar_phy.h"
-
-static void config_genmii_advert(struct gfar_mii_info *mii_info);
-static void genmii_setup_forced(struct gfar_mii_info *mii_info);
-static void genmii_restart_aneg(struct gfar_mii_info *mii_info);
-static int gbit_config_aneg(struct gfar_mii_info *mii_info);
-static int genmii_config_aneg(struct gfar_mii_info *mii_info);
-static int genmii_update_link(struct gfar_mii_info *mii_info);
-static int genmii_read_status(struct gfar_mii_info *mii_info);
-u16 phy_read(struct gfar_mii_info *mii_info, u16 regnum);
-void phy_write(struct gfar_mii_info *mii_info, u16 regnum, u16 val);
-
-/* Write value to the PHY for this device to the register at regnum, */
-/* waiting until the write is done before it returns.  All PHY */
-/* configuration has to be done through the TSEC1 MIIM regs */
-void write_phy_reg(struct net_device *dev, int mii_id, int regnum, int value)
-{
-       struct gfar_private *priv = netdev_priv(dev);
-       struct gfar *regbase = priv->phyregs;
-
-       /* Set the PHY address and the register address we want to write */
-       gfar_write(&regbase->miimadd, (mii_id << 8) | regnum);
-
-       /* Write out the value we want */
-       gfar_write(&regbase->miimcon, value);
-
-       /* Wait for the transaction to finish */
-       while (gfar_read(&regbase->miimind) & MIIMIND_BUSY)
-               cpu_relax();
-}
-
-/* Reads from register regnum in the PHY for device dev, */
-/* returning the value.  Clears miimcom first.  All PHY */
-/* configuration has to be done through the TSEC1 MIIM regs */
-int read_phy_reg(struct net_device *dev, int mii_id, int regnum)
-{
-       struct gfar_private *priv = netdev_priv(dev);
-       struct gfar *regbase = priv->phyregs;
-       u16 value;
-
-       /* Set the PHY address and the register address we want to read */
-       gfar_write(&regbase->miimadd, (mii_id << 8) | regnum);
-
-       /* Clear miimcom, and then initiate a read */
-       gfar_write(&regbase->miimcom, 0);
-       gfar_write(&regbase->miimcom, MII_READ_COMMAND);
-
-       /* Wait for the transaction to finish */
-       while (gfar_read(&regbase->miimind) & (MIIMIND_NOTVALID | MIIMIND_BUSY))
-               cpu_relax();
-
-       /* Grab the value of the register from miimstat */
-       value = gfar_read(&regbase->miimstat);
-
-       return value;
-}
-
-void mii_clear_phy_interrupt(struct gfar_mii_info *mii_info)
-{
-       if(mii_info->phyinfo->ack_interrupt)
-               mii_info->phyinfo->ack_interrupt(mii_info);
-}
-
-
-void mii_configure_phy_interrupt(struct gfar_mii_info *mii_info, u32 interrupts)
-{
-       mii_info->interrupts = interrupts;
-       if(mii_info->phyinfo->config_intr)
-               mii_info->phyinfo->config_intr(mii_info);
-}
-
-
-/* Writes MII_ADVERTISE with the appropriate values, after
- * sanitizing advertise to make sure only supported features
- * are advertised 
- */
-static void config_genmii_advert(struct gfar_mii_info *mii_info)
-{
-       u32 advertise;
-       u16 adv;
-
-       /* Only allow advertising what this PHY supports */
-       mii_info->advertising &= mii_info->phyinfo->features;
-       advertise = mii_info->advertising;
-
-       /* Setup standard advertisement */
-       adv = phy_read(mii_info, MII_ADVERTISE);
-       adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
-       if (advertise & ADVERTISED_10baseT_Half)
-               adv |= ADVERTISE_10HALF;
-       if (advertise & ADVERTISED_10baseT_Full)
-               adv |= ADVERTISE_10FULL;
-       if (advertise & ADVERTISED_100baseT_Half)
-               adv |= ADVERTISE_100HALF;
-       if (advertise & ADVERTISED_100baseT_Full)
-               adv |= ADVERTISE_100FULL;
-       phy_write(mii_info, MII_ADVERTISE, adv);
-}
-
-static void genmii_setup_forced(struct gfar_mii_info *mii_info)
-{
-       u16 ctrl;
-       u32 features = mii_info->phyinfo->features;
-       
-       ctrl = phy_read(mii_info, MII_BMCR);
-
-       ctrl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_SPEED1000|BMCR_ANENABLE);
-       ctrl |= BMCR_RESET;
-
-       switch(mii_info->speed) {
-               case SPEED_1000:
-                       if(features & (SUPPORTED_1000baseT_Half
-                                               | SUPPORTED_1000baseT_Full)) {
-                               ctrl |= BMCR_SPEED1000;
-                               break;
-                       }
-                       mii_info->speed = SPEED_100;
-               case SPEED_100:
-                       if (features & (SUPPORTED_100baseT_Half
-                                               | SUPPORTED_100baseT_Full)) {
-                               ctrl |= BMCR_SPEED100;
-                               break;
-                       }
-                       mii_info->speed = SPEED_10;
-               case SPEED_10:
-                       if (features & (SUPPORTED_10baseT_Half
-                                               | SUPPORTED_10baseT_Full))
-                               break;
-               default: /* Unsupported speed! */
-                       printk(KERN_ERR "%s: Bad speed!\n", 
-                                       mii_info->dev->name);
-                       break;
-       }
-
-       phy_write(mii_info, MII_BMCR, ctrl);
-}
-
-
-/* Enable and Restart Autonegotiation */
-static void genmii_restart_aneg(struct gfar_mii_info *mii_info)
-{
-       u16 ctl;
-
-       ctl = phy_read(mii_info, MII_BMCR);
-       ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
-       phy_write(mii_info, MII_BMCR, ctl);
-}
-
-
-static int gbit_config_aneg(struct gfar_mii_info *mii_info)
-{
-       u16 adv;
-       u32 advertise;
-
-       if(mii_info->autoneg) {
-               /* Configure the ADVERTISE register */
-               config_genmii_advert(mii_info);
-               advertise = mii_info->advertising;
-
-               adv = phy_read(mii_info, MII_1000BASETCONTROL);
-               adv &= ~(MII_1000BASETCONTROL_FULLDUPLEXCAP |
-                               MII_1000BASETCONTROL_HALFDUPLEXCAP);
-               if (advertise & SUPPORTED_1000baseT_Half)
-                       adv |= MII_1000BASETCONTROL_HALFDUPLEXCAP;
-               if (advertise & SUPPORTED_1000baseT_Full)
-                       adv |= MII_1000BASETCONTROL_FULLDUPLEXCAP;
-               phy_write(mii_info, MII_1000BASETCONTROL, adv);
-
-               /* Start/Restart aneg */
-               genmii_restart_aneg(mii_info);
-       } else
-               genmii_setup_forced(mii_info);
-
-       return 0;
-}
-
-static int marvell_config_aneg(struct gfar_mii_info *mii_info)
-{
-       /* The Marvell PHY has an errata which requires
-        * that certain registers get written in order
-        * to restart autonegotiation */
-       phy_write(mii_info, MII_BMCR, BMCR_RESET);
-
-       phy_write(mii_info, 0x1d, 0x1f);
-       phy_write(mii_info, 0x1e, 0x200c);
-       phy_write(mii_info, 0x1d, 0x5);
-       phy_write(mii_info, 0x1e, 0);
-       phy_write(mii_info, 0x1e, 0x100);
-
-       gbit_config_aneg(mii_info);
-
-       return 0;
-}
-static int genmii_config_aneg(struct gfar_mii_info *mii_info)
-{
-       if (mii_info->autoneg) {
-               config_genmii_advert(mii_info);
-               genmii_restart_aneg(mii_info);
-       } else
-               genmii_setup_forced(mii_info);
-
-       return 0;
-}
-
-
-static int genmii_update_link(struct gfar_mii_info *mii_info)
-{
-       u16 status;
-
-       /* Do a fake read */
-       phy_read(mii_info, MII_BMSR);
-
-       /* Read link and autonegotiation status */
-       status = phy_read(mii_info, MII_BMSR);
-       if ((status & BMSR_LSTATUS) == 0)
-               mii_info->link = 0;
-       else
-               mii_info->link = 1;
-
-       /* If we are autonegotiating, and not done, 
-        * return an error */
-       if (mii_info->autoneg && !(status & BMSR_ANEGCOMPLETE))
-               return -EAGAIN;
-
-       return 0;
-}
-
-static int genmii_read_status(struct gfar_mii_info *mii_info)
-{
-       u16 status;
-       int err;
-
-       /* Update the link, but return if there
-        * was an error */
-       err = genmii_update_link(mii_info);
-       if (err)
-               return err;
-
-       if (mii_info->autoneg) {
-               status = phy_read(mii_info, MII_LPA);
-
-               if (status & (LPA_10FULL | LPA_100FULL))
-                       mii_info->duplex = DUPLEX_FULL;
-               else
-                       mii_info->duplex = DUPLEX_HALF;
-               if (status & (LPA_100FULL | LPA_100HALF))
-                       mii_info->speed = SPEED_100;
-               else
-                       mii_info->speed = SPEED_10;
-               mii_info->pause = 0;
-       }
-       /* On non-aneg, we assume what we put in BMCR is the speed,
-        * though magic-aneg shouldn't prevent this case from occurring
-        */
-
-       return 0;
-}
-static int marvell_read_status(struct gfar_mii_info *mii_info)
-{
-       u16 status;
-       int err;
-
-       /* Update the link, but return if there
-        * was an error */
-       err = genmii_update_link(mii_info);
-       if (err)
-               return err;
-
-       /* If the link is up, read the speed and duplex */
-       /* If we aren't autonegotiating, assume speeds 
-        * are as set */
-       if (mii_info->autoneg && mii_info->link) {
-               int speed;
-               status = phy_read(mii_info, MII_M1011_PHY_SPEC_STATUS);
-
-#if 0
-               /* If speed and duplex aren't resolved,
-                * return an error.  Isn't this handled
-                * by checking aneg?
-                */
-               if ((status & MII_M1011_PHY_SPEC_STATUS_RESOLVED) == 0)
-                       return -EAGAIN;
-#endif
-
-               /* Get the duplexity */
-               if (status & MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX)
-                       mii_info->duplex = DUPLEX_FULL;
-               else
-                       mii_info->duplex = DUPLEX_HALF;
-
-               /* Get the speed */
-               speed = status & MII_M1011_PHY_SPEC_STATUS_SPD_MASK;
-               switch(speed) {
-                       case MII_M1011_PHY_SPEC_STATUS_1000:
-                               mii_info->speed = SPEED_1000;
-                               break;
-                       case MII_M1011_PHY_SPEC_STATUS_100:
-                               mii_info->speed = SPEED_100;
-                               break;
-                       default:
-                               mii_info->speed = SPEED_10;
-                               break;
-               }
-               mii_info->pause = 0;
-       }
-
-       return 0;
-}
-
-
-static int cis820x_read_status(struct gfar_mii_info *mii_info)
-{
-       u16 status;
-       int err;
-
-       /* Update the link, but return if there
-        * was an error */
-       err = genmii_update_link(mii_info);
-       if (err)
-               return err;
-
-       /* If the link is up, read the speed and duplex */
-       /* If we aren't autonegotiating, assume speeds 
-        * are as set */
-       if (mii_info->autoneg && mii_info->link) {
-               int speed;
-
-               status = phy_read(mii_info, MII_CIS8201_AUX_CONSTAT);
-               if (status & MII_CIS8201_AUXCONSTAT_DUPLEX)
-                       mii_info->duplex = DUPLEX_FULL;
-               else
-                       mii_info->duplex = DUPLEX_HALF;
-
-               speed = status & MII_CIS8201_AUXCONSTAT_SPEED;
-
-               switch (speed) {
-               case MII_CIS8201_AUXCONSTAT_GBIT:
-                       mii_info->speed = SPEED_1000;
-                       break;
-               case MII_CIS8201_AUXCONSTAT_100:
-                       mii_info->speed = SPEED_100;
-                       break;
-               default:
-                       mii_info->speed = SPEED_10;
-                       break;
-               }
-       }
-
-       return 0;
-}
-
-static int marvell_ack_interrupt(struct gfar_mii_info *mii_info)
-{
-       /* Clear the interrupts by reading the reg */
-       phy_read(mii_info, MII_M1011_IEVENT);
-
-       return 0;
-}
-
-static int marvell_config_intr(struct gfar_mii_info *mii_info)
-{
-       if(mii_info->interrupts == MII_INTERRUPT_ENABLED)
-               phy_write(mii_info, MII_M1011_IMASK, MII_M1011_IMASK_INIT);
-       else
-               phy_write(mii_info, MII_M1011_IMASK, MII_M1011_IMASK_CLEAR);
-
-       return 0;
-}
-
-static int cis820x_init(struct gfar_mii_info *mii_info)
-{
-       phy_write(mii_info, MII_CIS8201_AUX_CONSTAT, 
-                       MII_CIS8201_AUXCONSTAT_INIT);
-       phy_write(mii_info, MII_CIS8201_EXT_CON1,
-                       MII_CIS8201_EXTCON1_INIT);
-
-       return 0;
-}
-
-static int cis820x_ack_interrupt(struct gfar_mii_info *mii_info)
-{
-       phy_read(mii_info, MII_CIS8201_ISTAT);
-
-       return 0;
-}
-
-static int cis820x_config_intr(struct gfar_mii_info *mii_info)
-{
-       if(mii_info->interrupts == MII_INTERRUPT_ENABLED)
-               phy_write(mii_info, MII_CIS8201_IMASK, MII_CIS8201_IMASK_MASK);
-       else
-               phy_write(mii_info, MII_CIS8201_IMASK, 0);
-
-       return 0;
-}
-
-#define DM9161_DELAY 10
-
-static int dm9161_read_status(struct gfar_mii_info *mii_info)
-{
-       u16 status;
-       int err;
-
-       /* Update the link, but return if there
-        * was an error */
-       err = genmii_update_link(mii_info);
-       if (err)
-               return err;
-
-       /* If the link is up, read the speed and duplex */
-       /* If we aren't autonegotiating, assume speeds 
-        * are as set */
-       if (mii_info->autoneg && mii_info->link) {
-               status = phy_read(mii_info, MII_DM9161_SCSR);
-               if (status & (MII_DM9161_SCSR_100F | MII_DM9161_SCSR_100H))
-                       mii_info->speed = SPEED_100;
-               else
-                       mii_info->speed = SPEED_10;
-
-               if (status & (MII_DM9161_SCSR_100F | MII_DM9161_SCSR_10F))
-                       mii_info->duplex = DUPLEX_FULL;
-               else
-                       mii_info->duplex = DUPLEX_HALF;
-       }
-
-       return 0;
-}
-
-
-static int dm9161_config_aneg(struct gfar_mii_info *mii_info)
-{
-       struct dm9161_private *priv = mii_info->priv;
-
-       if(0 == priv->resetdone)
-               return -EAGAIN;
-
-       return 0;
-}
-
-static void dm9161_timer(unsigned long data)
-{
-       struct gfar_mii_info *mii_info = (struct gfar_mii_info *)data;
-       struct dm9161_private *priv = mii_info->priv;
-       u16 status = phy_read(mii_info, MII_BMSR);
-
-       if (status & BMSR_ANEGCOMPLETE) {
-               priv->resetdone = 1;
-       } else
-               mod_timer(&priv->timer, jiffies + DM9161_DELAY * HZ);
-}
-
-static int dm9161_init(struct gfar_mii_info *mii_info)
-{
-       struct dm9161_private *priv;
-
-       /* Allocate the private data structure */
-       priv = kmalloc(sizeof(struct dm9161_private), GFP_KERNEL);
-
-       if (NULL == priv)
-               return -ENOMEM;
-
-       mii_info->priv = priv;
-
-       /* Reset is not done yet */
-       priv->resetdone = 0;
-
-       /* Isolate the PHY */
-       phy_write(mii_info, MII_BMCR, BMCR_ISOLATE);
-
-       /* Do not bypass the scrambler/descrambler */
-       phy_write(mii_info, MII_DM9161_SCR, MII_DM9161_SCR_INIT);
-
-       /* Clear 10BTCSR to default */
-       phy_write(mii_info, MII_DM9161_10BTCSR, MII_DM9161_10BTCSR_INIT);
-
-       /* Reconnect the PHY, and enable Autonegotiation */
-       phy_write(mii_info, MII_BMCR, BMCR_ANENABLE);
-
-       /* Start a timer for DM9161_DELAY seconds to wait
-        * for the PHY to be ready */
-       init_timer(&priv->timer);
-       priv->timer.function = &dm9161_timer;
-       priv->timer.data = (unsigned long) mii_info;
-       mod_timer(&priv->timer, jiffies + DM9161_DELAY * HZ);
-
-       return 0;
-}
-
-static void dm9161_close(struct gfar_mii_info *mii_info)
-{
-       struct dm9161_private *priv = mii_info->priv;
-
-       del_timer_sync(&priv->timer);
-       kfree(priv);
-}
-
-#if 0
-static int dm9161_ack_interrupt(struct gfar_mii_info *mii_info)
-{
-       phy_read(mii_info, MII_DM9161_INTR);
-
-       return 0;
-}
-#endif
-
-/* Cicada 820x */
-static struct phy_info phy_info_cis820x = {
-       0x000fc440,
-       "Cicada Cis8204",
-       0x000fffc0,
-       .features       = MII_GBIT_FEATURES,
-       .init           = &cis820x_init,
-       .config_aneg    = &gbit_config_aneg,
-       .read_status    = &cis820x_read_status,
-       .ack_interrupt  = &cis820x_ack_interrupt,
-       .config_intr    = &cis820x_config_intr,
-};
-
-static struct phy_info phy_info_dm9161 = {
-       .phy_id         = 0x0181b880,
-       .name           = "Davicom DM9161E",
-       .phy_id_mask    = 0x0ffffff0,
-       .init           = dm9161_init,
-       .config_aneg    = dm9161_config_aneg,
-       .read_status    = dm9161_read_status,
-       .close          = dm9161_close,
-};
-
-static struct phy_info phy_info_marvell = {
-       .phy_id         = 0x01410c00,
-       .phy_id_mask    = 0xffffff00,
-       .name           = "Marvell 88E1101/88E1111",
-       .features       = MII_GBIT_FEATURES,
-       .config_aneg    = &marvell_config_aneg,
-       .read_status    = &marvell_read_status,
-       .ack_interrupt  = &marvell_ack_interrupt,
-       .config_intr    = &marvell_config_intr,
-};
-
-static struct phy_info phy_info_genmii= {
-       .phy_id         = 0x00000000,
-       .phy_id_mask    = 0x00000000,
-       .name           = "Generic MII",
-       .features       = MII_BASIC_FEATURES,
-       .config_aneg    = genmii_config_aneg,
-       .read_status    = genmii_read_status,
-};
-
-static struct phy_info *phy_info[] = {
-       &phy_info_cis820x,
-       &phy_info_marvell,
-       &phy_info_dm9161,
-       &phy_info_genmii,
-       NULL
-};
-
-u16 phy_read(struct gfar_mii_info *mii_info, u16 regnum)
-{
-       u16 retval;
-       unsigned long flags;
-
-       spin_lock_irqsave(&mii_info->mdio_lock, flags);
-       retval = mii_info->mdio_read(mii_info->dev, mii_info->mii_id, regnum);
-       spin_unlock_irqrestore(&mii_info->mdio_lock, flags);
-
-       return retval;
-}
-
-void phy_write(struct gfar_mii_info *mii_info, u16 regnum, u16 val)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&mii_info->mdio_lock, flags);
-       mii_info->mdio_write(mii_info->dev, 
-                       mii_info->mii_id, 
-                       regnum, val);
-       spin_unlock_irqrestore(&mii_info->mdio_lock, flags);
-}
-
-/* Use the PHY ID registers to determine what type of PHY is attached
- * to device dev.  return a struct phy_info structure describing that PHY
- */
-struct phy_info * get_phy_info(struct gfar_mii_info *mii_info)
-{
-       u16 phy_reg;
-       u32 phy_ID;
-       int i;
-       struct phy_info *theInfo = NULL;
-       struct net_device *dev = mii_info->dev;
-
-       /* Grab the bits from PHYIR1, and put them in the upper half */
-       phy_reg = phy_read(mii_info, MII_PHYSID1);
-       phy_ID = (phy_reg & 0xffff) << 16;
-
-       /* Grab the bits from PHYIR2, and put them in the lower half */
-       phy_reg = phy_read(mii_info, MII_PHYSID2);
-       phy_ID |= (phy_reg & 0xffff);
-
-       /* loop through all the known PHY types, and find one that */
-       /* matches the ID we read from the PHY. */
-       for (i = 0; phy_info[i]; i++)
-               if (phy_info[i]->phy_id == 
-                               (phy_ID & phy_info[i]->phy_id_mask)) {
-                       theInfo = phy_info[i];
-                       break;
-               }
-
-       /* This shouldn't happen, as we have generic PHY support */
-       if (theInfo == NULL) {
-               printk("%s: PHY id %x is not supported!\n", dev->name, phy_ID);
-               return NULL;
-       } else {
-               printk("%s: PHY is %s (%x)\n", dev->name, theInfo->name,
-                      phy_ID);
-       }
-
-       return theInfo;
-}
diff --git a/drivers/net/gianfar_phy.h b/drivers/net/gianfar_phy.h
deleted file mode 100644
index 1e9b3ab..0000000
+++ /dev/null
@@ -1,213 +0,0 @@
-/* 
- * drivers/net/gianfar_phy.h
- *
- * Gianfar Ethernet Driver -- PHY handling
- * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560
- * Based on 8260_io/fcc_enet.c
- *
- * Author: Andy Fleming
- * Maintainer: Kumar Gala (kumar.gala@freescale.com)
- *
- * Copyright (c) 2002-2004 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- *
- */
-#ifndef __GIANFAR_PHY_H
-#define __GIANFAR_PHY_H
-
-#define MII_end ((u32)-2)
-#define MII_read ((u32)-1)
-
-#define MIIMIND_BUSY            0x00000001
-#define MIIMIND_NOTVALID        0x00000004
-
-#define GFAR_AN_TIMEOUT         2000
-
-/* 1000BT control (Marvell & BCM54xx at least) */
-#define MII_1000BASETCONTROL                   0x09
-#define MII_1000BASETCONTROL_FULLDUPLEXCAP     0x0200
-#define MII_1000BASETCONTROL_HALFDUPLEXCAP     0x0100
-
-/* Cicada Extended Control Register 1 */
-#define MII_CIS8201_EXT_CON1           0x17
-#define MII_CIS8201_EXTCON1_INIT       0x0000
-
-/* Cicada Interrupt Mask Register */
-#define MII_CIS8201_IMASK              0x19
-#define MII_CIS8201_IMASK_IEN          0x8000
-#define MII_CIS8201_IMASK_SPEED        0x4000
-#define MII_CIS8201_IMASK_LINK         0x2000
-#define MII_CIS8201_IMASK_DUPLEX       0x1000
-#define MII_CIS8201_IMASK_MASK         0xf000
-
-/* Cicada Interrupt Status Register */
-#define MII_CIS8201_ISTAT              0x1a
-#define MII_CIS8201_ISTAT_STATUS       0x8000
-#define MII_CIS8201_ISTAT_SPEED        0x4000
-#define MII_CIS8201_ISTAT_LINK         0x2000
-#define MII_CIS8201_ISTAT_DUPLEX       0x1000
-
-/* Cicada Auxiliary Control/Status Register */
-#define MII_CIS8201_AUX_CONSTAT        0x1c
-#define MII_CIS8201_AUXCONSTAT_INIT    0x0004
-#define MII_CIS8201_AUXCONSTAT_DUPLEX  0x0020
-#define MII_CIS8201_AUXCONSTAT_SPEED   0x0018
-#define MII_CIS8201_AUXCONSTAT_GBIT    0x0010
-#define MII_CIS8201_AUXCONSTAT_100     0x0008
-                                                                                
-/* 88E1011 PHY Status Register */
-#define MII_M1011_PHY_SPEC_STATUS              0x11
-#define MII_M1011_PHY_SPEC_STATUS_1000         0x8000
-#define MII_M1011_PHY_SPEC_STATUS_100          0x4000
-#define MII_M1011_PHY_SPEC_STATUS_SPD_MASK     0xc000
-#define MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX   0x2000
-#define MII_M1011_PHY_SPEC_STATUS_RESOLVED     0x0800
-#define MII_M1011_PHY_SPEC_STATUS_LINK         0x0400
-
-#define MII_M1011_IEVENT               0x13
-#define MII_M1011_IEVENT_CLEAR         0x0000
-
-#define MII_M1011_IMASK                        0x12
-#define MII_M1011_IMASK_INIT           0x6400
-#define MII_M1011_IMASK_CLEAR          0x0000
-
-#define MII_DM9161_SCR         0x10
-#define MII_DM9161_SCR_INIT    0x0610
-
-/* DM9161 Specified Configuration and Status Register */
-#define MII_DM9161_SCSR        0x11
-#define MII_DM9161_SCSR_100F   0x8000
-#define MII_DM9161_SCSR_100H   0x4000
-#define MII_DM9161_SCSR_10F    0x2000
-#define MII_DM9161_SCSR_10H    0x1000
-
-/* DM9161 Interrupt Register */
-#define MII_DM9161_INTR        0x15
-#define MII_DM9161_INTR_PEND           0x8000
-#define MII_DM9161_INTR_DPLX_MASK      0x0800
-#define MII_DM9161_INTR_SPD_MASK       0x0400
-#define MII_DM9161_INTR_LINK_MASK      0x0200
-#define MII_DM9161_INTR_MASK           0x0100
-#define MII_DM9161_INTR_DPLX_CHANGE    0x0010
-#define MII_DM9161_INTR_SPD_CHANGE     0x0008
-#define MII_DM9161_INTR_LINK_CHANGE    0x0004
-#define MII_DM9161_INTR_INIT           0x0000
-#define MII_DM9161_INTR_STOP   \
-(MII_DM9161_INTR_DPLX_MASK | MII_DM9161_INTR_SPD_MASK \
- | MII_DM9161_INTR_LINK_MASK | MII_DM9161_INTR_MASK)
-
-/* DM9161 10BT Configuration/Status */
-#define MII_DM9161_10BTCSR     0x12
-#define MII_DM9161_10BTCSR_INIT        0x7800
-
-#define MII_BASIC_FEATURES     (SUPPORTED_10baseT_Half | \
-                                SUPPORTED_10baseT_Full | \
-                                SUPPORTED_100baseT_Half | \
-                                SUPPORTED_100baseT_Full | \
-                                SUPPORTED_Autoneg | \
-                                SUPPORTED_TP | \
-                                SUPPORTED_MII)
-
-#define MII_GBIT_FEATURES      (MII_BASIC_FEATURES | \
-                                SUPPORTED_1000baseT_Half | \
-                                SUPPORTED_1000baseT_Full)
-
-#define MII_READ_COMMAND       0x00000001
-
-#define MII_INTERRUPT_DISABLED 0x0
-#define MII_INTERRUPT_ENABLED 0x1
-/* Taken from mii_if_info and sungem_phy.h */
-struct gfar_mii_info {
-       /* Information about the PHY type */
-       /* And management functions */
-       struct phy_info *phyinfo;
-
-       /* forced speed & duplex (no autoneg)
-        * partner speed & duplex & pause (autoneg)
-        */
-       int speed;
-       int duplex;
-       int pause;
-
-       /* The most recently read link state */
-       int link;
-
-       /* Enabled Interrupts */
-       u32 interrupts;
-
-       u32 advertising;
-       int autoneg;
-       int mii_id;
-
-       /* private data pointer */
-       /* For use by PHYs to maintain extra state */
-       void *priv;
-
-       /* Provided by host chip */
-       struct net_device *dev;
-
-       /* A lock to ensure that only one thing can read/write
-        * the MDIO bus at a time */
-       spinlock_t mdio_lock;
-
-       /* Provided by ethernet driver */
-       int (*mdio_read) (struct net_device *dev, int mii_id, int reg);
-       void (*mdio_write) (struct net_device *dev, int mii_id, int reg, int val);
-};
-
-/* struct phy_info: a structure which defines attributes for a PHY
- *
- * id will contain a number which represents the PHY.  During
- * startup, the driver will poll the PHY to find out what its
- * UID--as defined by registers 2 and 3--is.  The 32-bit result
- * gotten from the PHY will be ANDed with phy_id_mask to
- * discard any bits which may change based on revision numbers
- * unimportant to functionality
- *
- * There are 6 commands which take a gfar_mii_info structure.
- * Each PHY must declare config_aneg, and read_status.
- */
-struct phy_info {
-       u32 phy_id;
-       char *name;
-       unsigned int phy_id_mask;
-       u32 features;
-
-       /* Called to initialize the PHY */
-       int (*init)(struct gfar_mii_info *mii_info);
-
-       /* Called to suspend the PHY for power */
-       int (*suspend)(struct gfar_mii_info *mii_info);
-
-       /* Reconfigures autonegotiation (or disables it) */
-       int (*config_aneg)(struct gfar_mii_info *mii_info);
-
-       /* Determines the negotiated speed and duplex */
-       int (*read_status)(struct gfar_mii_info *mii_info);
-
-       /* Clears any pending interrupts */
-       int (*ack_interrupt)(struct gfar_mii_info *mii_info);
-
-       /* Enables or disables interrupts */
-       int (*config_intr)(struct gfar_mii_info *mii_info);
-
-       /* Clears up any memory if needed */
-       void (*close)(struct gfar_mii_info *mii_info);
-};
-
-struct phy_info *get_phy_info(struct gfar_mii_info *mii_info);
-int read_phy_reg(struct net_device *dev, int mii_id, int regnum);
-void write_phy_reg(struct net_device *dev, int mii_id, int regnum, int value);
-void mii_clear_phy_interrupt(struct gfar_mii_info *mii_info);
-void mii_configure_phy_interrupt(struct gfar_mii_info *mii_info, u32 interrupts);
-
-struct dm9161_private {
-       struct timer_list timer;
-       int resetdone;
-};
-
-#endif /* GIANFAR_PHY_H */
index de087cd609d98fcf916c638187ed0df9b0e9eb59..896aa02000d73079fc1ebda3b222c90cdfe3531e 100644
@@ -1,6 +1,7 @@
 config MKISS
        tristate "Serial port KISS driver"
        depends on AX25
+       select CRC16
        ---help---
          KISS is a protocol used for the exchange of data between a computer
          and a Terminal Node Controller (a small embedded system commonly
index 1756f0ed54ccc7a43de7613896f090d38c98f365..cb43a9d28774fa8886dbfed420cc69c13312df74 100644
@@ -144,7 +144,7 @@ static inline struct net_device *bpq_get_ax25_dev(struct net_device *dev)
 {
        struct bpqdev *bpq;
 
-       list_for_each_entry(bpq, &bpq_devices, bpq_list) {
+       list_for_each_entry_rcu(bpq, &bpq_devices, bpq_list) {
                if (bpq->ethdev == dev)
                        return bpq->axdev;
        }
@@ -399,7 +399,7 @@ static void *bpq_seq_start(struct seq_file *seq, loff_t *pos)
        if (*pos == 0)
                return SEQ_START_TOKEN;
        
-       list_for_each_entry(bpqdev, &bpq_devices, bpq_list) {
+       list_for_each_entry_rcu(bpqdev, &bpq_devices, bpq_list) {
                if (i == *pos)
                        return bpqdev;
        }
@@ -418,7 +418,7 @@ static void *bpq_seq_next(struct seq_file *seq, void *v, loff_t *pos)
                p = ((struct bpqdev *)v)->bpq_list.next;
 
        return (p == &bpq_devices) ? NULL 
-               : list_entry(p, struct bpqdev, bpq_list);
+               : rcu_dereference(list_entry(p, struct bpqdev, bpq_list));
 }
 
 static void bpq_seq_stop(struct seq_file *seq, void *v)
@@ -561,8 +561,6 @@ static int bpq_device_event(struct notifier_block *this,unsigned long event, voi
        if (!dev_is_ethdev(dev))
                return NOTIFY_DONE;
 
-       rcu_read_lock();
-
        switch (event) {
        case NETDEV_UP:         /* new ethernet device -> new BPQ interface */
                if (bpq_get_ax25_dev(dev) == NULL)
@@ -581,7 +579,6 @@ static int bpq_device_event(struct notifier_block *this,unsigned long event, voi
        default:
                break;
        }
-       rcu_read_unlock();
 
        return NOTIFY_DONE;
 }
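
The bpqether hunks above convert the two list walks to their RCU variants and drop the explicit rcu_read_lock()/rcu_read_unlock() pair from the notifier. For reference, the usual RCU-protected list-reader shape looks like the sketch below; the struct and list are stand-ins, not bpqether's own, and the locking context of the notifier itself is not shown in these hunks:

#include <linux/list.h>
#include <linux/rcupdate.h>

struct example_item {
        struct list_head list;
        int key;
};

static LIST_HEAD(example_items);

/* Sketch: walk an RCU-protected list inside a read-side critical section. */
static int example_key_present(int key)
{
        struct example_item *it;
        int found = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(it, &example_items, list) {
                if (it->key == key) {
                        found = 1;
                        break;
                }
        }
        rcu_read_unlock();

        return found;
}
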
index d9fe64b46f4bf4bc1505bee23f2e8e92c86fa772..85d6dc005be0f016aa2b64af184778de4641a37d 100644
  *
  * Copyright (C) Hans Alblas PE1AYX <hans@esrac.ele.tue.nl>
  * Copyright (C) 2004, 05 Ralf Baechle DL5RB <ralf@linux-mips.org>
+ * Copyright (C) 2004, 05 Thomas Osterried DL9SAU <thomas@x-berg.in-berlin.de>
  */
-
 #include <linux/config.h>
 #include <linux/module.h>
 #include <asm/system.h>
 #include <linux/bitops.h>
 #include <asm/uaccess.h>
+#include <linux/crc16.h>
 #include <linux/string.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
 
 #include <net/ax25.h>
 
-#ifdef CONFIG_INET
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#endif
-
 #define AX_MTU         236
 
 /* SLIP/KISS protocol characters. */
@@ -80,9 +76,13 @@ struct mkiss {
 
        int             mode;
         int            crcmode;        /* MW: for FlexNet, SMACK etc.  */
-#define CRC_MODE_NONE   0
-#define CRC_MODE_FLEX   1
-#define CRC_MODE_SMACK  2
+       int             crcauto;        /* CRC auto mode */
+
+#define CRC_MODE_NONE          0
+#define CRC_MODE_FLEX          1
+#define CRC_MODE_SMACK         2
+#define CRC_MODE_FLEX_TEST     3
+#define CRC_MODE_SMACK_TEST    4
 
        atomic_t                refcnt;
        struct semaphore        dead_sem;
@@ -151,6 +151,21 @@ static int check_crc_flex(unsigned char *cp, int size)
        return 0;
 }
 
+static int check_crc_16(unsigned char *cp, int size)
+{
+       unsigned short crc = 0x0000;
+
+       if (size < 3)
+               return -1;
+
+       crc = crc16(0, cp, size);
+
+       if (crc != 0x0000)
+               return -1;
+
+       return 0;
+}
+
 /*
  * Standard encapsulation
  */
@@ -237,19 +252,42 @@ static void ax_bump(struct mkiss *ax)
 
        spin_lock_bh(&ax->buflock);
        if (ax->rbuff[0] > 0x0f) {
-               if (ax->rbuff[0] & 0x20) {
-                       ax->crcmode = CRC_MODE_FLEX;
+               if (ax->rbuff[0] & 0x80) {
+                       if (check_crc_16(ax->rbuff, ax->rcount) < 0) {
+                               ax->stats.rx_errors++;
+                               spin_unlock_bh(&ax->buflock);
+
+                               return;
+                       }
+                       if (ax->crcmode != CRC_MODE_SMACK && ax->crcauto) {
+                               printk(KERN_INFO
+                                      "mkiss: %s: Switching to crc-smack\n",
+                                      ax->dev->name);
+                               ax->crcmode = CRC_MODE_SMACK;
+                       }
+                       ax->rcount -= 2;
+                       *ax->rbuff &= ~0x80;
+               } else if (ax->rbuff[0] & 0x20)  {
                        if (check_crc_flex(ax->rbuff, ax->rcount) < 0) {
-                               ax->stats.rx_errors++;
+                               ax->stats.rx_errors++;
+                               spin_unlock_bh(&ax->buflock);
                                return;
                        }
+                       if (ax->crcmode != CRC_MODE_FLEX && ax->crcauto) {
+                               printk(KERN_INFO
+                                      "mkiss: %s: Switching to crc-flexnet\n",
+                                      ax->dev->name);
+                               ax->crcmode = CRC_MODE_FLEX;
+                       }
                        ax->rcount -= 2;
-                        /* dl9sau bugfix: the trailling two bytes flexnet crc
-                         * will not be passed to the kernel. thus we have
-                         * to correct the kissparm signature, because it
-                         * indicates a crc but there's none
+
+                       /*
+                        * dl9sau bugfix: the trailing two bytes flexnet crc
+                        * will not be passed to the kernel. thus we have to
+                        * correct the kissparm signature, because it indicates
+                        * a crc but there's none
                         */
-                        *ax->rbuff &= ~0x20;
+                       *ax->rbuff &= ~0x20;
                }
        }
        spin_unlock_bh(&ax->buflock);
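
The receive path above now recognizes a SMACK frame by bit 0x80 of the KISS type byte and validates it with check_crc_16() before stripping the two CRC bytes. The property it relies on -- running crc16() over the whole frame, appended CRC included, yields zero -- can be illustrated as follows (sketch only, not part of the patch):

#include <linux/crc16.h>

/* Sketch: a well-formed SMACK frame checks out to a zero CRC16 residue. */
static int example_smack_frame_ok(const unsigned char *frame, int len)
{
        if (len < 3)            /* type byte plus two CRC bytes at minimum */
                return 0;

        return crc16(0, frame, len) == 0;
}
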
@@ -417,20 +455,69 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
        p = icp;
 
        spin_lock_bh(&ax->buflock);
-        switch (ax->crcmode) {
-                unsigned short crc;
+       if ((*p & 0x0f) != 0) {
+               /* Configuration Command (kissparms(1)).
+                * Protocol spec says: never append CRC.
+                * This fixes a very old bug in the linux
+                * kiss driver. -- dl9sau */
+               switch (*p & 0xff) {
+               case 0x85:
+                       /* command from userspace especially for us,
+                        * not for delivery to the tnc */
+                       if (len > 1) {
+                               int cmd = (p[1] & 0xff);
+                               switch(cmd) {
+                               case 3:
+                                 ax->crcmode = CRC_MODE_SMACK;
+                                 break;
+                               case 2:
+                                 ax->crcmode = CRC_MODE_FLEX;
+                                 break;
+                               case 1:
+                                 ax->crcmode = CRC_MODE_NONE;
+                                 break;
+                               case 0:
+                               default:
+                                 ax->crcmode = CRC_MODE_SMACK_TEST;
+                                 cmd = 0;
+                               }
+                               ax->crcauto = (cmd ? 0 : 1);
+                               printk(KERN_INFO "mkiss: %s: crc mode %s %d\n", ax->dev->name, (len) ? "set to" : "is", cmd);
+                       }
+                       spin_unlock_bh(&ax->buflock);
+                       netif_start_queue(dev);
 
-       case CRC_MODE_FLEX:
-                *p |= 0x20;
-                crc = calc_crc_flex(p, len);
-                count = kiss_esc_crc(p, (unsigned char *)ax->xbuff, crc, len+2);
-                break;
+                       return;
+               default:
+                       count = kiss_esc(p, (unsigned char *)ax->xbuff, len);
+               }
+       } else {
+               unsigned short crc;
+               switch (ax->crcmode) {
+               case CRC_MODE_SMACK_TEST:
+                       ax->crcmode  = CRC_MODE_FLEX_TEST;
+                       printk(KERN_INFO "mkiss: %s: Trying crc-smack\n", ax->dev->name);
+                       // fall through
+               case CRC_MODE_SMACK:
+                       *p |= 0x80;
+                       crc = swab16(crc16(0, p, len));
+                       count = kiss_esc_crc(p, (unsigned char *)ax->xbuff, crc, len+2);
+                       break;
+               case CRC_MODE_FLEX_TEST:
+                       ax->crcmode = CRC_MODE_NONE;
+                       printk(KERN_INFO "mkiss: %s: Trying crc-flexnet\n", ax->dev->name);
+                       // fall through
+               case CRC_MODE_FLEX:
+                       *p |= 0x20;
+                       crc = calc_crc_flex(p, len);
+                       count = kiss_esc_crc(p, (unsigned char *)ax->xbuff, crc, len+2);
+                       break;
+
+               default:
+                       count = kiss_esc(p, (unsigned char *)ax->xbuff, len);
+               }
+       }
 
-       default:
-                count = kiss_esc(p, (unsigned char *)ax->xbuff, len);
-                break;
-       }
-       
        set_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags);
        actual = ax->tty->driver->write(ax->tty, ax->xbuff, count);
        ax->stats.tx_packets++;
@@ -439,8 +526,6 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
        ax->dev->trans_start = jiffies;
        ax->xleft = count - actual;
        ax->xhead = ax->xbuff + actual;
-
-       spin_unlock_bh(&ax->buflock);
 }
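
Besides the per-mode CRC generation, the large ax_encaps() hunk above adds an in-band control path: a frame whose KISS type byte is 0x85 is consumed by the driver itself instead of being sent to the TNC, and its second byte selects the CRC mode. A sketch of that two-byte command (illustrative; how the frame is injected -- normally via a kissparms-style utility -- is outside this patch excerpt):

/* Sketch: build the driver-local "set CRC mode" command recognized above. */
static void example_build_crc_mode_cmd(unsigned char buf[2], int mode)
{
        buf[0] = 0x85;                  /* parameter frame addressed to mkiss itself */
        buf[1] = (unsigned char) mode;  /* 0 = auto, 1 = none, 2 = flexnet, 3 = smack */
}
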
 
 /* Encapsulate an AX.25 packet and kick it into a TTY queue. */
@@ -622,7 +707,7 @@ static void ax_setup(struct net_device *dev)
  * best way to fix this is to use a rwlock in the tty struct, but for now we
  * use a single global rwlock for all ttys in ppp line discipline.
  */
-static rwlock_t disc_data_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(disc_data_lock);
 
 static struct mkiss *mkiss_get(struct tty_struct *tty)
 {
@@ -643,6 +728,8 @@ static void mkiss_put(struct mkiss *ax)
                up(&ax->dead_sem);
 }
 
+static int crc_force = 0;      /* Can be overridden with insmod */
+
 static int mkiss_open(struct tty_struct *tty)
 {
        struct net_device *dev;
@@ -682,6 +769,33 @@ static int mkiss_open(struct tty_struct *tty)
        if (register_netdev(dev))
                goto out_free_buffers;
 
+       /* after register_netdev() - because else printk smashes the kernel */
+       switch (crc_force) {
+       case 3:
+               ax->crcmode  = CRC_MODE_SMACK;
+               printk(KERN_INFO "mkiss: %s: crc mode smack forced.\n",
+                      ax->dev->name);
+               break;
+       case 2:
+               ax->crcmode  = CRC_MODE_FLEX;
+               printk(KERN_INFO "mkiss: %s: crc mode flexnet forced.\n",
+                      ax->dev->name);
+               break;
+       case 1:
+               ax->crcmode  = CRC_MODE_NONE;
+               printk(KERN_INFO "mkiss: %s: crc mode disabled.\n",
+                      ax->dev->name);
+               break;
+       case 0:
+               /* fall through */
+       default:
+               crc_force = 0;
+               printk(KERN_INFO "mkiss: %s: crc mode is auto.\n",
+                      ax->dev->name);
+               ax->crcmode  = CRC_MODE_SMACK_TEST;
+       }
+       ax->crcauto = (crc_force ? 0 : 1);
+
        netif_start_queue(dev);
 
        /* Done.  We have linked the TTY line to a channel. */
@@ -765,7 +879,6 @@ static int mkiss_ioctl(struct tty_struct *tty, struct file *file,
 
        case SIOCSIFHWADDR: {
                char addr[AX25_ADDR_LEN];
-printk(KERN_INFO "In SIOCSIFHWADDR");
 
                if (copy_from_user(&addr,
                                   (void __user *) arg, AX25_ADDR_LEN)) {
@@ -864,6 +977,7 @@ out:
 }
 
 static struct tty_ldisc ax_ldisc = {
+       .owner          = THIS_MODULE,
        .magic          = TTY_LDISC_MAGIC,
        .name           = "mkiss",
        .open           = mkiss_open,
@@ -904,6 +1018,8 @@ static void __exit mkiss_exit_driver(void)
 
 MODULE_AUTHOR("Ralf Baechle DL5RB <ralf@linux-mips.org>");
 MODULE_DESCRIPTION("KISS driver for AX.25 over TTYs");
+MODULE_PARM(crc_force, "i");
+MODULE_PARM_DESC(crc_force, "crc [0 = auto | 1 = none | 2 = flexnet | 3 = smack]");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_LDISC(N_AX25);
 
diff --git a/drivers/net/hamradio/mkiss.h b/drivers/net/hamradio/mkiss.h
deleted file mode 100644
index 4ab7004..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-/****************************************************************************
- *     Defines for the Multi-KISS driver.
- ****************************************************************************/
-
-#define AX25_MAXDEV    16              /* MAX number of AX25 channels;
-                                          This can be overridden with
-                                          insmod -oax25_maxdev=nnn     */
-#define AX_MTU         236     
-
-/* SLIP/KISS protocol characters. */
-#define END             0300           /* indicates end of frame       */
-#define ESC             0333           /* indicates byte stuffing      */
-#define ESC_END         0334           /* ESC ESC_END means END 'data' */
-#define ESC_ESC         0335           /* ESC ESC_ESC means ESC 'data' */
-
-struct ax_disp {
-       int                magic;
-
-       /* Various fields. */
-       struct tty_struct  *tty;                /* ptr to TTY structure         */
-       struct net_device      *dev;            /* easy for intr handling       */
-
-       /* These are pointers to the malloc()ed frame buffers. */
-       unsigned char      *rbuff;              /* receiver buffer              */
-       int                rcount;              /* received chars counter       */
-       unsigned char      *xbuff;              /* transmitter buffer           */
-       unsigned char      *xhead;              /* pointer to next byte to XMIT */
-       int                xleft;               /* bytes left in XMIT queue     */
-
-       /* SLIP interface statistics. */
-       unsigned long      rx_packets;          /* inbound frames counter       */
-       unsigned long      tx_packets;          /* outbound frames counter      */
-       unsigned long      rx_bytes;            /* inbound bytes counter        */
-       unsigned long      tx_bytes;            /* outbound bytes counter       */
-       unsigned long      rx_errors;           /* Parity, etc. errors          */
-       unsigned long      tx_errors;           /* Planned stuff                */
-       unsigned long      rx_dropped;          /* No memory for skb            */
-       unsigned long      tx_dropped;          /* When MTU change              */
-       unsigned long      rx_over_errors;      /* Frame bigger then SLIP buf.  */
-
-       /* Detailed SLIP statistics. */
-       int                 mtu;                /* Our mtu (to spot changes!)   */
-       int                 buffsize;           /* Max buffers sizes            */
-
-
-       unsigned long   flags;          /* Flag values/ mode etc        */
-                                       /* long req'd: used by set_bit --RR */
-#define AXF_INUSE      0               /* Channel in use               */
-#define AXF_ESCAPE     1               /* ESC received                 */
-#define AXF_ERROR      2               /* Parity, etc. error           */
-#define AXF_KEEPTEST   3               /* Keepalive test flag          */
-#define AXF_OUTWAIT    4               /* is outpacket was flag        */
-
-       int                 mode;
-        int                 crcmode;    /* MW: for FlexNet, SMACK etc.  */ 
-#define CRC_MODE_NONE   0
-#define CRC_MODE_FLEX   1
-#define CRC_MODE_SMACK  2
-       spinlock_t          buflock;    /* lock for rbuf and xbuf */
-};
-
-#define AX25_MAGIC             0x5316
index cf0ac6fda1a1741390486e2168271cf3fc4ba554..b71fab6e34f4268e0dc53ad9d675cac3f998309a 100644 (file)
@@ -2517,10 +2517,8 @@ static int hp100_down_vg_link(struct net_device *dev)
        do {
                if (hp100_inb(VG_LAN_CFG_1) & HP100_LINK_CABLE_ST)
                        break;
-               if (!in_interrupt()) {
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       schedule_timeout(1);
-               }
+               if (!in_interrupt())
+                       schedule_timeout_interruptible(1);
        } while (time_after(time, jiffies));
 
        if (time_after_eq(jiffies, time))       /* no signal->no logout */
@@ -2536,10 +2534,8 @@ static int hp100_down_vg_link(struct net_device *dev)
        do {
                if (!(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
                        break;
-               if (!in_interrupt()) {
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       schedule_timeout(1);
-               }
+               if (!in_interrupt())
+                       schedule_timeout_interruptible(1);
        } while (time_after(time, jiffies));
 
 #ifdef HP100_DEBUG
@@ -2577,10 +2573,8 @@ static int hp100_down_vg_link(struct net_device *dev)
                do {
                        if (!(hp100_inb(MAC_CFG_4) & HP100_MAC_SEL_ST))
                                break;
-                       if (!in_interrupt()) {
-                               set_current_state(TASK_INTERRUPTIBLE);
-                               schedule_timeout(1);
-                       }
+                       if (!in_interrupt())
+                               schedule_timeout_interruptible(1);
                } while (time_after(time, jiffies));
 
                hp100_orb(HP100_AUTO_MODE, MAC_CFG_3);  /* Autosel back on */
@@ -2591,10 +2585,8 @@ static int hp100_down_vg_link(struct net_device *dev)
        do {
                if ((hp100_inb(VG_LAN_CFG_1) & HP100_LINK_CABLE_ST) == 0)
                        break;
-               if (!in_interrupt()) {
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       schedule_timeout(1);
-               }
+               if (!in_interrupt())
+                       schedule_timeout_interruptible(1);
        } while (time_after(time, jiffies));
 
        if (time_before_eq(time, jiffies)) {
@@ -2606,10 +2598,8 @@ static int hp100_down_vg_link(struct net_device *dev)
 
        time = jiffies + (2 * HZ);      /* This seems to take a while.... */
        do {
-               if (!in_interrupt()) {
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       schedule_timeout(1);
-               }
+               if (!in_interrupt())
+                       schedule_timeout_interruptible(1);
        } while (time_after(time, jiffies));
 
        return 0;
@@ -2659,10 +2649,8 @@ static int hp100_login_to_vg_hub(struct net_device *dev, u_short force_relogin)
                do {
                        if (~(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
                                break;
-                       if (!in_interrupt()) {
-                               set_current_state(TASK_INTERRUPTIBLE);
-                               schedule_timeout(1);
-                       }
+                       if (!in_interrupt())
+                               schedule_timeout_interruptible(1);
                } while (time_after(time, jiffies));
 
                /* Start an addressed training and optionally request promiscuous port */
@@ -2697,10 +2685,8 @@ static int hp100_login_to_vg_hub(struct net_device *dev, u_short force_relogin)
                do {
                        if (hp100_inb(VG_LAN_CFG_1) & HP100_LINK_CABLE_ST)
                                break;
-                       if (!in_interrupt()) {
-                               set_current_state(TASK_INTERRUPTIBLE);
-                               schedule_timeout(1);
-                       }
+                       if (!in_interrupt())
+                               schedule_timeout_interruptible(1);
                } while (time_before(jiffies, time));
 
                if (time_after_eq(jiffies, time)) {
@@ -2723,10 +2709,8 @@ static int hp100_login_to_vg_hub(struct net_device *dev, u_short force_relogin)
 #endif
                                        break;
                                }
-                               if (!in_interrupt()) {
-                                       set_current_state(TASK_INTERRUPTIBLE);
-                                       schedule_timeout(1);
-                               }
+                               if (!in_interrupt())
+                                       schedule_timeout_interruptible(1);
                        } while (time_after(time, jiffies));
                }
 
index ca5914091d3afb9550a80b571312700492d85693..d54156f11e61a3cb043bc20f25fa48fde0fc217f 100644 (file)
@@ -400,5 +400,15 @@ config VIA_FIR
          To compile it as a module, choose M here: the module will be called
          via-ircc.
 
+config PXA_FICP
+       tristate "Intel PXA2xx Internal FICP"
+       depends on ARCH_PXA && IRDA
+       help
+         Say Y or M here if you want to build support for the PXA2xx
+         built-in IRDA interface which can support both SIR and FIR.
+         This driver relies on platform specific helper routines so
+         available capabilities may vary from one PXA2xx target to
+         another.
+
 endmenu
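The driver enabled by PXA_FICP (added below as drivers/net/irda/pxaficp_ir.c) expects board code to supply a struct pxaficp_platform_data carrying the transceiver capabilities and a transceiver_mode() callback; the driver dereferences both through si->pdata. A hedged sketch of what a board file might provide — the board name and the comments inside the callback are assumptions, not taken from this patch:

/* Hypothetical board support sketch for the pxa2xx-ir platform device. */
static void myboard_irda_transceiver_mode(struct device *dev, int mode)
{
	if (mode & IR_OFF) {
		/* power the external transceiver down, e.g. via a board GPIO */
	} else if (mode & IR_SIRMODE) {
		/* select the SIR path on the transceiver */
	} else if (mode & IR_FIRMODE) {
		/* select the FIR path on the transceiver */
	}
}

static struct pxaficp_platform_data myboard_ficp_info = {
	.transceiver_cap  = IR_SIRMODE | IR_FIRMODE,
	.transceiver_mode = myboard_irda_transceiver_mode,
};

The board would then register a platform device named "pxa2xx-ir" (matching pxa_ir_driver below) with this structure as its platform_data.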
 
index 29a8bd812b2125394a1144d81bc2350ef2486355..e7a8b7f7f5ddf7570117d9a59b3700c550f8c2da 100644 (file)
@@ -18,6 +18,7 @@ obj-$(CONFIG_SMC_IRCC_FIR)    += smsc-ircc2.o
 obj-$(CONFIG_ALI_FIR)          += ali-ircc.o
 obj-$(CONFIG_VLSI_FIR)         += vlsi_ir.o
 obj-$(CONFIG_VIA_FIR)          += via-ircc.o
+obj-$(CONFIG_PXA_FICP)         += pxaficp_ir.o
 # Old dongle drivers for old SIR drivers
 obj-$(CONFIG_ESI_DONGLE_OLD)           += esi.o
 obj-$(CONFIG_TEKRAM_DONGLE_OLD)        += tekram.o
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
new file mode 100644 (file)
index 0000000..aef80f5
--- /dev/null
@@ -0,0 +1,871 @@
+/*
+ * linux/drivers/net/irda/pxaficp_ir.c
+ *
+ * Based on sa1100_ir.c by Russell King
+ *
+ * Changes copyright (C) 2003-2005 MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Infra-red driver (SIR/FIR) for the PXA2xx embedded microprocessor
+ *
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <linux/rtnetlink.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/pm.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irmod.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/irda_device.h>
+
+#include <asm/irq.h>
+#include <asm/dma.h>
+#include <asm/delay.h>
+#include <asm/hardware.h>
+#include <asm/arch/irda.h>
+#include <asm/arch/pxa-regs.h>
+
+#ifdef CONFIG_MACH_MAINSTONE
+#include <asm/arch/mainstone.h>
+#endif
+
+#define IrSR_RXPL_NEG_IS_ZERO (1<<4)
+#define IrSR_RXPL_POS_IS_ZERO 0x0
+#define IrSR_TXPL_NEG_IS_ZERO (1<<3)
+#define IrSR_TXPL_POS_IS_ZERO 0x0
+#define IrSR_XMODE_PULSE_1_6  (1<<2)
+#define IrSR_XMODE_PULSE_3_16 0x0
+#define IrSR_RCVEIR_IR_MODE   (1<<1)
+#define IrSR_RCVEIR_UART_MODE 0x0
+#define IrSR_XMITIR_IR_MODE   (1<<0)
+#define IrSR_XMITIR_UART_MODE 0x0
+
+#define IrSR_IR_RECEIVE_ON (\
+                IrSR_RXPL_NEG_IS_ZERO | \
+                IrSR_TXPL_POS_IS_ZERO | \
+                IrSR_XMODE_PULSE_3_16 | \
+                IrSR_RCVEIR_IR_MODE   | \
+                IrSR_XMITIR_UART_MODE)
+
+#define IrSR_IR_TRANSMIT_ON (\
+                IrSR_RXPL_NEG_IS_ZERO | \
+                IrSR_TXPL_POS_IS_ZERO | \
+                IrSR_XMODE_PULSE_3_16 | \
+                IrSR_RCVEIR_UART_MODE | \
+                IrSR_XMITIR_IR_MODE)
+
+struct pxa_irda {
+       int                     speed;
+       int                     newspeed;
+       unsigned long           last_oscr;
+
+       unsigned char           *dma_rx_buff;
+       unsigned char           *dma_tx_buff;
+       dma_addr_t              dma_rx_buff_phy;
+       dma_addr_t              dma_tx_buff_phy;
+       unsigned int            dma_tx_buff_len;
+       int                     txdma;
+       int                     rxdma;
+
+       struct net_device_stats stats;
+       struct irlap_cb         *irlap;
+       struct qos_info         qos;
+
+       iobuff_t                tx_buff;
+       iobuff_t                rx_buff;
+
+       struct device           *dev;
+       struct pxaficp_platform_data *pdata;
+};
+
+
+#define IS_FIR(si)             ((si)->speed >= 4000000)
+#define IRDA_FRAME_SIZE_LIMIT  2047
+
+static inline void pxa_irda_fir_dma_rx_start(struct pxa_irda *si)
+{
+       DCSR(si->rxdma)  = DCSR_NODESC;
+       DSADR(si->rxdma) = __PREG(ICDR);
+       DTADR(si->rxdma) = si->dma_rx_buff_phy;
+       DCMD(si->rxdma) = DCMD_INCTRGADDR | DCMD_FLOWSRC |  DCMD_WIDTH1 | DCMD_BURST32 | IRDA_FRAME_SIZE_LIMIT;
+       DCSR(si->rxdma) |= DCSR_RUN;
+}
+
+static inline void pxa_irda_fir_dma_tx_start(struct pxa_irda *si)
+{
+       DCSR(si->txdma)  = DCSR_NODESC;
+       DSADR(si->txdma) = si->dma_tx_buff_phy;
+       DTADR(si->txdma) = __PREG(ICDR);
+       DCMD(si->txdma) = DCMD_INCSRCADDR | DCMD_FLOWTRG |  DCMD_ENDIRQEN | DCMD_WIDTH1 | DCMD_BURST32 | si->dma_tx_buff_len;
+       DCSR(si->txdma) |= DCSR_RUN;
+}
+
+/*
+ * Set the IrDA communications speed.
+ */
+static int pxa_irda_set_speed(struct pxa_irda *si, int speed)
+{
+       unsigned long flags;
+       unsigned int divisor;
+
+       switch (speed) {
+       case 9600:      case 19200:     case 38400:
+       case 57600:     case 115200:
+
+               /* refer to PXA250/210 Developer's Manual 10-7 */
+               /*  BaudRate = 14.7456 MHz / (16*Divisor) */
+               divisor = 14745600 / (16 * speed);
+
+               local_irq_save(flags);
+
+               if (IS_FIR(si)) {
+                       /* stop RX DMA */
+                       DCSR(si->rxdma) &= ~DCSR_RUN;
+                       /* disable FICP */
+                       ICCR0 = 0;
+                       pxa_set_cken(CKEN13_FICP, 0);
+
+                       /* set board transceiver to SIR mode */
+                       si->pdata->transceiver_mode(si->dev, IR_SIRMODE);
+
+                       /* configure GPIO46/47 */
+                       pxa_gpio_mode(GPIO46_STRXD_MD);
+                       pxa_gpio_mode(GPIO47_STTXD_MD);
+
+                       /* enable the STUART clock */
+                       pxa_set_cken(CKEN5_STUART, 1);
+               }
+
+               /* disable STUART first */
+               STIER = 0;
+
+               /* access DLL & DLH */
+               STLCR |= LCR_DLAB;
+               STDLL = divisor & 0xff;
+               STDLH = divisor >> 8;
+               STLCR &= ~LCR_DLAB;
+
+               si->speed = speed;
+               STISR = IrSR_IR_RECEIVE_ON | IrSR_XMODE_PULSE_1_6;
+               STIER = IER_UUE | IER_RLSE | IER_RAVIE | IER_RTIOE;
+
+               local_irq_restore(flags);
+               break;
+
+       case 4000000:
+               local_irq_save(flags);
+
+               /* disable STUART */
+               STIER = 0;
+               STISR = 0;
+               pxa_set_cken(CKEN5_STUART, 0);
+
+               /* disable FICP first */
+               ICCR0 = 0;
+
+               /* set board transceiver to FIR mode */
+               si->pdata->transceiver_mode(si->dev, IR_FIRMODE);
+
+               /* configure GPIO46/47 */
+               pxa_gpio_mode(GPIO46_ICPRXD_MD);
+               pxa_gpio_mode(GPIO47_ICPTXD_MD);
+
+               /* enable the FICP clock */
+               pxa_set_cken(CKEN13_FICP, 1);
+
+               si->speed = speed;
+               pxa_irda_fir_dma_rx_start(si);
+               ICCR0 = ICCR0_ITR | ICCR0_RXE;
+
+               local_irq_restore(flags);
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/* SIR interrupt service routine. */
+static irqreturn_t pxa_irda_sir_irq(int irq, void *dev_id, struct pt_regs *regs)
+{
+       struct net_device *dev = dev_id;
+       struct pxa_irda *si = netdev_priv(dev);
+       int iir, lsr, data;
+
+       iir = STIIR;
+
+       switch  (iir & 0x0F) {
+       case 0x06: /* Receiver Line Status */
+               lsr = STLSR;
+               while (lsr & LSR_FIFOE) {
+                       data = STRBR;
+                       if (lsr & (LSR_OE | LSR_PE | LSR_FE | LSR_BI)) {
+                               printk(KERN_DEBUG "pxa_ir: sir receiving error\n");
+                               si->stats.rx_errors++;
+                               if (lsr & LSR_FE)
+                                       si->stats.rx_frame_errors++;
+                               if (lsr & LSR_OE)
+                                       si->stats.rx_fifo_errors++;
+                       } else {
+                               si->stats.rx_bytes++;
+                               async_unwrap_char(dev, &si->stats, &si->rx_buff, data);
+                       }
+                       lsr = STLSR;
+               }
+               dev->last_rx = jiffies;
+               si->last_oscr = OSCR;
+               break;
+
+       case 0x04: /* Received Data Available */
+                  /* fall through */
+
+       case 0x0C: /* Character Timeout Indication */
+               do  {
+                   si->stats.rx_bytes++;
+                   async_unwrap_char(dev, &si->stats, &si->rx_buff, STRBR);
+               } while (STLSR & LSR_DR);
+               dev->last_rx = jiffies;
+               si->last_oscr = OSCR;
+               break;
+
+       case 0x02: /* Transmit FIFO Data Request */
+               while ((si->tx_buff.len) && (STLSR & LSR_TDRQ)) {
+                       STTHR = *si->tx_buff.data++;
+                       si->tx_buff.len -= 1;
+               }
+
+               if (si->tx_buff.len == 0) {
+                       si->stats.tx_packets++;
+                       si->stats.tx_bytes += si->tx_buff.data -
+                                             si->tx_buff.head;
+
+                        /* We need to ensure that the transmitter has finished. */
+                       while ((STLSR & LSR_TEMT) == 0)
+                               cpu_relax();
+                       si->last_oscr = OSCR;
+
+                       /*
+                       * Ok, we've finished transmitting.  Now enable
+                       * the receiver.  Sometimes we get a receive IRQ
+                       * immediately after a transmit...
+                       */
+                       if (si->newspeed) {
+                               pxa_irda_set_speed(si, si->newspeed);
+                               si->newspeed = 0;
+                       } else {
+                               /* enable IR Receiver, disable IR Transmitter */
+                               STISR = IrSR_IR_RECEIVE_ON | IrSR_XMODE_PULSE_1_6;
+                               /* enable STUART and receive interrupts */
+                               STIER = IER_UUE | IER_RLSE | IER_RAVIE | IER_RTIOE;
+                       }
+                       /* I'm hungry! */
+                       netif_wake_queue(dev);
+               }
+               break;
+       }
+
+       return IRQ_HANDLED;
+}
+
+/* FIR Receive DMA interrupt handler */
+static void pxa_irda_fir_dma_rx_irq(int channel, void *data, struct pt_regs *regs)
+{
+       int dcsr = DCSR(channel);
+
+       DCSR(channel) = dcsr & ~DCSR_RUN;
+
+       printk(KERN_DEBUG "pxa_ir: fir rx dma bus error %#x\n", dcsr);
+}
+
+/* FIR Transmit DMA interrupt handler */
+static void pxa_irda_fir_dma_tx_irq(int channel, void *data, struct pt_regs *regs)
+{
+       struct net_device *dev = data;
+       struct pxa_irda *si = netdev_priv(dev);
+       int dcsr;
+
+       dcsr = DCSR(channel);
+       DCSR(channel) = dcsr & ~DCSR_RUN;
+
+       if (dcsr & DCSR_ENDINTR)  {
+               si->stats.tx_packets++;
+               si->stats.tx_bytes += si->dma_tx_buff_len;
+       } else {
+               si->stats.tx_errors++;
+       }
+
+       while (ICSR1 & ICSR1_TBY)
+               cpu_relax();
+       si->last_oscr = OSCR;
+
+       /*
+        * HACK: It looks like the TBY bit is dropped too soon.
+        * Without this delay things break.
+        */
+       udelay(120);
+
+       if (si->newspeed) {
+               pxa_irda_set_speed(si, si->newspeed);
+               si->newspeed = 0;
+       } else {
+               ICCR0 = 0;
+               pxa_irda_fir_dma_rx_start(si);
+               ICCR0 = ICCR0_ITR | ICCR0_RXE;
+       }
+       netif_wake_queue(dev);
+}
+
+/* EIF(Error in FIFO/End in Frame) handler for FIR */
+static void pxa_irda_fir_irq_eif(struct pxa_irda *si, struct net_device *dev)
+{
+       unsigned int len, stat, data;
+
+       /* Get the current data position. */
+       len = DTADR(si->rxdma) - si->dma_rx_buff_phy;
+
+       do {
+               /* Read Status, and then Data.   */
+               stat = ICSR1;
+               rmb();
+               data = ICDR;
+
+               if (stat & (ICSR1_CRE | ICSR1_ROR)) {
+                       si->stats.rx_errors++;
+                       if (stat & ICSR1_CRE) {
+                               printk(KERN_DEBUG "pxa_ir: fir receive CRC error\n");
+                               si->stats.rx_crc_errors++;
+                       }
+                       if (stat & ICSR1_ROR) {
+                               printk(KERN_DEBUG "pxa_ir: fir receive overrun\n");
+                               si->stats.rx_frame_errors++;
+                       }
+               } else  {
+                       si->dma_rx_buff[len++] = data;
+               }
+               /* If we hit the end of frame, there's no point in continuing. */
+               if (stat & ICSR1_EOF)
+                       break;
+       } while (ICSR0 & ICSR0_EIF);
+
+       if (stat & ICSR1_EOF) {
+               /* end of frame. */
+               struct sk_buff *skb = alloc_skb(len+1,GFP_ATOMIC);
+               if (!skb)  {
+                       printk(KERN_ERR "pxa_ir: fir out of memory for receive skb\n");
+                       si->stats.rx_dropped++;
+                       return;
+               }
+
+               /* Align IP header to 20 bytes  */
+               skb_reserve(skb, 1);
+               memcpy(skb->data, si->dma_rx_buff, len);
+               skb_put(skb, len);
+
+               /* Feed it to IrLAP  */
+               skb->dev = dev;
+               skb->mac.raw  = skb->data;
+               skb->protocol = htons(ETH_P_IRDA);
+               netif_rx(skb);
+
+               si->stats.rx_packets++;
+               si->stats.rx_bytes += len;
+
+               dev->last_rx = jiffies;
+       }
+}
+
+/* FIR interrupt handler */
+static irqreturn_t pxa_irda_fir_irq(int irq, void *dev_id, struct pt_regs *regs)
+{
+       struct net_device *dev = dev_id;
+       struct pxa_irda *si = netdev_priv(dev);
+       int icsr0;
+
+       /* stop RX DMA */
+       DCSR(si->rxdma) &= ~DCSR_RUN;
+       si->last_oscr = OSCR;
+       icsr0 = ICSR0;
+
+       if (icsr0 & (ICSR0_FRE | ICSR0_RAB)) {
+               if (icsr0 & ICSR0_FRE) {
+                       printk(KERN_DEBUG "pxa_ir: fir receive frame error\n");
+                       si->stats.rx_frame_errors++;
+               } else {
+                       printk(KERN_DEBUG "pxa_ir: fir receive abort\n");
+                       si->stats.rx_errors++;
+               }
+               ICSR0 = icsr0 & (ICSR0_FRE | ICSR0_RAB);
+       }
+
+       if (icsr0 & ICSR0_EIF) {
+               /* An error in the FIFO occurred, or there is an end of frame */
+               pxa_irda_fir_irq_eif(si, dev);
+       }
+
+       ICCR0 = 0;
+       pxa_irda_fir_dma_rx_start(si);
+       ICCR0 = ICCR0_ITR | ICCR0_RXE;
+
+       return IRQ_HANDLED;
+}
+
+/* hard_xmit interface of irda device */
+static int pxa_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct pxa_irda *si = netdev_priv(dev);
+       int speed = irda_get_next_speed(skb);
+
+       /*
+        * Does this packet contain a request to change the interface
+        * speed?  If so, remember it until we complete the transmission
+        * of this frame.
+        */
+       if (speed != si->speed && speed != -1)
+               si->newspeed = speed;
+
+       /*
+        * If this is an empty frame, we can bypass a lot.
+        */
+       if (skb->len == 0) {
+               if (si->newspeed) {
+                       si->newspeed = 0;
+                       pxa_irda_set_speed(si, speed);
+               }
+               dev_kfree_skb(skb);
+               return 0;
+       }
+
+       netif_stop_queue(dev);
+
+       if (!IS_FIR(si)) {
+               si->tx_buff.data = si->tx_buff.head;
+               si->tx_buff.len  = async_wrap_skb(skb, si->tx_buff.data, si->tx_buff.truesize);
+
+               /* Disable STUART interrupts and switch to transmit mode. */
+               STIER = 0;
+               STISR = IrSR_IR_TRANSMIT_ON | IrSR_XMODE_PULSE_1_6;
+
+               /* enable STUART and transmit interrupts */
+               STIER = IER_UUE | IER_TIE;
+       } else {
+               unsigned long mtt = irda_get_mtt(skb);
+
+               si->dma_tx_buff_len = skb->len;
+               memcpy(si->dma_tx_buff, skb->data, skb->len);
+
+               if (mtt)
+                       while ((unsigned)(OSCR - si->last_oscr)/4 < mtt)
+                               cpu_relax();
+
+               /* stop RX DMA,  disable FICP */
+               DCSR(si->rxdma) &= ~DCSR_RUN;
+               ICCR0 = 0;
+
+               pxa_irda_fir_dma_tx_start(si);
+               ICCR0 = ICCR0_ITR | ICCR0_TXE;
+       }
+
+       dev_kfree_skb(skb);
+       dev->trans_start = jiffies;
+       return 0;
+}
+
+static int pxa_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
+{
+       struct if_irda_req *rq = (struct if_irda_req *)ifreq;
+       struct pxa_irda *si = netdev_priv(dev);
+       int ret;
+
+       switch (cmd) {
+       case SIOCSBANDWIDTH:
+               ret = -EPERM;
+               if (capable(CAP_NET_ADMIN)) {
+                       /*
+                        * We are unable to set the speed if the
+                        * device is not running.
+                        */
+                       if (netif_running(dev)) {
+                               ret = pxa_irda_set_speed(si,
+                                               rq->ifr_baudrate);
+                       } else {
+                               printk(KERN_INFO "pxa_ir: SIOCSBANDWIDTH: !netif_running\n");
+                               ret = 0;
+                       }
+               }
+               break;
+
+       case SIOCSMEDIABUSY:
+               ret = -EPERM;
+               if (capable(CAP_NET_ADMIN)) {
+                       irda_device_set_media_busy(dev, TRUE);
+                       ret = 0;
+               }
+               break;
+
+       case SIOCGRECEIVING:
+               ret = 0;
+               rq->ifr_receiving = IS_FIR(si) ? 0
+                                       : si->rx_buff.state != OUTSIDE_FRAME;
+               break;
+
+       default:
+               ret = -EOPNOTSUPP;
+               break;
+       }
+
+       return ret;
+}
+
+static struct net_device_stats *pxa_irda_stats(struct net_device *dev)
+{
+       struct pxa_irda *si = netdev_priv(dev);
+       return &si->stats;
+}
+
+static void pxa_irda_startup(struct pxa_irda *si)
+{
+       /* Disable STUART interrupts */
+       STIER = 0;
+       /* enable STUART interrupt to the processor */
+       STMCR = MCR_OUT2;
+       /* configure SIR frame format: StartBit - Data 7 ... Data 0 - Stop Bit */
+       STLCR = LCR_WLS0 | LCR_WLS1;
+       /* enable FIFO, we use FIFO to improve performance */
+       STFCR = FCR_TRFIFOE | FCR_ITL_32;
+
+       /* disable FICP */
+       ICCR0 = 0;
+       /* configure FICP ICCR2 */
+       ICCR2 = ICCR2_TXP | ICCR2_TRIG_32;
+
+       /* configure DMAC */
+       DRCMR17 = si->rxdma | DRCMR_MAPVLD;
+       DRCMR18 = si->txdma | DRCMR_MAPVLD;
+
+       /* force SIR reinitialization */
+       si->speed = 4000000;
+       pxa_irda_set_speed(si, 9600);
+
+       printk(KERN_DEBUG "pxa_ir: irda startup\n");
+}
+
+static void pxa_irda_shutdown(struct pxa_irda *si)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+
+       /* disable STUART and interrupt */
+       STIER = 0;
+       /* disable STUART SIR mode */
+       STISR = 0;
+       /* disable the STUART clock */
+       pxa_set_cken(CKEN5_STUART, 0);
+
+       /* disable DMA */
+       DCSR(si->txdma) &= ~DCSR_RUN;
+       DCSR(si->rxdma) &= ~DCSR_RUN;
+       /* disable FICP */
+       ICCR0 = 0;
+       /* disable the FICP clock */
+       pxa_set_cken(CKEN13_FICP, 0);
+
+       DRCMR17 = 0;
+       DRCMR18 = 0;
+
+       local_irq_restore(flags);
+
+       /* power off board transceiver */
+       si->pdata->transceiver_mode(si->dev, IR_OFF);
+
+       printk(KERN_DEBUG "pxa_ir: irda shutdown\n");
+}
+
+static int pxa_irda_start(struct net_device *dev)
+{
+       struct pxa_irda *si = netdev_priv(dev);
+       int err;
+
+       si->speed = 9600;
+
+       err = request_irq(IRQ_STUART, pxa_irda_sir_irq, 0, dev->name, dev);
+       if (err)
+               goto err_irq1;
+
+       err = request_irq(IRQ_ICP, pxa_irda_fir_irq, 0, dev->name, dev);
+       if (err)
+               goto err_irq2;
+
+       /*
+        * The interrupt must remain disabled for now.
+        */
+       disable_irq(IRQ_STUART);
+       disable_irq(IRQ_ICP);
+
+       err = -EBUSY;
+       si->rxdma = pxa_request_dma("FICP_RX",DMA_PRIO_LOW, pxa_irda_fir_dma_rx_irq, dev);
+       if (si->rxdma < 0)
+               goto err_rx_dma;
+
+       si->txdma = pxa_request_dma("FICP_TX",DMA_PRIO_LOW, pxa_irda_fir_dma_tx_irq, dev);
+       if (si->txdma < 0)
+               goto err_tx_dma;
+
+       err = -ENOMEM;
+       si->dma_rx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
+                                            &si->dma_rx_buff_phy, GFP_KERNEL );
+       if (!si->dma_rx_buff)
+               goto err_dma_rx_buff;
+
+       si->dma_tx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
+                                            &si->dma_tx_buff_phy, GFP_KERNEL );
+       if (!si->dma_tx_buff)
+               goto err_dma_tx_buff;
+
+       /* Setup the serial port for the initial speed. */
+       pxa_irda_startup(si);
+
+       /*
+        * Open a new IrLAP layer instance.
+        */
+       si->irlap = irlap_open(dev, &si->qos, "pxa");
+       err = -ENOMEM;
+       if (!si->irlap)
+               goto err_irlap;
+
+       /*
+        * Now enable the interrupt and start the queue
+        */
+       enable_irq(IRQ_STUART);
+       enable_irq(IRQ_ICP);
+       netif_start_queue(dev);
+
+       printk(KERN_DEBUG "pxa_ir: irda driver opened\n");
+
+       return 0;
+
+err_irlap:
+       pxa_irda_shutdown(si);
+       dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_tx_buff, si->dma_tx_buff_phy);
+err_dma_tx_buff:
+       dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_rx_buff, si->dma_rx_buff_phy);
+err_dma_rx_buff:
+       pxa_free_dma(si->txdma);
+err_tx_dma:
+       pxa_free_dma(si->rxdma);
+err_rx_dma:
+       free_irq(IRQ_ICP, dev);
+err_irq2:
+       free_irq(IRQ_STUART, dev);
+err_irq1:
+
+       return err;
+}
+
+static int pxa_irda_stop(struct net_device *dev)
+{
+       struct pxa_irda *si = netdev_priv(dev);
+
+       netif_stop_queue(dev);
+
+       pxa_irda_shutdown(si);
+
+       /* Stop IrLAP */
+       if (si->irlap) {
+               irlap_close(si->irlap);
+               si->irlap = NULL;
+       }
+
+       free_irq(IRQ_STUART, dev);
+       free_irq(IRQ_ICP, dev);
+
+       pxa_free_dma(si->rxdma);
+       pxa_free_dma(si->txdma);
+
+       if (si->dma_rx_buff)
+               dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_tx_buff, si->dma_tx_buff_phy);
+       if (si->dma_tx_buff)
+               dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_rx_buff, si->dma_rx_buff_phy);
+
+       printk(KERN_DEBUG "pxa_ir: irda driver closed\n");
+       return 0;
+}
+
+static int pxa_irda_suspend(struct device *_dev, pm_message_t state, u32 level)
+{
+       struct net_device *dev = dev_get_drvdata(_dev);
+       struct pxa_irda *si;
+
+       if (!dev || level != SUSPEND_DISABLE)
+               return 0;
+
+       if (netif_running(dev)) {
+               si = netdev_priv(dev);
+               netif_device_detach(dev);
+               pxa_irda_shutdown(si);
+       }
+
+       return 0;
+}
+
+static int pxa_irda_resume(struct device *_dev, u32 level)
+{
+       struct net_device *dev = dev_get_drvdata(_dev);
+       struct pxa_irda *si;
+
+       if (!dev || level != RESUME_ENABLE)
+               return 0;
+
+       if (netif_running(dev)) {
+               si = netdev_priv(dev);
+               pxa_irda_startup(si);
+               netif_device_attach(dev);
+               netif_wake_queue(dev);
+       }
+
+       return 0;
+}
+
+
+static int pxa_irda_init_iobuf(iobuff_t *io, int size)
+{
+       io->head = kmalloc(size, GFP_KERNEL | GFP_DMA);
+       if (io->head != NULL) {
+               io->truesize = size;
+               io->in_frame = FALSE;
+               io->state    = OUTSIDE_FRAME;
+               io->data     = io->head;
+       }
+       return io->head ? 0 : -ENOMEM;
+}
+
+static int pxa_irda_probe(struct device *_dev)
+{
+       struct platform_device *pdev = to_platform_device(_dev);
+       struct net_device *dev;
+       struct pxa_irda *si;
+       unsigned int baudrate_mask;
+       int err;
+
+       if (!pdev->dev.platform_data)
+               return -ENODEV;
+
+       err = request_mem_region(__PREG(STUART), 0x24, "IrDA") ? 0 : -EBUSY;
+       if (err)
+               goto err_mem_1;
+
+       err = request_mem_region(__PREG(FICP), 0x1c, "IrDA") ? 0 : -EBUSY;
+       if (err)
+               goto err_mem_2;
+
+       dev = alloc_irdadev(sizeof(struct pxa_irda));
+       if (!dev)
+               goto err_mem_3;
+
+       si = netdev_priv(dev);
+       si->dev = &pdev->dev;
+       si->pdata = pdev->dev.platform_data;
+
+       /*
+        * Initialise the SIR buffers
+        */
+       err = pxa_irda_init_iobuf(&si->rx_buff, 14384);
+       if (err)
+               goto err_mem_4;
+       err = pxa_irda_init_iobuf(&si->tx_buff, 4000);
+       if (err)
+               goto err_mem_5;
+
+       dev->hard_start_xmit    = pxa_irda_hard_xmit;
+       dev->open               = pxa_irda_start;
+       dev->stop               = pxa_irda_stop;
+       dev->do_ioctl           = pxa_irda_ioctl;
+       dev->get_stats          = pxa_irda_stats;
+
+       irda_init_max_qos_capabilies(&si->qos);
+
+       baudrate_mask = 0;
+       if (si->pdata->transceiver_cap & IR_SIRMODE)
+               baudrate_mask |= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
+       if (si->pdata->transceiver_cap & IR_FIRMODE)
+               baudrate_mask |= IR_4000000 << 8;
+
+       si->qos.baud_rate.bits &= baudrate_mask;
+       si->qos.min_turn_time.bits = 7;  /* 1ms or more */
+
+       irda_qos_bits_to_value(&si->qos);
+
+       err = register_netdev(dev);
+
+       if (err == 0)
+               dev_set_drvdata(&pdev->dev, dev);
+
+       if (err) {
+               kfree(si->tx_buff.head);
+err_mem_5:
+               kfree(si->rx_buff.head);
+err_mem_4:
+               free_netdev(dev);
+err_mem_3:
+               release_mem_region(__PREG(FICP), 0x1c);
+err_mem_2:
+               release_mem_region(__PREG(STUART), 0x24);
+       }
+err_mem_1:
+       return err;
+}
+
+static int pxa_irda_remove(struct device *_dev)
+{
+       struct net_device *dev = dev_get_drvdata(_dev);
+
+       if (dev) {
+               struct pxa_irda *si = netdev_priv(dev);
+               unregister_netdev(dev);
+               kfree(si->tx_buff.head);
+               kfree(si->rx_buff.head);
+               free_netdev(dev);
+       }
+
+       release_mem_region(__PREG(STUART), 0x24);
+       release_mem_region(__PREG(FICP), 0x1c);
+
+       return 0;
+}
+
+static struct device_driver pxa_ir_driver = {
+       .name           = "pxa2xx-ir",
+       .bus            = &platform_bus_type,
+       .probe          = pxa_irda_probe,
+       .remove         = pxa_irda_remove,
+       .suspend        = pxa_irda_suspend,
+       .resume         = pxa_irda_resume,
+};
+
+static int __init pxa_irda_init(void)
+{
+       return driver_register(&pxa_ir_driver);
+}
+
+static void __exit pxa_irda_exit(void)
+{
+       driver_unregister(&pxa_ir_driver);
+}
+
+module_init(pxa_irda_init);
+module_exit(pxa_irda_exit);
+
+MODULE_LICENSE("GPL");
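In the SIR path of pxa_irda_set_speed() above, the divisor comes from BaudRate = 14.7456 MHz / (16 * Divisor), so 115200 baud gives 14745600 / (16 * 115200) = 8 (STDLL = 0x08, STDLH = 0x00) and 9600 baud gives 96. A standalone check of that arithmetic, for illustration only:

/* Userspace check of the SIR divisor computation used above. */
#include <stdio.h>

int main(void)
{
	const unsigned int rates[] = { 9600, 19200, 38400, 57600, 115200 };
	unsigned int i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
		unsigned int divisor = 14745600 / (16 * rates[i]);

		printf("%6u baud -> divisor %3u (DLL=0x%02x, DLH=0x%02x)\n",
		       rates[i], divisor, divisor & 0xff, divisor >> 8);
	}
	return 0;
}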
index 15f207323d97043b124dd6282835447804abf5aa..3961a754e920e37e8a1b310670edcdb48549a46c 100644 (file)
@@ -678,10 +678,9 @@ static void turnaround_delay(const struct stir_cb *stir, long us)
                return;
 
        ticks = us / (1000000 / HZ);
-       if (ticks > 0) {
-               current->state = TASK_INTERRUPTIBLE;
-               schedule_timeout(1 + ticks);
-       } else
+       if (ticks > 0)
+               schedule_timeout_interruptible(1 + ticks);
+       else
                udelay(us);
 }
 
index 9d026ed77ddd4250660ad117d76c2e319e6d3eda..04e47189d830105a848d6b6db71c8dd26f625de9 100644 (file)
@@ -645,11 +645,10 @@ ixgb_phys_id(struct net_device *netdev, uint32_t data)
 
        mod_timer(&adapter->blink_timer, jiffies);
 
-       set_current_state(TASK_INTERRUPTIBLE);
-       if(data)
-               schedule_timeout(data * HZ);
+       if (data)
+               schedule_timeout_interruptible(data * HZ);
        else
-               schedule_timeout(MAX_SCHEDULE_TIMEOUT);
+               schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
 
        del_timer_sync(&adapter->blink_timer);
        ixgb_led_off(&adapter->hw);
@@ -723,6 +722,7 @@ struct ethtool_ops ixgb_ethtool_ops = {
        .phys_id = ixgb_phys_id,
        .get_stats_count = ixgb_get_stats_count,
        .get_ethtool_stats = ixgb_get_ethtool_stats,
+       .get_perm_addr = ethtool_op_get_perm_addr,
 };
 
 void ixgb_set_ethtool_ops(struct net_device *netdev)
index 89d6d69be382a3d625892823d8859cbae8237908..176680cb153e8b114797f4e6a8bad2f1837c678c 100644 (file)
@@ -460,8 +460,9 @@ ixgb_probe(struct pci_dev *pdev,
        }
 
        ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
+       memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
 
-       if(!is_valid_ether_addr(netdev->dev_addr)) {
+       if(!is_valid_ether_addr(netdev->perm_addr)) {
                err = -EIO;
                goto err_eeprom;
        }
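The ixgb hunks above (and the ne2k-pci hunk further down) follow the same pattern for the new ethtool permanent-address support: copy the EEPROM-derived MAC into netdev->perm_addr once at probe time, validate that copy, and point .get_perm_addr at the generic ethtool_op_get_perm_addr helper. Schematically — a sketch of the pattern, not code from either driver:

/* Generic shape of the perm_addr conversions in this merge. */
static int example_store_perm_addr(struct net_device *netdev)
{
	/* dev_addr has already been filled from the EEPROM by the driver */
	memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr))
		return -EIO;
	return 0;
}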
index b4929beb33b2359d29a37fc16e4535590c1098e9..1d75ca0bb939429587b354ff6cd8e3f4463b247c 100644 (file)
@@ -298,7 +298,7 @@ enum {OLD_LANCE = 0, PCNET_ISA=1, PCNET_ISAP=2, PCNET_PCI=3, PCNET_VLB=4, PCNET_
 static unsigned char lance_need_isa_bounce_buffers = 1;
 
 static int lance_open(struct net_device *dev);
-static void lance_init_ring(struct net_device *dev, int mode);
+static void lance_init_ring(struct net_device *dev, gfp_t mode);
 static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev);
 static int lance_rx(struct net_device *dev);
 static irqreturn_t lance_interrupt(int irq, void *dev_id, struct pt_regs *regs);
@@ -846,7 +846,7 @@ lance_purge_ring(struct net_device *dev)
 
 /* Initialize the LANCE Rx and Tx rings. */
 static void
-lance_init_ring(struct net_device *dev, int gfp)
+lance_init_ring(struct net_device *dev, gfp_t gfp)
 {
        struct lance_private *lp = dev->priv;
        int i;
index 27f0d8ac4c40d99a869e2cd297007dbeeb487d0e..309d254842cf2f85ab2d0ea7653b98d4132b7248 100644 (file)
@@ -298,7 +298,7 @@ static int __init lne390_probe1(struct net_device *dev, int ioaddr)
        return 0;
 unmap:
        if (ei_status.reg0)
-               iounmap((void *)dev->mem_start);
+               iounmap(ei_status.mem);
 cleanup:
        free_irq(dev->irq, dev);
        return ret;
index c33cb3dc942b713f993f7ae75f5bede82c5cacc9..e42aa797f08b88ca9f9b71a065e1dab90d4047cd 100644 (file)
@@ -207,6 +207,20 @@ int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
        return 0;
 }
 
+int mii_check_gmii_support(struct mii_if_info *mii)
+{
+       int reg;
+
+       reg = mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);
+       if (reg & BMSR_ESTATEN) {
+               reg = mii->mdio_read(mii->dev, mii->phy_id, MII_ESTATUS);
+               if (reg & (ESTATUS_1000_TFULL | ESTATUS_1000_THALF))
+                       return 1;
+       }
+
+       return 0;
+}
+
 int mii_link_ok (struct mii_if_info *mii)
 {
        /* first, a dummy read, needed to latch some MII phys */
@@ -394,5 +408,6 @@ EXPORT_SYMBOL(mii_ethtool_gset);
 EXPORT_SYMBOL(mii_ethtool_sset);
 EXPORT_SYMBOL(mii_check_link);
 EXPORT_SYMBOL(mii_check_media);
+EXPORT_SYMBOL(mii_check_gmii_support);
 EXPORT_SYMBOL(generic_mii_ioctl);
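The new mii_check_gmii_support() exported above returns 1 when the PHY's BMSR advertises extended status and the extended status register reports 1000BASE-T half or full capability. A driver with a populated struct mii_if_info could call it roughly as follows; example_priv and example_mdio_read are placeholders, not symbols from this patch:

/* Illustrative caller of the new helper. */
static int example_setup_phy(struct example_priv *priv)
{
	priv->mii.dev       = priv->netdev;
	priv->mii.phy_id    = 1;			/* assumed PHY address */
	priv->mii.mdio_read = example_mdio_read;	/* driver-specific MDIO accessor */

	if (mii_check_gmii_support(&priv->mii))
		printk(KERN_INFO "%s: gigabit-capable PHY detected\n",
		       priv->netdev->name);
	return 0;
}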
 
diff --git a/drivers/net/mipsnet.c b/drivers/net/mipsnet.c
new file mode 100644 (file)
index 0000000..f79f7ee
--- /dev/null
@@ -0,0 +1,371 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#define DEBUG
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <asm/io.h>
+#include <asm/mips-boards/simint.h>
+
+#include "mipsnet.h"           /* actual device IO mapping */
+
+#define MIPSNET_VERSION "2005-06-20"
+
+#define mipsnet_reg_address(dev, field) (dev->base_addr + field_offset(field))
+
+struct mipsnet_priv {
+       struct net_device_stats stats;
+};
+
+static struct platform_device *mips_plat_dev;
+
+static char mipsnet_string[] = "mipsnet";
+
+/*
+ * Copy data from the MIPSNET rx data port
+ */
+static int ioiocpy_frommipsnet(struct net_device *dev, unsigned char *kdata,
+                       int len)
+{
+       uint32_t available_len = inl(mipsnet_reg_address(dev, rxDataCount));
+       if (available_len < len)
+               return -EFAULT;
+
+       for (; len > 0; len--, kdata++) {
+               *kdata = inb(mipsnet_reg_address(dev, rxDataBuffer));
+       }
+
+       return inl(mipsnet_reg_address(dev, rxDataCount));
+}
+
+static inline ssize_t mipsnet_put_todevice(struct net_device *dev,
+       struct sk_buff *skb)
+{
+       int count_to_go = skb->len;
+       char *buf_ptr = skb->data;
+       struct mipsnet_priv *mp = netdev_priv(dev);
+
+       pr_debug("%s: %s(): telling MIPSNET txDataCount(%d)\n",
+                dev->name, __FUNCTION__, skb->len);
+
+       outl(skb->len, mipsnet_reg_address(dev, txDataCount));
+
+       pr_debug("%s: %s(): sending data to MIPSNET txDataBuffer(%d)\n",
+                dev->name, __FUNCTION__, skb->len);
+
+       for (; count_to_go; buf_ptr++, count_to_go--) {
+               outb(*buf_ptr, mipsnet_reg_address(dev, txDataBuffer));
+       }
+
+       mp->stats.tx_packets++;
+       mp->stats.tx_bytes += skb->len;
+
+       return skb->len;
+}
+
+static int mipsnet_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       pr_debug("%s:%s(): transmitting %d bytes\n",
+                dev->name, __FUNCTION__, skb->len);
+
+       /* Only one packet at a time. Once TXDONE interrupt is serviced, the
+        * queue will be restarted.
+        */
+       netif_stop_queue(dev);
+       mipsnet_put_todevice(dev, skb);
+
+       return 0;
+}
+
+static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t count)
+{
+       struct sk_buff *skb;
+       size_t len = count;
+       struct mipsnet_priv *mp = netdev_priv(dev);
+
+       if (!(skb = alloc_skb(len + 2, GFP_KERNEL))) {
+               mp->stats.rx_dropped++;
+               return -ENOMEM;
+       }
+
+       skb_reserve(skb, 2);
+       if (ioiocpy_frommipsnet(dev, skb_put(skb, len), len))
+               return -EFAULT;
+
+       skb->dev = dev;
+       skb->protocol = eth_type_trans(skb, dev);
+       skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+       pr_debug("%s:%s(): pushing RXed data to kernel\n",
+                dev->name, __FUNCTION__);
+       netif_rx(skb);
+
+       mp->stats.rx_packets++;
+       mp->stats.rx_bytes += len;
+
+       return count;
+}
+
+static irqreturn_t
+mipsnet_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+       struct net_device *dev = dev_id;
+
+       irqreturn_t retval = IRQ_NONE;
+       uint64_t interruptFlags;
+
+       if (irq == dev->irq) {
+               pr_debug("%s:%s(): irq %d for device\n",
+                        dev->name, __FUNCTION__, irq);
+
+               retval = IRQ_HANDLED;
+
+               interruptFlags =
+                   inl(mipsnet_reg_address(dev, interruptControl));
+               pr_debug("%s:%s(): intCtl=0x%016llx\n", dev->name,
+                        __FUNCTION__, interruptFlags);
+
+               if (interruptFlags & MIPSNET_INTCTL_TXDONE) {
+                       pr_debug("%s:%s(): got TXDone\n",
+                                dev->name, __FUNCTION__);
+                       outl(MIPSNET_INTCTL_TXDONE,
+                            mipsnet_reg_address(dev, interruptControl));
+                       // only one packet at a time, we are done.
+                       netif_wake_queue(dev);
+               } else if (interruptFlags & MIPSNET_INTCTL_RXDONE) {
+                       pr_debug("%s:%s(): got RX data\n",
+                                dev->name, __FUNCTION__);
+                       mipsnet_get_fromdev(dev,
+                                   inl(mipsnet_reg_address(dev, rxDataCount)));
+                       pr_debug("%s:%s(): clearing RX int\n",
+                                dev->name, __FUNCTION__);
+                       outl(MIPSNET_INTCTL_RXDONE,
+                            mipsnet_reg_address(dev, interruptControl));
+
+               } else if (interruptFlags & MIPSNET_INTCTL_TESTBIT) {
+                       pr_debug("%s:%s(): got test interrupt\n",
+                                dev->name, __FUNCTION__);
+                       // TESTBIT is cleared on read.
+                       //    And takes effect after a write with 0
+                       outl(0, mipsnet_reg_address(dev, interruptControl));
+               } else {
+                       pr_debug("%s:%s(): no valid flags 0x%016llx\n",
+                                dev->name, __FUNCTION__, interruptFlags);
+                       // Maybe shared IRQ, just ignore, no clearing.
+                       retval = IRQ_NONE;
+               }
+
+       } else {
+               printk(KERN_INFO "%s: %s(): irq %d for unknown device\n",
+                      dev->name, __FUNCTION__, irq);
+               retval = IRQ_NONE;
+       }
+       return retval;
+}                              //mipsnet_interrupt()
+
+static int mipsnet_open(struct net_device *dev)
+{
+       int err;
+       pr_debug("%s: mipsnet_open\n", dev->name);
+
+       err = request_irq(dev->irq, &mipsnet_interrupt,
+                         SA_SHIRQ, dev->name, (void *) dev);
+
+       if (err) {
+               pr_debug("%s: %s(): can't get irq %d\n",
+                        dev->name, __FUNCTION__, dev->irq);
+               release_region(dev->base_addr, MIPSNET_IO_EXTENT);
+               return err;
+       }
+
+       pr_debug("%s: %s(): got IO region at 0x%04lx and irq %d for dev.\n",
+                dev->name, __FUNCTION__, dev->base_addr, dev->irq);
+
+
+       netif_start_queue(dev);
+
+       // test interrupt handler
+       outl(MIPSNET_INTCTL_TESTBIT,
+            mipsnet_reg_address(dev, interruptControl));
+
+
+       return 0;
+}
+
+static int mipsnet_close(struct net_device *dev)
+{
+       pr_debug("%s: %s()\n", dev->name, __FUNCTION__);
+       netif_stop_queue(dev);
+       return 0;
+}
+
+static struct net_device_stats *mipsnet_get_stats(struct net_device *dev)
+{
+       struct mipsnet_priv *mp = netdev_priv(dev);
+
+       return &mp->stats;
+}
+
+static void mipsnet_set_mclist(struct net_device *dev)
+{
+       // we don't do anything
+       return;
+}
+
+static int __init mipsnet_probe(struct device *dev)
+{
+       struct net_device *netdev;
+       int err;
+
+       netdev = alloc_etherdev(sizeof(struct mipsnet_priv));
+       if (!netdev) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       dev_set_drvdata(dev, netdev);
+
+       netdev->open                    = mipsnet_open;
+       netdev->stop                    = mipsnet_close;
+       netdev->hard_start_xmit         = mipsnet_xmit;
+       netdev->get_stats               = mipsnet_get_stats;
+       netdev->set_multicast_list      = mipsnet_set_mclist;
+
+       /*
+        * TODO: probe for these or load them from PARAM
+        */
+       netdev->base_addr = 0x4200;
+       netdev->irq = MIPSCPU_INT_BASE + MIPSCPU_INT_MB0 +
+                     inl(mipsnet_reg_address(netdev, interruptInfo));
+
+       // Get the io region now, get irq on open()
+       if (!request_region(netdev->base_addr, MIPSNET_IO_EXTENT, "mipsnet")) {
+               pr_debug("%s: %s(): IO region {start: 0x%04lx, len: %d} "
+                        "for dev is not available.\n", netdev->name,
+                        __FUNCTION__, netdev->base_addr, MIPSNET_IO_EXTENT);
+               err = -EBUSY;
+               goto out_free_netdev;
+       }
+
+       /*
+        * Lacking any better mechanism to allocate a MAC address we use a
+        * random one ...
+        */
+       random_ether_addr(netdev->dev_addr);
+
+       err = register_netdev(netdev);
+       if (err) {
+               printk(KERN_ERR "MIPSNet: failed to register netdev.\n");
+               goto out_free_region;
+       }
+
+       return 0;
+
+out_free_region:
+       release_region(netdev->base_addr, MIPSNET_IO_EXTENT);
+
+out_free_netdev:
+       free_netdev(netdev);
+
+out:
+       return err;
+}
+
+static int __devexit mipsnet_device_remove(struct device *device)
+{
+       struct net_device *dev = dev_get_drvdata(device);
+
+       unregister_netdev(dev);
+       release_region(dev->base_addr, MIPSNET_IO_EXTENT);
+       free_netdev(dev);
+       dev_set_drvdata(device, NULL);
+
+       return 0;
+}
+
+static struct device_driver mipsnet_driver = {
+       .name   = mipsnet_string,
+       .bus    = &platform_bus_type,
+       .probe  = mipsnet_probe,
+       .remove = __devexit_p(mipsnet_device_remove),
+};
+
+static void mipsnet_platform_release(struct device *device)
+{
+       struct platform_device *pldev;
+
+       /* free device */
+       pldev = to_platform_device(device);
+       kfree(pldev);
+}
+
+static int __init mipsnet_init_module(void)
+{
+       struct platform_device *pldev;
+       int err;
+
+       printk(KERN_INFO "MIPSNet Ethernet driver. Version: %s. "
+              "(c)2005 MIPS Technologies, Inc.\n", MIPSNET_VERSION);
+
+       if (driver_register(&mipsnet_driver)) {
+               printk(KERN_ERR "Driver registration failed\n");
+               err = -ENODEV;
+               goto out;
+       }
+
+        if (!(pldev = kmalloc (sizeof (*pldev), GFP_KERNEL))) {
+               err = -ENOMEM;
+               goto out_unregister_driver;
+       }
+
+       memset (pldev, 0, sizeof (*pldev));
+       pldev->name             = mipsnet_string;
+       pldev->id               = 0;
+       pldev->dev.release      = mipsnet_platform_release;
+
+       if (platform_device_register(pldev)) {
+               err = -ENODEV;
+               goto out_free_pldev;
+       }
+
+        if (!pldev->dev.driver) {
+               /*
+                * The driver was not bound to this device, there was
+                 * no hardware at this address. Unregister it, as the
+                * release fuction will take care of freeing the
+                * allocated structure
+                */
+               platform_device_unregister (pldev);
+       }
+
+       mips_plat_dev           = pldev;
+
+       return 0;
+
+out_free_pldev:
+       kfree(pldev);
+
+out_unregister_driver:
+       driver_unregister(&mipsnet_driver);
+out:
+       return err;
+}
+
+static void __exit mipsnet_exit_module(void)
+{
+       pr_debug("MIPSNet Ethernet driver exiting\n");
+
+       driver_unregister(&mipsnet_driver);
+}
+
+module_init(mipsnet_init_module);
+module_exit(mipsnet_exit_module);
diff --git a/drivers/net/mipsnet.h b/drivers/net/mipsnet.h
new file mode 100644 (file)
index 0000000..8785359
--- /dev/null
@@ -0,0 +1,127 @@
+//
+// <COPYRIGHT CLASS="1B" YEAR="2005">
+// Unpublished work (c) MIPS Technologies, Inc.  All rights reserved.
+// Unpublished rights reserved under the copyright laws of the U.S.A. and
+//  other countries.
+//
+// PROPRIETARY / SECRET CONFIDENTIAL INFORMATION OF MIPS TECHNOLOGIES, INC.
+// FOR INTERNAL USE ONLY.
+//
+// Under no circumstances (contract or otherwise) may this information be
+// disclosed to, or copied, modified or used by anyone other than employees
+// or contractors of MIPS Technologies having a need to know.
+// </COPYRIGHT>
+//
+//++
+// File: MIPS_Net.h
+//
+// Description:
+//   The definition of the emulated MIPSNET device's interface.
+//
+// Notes: This include file needs to work from a Linux device drivers.
+//
+//--
+//
+
+#ifndef __MIPSNET_H
+#define __MIPSNET_H
+
+/*
+ *  Id of this Net device, as seen by the core.
+ */
+#define MIPS_NET_DEV_ID ((uint64_t)           \
+                            ((uint64_t)'M'<< 0)| \
+                            ((uint64_t)'I'<< 8)| \
+                            ((uint64_t)'P'<<16)| \
+                            ((uint64_t)'S'<<24)| \
+                            ((uint64_t)'N'<<32)| \
+                            ((uint64_t)'E'<<40)| \
+                            ((uint64_t)'T'<<48)| \
+                            ((uint64_t)'0'<<56))
+
+/*
+ * Net status/control block as seen by sw in the core.
+ * (Why not use bit fields? can't be bothered with cross-platform struct
+ *  packing.)
+ */
+typedef struct _net_control_block {
+       /// dev info for probing
+       ///  reads as MIPSNET%d where %d is some form of version
+       uint64_t devId;         /*0x00 */
+
+       /*
+        * read only busy flag.
+        * Set and cleared by the Net Device to indicate that an rx or a tx
+        * is in progress.
+        */
+       uint32_t busy;          /*0x08 */
+
+       /*
+        * Set by the Net Device.
+        * The device will set it once data has been received.
+        * The value is the number of bytes that should be read from
+        * rxDataBuffer.  The value will decrease till 0 until all the data
+        * from rxDataBuffer has been read.
+        */
+       uint32_t rxDataCount;   /*0x0c */
+#define MIPSNET_MAX_RXTX_DATACOUNT (1<<16)
+
+       /*
+        * Settable from the MIPS core, cleared by the Net Device.
+        * The core should set the number of bytes it wants to send,
+        *   then it should write those bytes of data to txDataBuffer.
+        * The device will clear txDataCount once the data has been processed (not necessarily sent).
+        */
+       uint32_t txDataCount;   /*0x10 */
+
+       /*
+        * Interrupt control
+        *
+        * Used to clear the interrupted generated by this dev.
+        * Write a 1 to clear the interrupt. (except bit31).
+        *
+        * Bit0 is set if it was a tx-done interrupt.
+        * Bit1 is set when new rx-data is available.
+        *      Until this bit is cleared there will be no other RXs.
+        *
+        * Bit31 is used for testing, it clears after a read.
+        *    Writing 1 to this bit will cause an interrupt to be generated.
+        *    To clear the test interrupt, write 0 to this register.
+        */
+       uint32_t interruptControl;      /*0x14 */
+#define MIPSNET_INTCTL_TXDONE     ((uint32_t)(1<< 0))
+#define MIPSNET_INTCTL_RXDONE     ((uint32_t)(1<< 1))
+#define MIPSNET_INTCTL_TESTBIT    ((uint32_t)(1<<31))
+#define MIPSNET_INTCTL_ALLSOURCES (MIPSNET_INTCTL_TXDONE|MIPSNET_INTCTL_RXDONE|MIPSNET_INTCTL_TESTBIT)
+
+       /*
+        * Readonly core-specific interrupt info for the device to signal the core.
+        * The meaning of the contents of this field might change.
+        */
+       /*###\todo: the whole memIntf interrupt scheme is messy: the device should have
+        *  no control whatsoever over which VPE/register set is being used.
+        *  The MemIntf should only expose interrupt lines, and something in the
+        *  config should be responsible for the line<->core/vpe bindings.
+        */
+       uint32_t interruptInfo; /*0x18 */
+
+       /*
+        *  This is where the received data is read out.
+        *  There is more data to read while rxDataCount is non-zero.
+        *  Only 1 byte at this regs offset is used.
+        */
+       uint32_t rxDataBuffer;  /*0x1c */
+
+       /*
+        * This is where the data to transmit is written.
+        * Data should be written for the amount specified in the txDataCount register.
+        *  Only 1 byte at this regs offset is used.
+        */
+       uint32_t txDataBuffer;  /*0x20 */
+} MIPS_T_NetControl;
+
+#define MIPSNET_IO_EXTENT 0x40 /* being generous */
+
+#define field_offset(field) ((int)&((MIPS_T_NetControl*)(0))->field)
+
+#endif /* __MIPSNET_H */
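The offsets noted in the comments above (devId at 0x00, busy at 0x08, rxDataCount at 0x0c, and so on up to txDataBuffer at 0x20) fall out of the struct layout that field_offset() walks: one 64-bit devId followed by packed 32-bit registers. A standalone sanity check of those offsets using offsetof(), for illustration only; it mirrors the layout rather than including the kernel header:

/* Userspace check of the MIPSNET register offsets documented above. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct mipsnet_regs {
	uint64_t devId;			/* expected 0x00 */
	uint32_t busy;			/* expected 0x08 */
	uint32_t rxDataCount;		/* expected 0x0c */
	uint32_t txDataCount;		/* expected 0x10 */
	uint32_t interruptControl;	/* expected 0x14 */
	uint32_t interruptInfo;		/* expected 0x18 */
	uint32_t rxDataBuffer;		/* expected 0x1c */
	uint32_t txDataBuffer;		/* expected 0x20 */
};

int main(void)
{
	printf("rxDataCount=0x%02zx interruptControl=0x%02zx txDataBuffer=0x%02zx\n",
	       offsetof(struct mipsnet_regs, rxDataCount),
	       offsetof(struct mipsnet_regs, interruptControl),
	       offsetof(struct mipsnet_regs, txDataBuffer));
	return 0;
}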
index f0996ce5c268698b060a784fbe1ab0661817b577..6c86dca62e2a565b6d611178d3894ee7120d9874 100644 (file)
@@ -277,7 +277,7 @@ static void myri_init_rings(struct myri_eth *mp, int from_irq)
        struct recvq __iomem *rq = mp->rq;
        struct myri_rxd __iomem *rxd = &rq->myri_rxd[0];
        struct net_device *dev = mp->dev;
-       int gfp_flags = GFP_KERNEL;
+       gfp_t gfp_flags = GFP_KERNEL;
        int i;
 
        if (from_irq || in_interrupt())
index 9391e55a5e92488d247ae41875e8bf065a7bd93b..47722f708a41822da4da40fc88f0e4752235fd5c 100644 (file)
@@ -296,7 +296,7 @@ struct myri_eth {
 /* We use this to acquire receive skb's that we can DMA directly into. */
 #define ALIGNED_RX_SKB_ADDR(addr) \
         ((((unsigned long)(addr) + (64 - 1)) & ~(64 - 1)) - (unsigned long)(addr))
-static inline struct sk_buff *myri_alloc_skb(unsigned int length, int gfp_flags)
+static inline struct sk_buff *myri_alloc_skb(unsigned int length, gfp_t gfp_flags)
 {
        struct sk_buff *skb;
 
index d209a1556b2ebcb355a0110759c6c74190864123..0de8fdd2aa86f338fa026e7c308cb3ca6be2b13c 100644 (file)
@@ -54,6 +54,10 @@ static const char version2[] =
 #include <asm/system.h>
 #include <asm/io.h>
 
+#if defined(CONFIG_TOSHIBA_RBTX4927) || defined(CONFIG_TOSHIBA_RBTX4938)
+#include <asm/tx4938/rbtx4938.h>
+#endif
+
 #include "8390.h"
 
 #define DRV_NAME "ne"
@@ -111,6 +115,9 @@ bad_clone_list[] __initdata = {
     {"E-LAN100", "E-LAN200", {0x00, 0x00, 0x5d}}, /* Broken ne1000 clones */
     {"PCM-4823", "PCM-4823", {0x00, 0xc0, 0x6c}}, /* Broken Advantech MoBo */
     {"REALTEK", "RTL8019", {0x00, 0x00, 0xe8}}, /* no-name with Realtek chip */
+#if defined(CONFIG_TOSHIBA_RBTX4927) || defined(CONFIG_TOSHIBA_RBTX4938)
+    {"RBHMA4X00-RTL8019", "RBHMA4X00/RTL8019", {0x00, 0x60, 0x0a}},  /* Toshiba built-in */
+#endif
     {"LCS-8834", "LCS-8836", {0x04, 0x04, 0x37}}, /* ShinyNet (SET) */
     {NULL,}
 };
@@ -226,6 +233,10 @@ struct net_device * __init ne_probe(int unit)
        sprintf(dev->name, "eth%d", unit);
        netdev_boot_setup_check(dev);
 
+#ifdef CONFIG_TOSHIBA_RBTX4938
+       dev->base_addr = 0x07f20280;
+       dev->irq = RBTX4938_RTL_8019_IRQ;
+#endif
        err = do_ne_probe(dev);
        if (err)
                goto out;
@@ -506,6 +517,10 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr)
        ei_status.name = name;
        ei_status.tx_start_page = start_page;
        ei_status.stop_page = stop_page;
+#if defined(CONFIG_TOSHIBA_RBTX4927) || defined(CONFIG_TOSHIBA_RBTX4938)
+       wordlength = 1;
+#endif
+
 #ifdef CONFIG_PLAT_OAKS32R
        ei_status.word16 = 0;
 #else
index f1c01ac2910239206fc892516057ef4bf85bb0c3..e531a4eedfeee3fd046beb4c7df5e3cb03be0da8 100644 (file)
@@ -372,6 +372,7 @@ static int __devinit ne2k_pci_init_one (struct pci_dev *pdev,
                printk("%2.2X%s", SA_prom[i], i == 5 ? ".\n": ":");
                dev->dev_addr[i] = SA_prom[i];
        }
+       memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 
        return 0;
 
@@ -637,6 +638,7 @@ static struct ethtool_ops ne2k_pci_ethtool_ops = {
        .get_drvinfo            = ne2k_pci_get_drvinfo,
        .get_tx_csum            = ethtool_op_get_tx_csum,
        .get_sg                 = ethtool_op_get_sg,
+       .get_perm_addr          = ethtool_op_get_perm_addr,
 };
 
 static void __devexit ne2k_pci_remove_one (struct pci_dev *pdev)
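
This hunk and several others in the merge (r8169 and pcnet32 below) apply the same two-line pattern: copy the address just read from hardware into dev->perm_addr, and hook the generic ethtool_op_get_perm_addr helper into the driver's ethtool_ops so the permanent address can be queried later. A generic sketch of the pattern, assuming <linux/etherdevice.h> and <linux/ethtool.h>; my_read_hw_addr() is a placeholder for whatever PROM/EEPROM read the individual driver performs.

    /* Sketch only: record the factory MAC as the permanent address. */
    static int my_record_perm_addr(struct net_device *dev)
    {
            my_read_hw_addr(dev->dev_addr);         /* driver-specific PROM read */
            memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
            return is_valid_ether_addr(dev->perm_addr) ? 0 : -EINVAL;
    }

    static struct ethtool_ops my_ethtool_ops = {
            .get_perm_addr = ethtool_op_get_perm_addr, /* reports dev->perm_addr */
    };
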
index e4811b42a6b7fe39ff8b3d1dee28f075403901e6..a3c3fc9c0d8a8ec241b452dde7f4aba968b3bb1d 100644 (file)
@@ -1632,8 +1632,7 @@ static void ns83820_run_bist(struct net_device *ndev, const char *name, u32 enab
                        timed_out = 1;
                        break;
                }
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(1);
+               schedule_timeout_uninterruptible(1);
        }
 
        if (status & fail)
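
This is a behavior-preserving cleanup: schedule_timeout_uninterruptible(t) is simply schedule_timeout(t) with the task state set to TASK_UNINTERRUPTIBLE first, so the two removed lines and the new one-liner are equivalent. A sketch of that equivalence, for illustration only (the real helper lives in the core kernel, not in this driver):

    /* Equivalent to the two lines removed in the hunk above. */
    static inline signed long my_schedule_timeout_uninterruptible(signed long timeout)
    {
            __set_current_state(TASK_UNINTERRUPTIBLE);
            return schedule_timeout(timeout);
    }
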
index 113b68099216b4aeac8af443d690e21cd1c15cb4..70fe81a89df958ab72b540924383a164a0a56c00 100644 (file)
@@ -22,8 +22,8 @@
  *************************************************************************/
 
 #define DRV_NAME       "pcnet32"
-#define DRV_VERSION    "1.30j"
-#define DRV_RELDATE    "29.04.2005"
+#define DRV_VERSION    "1.31a"
+#define DRV_RELDATE    "12.Sep.2005"
 #define PFX            DRV_NAME ": "
 
 static const char *version =
@@ -257,6 +257,9 @@ static int homepna[MAX_UNITS];
  * v1.30h  24 Jun 2004 Don Fry correctly select auto, speed, duplex in bcr32.
  * v1.30i  28 Jun 2004 Don Fry change to use module_param.
  * v1.30j  29 Apr 2005 Don Fry fix skb/map leak with loopback test.
+ * v1.31   02 Sep 2005 Hubert WS Lin <wslin@tw.ibm.c0m> added set_ringparam().
+ * v1.31a  12 Sep 2005 Hubert WS Lin <wslin@tw.ibm.c0m> set min ring size to 4
+ *        to allow loopback test to work unchanged.
  */
 
 
@@ -266,17 +269,17 @@ static int homepna[MAX_UNITS];
  * That translates to 2 (4 == 2^^2) and 4 (16 == 2^^4).
  */
 #ifndef PCNET32_LOG_TX_BUFFERS
-#define PCNET32_LOG_TX_BUFFERS 4
-#define PCNET32_LOG_RX_BUFFERS 5
+#define PCNET32_LOG_TX_BUFFERS         4
+#define PCNET32_LOG_RX_BUFFERS         5
+#define PCNET32_LOG_MAX_TX_BUFFERS     9       /* 2^9 == 512 */
+#define PCNET32_LOG_MAX_RX_BUFFERS     9
 #endif
 
 #define TX_RING_SIZE           (1 << (PCNET32_LOG_TX_BUFFERS))
-#define TX_RING_MOD_MASK       (TX_RING_SIZE - 1)
-#define TX_RING_LEN_BITS       ((PCNET32_LOG_TX_BUFFERS) << 12)
+#define TX_MAX_RING_SIZE       (1 << (PCNET32_LOG_MAX_TX_BUFFERS))
 
 #define RX_RING_SIZE           (1 << (PCNET32_LOG_RX_BUFFERS))
-#define RX_RING_MOD_MASK       (RX_RING_SIZE - 1)
-#define RX_RING_LEN_BITS       ((PCNET32_LOG_RX_BUFFERS) << 4)
+#define RX_MAX_RING_SIZE       (1 << (PCNET32_LOG_MAX_RX_BUFFERS))
 
 #define PKT_BUF_SZ             1544
 
@@ -334,14 +337,14 @@ struct pcnet32_access {
 };
 
 /*
- * The first three fields of pcnet32_private are read by the ethernet device
- * so we allocate the structure should be allocated by pci_alloc_consistent().
+ * The first field of pcnet32_private is read by the ethernet device
+ * so the structure should be allocated using pci_alloc_consistent().
  */
 struct pcnet32_private {
-    /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */
-    struct pcnet32_rx_head    rx_ring[RX_RING_SIZE];
-    struct pcnet32_tx_head    tx_ring[TX_RING_SIZE];
     struct pcnet32_init_block init_block;
+    /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */
+    struct pcnet32_rx_head    *rx_ring;
+    struct pcnet32_tx_head    *tx_ring;
     dma_addr_t         dma_addr;       /* DMA address of beginning of this
                                           object, returned by
                                           pci_alloc_consistent */
@@ -349,13 +352,21 @@ struct pcnet32_private {
                                           structure */
     const char         *name;
     /* The saved address of a sent-in-place packet/buffer, for skfree(). */
-    struct sk_buff     *tx_skbuff[TX_RING_SIZE];
-    struct sk_buff     *rx_skbuff[RX_RING_SIZE];
-    dma_addr_t         tx_dma_addr[TX_RING_SIZE];
-    dma_addr_t         rx_dma_addr[RX_RING_SIZE];
+    struct sk_buff     **tx_skbuff;
+    struct sk_buff     **rx_skbuff;
+    dma_addr_t         *tx_dma_addr;
+    dma_addr_t         *rx_dma_addr;
     struct pcnet32_access      a;
     spinlock_t         lock;           /* Guard lock */
     unsigned int       cur_rx, cur_tx; /* The next free ring entry */
+    unsigned int       rx_ring_size;   /* current rx ring size */
+    unsigned int       tx_ring_size;   /* current tx ring size */
+    unsigned int       rx_mod_mask;    /* rx ring modular mask */
+    unsigned int       tx_mod_mask;    /* tx ring modular mask */
+    unsigned short     rx_len_bits;
+    unsigned short     tx_len_bits;
+    dma_addr_t         rx_ring_dma_addr;
+    dma_addr_t         tx_ring_dma_addr;
     unsigned int       dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
     struct net_device_stats stats;
     char               tx_full;
@@ -397,6 +408,9 @@ static int pcnet32_get_regs_len(struct net_device *dev);
 static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
        void *ptr);
 static void pcnet32_purge_tx_ring(struct net_device *dev);
+static int pcnet32_alloc_ring(struct net_device *dev);
+static void pcnet32_free_ring(struct net_device *dev);
+
 
 enum pci_flags_bit {
     PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
@@ -613,10 +627,62 @@ static void pcnet32_get_ringparam(struct net_device *dev, struct ethtool_ringpar
 {
     struct pcnet32_private *lp = dev->priv;
 
-    ering->tx_max_pending = TX_RING_SIZE - 1;
-    ering->tx_pending = lp->cur_tx - lp->dirty_tx;
-    ering->rx_max_pending = RX_RING_SIZE - 1;
-    ering->rx_pending = lp->cur_rx & RX_RING_MOD_MASK;
+    ering->tx_max_pending = TX_MAX_RING_SIZE - 1;
+    ering->tx_pending = lp->tx_ring_size - 1;
+    ering->rx_max_pending = RX_MAX_RING_SIZE - 1;
+    ering->rx_pending = lp->rx_ring_size - 1;
+}
+
+static int pcnet32_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+{
+    struct pcnet32_private *lp = dev->priv;
+    unsigned long flags;
+    int i;
+
+    if (ering->rx_mini_pending || ering->rx_jumbo_pending)
+       return -EINVAL;
+
+    if (netif_running(dev))
+       pcnet32_close(dev);
+
+    spin_lock_irqsave(&lp->lock, flags);
+    pcnet32_free_ring(dev);
+    lp->tx_ring_size = min(ering->tx_pending, (unsigned int) TX_MAX_RING_SIZE);
+    lp->rx_ring_size = min(ering->rx_pending, (unsigned int) RX_MAX_RING_SIZE);
+
+    /* set the minimum ring size to 4, to allow the loopback test to work
+     * unchanged.
+     */
+    for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) {
+       if (lp->tx_ring_size <= (1 << i))
+           break;
+    }
+    lp->tx_ring_size = (1 << i);
+    lp->tx_mod_mask = lp->tx_ring_size - 1;
+    lp->tx_len_bits = (i << 12);
+
+    for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) {
+       if (lp->rx_ring_size <= (1 << i))
+           break;
+    }
+    lp->rx_ring_size = (1 << i);
+    lp->rx_mod_mask = lp->rx_ring_size - 1;
+    lp->rx_len_bits = (i << 4);
+
+    if (pcnet32_alloc_ring(dev)) {
+       pcnet32_free_ring(dev);
+       return -ENOMEM;
+    }
+
+    spin_unlock_irqrestore(&lp->lock, flags);
+
+    if (pcnet32_debug & NETIF_MSG_DRV)
+       printk(KERN_INFO PFX "Ring Param Settings: RX: %d, TX: %d\n", lp->rx_ring_size, lp->tx_ring_size);
+
+    if (netif_running(dev))
+       pcnet32_open(dev);
+
+    return 0;
 }
 
 static void pcnet32_get_strings(struct net_device *dev, u32 stringset, u8 *data)
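
The new pcnet32_set_ringparam() rounds each requested ring size up to a power of two, clamped between 4 (so the loopback self-test keeps working unchanged, per the comment above) and TX_MAX_RING_SIZE/RX_MAX_RING_SIZE (512), because the exponent is what gets packed into tx_len_bits/rx_len_bits and ultimately into the init block's tlen_rlen field. A standalone sketch of that rounding; pcnet32_round_ring_size() is illustrative only and not a function in the driver.

    /* Round 'requested' up to a power of two in [4, 1 << log_max];
     * mirrors the loops in pcnet32_set_ringparam() above. */
    static unsigned int pcnet32_round_ring_size(unsigned int requested,
                                                unsigned int log_max)
    {
            unsigned int log;

            if (requested > (1U << log_max))
                    requested = 1U << log_max;
            for (log = 2; log <= log_max; log++)
                    if (requested <= (1U << log))
                            break;
            return 1U << log;   /* e.g. 100 -> 128, 3 -> 4, 600 -> 512 for log_max = 9 */
    }
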
@@ -948,6 +1014,7 @@ static struct ethtool_ops pcnet32_ethtool_ops = {
     .nway_reset                = pcnet32_nway_reset,
     .get_link          = pcnet32_get_link,
     .get_ringparam     = pcnet32_get_ringparam,
+    .set_ringparam     = pcnet32_set_ringparam,
     .get_tx_csum       = ethtool_op_get_tx_csum,
     .get_sg            = ethtool_op_get_sg,
     .get_tso           = ethtool_op_get_tso,
@@ -957,6 +1024,7 @@ static struct ethtool_ops pcnet32_ethtool_ops = {
     .phys_id           = pcnet32_phys_id,
     .get_regs_len      = pcnet32_get_regs_len,
     .get_regs          = pcnet32_get_regs,
+    .get_perm_addr     = ethtool_op_get_perm_addr,
 };
 
 /* only probes for non-PCI devices, the rest are handled by
@@ -1185,9 +1253,10 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
            memcpy(dev->dev_addr, promaddr, 6);
        }
     }
+    memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 
     /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
-    if (!is_valid_ether_addr(dev->dev_addr))
+    if (!is_valid_ether_addr(dev->perm_addr))
        memset(dev->dev_addr, 0, sizeof(dev->dev_addr));
 
     if (pcnet32_debug & NETIF_MSG_PROBE) {
@@ -1239,6 +1308,12 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
     dev->priv = lp;
     lp->name = chipname;
     lp->shared_irq = shared;
+    lp->tx_ring_size = TX_RING_SIZE;           /* default tx ring size */
+    lp->rx_ring_size = RX_RING_SIZE;           /* default rx ring size */
+    lp->tx_mod_mask = lp->tx_ring_size - 1;
+    lp->rx_mod_mask = lp->rx_ring_size - 1;
+    lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12);
+    lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4);
     lp->mii_if.full_duplex = fdx;
     lp->mii_if.phy_id_mask = 0x1f;
     lp->mii_if.reg_num_mask = 0x1f;
@@ -1265,21 +1340,23 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
     }
     lp->a = *a;
 
+    if (pcnet32_alloc_ring(dev)) {
+       ret = -ENOMEM;
+       goto err_free_ring;
+    }
     /* detect special T1/E1 WAN card by checking for MAC address */
     if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0
            && dev->dev_addr[2] == 0x75)
        lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI;
 
     lp->init_block.mode = le16_to_cpu(0x0003); /* Disable Rx and Tx. */
-    lp->init_block.tlen_rlen = le16_to_cpu(TX_RING_LEN_BITS | RX_RING_LEN_BITS);
+    lp->init_block.tlen_rlen = le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits);
     for (i = 0; i < 6; i++)
        lp->init_block.phys_addr[i] = dev->dev_addr[i];
     lp->init_block.filter[0] = 0x00000000;
     lp->init_block.filter[1] = 0x00000000;
-    lp->init_block.rx_ring = (u32)le32_to_cpu(lp->dma_addr +
-           offsetof(struct pcnet32_private, rx_ring));
-    lp->init_block.tx_ring = (u32)le32_to_cpu(lp->dma_addr +
-           offsetof(struct pcnet32_private, tx_ring));
+    lp->init_block.rx_ring = (u32)le32_to_cpu(lp->rx_ring_dma_addr);
+    lp->init_block.tx_ring = (u32)le32_to_cpu(lp->tx_ring_dma_addr);
 
     /* switch pcnet32 to 32bit mode */
     a->write_bcr(ioaddr, 20, 2);
@@ -1310,7 +1387,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
            if (pcnet32_debug & NETIF_MSG_PROBE)
                printk(", failed to detect IRQ line.\n");
            ret = -ENODEV;
-           goto err_free_consistent;
+           goto err_free_ring;
        }
        if (pcnet32_debug & NETIF_MSG_PROBE)
            printk(", probed IRQ %d.\n", dev->irq);
@@ -1341,7 +1418,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 
     /* Fill in the generic fields of the device structure. */
     if (register_netdev(dev))
-       goto err_free_consistent;
+       goto err_free_ring;
 
     if (pdev) {
        pci_set_drvdata(pdev, dev);
@@ -1359,6 +1436,8 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 
     return 0;
 
+err_free_ring:
+    pcnet32_free_ring(dev);
 err_free_consistent:
     pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
 err_free_netdev:
@@ -1369,6 +1448,86 @@ err_release_region:
 }
 
 
+static int pcnet32_alloc_ring(struct net_device *dev)
+{
+    struct pcnet32_private *lp = dev->priv;
+
+    if ((lp->tx_ring = pci_alloc_consistent(lp->pci_dev, sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
+       &lp->tx_ring_dma_addr)) == NULL) {
+       if (pcnet32_debug & NETIF_MSG_DRV)
+           printk(KERN_ERR PFX "Consistent memory allocation failed.\n");
+       return -ENOMEM;
+    }
+
+    if ((lp->rx_ring = pci_alloc_consistent(lp->pci_dev, sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
+       &lp->rx_ring_dma_addr)) == NULL) {
+       if (pcnet32_debug & NETIF_MSG_DRV)
+           printk(KERN_ERR PFX "Consistent memory allocation failed.\n");
+       return -ENOMEM;
+    }
+
+    if (!(lp->tx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->tx_ring_size, GFP_ATOMIC))) {
+       if (pcnet32_debug & NETIF_MSG_DRV)
+           printk(KERN_ERR PFX "Memory allocation failed.\n");
+       return -ENOMEM;
+    }
+    memset(lp->tx_dma_addr, 0, sizeof(dma_addr_t) * lp->tx_ring_size);
+
+    if (!(lp->rx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->rx_ring_size, GFP_ATOMIC))) {
+       if (pcnet32_debug & NETIF_MSG_DRV)
+           printk(KERN_ERR PFX "Memory allocation failed.\n");
+       return -ENOMEM;
+    }
+    memset(lp->rx_dma_addr, 0, sizeof(dma_addr_t) * lp->rx_ring_size);
+
+    if (!(lp->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->tx_ring_size, GFP_ATOMIC))) {
+       if (pcnet32_debug & NETIF_MSG_DRV)
+           printk(KERN_ERR PFX "Memory allocation failed.\n");
+       return -ENOMEM;
+    }
+    memset(lp->tx_skbuff, 0, sizeof(struct sk_buff *) * lp->tx_ring_size);
+
+    if (!(lp->rx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->rx_ring_size, GFP_ATOMIC))) {
+       if (pcnet32_debug & NETIF_MSG_DRV)
+           printk(KERN_ERR PFX "Memory allocation failed.\n");
+       return -ENOMEM;
+    }
+    memset(lp->rx_skbuff, 0, sizeof(struct sk_buff *) * lp->rx_ring_size);
+
+    return 0;
+}
+
+
+static void pcnet32_free_ring(struct net_device *dev)
+{
+    struct pcnet32_private *lp = dev->priv;
+
+    kfree(lp->tx_skbuff);
+    lp->tx_skbuff = NULL;
+
+    kfree(lp->rx_skbuff);
+    lp->rx_skbuff = NULL;
+
+    kfree(lp->tx_dma_addr);
+    lp->tx_dma_addr = NULL;
+
+    kfree(lp->rx_dma_addr);
+    lp->rx_dma_addr = NULL;
+
+    if (lp->tx_ring) {
+       pci_free_consistent(lp->pci_dev, sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
+               lp->tx_ring, lp->tx_ring_dma_addr);
+       lp->tx_ring = NULL;
+    }
+
+    if (lp->rx_ring) {
+       pci_free_consistent(lp->pci_dev, sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
+               lp->rx_ring, lp->rx_ring_dma_addr);
+       lp->rx_ring = NULL;
+    }
+}
+
+
 static int
 pcnet32_open(struct net_device *dev)
 {
@@ -1400,8 +1559,8 @@ pcnet32_open(struct net_device *dev)
     if (netif_msg_ifup(lp))
        printk(KERN_DEBUG "%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n",
               dev->name, dev->irq,
-              (u32) (lp->dma_addr + offsetof(struct pcnet32_private, tx_ring)),
-              (u32) (lp->dma_addr + offsetof(struct pcnet32_private, rx_ring)),
+              (u32) (lp->tx_ring_dma_addr),
+              (u32) (lp->rx_ring_dma_addr),
               (u32) (lp->dma_addr + offsetof(struct pcnet32_private, init_block)));
 
     /* set/reset autoselect bit */
@@ -1521,7 +1680,7 @@ pcnet32_open(struct net_device *dev)
 
 err_free_ring:
     /* free any allocated skbuffs */
-    for (i = 0; i < RX_RING_SIZE; i++) {
+    for (i = 0; i < lp->rx_ring_size; i++) {
        lp->rx_ring[i].status = 0;
        if (lp->rx_skbuff[i]) {
            pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], PKT_BUF_SZ-2,
@@ -1531,6 +1690,9 @@ err_free_ring:
        lp->rx_skbuff[i] = NULL;
        lp->rx_dma_addr[i] = 0;
     }
+
+    pcnet32_free_ring(dev);
+
     /*
      * Switch back to 16bit mode to avoid problems with dumb
      * DOS packet driver after a warm reboot
@@ -1562,7 +1724,7 @@ pcnet32_purge_tx_ring(struct net_device *dev)
     struct pcnet32_private *lp = dev->priv;
     int i;
 
-    for (i = 0; i < TX_RING_SIZE; i++) {
+    for (i = 0; i < lp->tx_ring_size; i++) {
        lp->tx_ring[i].status = 0;      /* CPU owns buffer */
        wmb();  /* Make sure adapter sees owner change */
        if (lp->tx_skbuff[i]) {
@@ -1587,7 +1749,7 @@ pcnet32_init_ring(struct net_device *dev)
     lp->cur_rx = lp->cur_tx = 0;
     lp->dirty_rx = lp->dirty_tx = 0;
 
-    for (i = 0; i < RX_RING_SIZE; i++) {
+    for (i = 0; i < lp->rx_ring_size; i++) {
        struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
        if (rx_skbuff == NULL) {
            if (!(rx_skbuff = lp->rx_skbuff[i] = dev_alloc_skb (PKT_BUF_SZ))) {
@@ -1611,20 +1773,18 @@ pcnet32_init_ring(struct net_device *dev)
     }
     /* The Tx buffer address is filled in as needed, but we do need to clear
      * the upper ownership bit. */
-    for (i = 0; i < TX_RING_SIZE; i++) {
+    for (i = 0; i < lp->tx_ring_size; i++) {
        lp->tx_ring[i].status = 0;      /* CPU owns buffer */
        wmb();  /* Make sure adapter sees owner change */
        lp->tx_ring[i].base = 0;
        lp->tx_dma_addr[i] = 0;
     }
 
-    lp->init_block.tlen_rlen = le16_to_cpu(TX_RING_LEN_BITS | RX_RING_LEN_BITS);
+    lp->init_block.tlen_rlen = le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits);
     for (i = 0; i < 6; i++)
        lp->init_block.phys_addr[i] = dev->dev_addr[i];
-    lp->init_block.rx_ring = (u32)le32_to_cpu(lp->dma_addr +
-           offsetof(struct pcnet32_private, rx_ring));
-    lp->init_block.tx_ring = (u32)le32_to_cpu(lp->dma_addr +
-           offsetof(struct pcnet32_private, tx_ring));
+    lp->init_block.rx_ring = (u32)le32_to_cpu(lp->rx_ring_dma_addr);
+    lp->init_block.tx_ring = (u32)le32_to_cpu(lp->tx_ring_dma_addr);
     wmb();     /* Make sure all changes are visible */
     return 0;
 }
@@ -1682,13 +1842,13 @@ pcnet32_tx_timeout (struct net_device *dev)
        printk(KERN_DEBUG " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
           lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
           lp->cur_rx);
-       for (i = 0 ; i < RX_RING_SIZE; i++)
+       for (i = 0 ; i < lp->rx_ring_size; i++)
        printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
               le32_to_cpu(lp->rx_ring[i].base),
               (-le16_to_cpu(lp->rx_ring[i].buf_length)) & 0xffff,
               le32_to_cpu(lp->rx_ring[i].msg_length),
               le16_to_cpu(lp->rx_ring[i].status));
-       for (i = 0 ; i < TX_RING_SIZE; i++)
+       for (i = 0 ; i < lp->tx_ring_size; i++)
        printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
               le32_to_cpu(lp->tx_ring[i].base),
               (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff,
@@ -1729,7 +1889,7 @@ pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev)
     /* Fill in a Tx ring entry */
 
     /* Mask to ring buffer boundary. */
-    entry = lp->cur_tx & TX_RING_MOD_MASK;
+    entry = lp->cur_tx & lp->tx_mod_mask;
 
     /* Caution: the write order is important here, set the status
      * with the "ownership" bits last. */
@@ -1753,7 +1913,7 @@ pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
     dev->trans_start = jiffies;
 
-    if (lp->tx_ring[(entry+1) & TX_RING_MOD_MASK].base != 0) {
+    if (lp->tx_ring[(entry+1) & lp->tx_mod_mask].base != 0) {
        lp->tx_full = 1;
        netif_stop_queue(dev);
     }
@@ -1806,7 +1966,7 @@ pcnet32_interrupt(int irq, void *dev_id, struct pt_regs * regs)
            int delta;
 
            while (dirty_tx != lp->cur_tx) {
-               int entry = dirty_tx & TX_RING_MOD_MASK;
+               int entry = dirty_tx & lp->tx_mod_mask;
                int status = (short)le16_to_cpu(lp->tx_ring[entry].status);
 
                if (status < 0)
@@ -1864,18 +2024,18 @@ pcnet32_interrupt(int irq, void *dev_id, struct pt_regs * regs)
                dirty_tx++;
            }
 
-           delta = (lp->cur_tx - dirty_tx) & (TX_RING_MOD_MASK + TX_RING_SIZE);
-           if (delta > TX_RING_SIZE) {
+           delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size);
+           if (delta > lp->tx_ring_size) {
                if (netif_msg_drv(lp))
                    printk(KERN_ERR "%s: out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
                            dev->name, dirty_tx, lp->cur_tx, lp->tx_full);
-               dirty_tx += TX_RING_SIZE;
-               delta -= TX_RING_SIZE;
+               dirty_tx += lp->tx_ring_size;
+               delta -= lp->tx_ring_size;
            }
 
            if (lp->tx_full &&
                netif_queue_stopped(dev) &&
-               delta < TX_RING_SIZE - 2) {
+               delta < lp->tx_ring_size - 2) {
                /* The ring is no longer full, clear tbusy. */
                lp->tx_full = 0;
                netif_wake_queue (dev);
@@ -1932,8 +2092,8 @@ static int
 pcnet32_rx(struct net_device *dev)
 {
     struct pcnet32_private *lp = dev->priv;
-    int entry = lp->cur_rx & RX_RING_MOD_MASK;
-    int boguscnt = RX_RING_SIZE / 2;
+    int entry = lp->cur_rx & lp->rx_mod_mask;
+    int boguscnt = lp->rx_ring_size / 2;
 
     /* If we own the next entry, it's a new packet. Send it up. */
     while ((short)le16_to_cpu(lp->rx_ring[entry].status) >= 0) {
@@ -1998,12 +2158,12 @@ pcnet32_rx(struct net_device *dev)
                    if (netif_msg_drv(lp))
                        printk(KERN_ERR "%s: Memory squeeze, deferring packet.\n",
                                dev->name);
-                   for (i = 0; i < RX_RING_SIZE; i++)
+                   for (i = 0; i < lp->rx_ring_size; i++)
                        if ((short)le16_to_cpu(lp->rx_ring[(entry+i)
-                                   & RX_RING_MOD_MASK].status) < 0)
+                                   & lp->rx_mod_mask].status) < 0)
                            break;
 
-                   if (i > RX_RING_SIZE -2) {
+                   if (i > lp->rx_ring_size -2) {
                        lp->stats.rx_dropped++;
                        lp->rx_ring[entry].status |= le16_to_cpu(0x8000);
                        wmb();  /* Make sure adapter sees owner change */
@@ -2041,7 +2201,7 @@ pcnet32_rx(struct net_device *dev)
        lp->rx_ring[entry].buf_length = le16_to_cpu(2-PKT_BUF_SZ);
        wmb(); /* Make sure owner changes after all others are visible */
        lp->rx_ring[entry].status |= le16_to_cpu(0x8000);
-       entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
+       entry = (++lp->cur_rx) & lp->rx_mod_mask;
        if (--boguscnt <= 0) break;     /* don't stay in loop forever */
     }
 
@@ -2084,7 +2244,7 @@ pcnet32_close(struct net_device *dev)
     spin_lock_irqsave(&lp->lock, flags);
 
     /* free all allocated skbuffs */
-    for (i = 0; i < RX_RING_SIZE; i++) {
+    for (i = 0; i < lp->rx_ring_size; i++) {
        lp->rx_ring[i].status = 0;
        wmb();          /* Make sure adapter sees owner change */
        if (lp->rx_skbuff[i]) {
@@ -2096,7 +2256,7 @@ pcnet32_close(struct net_device *dev)
        lp->rx_dma_addr[i] = 0;
     }
 
-    for (i = 0; i < TX_RING_SIZE; i++) {
+    for (i = 0; i < lp->tx_ring_size; i++) {
        lp->tx_ring[i].status = 0;      /* CPU owns buffer */
        wmb();          /* Make sure adapter sees owner change */
        if (lp->tx_skbuff[i]) {
@@ -2265,6 +2425,7 @@ static void __devexit pcnet32_remove_one(struct pci_dev *pdev)
        struct pcnet32_private *lp = dev->priv;
 
        unregister_netdev(dev);
+       pcnet32_free_ring(dev);
        release_region(dev->base_addr, PCNET32_TOTAL_SIZE);
        pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
        free_netdev(dev);
@@ -2340,6 +2501,7 @@ static void __exit pcnet32_cleanup_module(void)
        struct pcnet32_private *lp = pcnet32_dev->priv;
        next_dev = lp->next;
        unregister_netdev(pcnet32_dev);
+       pcnet32_free_ring(pcnet32_dev);
        release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE);
        pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
        free_netdev(pcnet32_dev);
index 14f4de1a8180289db3f05ca5395c3a93d6df0296..c782a6329805713493f930681176d3e8a32d8c47 100644 (file)
@@ -12,14 +12,6 @@ config PHYLIB
          devices.  This option provides infrastructure for
          managing PHY devices.
 
-config PHYCONTROL
-       bool "  Support for automatically handling PHY state changes"
-       depends on PHYLIB
-       help
-         Adds code to perform all the work for keeping PHY link
-         state (speed/duplex/etc) up-to-date.  Also handles
-         interrupts.
-
 comment "MII PHY device drivers"
        depends on PHYLIB
 
index d9e11f93bf3a34c54560dc724d56ce71b85d8141..9209da9dde0da9738d662e7a8167ae2136f2d831 100644 (file)
@@ -242,10 +242,6 @@ EXPORT_SYMBOL(phy_sanitize_settings);
  *   choose the next best ones from the ones selected, so we don't
  *   care if ethtool tries to give us bad values
  *
- * A note about the PHYCONTROL Layer.  If you turn off
- * CONFIG_PHYCONTROL, you will need to read the PHY status
- * registers after this function completes, and update your
- * controller manually.
  */
 int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
 {
@@ -380,7 +376,6 @@ int phy_start_aneg(struct phy_device *phydev)
 
        err = phydev->drv->config_aneg(phydev);
 
-#ifdef CONFIG_PHYCONTROL
        if (err < 0)
                goto out_unlock;
 
@@ -395,14 +390,12 @@ int phy_start_aneg(struct phy_device *phydev)
        }
 
 out_unlock:
-#endif
        spin_unlock(&phydev->lock);
        return err;
 }
 EXPORT_SYMBOL(phy_start_aneg);
 
 
-#ifdef CONFIG_PHYCONTROL
 static void phy_change(void *data);
 static void phy_timer(unsigned long data);
 
@@ -868,4 +861,3 @@ static void phy_timer(unsigned long data)
        mod_timer(&phydev->phy_timer, jiffies + PHY_STATE_TIME * HZ);
 }
 
-#endif /* CONFIG_PHYCONTROL */
index 33f7bdb5857c8d97cef75cde5fdb69de33830771..6da1aa0706a1441ba227829ec529bd98089cf0a9 100644 (file)
@@ -101,7 +101,6 @@ struct phy_device * get_phy_device(struct mii_bus *bus, int addr)
        return dev;
 }
 
-#ifdef CONFIG_PHYCONTROL
 /* phy_prepare_link:
  *
  * description: Tells the PHY infrastructure to handle the
@@ -160,8 +159,6 @@ void phy_disconnect(struct phy_device *phydev)
 }
 EXPORT_SYMBOL(phy_disconnect);
 
-#endif /* CONFIG_PHYCONTROL */
-
 /* phy_attach:
  *
  *   description: Called by drivers to attach to a particular PHY
index afb3f186b8843b96b4ad0214c18e40515a11b091..159b56a56ef49112fc5e58083c76a3066eb76958 100644 (file)
@@ -1027,6 +1027,7 @@ static struct ethtool_ops rtl8169_ethtool_ops = {
        .get_strings            = rtl8169_get_strings,
        .get_stats_count        = rtl8169_get_stats_count,
        .get_ethtool_stats      = rtl8169_get_ethtool_stats,
+       .get_perm_addr          = ethtool_op_get_perm_addr,
 };
 
 static void rtl8169_write_gmii_reg_bit(void __iomem *ioaddr, int reg, int bitnum,
@@ -1511,6 +1512,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* Get MAC address.  FIXME: read EEPROM */
        for (i = 0; i < MAC_ADDR_LEN; i++)
                dev->dev_addr[i] = RTL_R8(MAC0 + i);
+       memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 
        dev->open = rtl8169_open;
        dev->hard_start_xmit = rtl8169_start_xmit;
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
new file mode 100644 (file)
index 0000000..12cde06
--- /dev/null
@@ -0,0 +1,574 @@
+/*
+ * rionet - Ethernet driver over RapidIO messaging services
+ *
+ * Copyright 2005 MontaVista Software, Inc.
+ * Matt Porter <mporter@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/rio_ids.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/crc32.h>
+#include <linux/ethtool.h>
+
+#define DRV_NAME        "rionet"
+#define DRV_VERSION     "0.2"
+#define DRV_AUTHOR      "Matt Porter <mporter@kernel.crashing.org>"
+#define DRV_DESC        "Ethernet over RapidIO"
+
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_LICENSE("GPL");
+
+#define RIONET_DEFAULT_MSGLEVEL \
+                       (NETIF_MSG_DRV          | \
+                        NETIF_MSG_LINK         | \
+                        NETIF_MSG_RX_ERR       | \
+                        NETIF_MSG_TX_ERR)
+
+#define RIONET_DOORBELL_JOIN   0x1000
+#define RIONET_DOORBELL_LEAVE  0x1001
+
+#define RIONET_MAILBOX         0
+
+#define RIONET_TX_RING_SIZE    CONFIG_RIONET_TX_SIZE
+#define RIONET_RX_RING_SIZE    CONFIG_RIONET_RX_SIZE
+
+static LIST_HEAD(rionet_peers);
+
+struct rionet_private {
+       struct rio_mport *mport;
+       struct sk_buff *rx_skb[RIONET_RX_RING_SIZE];
+       struct sk_buff *tx_skb[RIONET_TX_RING_SIZE];
+       struct net_device_stats stats;
+       int rx_slot;
+       int tx_slot;
+       int tx_cnt;
+       int ack_slot;
+       spinlock_t lock;
+       spinlock_t tx_lock;
+       u32 msg_enable;
+};
+
+struct rionet_peer {
+       struct list_head node;
+       struct rio_dev *rdev;
+       struct resource *res;
+};
+
+static int rionet_check = 0;
+static int rionet_capable = 1;
+
+/*
+ * This is a fast lookup table for translating TX
+ * Ethernet packets into a destination RIO device. It
+ * could be made into a hash table to save memory depending
+ * on system trade-offs.
+ */
+static struct rio_dev *rionet_active[RIO_MAX_ROUTE_ENTRIES];
+
+#define is_rionet_capable(pef, src_ops, dst_ops)               \
+                       ((pef & RIO_PEF_INB_MBOX) &&            \
+                        (pef & RIO_PEF_INB_DOORBELL) &&        \
+                        (src_ops & RIO_SRC_OPS_DOORBELL) &&    \
+                        (dst_ops & RIO_DST_OPS_DOORBELL))
+#define dev_rionet_capable(dev) \
+       is_rionet_capable(dev->pef, dev->src_ops, dev->dst_ops)
+
+#define RIONET_MAC_MATCH(x)    (*(u32 *)x == 0x00010001)
+#define RIONET_GET_DESTID(x)   (*(u16 *)(x + 4))
+
+static struct net_device_stats *rionet_stats(struct net_device *ndev)
+{
+       struct rionet_private *rnet = ndev->priv;
+       return &rnet->stats;
+}
+
+static int rionet_rx_clean(struct net_device *ndev)
+{
+       int i;
+       int error = 0;
+       struct rionet_private *rnet = ndev->priv;
+       void *data;
+
+       i = rnet->rx_slot;
+
+       do {
+               if (!rnet->rx_skb[i])
+                       continue;
+
+               if (!(data = rio_get_inb_message(rnet->mport, RIONET_MAILBOX)))
+                       break;
+
+               rnet->rx_skb[i]->data = data;
+               skb_put(rnet->rx_skb[i], RIO_MAX_MSG_SIZE);
+               rnet->rx_skb[i]->dev = ndev;
+               rnet->rx_skb[i]->protocol =
+                   eth_type_trans(rnet->rx_skb[i], ndev);
+               error = netif_rx(rnet->rx_skb[i]);
+
+               if (error == NET_RX_DROP) {
+                       rnet->stats.rx_dropped++;
+               } else if (error == NET_RX_BAD) {
+                       if (netif_msg_rx_err(rnet))
+                               printk(KERN_WARNING "%s: bad rx packet\n",
+                                      DRV_NAME);
+                       rnet->stats.rx_errors++;
+               } else {
+                       rnet->stats.rx_packets++;
+                       rnet->stats.rx_bytes += RIO_MAX_MSG_SIZE;
+               }
+
+       } while ((i = (i + 1) % RIONET_RX_RING_SIZE) != rnet->rx_slot);
+
+       return i;
+}
+
+static void rionet_rx_fill(struct net_device *ndev, int end)
+{
+       int i;
+       struct rionet_private *rnet = ndev->priv;
+
+       i = rnet->rx_slot;
+       do {
+               rnet->rx_skb[i] = dev_alloc_skb(RIO_MAX_MSG_SIZE);
+
+               if (!rnet->rx_skb[i])
+                       break;
+
+               rio_add_inb_buffer(rnet->mport, RIONET_MAILBOX,
+                                  rnet->rx_skb[i]->data);
+       } while ((i = (i + 1) % RIONET_RX_RING_SIZE) != end);
+
+       rnet->rx_slot = i;
+}
+
+static int rionet_queue_tx_msg(struct sk_buff *skb, struct net_device *ndev,
+                              struct rio_dev *rdev)
+{
+       struct rionet_private *rnet = ndev->priv;
+
+       rio_add_outb_message(rnet->mport, rdev, 0, skb->data, skb->len);
+       rnet->tx_skb[rnet->tx_slot] = skb;
+
+       rnet->stats.tx_packets++;
+       rnet->stats.tx_bytes += skb->len;
+
+       if (++rnet->tx_cnt == RIONET_TX_RING_SIZE)
+               netif_stop_queue(ndev);
+
+       ++rnet->tx_slot;
+       rnet->tx_slot &= (RIONET_TX_RING_SIZE - 1);
+
+       if (netif_msg_tx_queued(rnet))
+               printk(KERN_INFO "%s: queued skb %8.8x len %8.8x\n", DRV_NAME,
+                      (u32) skb, skb->len);
+
+       return 0;
+}
+
+static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+       int i;
+       struct rionet_private *rnet = ndev->priv;
+       struct ethhdr *eth = (struct ethhdr *)skb->data;
+       u16 destid;
+       unsigned long flags;
+
+       local_irq_save(flags);
+       if (!spin_trylock(&rnet->tx_lock)) {
+               local_irq_restore(flags);
+               return NETDEV_TX_LOCKED;
+       }
+
+       if ((rnet->tx_cnt + 1) > RIONET_TX_RING_SIZE) {
+               netif_stop_queue(ndev);
+               spin_unlock_irqrestore(&rnet->tx_lock, flags);
+               printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
+                      ndev->name);
+               return NETDEV_TX_BUSY;
+       }
+
+       if (eth->h_dest[0] & 0x01) {
+               for (i = 0; i < RIO_MAX_ROUTE_ENTRIES; i++)
+                       if (rionet_active[i])
+                               rionet_queue_tx_msg(skb, ndev,
+                                                   rionet_active[i]);
+       } else if (RIONET_MAC_MATCH(eth->h_dest)) {
+               destid = RIONET_GET_DESTID(eth->h_dest);
+               if (rionet_active[destid])
+                       rionet_queue_tx_msg(skb, ndev, rionet_active[destid]);
+       }
+
+       spin_unlock_irqrestore(&rnet->tx_lock, flags);
+
+       return 0;
+}
+
+static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u16 tid,
+                              u16 info)
+{
+       struct net_device *ndev = dev_id;
+       struct rionet_private *rnet = ndev->priv;
+       struct rionet_peer *peer;
+
+       if (netif_msg_intr(rnet))
+               printk(KERN_INFO "%s: doorbell sid %4.4x tid %4.4x info %4.4x",
+                      DRV_NAME, sid, tid, info);
+       if (info == RIONET_DOORBELL_JOIN) {
+               if (!rionet_active[sid]) {
+                       list_for_each_entry(peer, &rionet_peers, node) {
+                               if (peer->rdev->destid == sid)
+                                       rionet_active[sid] = peer->rdev;
+                       }
+                       rio_mport_send_doorbell(mport, sid,
+                                               RIONET_DOORBELL_JOIN);
+               }
+       } else if (info == RIONET_DOORBELL_LEAVE) {
+               rionet_active[sid] = NULL;
+       } else {
+               if (netif_msg_intr(rnet))
+                       printk(KERN_WARNING "%s: unhandled doorbell\n",
+                              DRV_NAME);
+       }
+}
+
+static void rionet_inb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot)
+{
+       int n;
+       struct net_device *ndev = dev_id;
+       struct rionet_private *rnet = (struct rionet_private *)ndev->priv;
+
+       if (netif_msg_intr(rnet))
+               printk(KERN_INFO "%s: inbound message event, mbox %d slot %d\n",
+                      DRV_NAME, mbox, slot);
+
+       spin_lock(&rnet->lock);
+       if ((n = rionet_rx_clean(ndev)) != rnet->rx_slot)
+               rionet_rx_fill(ndev, n);
+       spin_unlock(&rnet->lock);
+}
+
+static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot)
+{
+       struct net_device *ndev = dev_id;
+       struct rionet_private *rnet = ndev->priv;
+
+       spin_lock(&rnet->lock);
+
+       if (netif_msg_intr(rnet))
+               printk(KERN_INFO
+                      "%s: outbound message event, mbox %d slot %d\n",
+                      DRV_NAME, mbox, slot);
+
+       while (rnet->tx_cnt && (rnet->ack_slot != slot)) {
+               /* dma unmap single */
+               dev_kfree_skb_irq(rnet->tx_skb[rnet->ack_slot]);
+               rnet->tx_skb[rnet->ack_slot] = NULL;
+               ++rnet->ack_slot;
+               rnet->ack_slot &= (RIONET_TX_RING_SIZE - 1);
+               rnet->tx_cnt--;
+       }
+
+       if (rnet->tx_cnt < RIONET_TX_RING_SIZE)
+               netif_wake_queue(ndev);
+
+       spin_unlock(&rnet->lock);
+}
+
+static int rionet_open(struct net_device *ndev)
+{
+       int i, rc = 0;
+       struct rionet_peer *peer, *tmp;
+       u32 pwdcsr;
+       struct rionet_private *rnet = ndev->priv;
+
+       if (netif_msg_ifup(rnet))
+               printk(KERN_INFO "%s: open\n", DRV_NAME);
+
+       if ((rc = rio_request_inb_dbell(rnet->mport,
+                                       (void *)ndev,
+                                       RIONET_DOORBELL_JOIN,
+                                       RIONET_DOORBELL_LEAVE,
+                                       rionet_dbell_event)) < 0)
+               goto out;
+
+       if ((rc = rio_request_inb_mbox(rnet->mport,
+                                      (void *)ndev,
+                                      RIONET_MAILBOX,
+                                      RIONET_RX_RING_SIZE,
+                                      rionet_inb_msg_event)) < 0)
+               goto out;
+
+       if ((rc = rio_request_outb_mbox(rnet->mport,
+                                       (void *)ndev,
+                                       RIONET_MAILBOX,
+                                       RIONET_TX_RING_SIZE,
+                                       rionet_outb_msg_event)) < 0)
+               goto out;
+
+       /* Initialize inbound message ring */
+       for (i = 0; i < RIONET_RX_RING_SIZE; i++)
+               rnet->rx_skb[i] = NULL;
+       rnet->rx_slot = 0;
+       rionet_rx_fill(ndev, 0);
+
+       rnet->tx_slot = 0;
+       rnet->tx_cnt = 0;
+       rnet->ack_slot = 0;
+
+       netif_carrier_on(ndev);
+       netif_start_queue(ndev);
+
+       list_for_each_entry_safe(peer, tmp, &rionet_peers, node) {
+               if (!(peer->res = rio_request_outb_dbell(peer->rdev,
+                                                        RIONET_DOORBELL_JOIN,
+                                                        RIONET_DOORBELL_LEAVE)))
+               {
+                       printk(KERN_ERR "%s: error requesting doorbells\n",
+                              DRV_NAME);
+                       continue;
+               }
+
+               /*
+                * If the device has initialized its inbound doorbells,
+                * send a join message
+                */
+               rio_read_config_32(peer->rdev, RIO_WRITE_PORT_CSR, &pwdcsr);
+               if (pwdcsr & RIO_DOORBELL_AVAIL)
+                       rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
+       }
+
+      out:
+       return rc;
+}
+
+static int rionet_close(struct net_device *ndev)
+{
+       struct rionet_private *rnet = (struct rionet_private *)ndev->priv;
+       struct rionet_peer *peer, *tmp;
+       int i;
+
+       if (netif_msg_ifup(rnet))
+               printk(KERN_INFO "%s: close\n", DRV_NAME);
+
+       netif_stop_queue(ndev);
+       netif_carrier_off(ndev);
+
+       for (i = 0; i < RIONET_RX_RING_SIZE; i++)
+               if (rnet->rx_skb[i])
+                       kfree_skb(rnet->rx_skb[i]);
+
+       list_for_each_entry_safe(peer, tmp, &rionet_peers, node) {
+               if (rionet_active[peer->rdev->destid]) {
+                       rio_send_doorbell(peer->rdev, RIONET_DOORBELL_LEAVE);
+                       rionet_active[peer->rdev->destid] = NULL;
+               }
+               rio_release_outb_dbell(peer->rdev, peer->res);
+       }
+
+       rio_release_inb_dbell(rnet->mport, RIONET_DOORBELL_JOIN,
+                             RIONET_DOORBELL_LEAVE);
+       rio_release_inb_mbox(rnet->mport, RIONET_MAILBOX);
+       rio_release_outb_mbox(rnet->mport, RIONET_MAILBOX);
+
+       return 0;
+}
+
+static void rionet_remove(struct rio_dev *rdev)
+{
+       struct net_device *ndev = NULL;
+       struct rionet_peer *peer, *tmp;
+
+       unregister_netdev(ndev);
+       kfree(ndev);
+
+       list_for_each_entry_safe(peer, tmp, &rionet_peers, node) {
+               list_del(&peer->node);
+               kfree(peer);
+       }
+}
+
+static void rionet_get_drvinfo(struct net_device *ndev,
+                              struct ethtool_drvinfo *info)
+{
+       struct rionet_private *rnet = ndev->priv;
+
+       strcpy(info->driver, DRV_NAME);
+       strcpy(info->version, DRV_VERSION);
+       strcpy(info->fw_version, "n/a");
+       strcpy(info->bus_info, rnet->mport->name);
+}
+
+static u32 rionet_get_msglevel(struct net_device *ndev)
+{
+       struct rionet_private *rnet = ndev->priv;
+
+       return rnet->msg_enable;
+}
+
+static void rionet_set_msglevel(struct net_device *ndev, u32 value)
+{
+       struct rionet_private *rnet = ndev->priv;
+
+       rnet->msg_enable = value;
+}
+
+static struct ethtool_ops rionet_ethtool_ops = {
+       .get_drvinfo = rionet_get_drvinfo,
+       .get_msglevel = rionet_get_msglevel,
+       .set_msglevel = rionet_set_msglevel,
+       .get_link = ethtool_op_get_link,
+};
+
+static int rionet_setup_netdev(struct rio_mport *mport)
+{
+       int rc = 0;
+       struct net_device *ndev = NULL;
+       struct rionet_private *rnet;
+       u16 device_id;
+
+       /* Allocate our net_device structure */
+       ndev = alloc_etherdev(sizeof(struct rionet_private));
+       if (ndev == NULL) {
+               printk(KERN_INFO "%s: could not allocate ethernet device.\n",
+                      DRV_NAME);
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       /* Set up private area */
+       rnet = (struct rionet_private *)ndev->priv;
+       rnet->mport = mport;
+
+       /* Set the default MAC address */
+       device_id = rio_local_get_device_id(mport);
+       ndev->dev_addr[0] = 0x00;
+       ndev->dev_addr[1] = 0x01;
+       ndev->dev_addr[2] = 0x00;
+       ndev->dev_addr[3] = 0x01;
+       ndev->dev_addr[4] = device_id >> 8;
+       ndev->dev_addr[5] = device_id & 0xff;
+
+       /* Fill in the driver function table */
+       ndev->open = &rionet_open;
+       ndev->hard_start_xmit = &rionet_start_xmit;
+       ndev->stop = &rionet_close;
+       ndev->get_stats = &rionet_stats;
+       ndev->mtu = RIO_MAX_MSG_SIZE - 14;
+       ndev->features = NETIF_F_LLTX;
+       SET_ETHTOOL_OPS(ndev, &rionet_ethtool_ops);
+
+       SET_MODULE_OWNER(ndev);
+
+       spin_lock_init(&rnet->lock);
+       spin_lock_init(&rnet->tx_lock);
+
+       rnet->msg_enable = RIONET_DEFAULT_MSGLEVEL;
+
+       rc = register_netdev(ndev);
+       if (rc != 0)
+               goto out;
+
+       printk("%s: %s %s Version %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
+              ndev->name,
+              DRV_NAME,
+              DRV_DESC,
+              DRV_VERSION,
+              ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
+              ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
+
+      out:
+       return rc;
+}
+
+/*
+ * XXX Make multi-net safe
+ */
+static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
+{
+       int rc = -ENODEV;
+       u32 lpef, lsrc_ops, ldst_ops;
+       struct rionet_peer *peer;
+
+       /* If local device is not rionet capable, give up quickly */
+       if (!rionet_capable)
+               goto out;
+
+       /*
+        * First time through, make sure local device is rionet
+        * capable, set up the netdev, and set flags so this is skipped
+        * on later probes
+        */
+       if (!rionet_check) {
+               rio_local_read_config_32(rdev->net->hport, RIO_PEF_CAR, &lpef);
+               rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR,
+                                        &lsrc_ops);
+               rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR,
+                                        &ldst_ops);
+               if (!is_rionet_capable(lpef, lsrc_ops, ldst_ops)) {
+                       printk(KERN_ERR
+                              "%s: local device is not network capable\n",
+                              DRV_NAME);
+                       rionet_check = 1;
+                       rionet_capable = 0;
+                       goto out;
+               }
+
+               rc = rionet_setup_netdev(rdev->net->hport);
+               rionet_check = 1;
+       }
+
+       /*
+        * If the remote device has mailbox/doorbell capabilities,
+        * add it to the peer list.
+        */
+       if (dev_rionet_capable(rdev)) {
+               if (!(peer = kmalloc(sizeof(struct rionet_peer), GFP_KERNEL))) {
+                       rc = -ENOMEM;
+                       goto out;
+               }
+               peer->rdev = rdev;
+               list_add_tail(&peer->node, &rionet_peers);
+       }
+
+      out:
+       return rc;
+}
+
+static struct rio_device_id rionet_id_table[] = {
+       {RIO_DEVICE(RIO_ANY_ID, RIO_ANY_ID)}
+};
+
+static struct rio_driver rionet_driver = {
+       .name = "rionet",
+       .id_table = rionet_id_table,
+       .probe = rionet_probe,
+       .remove = rionet_remove,
+};
+
+static int __init rionet_init(void)
+{
+       return rio_register_driver(&rionet_driver);
+}
+
+static void __exit rionet_exit(void)
+{
+       rio_unregister_driver(&rionet_driver);
+}
+
+module_init(rionet_init);
+module_exit(rionet_exit);
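
rionet fabricates a synthetic MAC of the form 00:01:00:01:<destid hi>:<destid lo> in rionet_setup_netdev(); on transmit, RIONET_MAC_MATCH() recognizes that prefix and RIONET_GET_DESTID() recovers the 16-bit RapidIO destination ID used to index rionet_active[]. (The macros read the raw bytes as native-endian words, which lines up with the big-endian platforms RapidIO hardware typically sits on.) A byte-level sketch of the same mapping, for illustration only:

    /* Same layout as rionet_setup_netdev(): 00:01:00:01:<destid hi>:<destid lo>. */
    static inline void rionet_fill_mac(u8 *mac, u16 device_id)
    {
            mac[0] = 0x00; mac[1] = 0x01;
            mac[2] = 0x00; mac[3] = 0x01;
            mac[4] = device_id >> 8;
            mac[5] = device_id & 0xff;
    }

    /* Inverse mapping; matches RIONET_GET_DESTID() on big-endian hosts. */
    static inline u16 rionet_mac_to_destid(const u8 *mac)
    {
            return (mac[4] << 8) | mac[5];
    }
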
index 7cefe5507b9e128bef1f621aeb29837105c254cf..00179bc3437fccea29fa780d9df5da4233d1f886 100644 (file)
@@ -814,6 +814,17 @@ typedef struct _XENA_dev_config {
        u64 rxgxs_ber_0;        /* CHANGED */
        u64 rxgxs_ber_1;        /* CHANGED */
 
+       u64 spi_control;
+#define SPI_CONTROL_KEY(key)           vBIT(key,0,4)
+#define SPI_CONTROL_BYTECNT(cnt)       vBIT(cnt,29,3)
+#define SPI_CONTROL_CMD(cmd)           vBIT(cmd,32,8)
+#define SPI_CONTROL_ADDR(addr)         vBIT(addr,40,24)
+#define SPI_CONTROL_SEL1               BIT(4)
+#define SPI_CONTROL_REQ                        BIT(7)
+#define SPI_CONTROL_NACK               BIT(5)
+#define SPI_CONTROL_DONE               BIT(6)
+       u64 spi_data;
+#define SPI_DATA_WRITE(data,len)       vBIT(data,0,len)
 } XENA_dev_config_t;
 
 #define XENA_REG_SPACE sizeof(XENA_dev_config_t)
index dd451e099a4c46bd650dbc1696c40f7fae3468f6..d303d162974f9caedf905bd603c583a8b6c538b9 100644 (file)
 #include "s2io.h"
 #include "s2io-regs.h"
 
+#define DRV_VERSION "Version 2.0.9.1"
+
 /* S2io Driver name & version. */
 static char s2io_driver_name[] = "Neterion";
-static char s2io_driver_version[] = "Version 2.0.8.1";
+static char s2io_driver_version[] = DRV_VERSION;
 
 static inline int RXD_IS_UP2DT(RxD_t *rxdp)
 {
@@ -307,6 +309,8 @@ static unsigned int indicate_max_pkts;
 #endif
 /* Frequency of Rx desc syncs expressed as power of 2 */
 static unsigned int rxsync_frequency = 3;
+/* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
+static unsigned int intr_type = 0;
 
 /*
  * S2IO device table.
@@ -1396,8 +1400,13 @@ static int init_nic(struct s2io_nic *nic)
                writeq(val64, &bar0->rti_data1_mem);
 
                val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
-                   RTI_DATA2_MEM_RX_UFC_B(0x2) |
-                   RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
+                   RTI_DATA2_MEM_RX_UFC_B(0x2) ;
+               if (nic->intr_type == MSI_X)
+                   val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
+                               RTI_DATA2_MEM_RX_UFC_D(0x40));
+               else
+                   val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
+                               RTI_DATA2_MEM_RX_UFC_D(0x80));
                writeq(val64, &bar0->rti_data2_mem);
 
                for (i = 0; i < config->rx_ring_num; i++) {
@@ -1507,17 +1516,15 @@ static int init_nic(struct s2io_nic *nic)
 #define LINK_UP_DOWN_INTERRUPT         1
 #define MAC_RMAC_ERR_TIMER             2
 
-#if defined(CONFIG_MSI_MODE) || defined(CONFIG_MSIX_MODE)
-#define s2io_link_fault_indication(x) MAC_RMAC_ERR_TIMER
-#else
 int s2io_link_fault_indication(nic_t *nic)
 {
+       if (nic->intr_type != INTA)
+               return MAC_RMAC_ERR_TIMER;
        if (nic->device_type == XFRAME_II_DEVICE)
                return LINK_UP_DOWN_INTERRUPT;
        else
                return MAC_RMAC_ERR_TIMER;
 }
-#endif
 
 /**
  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
@@ -1941,11 +1948,14 @@ static int start_nic(struct s2io_nic *nic)
        }
 
        /*  Enable select interrupts */
-       interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
-       interruptible |= TX_PIC_INTR | RX_PIC_INTR;
-       interruptible |= TX_MAC_INTR | RX_MAC_INTR;
-
-       en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
+       if (nic->intr_type != INTA)
+               en_dis_able_nic_intrs(nic, ENA_ALL_INTRS, DISABLE_INTRS);
+       else {
+               interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
+               interruptible |= TX_PIC_INTR | RX_PIC_INTR;
+               interruptible |= TX_MAC_INTR | RX_MAC_INTR;
+               en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
+       }
 
        /*
         * With some switches, link might be already up at this point.
@@ -2633,11 +2643,11 @@ static void tx_intr_handler(fifo_info_t *fifo_data)
                        err = txdlp->Control_1 & TXD_T_CODE;
                        if ((err >> 48) == 0xA) {
                                DBG_PRINT(TX_DBG, "TxD returned due \
-                                               to loss of link\n");
+to loss of link\n");
                        }
                        else {
                                DBG_PRINT(ERR_DBG, "***TxD error \
-                                               %llx\n", err);
+%llx\n", err);
                        }
                }
 
@@ -2854,6 +2864,9 @@ void s2io_reset(nic_t * sp)
        /* Set swapper to enable I/O register access */
        s2io_set_swapper(sp);
 
+       /* Restore the MSIX table entries from local variables */
+       restore_xmsi_data(sp);
+
        /* Clear certain PCI/PCI-X fields after reset */
        if (sp->device_type == XFRAME_II_DEVICE) {
                /* Clear parity err detect bit */
@@ -2983,8 +2996,9 @@ int s2io_set_swapper(nic_t * sp)
                 SWAPPER_CTRL_RXD_W_FE |
                 SWAPPER_CTRL_RXF_W_FE |
                 SWAPPER_CTRL_XMSI_FE |
-                SWAPPER_CTRL_XMSI_SE |
                 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
+       if (sp->intr_type == INTA)
+               val64 |= SWAPPER_CTRL_XMSI_SE;
        writeq(val64, &bar0->swapper_ctrl);
 #else
        /*
@@ -3005,8 +3019,9 @@ int s2io_set_swapper(nic_t * sp)
                 SWAPPER_CTRL_RXD_W_SE |
                 SWAPPER_CTRL_RXF_W_FE |
                 SWAPPER_CTRL_XMSI_FE |
-                SWAPPER_CTRL_XMSI_SE |
                 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
+       if (sp->intr_type == INTA)
+               val64 |= SWAPPER_CTRL_XMSI_SE;
        writeq(val64, &bar0->swapper_ctrl);
 #endif
        val64 = readq(&bar0->swapper_ctrl);
@@ -3028,6 +3043,201 @@ int s2io_set_swapper(nic_t * sp)
        return SUCCESS;
 }
 
+int wait_for_msix_trans(nic_t *nic, int i)
+{
+       XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
+       u64 val64;
+       int ret = 0, cnt = 0;
+
+       do {
+               val64 = readq(&bar0->xmsi_access);
+               if (!(val64 & BIT(15)))
+                       break;
+               mdelay(1);
+               cnt++;
+       } while(cnt < 5);
+       if (cnt == 5) {
+               DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
+               ret = 1;
+       }
+
+       return ret;
+}
+
+void restore_xmsi_data(nic_t *nic)
+{
+       XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
+       u64 val64;
+       int i;
+
+       for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
+               writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
+               writeq(nic->msix_info[i].data, &bar0->xmsi_data);
+               val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
+               writeq(val64, &bar0->xmsi_access);
+               if (wait_for_msix_trans(nic, i)) {
+                       DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
+                       continue;
+               }
+       }
+}
+
+void store_xmsi_data(nic_t *nic)
+{
+       XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
+       u64 val64, addr, data;
+       int i;
+
+       /* Store and display */
+       for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
+               val64 = (BIT(15) | vBIT(i, 26, 6));
+               writeq(val64, &bar0->xmsi_access);
+               if (wait_for_msix_trans(nic, i)) {
+                       DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
+                       continue;
+               }
+               addr = readq(&bar0->xmsi_address);
+               data = readq(&bar0->xmsi_data);
+               if (addr && data) {
+                       nic->msix_info[i].addr = addr;
+                       nic->msix_info[i].data = data;
+               }
+       }
+}
+
+int s2io_enable_msi(nic_t *nic)
+{
+       XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
+       u16 msi_ctrl, msg_val;
+       struct config_param *config = &nic->config;
+       struct net_device *dev = nic->dev;
+       u64 val64, tx_mat, rx_mat;
+       int i, err;
+
+       val64 = readq(&bar0->pic_control);
+       val64 &= ~BIT(1);
+       writeq(val64, &bar0->pic_control);
+
+       err = pci_enable_msi(nic->pdev);
+       if (err) {
+               DBG_PRINT(ERR_DBG, "%s: enabling MSI failed\n",
+                         nic->dev->name);
+               return err;
+       }
+
+       /*
+        * Enable MSI and use MSI-1 instead of the standard MSI-0
+        * for interrupt handling.
+        */
+       pci_read_config_word(nic->pdev, 0x4c, &msg_val);
+       msg_val ^= 0x1;
+       pci_write_config_word(nic->pdev, 0x4c, msg_val);
+       pci_read_config_word(nic->pdev, 0x4c, &msg_val);
+
+       pci_read_config_word(nic->pdev, 0x42, &msi_ctrl);
+       msi_ctrl |= 0x10;
+       pci_write_config_word(nic->pdev, 0x42, msi_ctrl);
+
+       /* program MSI-1 into all usable Tx_Mat and Rx_Mat fields */
+       tx_mat = readq(&bar0->tx_mat0_n[0]);
+       for (i=0; i<config->tx_fifo_num; i++) {
+               tx_mat |= TX_MAT_SET(i, 1);
+       }
+       writeq(tx_mat, &bar0->tx_mat0_n[0]);
+
+       rx_mat = readq(&bar0->rx_mat);
+       for (i=0; i<config->rx_ring_num; i++) {
+               rx_mat |= RX_MAT_SET(i, 1);
+       }
+       writeq(rx_mat, &bar0->rx_mat);
+
+       dev->irq = nic->pdev->irq;
+       return 0;
+}
+
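For reference, s2io_enable_msi() above follows the stock 2.6 pci_enable_msi() sequence before the device-specific config-space and Tx_Mat/Rx_Mat programming; a minimal sketch of that generic sequence, using a hypothetical helper that is not part of this patch:

/* Minimal sketch only -- hypothetical helper, not part of s2io.c */
static int example_enable_msi(struct pci_dev *pdev, struct net_device *dev,
			      irqreturn_t (*handler)(int, void *, struct pt_regs *))
{
	int err;

	err = pci_enable_msi(pdev);		/* switch the function from INTA to MSI */
	if (err)
		return err;			/* caller keeps using INTA */

	err = request_irq(pdev->irq, handler, 0, dev->name, dev);
	if (err) {
		pci_disable_msi(pdev);		/* undo on registration failure */
		return err;
	}
	dev->irq = pdev->irq;			/* MSI assigns a new vector number */
	return 0;
}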
+int s2io_enable_msi_x(nic_t *nic)
+{
+       XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
+       u64 tx_mat, rx_mat;
+       u16 msi_control; /* Temp variable */
+       int ret, i, j, msix_indx = 1;
+
+       nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry),
+                              GFP_KERNEL);
+       if (nic->entries == NULL) {
+               DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
+               return -ENOMEM;
+       }
+       memset(nic->entries, 0, MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
+
+       nic->s2io_entries =
+               kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry),
+                                  GFP_KERNEL);
+       if (nic->s2io_entries == NULL) {
+               DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
+               kfree(nic->entries);
+               return -ENOMEM;
+       }
+       memset(nic->s2io_entries, 0,
+              MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
+
+       for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
+               nic->entries[i].entry = i;
+               nic->s2io_entries[i].entry = i;
+               nic->s2io_entries[i].arg = NULL;
+               nic->s2io_entries[i].in_use = 0;
+       }
+
+       tx_mat = readq(&bar0->tx_mat0_n[0]);
+       for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
+               tx_mat |= TX_MAT_SET(i, msix_indx);
+               nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
+               nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
+               nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
+       }
+       writeq(tx_mat, &bar0->tx_mat0_n[0]);
+
+       if (!nic->config.bimodal) {
+               rx_mat = readq(&bar0->rx_mat);
+               for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
+                       rx_mat |= RX_MAT_SET(j, msix_indx);
+                       nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
+                       nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
+                       nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
+               }
+               writeq(rx_mat, &bar0->rx_mat);
+       } else {
+               tx_mat = readq(&bar0->tx_mat0_n[7]);
+               for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
+                       tx_mat |= TX_MAT_SET(j, msix_indx);
+                       nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
+                       nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
+                       nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
+               }
+               writeq(tx_mat, &bar0->tx_mat0_n[7]);
+       }
+
+       ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
+       if (ret) {
+               DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
+               kfree(nic->entries);
+               kfree(nic->s2io_entries);
+               nic->entries = NULL;
+               nic->s2io_entries = NULL;
+               return -ENOMEM;
+       }
+
+       /*
+        * To enable MSI-X, MSI also needs to be enabled, due to a bug
+        * in the herc NIC. (Temp change, needs to be removed later)
+        */
+       pci_read_config_word(nic->pdev, 0x42, &msi_control);
+       msi_control |= 0x1; /* Enable MSI */
+       pci_write_config_word(nic->pdev, 0x42, msi_control);
+
+       return 0;
+}
+
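Similarly, s2io_enable_msi_x() above is built around the 2.6 pci_enable_msix() contract: fill a struct msix_entry array with the desired entry numbers, then treat a positive return (fewer vectors than requested) or a negative error as a cue to fall back to a simpler interrupt mode. A condensed sketch, with hypothetical names, not part of this patch:

/* Sketch only -- hypothetical helper, assumes the 2.6-era pci_enable_msix() */
static int example_enable_msix(struct pci_dev *pdev,
			       struct msix_entry *entries, int nvec)
{
	int i, err;

	for (i = 0; i < nvec; i++)
		entries[i].entry = i;		/* request vectors 0..nvec-1 */

	err = pci_enable_msix(pdev, entries, nvec);
	if (err > 0)				/* only err vectors available */
		return -EBUSY;			/* caller drops back to MSI or INTA */
	if (err < 0)
		return err;

	/* on success, entries[i].vector is what request_irq() needs */
	return 0;
}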
 /* ********************************************************* *
  * Functions defined below concern the OS part of the driver *
  * ********************************************************* */
@@ -3048,6 +3258,8 @@ int s2io_open(struct net_device *dev)
 {
        nic_t *sp = dev->priv;
        int err = 0;
+       int i;
+       u16 msi_control; /* Temp variable */
 
        /*
         * Make sure you have link off by default every time
@@ -3064,13 +3276,55 @@ int s2io_open(struct net_device *dev)
                goto hw_init_failed;
        }
 
+       /* Store the values of the MSIX table in the nic_t structure */
+       store_xmsi_data(sp);
+
        /* After proper initialization of H/W, register ISR */
-       err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
-                         sp->name, dev);
-       if (err) {
-               DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
-                         dev->name);
-               goto isr_registration_failed;
+       if (sp->intr_type == MSI) {
+               err = request_irq((int) sp->pdev->irq, s2io_msi_handle, 
+                       SA_SHIRQ, sp->name, dev);
+               if (err) {
+                       DBG_PRINT(ERR_DBG, "%s: MSI registration \
+failed\n", dev->name);
+                       goto isr_registration_failed;
+               }
+       }
+       if (sp->intr_type == MSI_X) {
+               for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
+                       if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
+                               sprintf(sp->desc1, "%s:MSI-X-%d-TX",
+                                       dev->name, i);
+                               err = request_irq(sp->entries[i].vector,
+                                         s2io_msix_fifo_handle, 0, sp->desc1,
+                                         sp->s2io_entries[i].arg);
+                               DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc1, 
+                                                       sp->msix_info[i].addr);
+                       } else {
+                               sprintf(sp->desc2, "%s:MSI-X-%d-RX",
+                                       dev->name, i);
+                               err = request_irq(sp->entries[i].vector,
+                                         s2io_msix_ring_handle, 0, sp->desc2,
+                                         sp->s2io_entries[i].arg);
+                               DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc2, 
+                                                       sp->msix_info[i].addr);
+                       }
+                       if (err) {
+                               DBG_PRINT(ERR_DBG, "%s: MSI-X-%d registration \
+failed\n", dev->name, i);
+                               DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
+                               goto isr_registration_failed;
+                       }
+                       sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
+               }
+       }
+       if (sp->intr_type == INTA) {
+               err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
+                               sp->name, dev);
+               if (err) {
+                       DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
+                                 dev->name);
+                       goto isr_registration_failed;
+               }
        }
 
        if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
@@ -3083,11 +3337,37 @@ int s2io_open(struct net_device *dev)
        return 0;
 
 setting_mac_address_failed:
-       free_irq(sp->pdev->irq, dev);
+       if (sp->intr_type != MSI_X)
+               free_irq(sp->pdev->irq, dev);
 isr_registration_failed:
        del_timer_sync(&sp->alarm_timer);
+       if (sp->intr_type == MSI_X) {
+               if (sp->device_type == XFRAME_II_DEVICE) {
+                       for (i=1; (sp->s2io_entries[i].in_use == 
+                               MSIX_REGISTERED_SUCCESS); i++) {
+                               int vector = sp->entries[i].vector;
+                               void *arg = sp->s2io_entries[i].arg;
+
+                               free_irq(vector, arg);
+                       }
+                       pci_disable_msix(sp->pdev);
+
+                       /* Temp */
+                       pci_read_config_word(sp->pdev, 0x42, &msi_control);
+                       msi_control &= 0xFFFE; /* Disable MSI */
+                       pci_write_config_word(sp->pdev, 0x42, msi_control);
+               }
+       }
+       else if (sp->intr_type == MSI)
+               pci_disable_msi(sp->pdev);
        s2io_reset(sp);
 hw_init_failed:
+       if (sp->intr_type == MSI_X) {
+               if (sp->entries)
+                       kfree(sp->entries);
+               if (sp->s2io_entries)
+                       kfree(sp->s2io_entries);
+       }
        return err;
 }
 
@@ -3107,12 +3387,35 @@ hw_init_failed:
 int s2io_close(struct net_device *dev)
 {
        nic_t *sp = dev->priv;
+       int i;
+       u16 msi_control;
+
        flush_scheduled_work();
        netif_stop_queue(dev);
        /* Reset card, kill tasklet and free Tx and Rx buffers. */
        s2io_card_down(sp);
 
-       free_irq(sp->pdev->irq, dev);
+       if (sp->intr_type == MSI_X) {
+               if (sp->device_type == XFRAME_II_DEVICE) {
+                       for (i=1; (sp->s2io_entries[i].in_use == 
+                                       MSIX_REGISTERED_SUCCESS); i++) {
+                               int vector = sp->entries[i].vector;
+                               void *arg = sp->s2io_entries[i].arg;
+
+                               free_irq(vector, arg);
+                       }
+                       pci_read_config_word(sp->pdev, 0x42, &msi_control);
+                       msi_control &= 0xFFFE; /* Disable MSI */
+                       pci_write_config_word(sp->pdev, 0x42, msi_control);
+
+                       pci_disable_msix(sp->pdev);
+               }
+       }
+       else {
+               free_irq(sp->pdev->irq, dev);
+               if (sp->intr_type == MSI)
+                       pci_disable_msi(sp->pdev);
+       }       
        sp->device_close_flag = TRUE;   /* Device is shut down. */
        return 0;
 }
@@ -3278,6 +3581,104 @@ s2io_alarm_handle(unsigned long data)
        mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
 }
 
+static irqreturn_t
+s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs)
+{
+       struct net_device *dev = (struct net_device *) dev_id;
+       nic_t *sp = dev->priv;
+       int i;
+       int ret;
+       mac_info_t *mac_control;
+       struct config_param *config;
+
+       atomic_inc(&sp->isr_cnt);
+       mac_control = &sp->mac_control;
+       config = &sp->config;
+       DBG_PRINT(INTR_DBG, "%s: MSI handler\n", __FUNCTION__);
+
+       /* If Intr is because of Rx Traffic */
+       for (i = 0; i < config->rx_ring_num; i++)
+               rx_intr_handler(&mac_control->rings[i]);
+
+       /* If Intr is because of Tx Traffic */
+       for (i = 0; i < config->tx_fifo_num; i++)
+               tx_intr_handler(&mac_control->fifos[i]);
+
+       /*
+        * If the Rx buffer count is below the panic threshold then
+        * reallocate the buffers from the interrupt handler itself,
+        * else schedule a tasklet to reallocate the buffers.
+        */
+       for (i = 0; i < config->rx_ring_num; i++) {
+               int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
+               int level = rx_buffer_level(sp, rxb_size, i);
+
+               if ((level == PANIC) && (!TASKLET_IN_USE)) {
+                       DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
+                       DBG_PRINT(INTR_DBG, "PANIC levels\n");
+                       if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
+                               DBG_PRINT(ERR_DBG, "%s:Out of memory",
+                                         dev->name);
+                               DBG_PRINT(ERR_DBG, " in ISR!!\n");
+                               clear_bit(0, (&sp->tasklet_status));
+                               atomic_dec(&sp->isr_cnt);
+                               return IRQ_HANDLED;
+                       }
+                       clear_bit(0, (&sp->tasklet_status));
+               } else if (level == LOW) {
+                       tasklet_schedule(&sp->task);
+               }
+       }
+
+       atomic_dec(&sp->isr_cnt);
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t
+s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs)
+{
+       ring_info_t *ring = (ring_info_t *)dev_id;
+       nic_t *sp = ring->nic;
+       int rxb_size, level, rng_n;
+
+       atomic_inc(&sp->isr_cnt);
+       rx_intr_handler(ring);
+
+       rng_n = ring->ring_no;
+       rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
+       level = rx_buffer_level(sp, rxb_size, rng_n);
+
+       if ((level == PANIC) && (!TASKLET_IN_USE)) {
+               int ret;
+               DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
+               DBG_PRINT(INTR_DBG, "PANIC levels\n");
+               if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
+                       DBG_PRINT(ERR_DBG, "Out of memory in %s",
+                                 __FUNCTION__);
+                       clear_bit(0, (&sp->tasklet_status));
+                       return IRQ_HANDLED;
+               }
+               clear_bit(0, (&sp->tasklet_status));
+       } else if (level == LOW) {
+               tasklet_schedule(&sp->task);
+       }
+       atomic_dec(&sp->isr_cnt);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t
+s2io_msix_fifo_handle(int irq, void *dev_id, struct pt_regs *regs)
+{
+       fifo_info_t *fifo = (fifo_info_t *)dev_id;
+       nic_t *sp = fifo->nic;
+
+       atomic_inc(&sp->isr_cnt);
+       tx_intr_handler(fifo);
+       atomic_dec(&sp->isr_cnt);
+       return IRQ_HANDLED;
+}
+
 static void s2io_txpic_intr_handle(nic_t *sp)
 {
        XENA_dev_config_t __iomem *bar0 = sp->bar0;
@@ -3778,11 +4179,10 @@ static void s2io_ethtool_gdrvinfo(struct net_device *dev,
 {
        nic_t *sp = dev->priv;
 
-       strncpy(info->driver, s2io_driver_name, sizeof(s2io_driver_name));
-       strncpy(info->version, s2io_driver_version,
-               sizeof(s2io_driver_version));
-       strncpy(info->fw_version, "", 32);
-       strncpy(info->bus_info, pci_name(sp->pdev), 32);
+       strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
+       strncpy(info->version, s2io_driver_version, sizeof(info->version));
+       strncpy(info->fw_version, "", sizeof(info->fw_version));
+       strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
        info->regdump_len = XENA_REG_SPACE;
        info->eedump_len = XENA_EEPROM_SPACE;
        info->testinfo_len = S2IO_TEST_LEN;
@@ -3978,29 +4378,53 @@ static int s2io_ethtool_setpause_data(struct net_device *dev,
  */
 
 #define S2IO_DEV_ID            5
-static int read_eeprom(nic_t * sp, int off, u32 * data)
+static int read_eeprom(nic_t * sp, int off, u64 * data)
 {
        int ret = -1;
        u32 exit_cnt = 0;
        u64 val64;
        XENA_dev_config_t __iomem *bar0 = sp->bar0;
 
-       val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
-           I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
-           I2C_CONTROL_CNTL_START;
-       SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
+       if (sp->device_type == XFRAME_I_DEVICE) {
+               val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
+                   I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
+                   I2C_CONTROL_CNTL_START;
+               SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
 
-       while (exit_cnt < 5) {
-               val64 = readq(&bar0->i2c_control);
-               if (I2C_CONTROL_CNTL_END(val64)) {
-                       *data = I2C_CONTROL_GET_DATA(val64);
-                       ret = 0;
-                       break;
+               while (exit_cnt < 5) {
+                       val64 = readq(&bar0->i2c_control);
+                       if (I2C_CONTROL_CNTL_END(val64)) {
+                               *data = I2C_CONTROL_GET_DATA(val64);
+                               ret = 0;
+                               break;
+                       }
+                       msleep(50);
+                       exit_cnt++;
                }
-               msleep(50);
-               exit_cnt++;
        }
 
+       if (sp->device_type == XFRAME_II_DEVICE) {
+               val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
+                       SPI_CONTROL_BYTECNT(0x3) | 
+                       SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
+               SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
+               val64 |= SPI_CONTROL_REQ;
+               SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
+               while (exit_cnt < 5) {
+                       val64 = readq(&bar0->spi_control);
+                       if (val64 & SPI_CONTROL_NACK) {
+                               ret = 1;
+                               break;
+                       } else if (val64 & SPI_CONTROL_DONE) {
+                               *data = readq(&bar0->spi_data);
+                               *data &= 0xffffff;
+                               ret = 0;
+                               break;
+                       }
+                       msleep(50);
+                       exit_cnt++;
+               }
+       }
        return ret;
 }
 
@@ -4019,28 +4443,53 @@ static int read_eeprom(nic_t * sp, int off, u32 * data)
  *  0 on success, -1 on failure.
  */
 
-static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
+static int write_eeprom(nic_t * sp, int off, u64 data, int cnt)
 {
        int exit_cnt = 0, ret = -1;
        u64 val64;
        XENA_dev_config_t __iomem *bar0 = sp->bar0;
 
-       val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
-           I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA(data) |
-           I2C_CONTROL_CNTL_START;
-       SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
+       if (sp->device_type == XFRAME_I_DEVICE) {
+               val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
+                   I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
+                   I2C_CONTROL_CNTL_START;
+               SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
+
+               while (exit_cnt < 5) {
+                       val64 = readq(&bar0->i2c_control);
+                       if (I2C_CONTROL_CNTL_END(val64)) {
+                               if (!(val64 & I2C_CONTROL_NACK))
+                                       ret = 0;
+                               break;
+                       }
+                       msleep(50);
+                       exit_cnt++;
+               }
+       }
 
-       while (exit_cnt < 5) {
-               val64 = readq(&bar0->i2c_control);
-               if (I2C_CONTROL_CNTL_END(val64)) {
-                       if (!(val64 & I2C_CONTROL_NACK))
+       if (sp->device_type == XFRAME_II_DEVICE) {
+               int write_cnt = (cnt == 8) ? 0 : cnt;
+               writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);
+
+               val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
+                       SPI_CONTROL_BYTECNT(write_cnt) | 
+                       SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
+               SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
+               val64 |= SPI_CONTROL_REQ;
+               SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
+               while (exit_cnt < 5) {
+                       val64 = readq(&bar0->spi_control);
+                       if (val64 & SPI_CONTROL_NACK) {
+                               ret = 1;
+                               break;
+                       } else if (val64 & SPI_CONTROL_DONE) {
                                ret = 0;
-                       break;
+                               break;
+                       }
+                       msleep(50);
+                       exit_cnt++;
                }
-               msleep(50);
-               exit_cnt++;
        }
-
        return ret;
 }
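Both Xframe II branches above use the same idiom: program spi_control with the key, byte count, command and address, raise SPI_CONTROL_REQ, then poll for DONE or NACK with a bounded number of 50 ms sleeps. Reduced to its skeleton (illustrative only, hypothetical wrapper name):

/* Skeleton of the SPI poll loop used above -- illustrative, not part of the patch */
static int example_spi_wait(XENA_dev_config_t __iomem *bar0)
{
	u64 val64;
	int tries;

	for (tries = 0; tries < 5; tries++) {
		val64 = readq(&bar0->spi_control);
		if (val64 & SPI_CONTROL_NACK)
			return 1;		/* device rejected the command */
		if (val64 & SPI_CONTROL_DONE)
			return 0;		/* transaction completed */
		msleep(50);
	}
	return -1;				/* timed out, matching the callers' default */
}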
 
@@ -4060,7 +4509,8 @@ static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
 static int s2io_ethtool_geeprom(struct net_device *dev,
                         struct ethtool_eeprom *eeprom, u8 * data_buf)
 {
-       u32 data, i, valid;
+       u32 i, valid;
+       u64 data;
        nic_t *sp = dev->priv;
 
        eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
@@ -4098,7 +4548,7 @@ static int s2io_ethtool_seeprom(struct net_device *dev,
                                u8 * data_buf)
 {
        int len = eeprom->len, cnt = 0;
-       u32 valid = 0, data;
+       u64 valid = 0, data;
        nic_t *sp = dev->priv;
 
        if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
@@ -4146,7 +4596,7 @@ static int s2io_ethtool_seeprom(struct net_device *dev,
 static int s2io_register_test(nic_t * sp, uint64_t * data)
 {
        XENA_dev_config_t __iomem *bar0 = sp->bar0;
-       u64 val64 = 0;
+       u64 val64 = 0, exp_val;
        int fail = 0;
 
        val64 = readq(&bar0->pif_rd_swapper_fb);
@@ -4162,7 +4612,11 @@ static int s2io_register_test(nic_t * sp, uint64_t * data)
        }
 
        val64 = readq(&bar0->rx_queue_cfg);
-       if (val64 != 0x0808080808080808ULL) {
+       if (sp->device_type == XFRAME_II_DEVICE)
+               exp_val = 0x0404040404040404ULL;
+       else
+               exp_val = 0x0808080808080808ULL;
+       if (val64 != exp_val) {
                fail = 1;
                DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
        }
@@ -4190,7 +4644,7 @@ static int s2io_register_test(nic_t * sp, uint64_t * data)
        }
 
        *data = fail;
-       return 0;
+       return fail;
 }
 
 /**
@@ -4209,58 +4663,83 @@ static int s2io_register_test(nic_t * sp, uint64_t * data)
 static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
 {
        int fail = 0;
-       u32 ret_data;
+       u64 ret_data, org_4F0, org_7F0;
+       u8 saved_4F0 = 0, saved_7F0 = 0;
+       struct net_device *dev = sp->dev;
 
        /* Test Write Error at offset 0 */
-       if (!write_eeprom(sp, 0, 0, 3))
-               fail = 1;
+       /* Note that the SPI interface allows write access to all areas
+        * of the EEPROM. Hence negative testing is done only for Xframe I.
+        */
+       if (sp->device_type == XFRAME_I_DEVICE)
+               if (!write_eeprom(sp, 0, 0, 3))
+                       fail = 1;
+
+       /* Save current values at offsets 0x4F0 and 0x7F0 */
+       if (!read_eeprom(sp, 0x4F0, &org_4F0))
+               saved_4F0 = 1;
+       if (!read_eeprom(sp, 0x7F0, &org_7F0))
+               saved_7F0 = 1;
 
        /* Test Write at offset 4f0 */
-       if (write_eeprom(sp, 0x4F0, 0x01234567, 3))
+       if (write_eeprom(sp, 0x4F0, 0x012345, 3))
                fail = 1;
        if (read_eeprom(sp, 0x4F0, &ret_data))
                fail = 1;
 
-       if (ret_data != 0x01234567)
+       if (ret_data != 0x012345) {
+               DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. Data written %llx Data read %llx\n", dev->name, (u64)0x12345, ret_data); 
                fail = 1;
+       }
 
        /* Reset the EEPROM data to FFFF */
-       write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3);
+       write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
 
        /* Test Write Request Error at offset 0x7c */
-       if (!write_eeprom(sp, 0x07C, 0, 3))
-               fail = 1;
+       if (sp->device_type == XFRAME_I_DEVICE)
+               if (!write_eeprom(sp, 0x07C, 0, 3))
+                       fail = 1;
 
-       /* Test Write Request at offset 0x7fc */
-       if (write_eeprom(sp, 0x7FC, 0x01234567, 3))
+       /* Test Write Request at offset 0x7f0 */
+       if (write_eeprom(sp, 0x7F0, 0x012345, 3))
                fail = 1;
-       if (read_eeprom(sp, 0x7FC, &ret_data))
+       if (read_eeprom(sp, 0x7F0, &ret_data))
                fail = 1;
 
-       if (ret_data != 0x01234567)
+       if (ret_data != 0x012345) {
+               DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. Data written %llx Data read %llx\n", dev->name, (u64)0x12345, ret_data); 
                fail = 1;
+       }
 
        /* Reset the EEPROM data to FFFF */
-       write_eeprom(sp, 0x7FC, 0xFFFFFFFF, 3);
+       write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
 
-       /* Test Write Error at offset 0x80 */
-       if (!write_eeprom(sp, 0x080, 0, 3))
-               fail = 1;
+       if (sp->device_type == XFRAME_I_DEVICE) {
+               /* Test Write Error at offset 0x80 */
+               if (!write_eeprom(sp, 0x080, 0, 3))
+                       fail = 1;
 
-       /* Test Write Error at offset 0xfc */
-       if (!write_eeprom(sp, 0x0FC, 0, 3))
-               fail = 1;
+               /* Test Write Error at offset 0xfc */
+               if (!write_eeprom(sp, 0x0FC, 0, 3))
+                       fail = 1;
 
-       /* Test Write Error at offset 0x100 */
-       if (!write_eeprom(sp, 0x100, 0, 3))
-               fail = 1;
+               /* Test Write Error at offset 0x100 */
+               if (!write_eeprom(sp, 0x100, 0, 3))
+                       fail = 1;
 
-       /* Test Write Error at offset 4ec */
-       if (!write_eeprom(sp, 0x4EC, 0, 3))
-               fail = 1;
+               /* Test Write Error at offset 4ec */
+               if (!write_eeprom(sp, 0x4EC, 0, 3))
+                       fail = 1;
+       }
+
+       /* Restore values at offsets 0x4F0 and 0x7F0 */
+       if (saved_4F0)
+               write_eeprom(sp, 0x4F0, org_4F0, 3);
+       if (saved_7F0)
+               write_eeprom(sp, 0x7F0, org_7F0, 3);
 
        *data = fail;
-       return 0;
+       return fail;
 }
 
 /**
@@ -4342,7 +4821,7 @@ static int s2io_rldram_test(nic_t * sp, uint64_t * data)
 {
        XENA_dev_config_t __iomem *bar0 = sp->bar0;
        u64 val64;
-       int cnt, iteration = 0, test_pass = 0;
+       int cnt, iteration = 0, test_fail = 0;
 
        val64 = readq(&bar0->adapter_control);
        val64 &= ~ADAPTER_ECC_EN;
@@ -4350,7 +4829,7 @@ static int s2io_rldram_test(nic_t * sp, uint64_t * data)
 
        val64 = readq(&bar0->mc_rldram_test_ctrl);
        val64 |= MC_RLDRAM_TEST_MODE;
-       writeq(val64, &bar0->mc_rldram_test_ctrl);
+       SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
 
        val64 = readq(&bar0->mc_rldram_mrs);
        val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
@@ -4378,17 +4857,12 @@ static int s2io_rldram_test(nic_t * sp, uint64_t * data)
                }
                writeq(val64, &bar0->mc_rldram_test_d2);
 
-               val64 = (u64) (0x0000003fffff0000ULL);
+               val64 = (u64) (0x0000003ffffe0100ULL);
                writeq(val64, &bar0->mc_rldram_test_add);
 
-
-               val64 = MC_RLDRAM_TEST_MODE;
-               writeq(val64, &bar0->mc_rldram_test_ctrl);
-
-               val64 |=
-                   MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
-                   MC_RLDRAM_TEST_GO;
-               writeq(val64, &bar0->mc_rldram_test_ctrl);
+               val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
+                       MC_RLDRAM_TEST_GO;
+               SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
 
                for (cnt = 0; cnt < 5; cnt++) {
                        val64 = readq(&bar0->mc_rldram_test_ctrl);
@@ -4400,11 +4874,8 @@ static int s2io_rldram_test(nic_t * sp, uint64_t * data)
                if (cnt == 5)
                        break;
 
-               val64 = MC_RLDRAM_TEST_MODE;
-               writeq(val64, &bar0->mc_rldram_test_ctrl);
-
-               val64 |= MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
-               writeq(val64, &bar0->mc_rldram_test_ctrl);
+               val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
+               SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
 
                for (cnt = 0; cnt < 5; cnt++) {
                        val64 = readq(&bar0->mc_rldram_test_ctrl);
@@ -4417,18 +4888,18 @@ static int s2io_rldram_test(nic_t * sp, uint64_t * data)
                        break;
 
                val64 = readq(&bar0->mc_rldram_test_ctrl);
-               if (val64 & MC_RLDRAM_TEST_PASS)
-                       test_pass = 1;
+               if (!(val64 & MC_RLDRAM_TEST_PASS))
+                       test_fail = 1;
 
                iteration++;
        }
 
-       if (!test_pass)
-               *data = 1;
-       else
-               *data = 0;
+       *data = test_fail;
 
-       return 0;
+       /* Bring the adapter out of test mode */
+       SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
+
+       return test_fail;
 }
 
 /**
@@ -4932,7 +5403,7 @@ static void s2io_card_down(nic_t * sp)
 
 static int s2io_card_up(nic_t * sp)
 {
-       int i, ret;
+       int i, ret = 0;
        mac_info_t *mac_control;
        struct config_param *config;
        struct net_device *dev = (struct net_device *) sp->dev;
@@ -4944,6 +5415,15 @@ static int s2io_card_up(nic_t * sp)
                return -ENODEV;
        }
 
+       if (sp->intr_type == MSI)
+               ret = s2io_enable_msi(sp);
+       else if (sp->intr_type == MSI_X)
+               ret = s2io_enable_msi_x(sp);
+       if (ret) {
+               DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
+               sp->intr_type = INTA;
+       }
+
        /*
         * Initializing the Rx buffers. For now we are considering only 1
         * Rx ring and initializing buffers into 30 Rx blocks
@@ -5228,6 +5708,8 @@ static void s2io_init_pci(nic_t * sp)
 
 MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
 MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
 module_param(tx_fifo_num, int, 0);
 module_param(rx_ring_num, int, 0);
 module_param_array(tx_fifo_len, uint, NULL, 0);
@@ -5245,6 +5727,7 @@ module_param(bimodal, bool, 0);
 module_param(indicate_max_pkts, int, 0);
 #endif
 module_param(rxsync_frequency, int, 0);
+module_param(intr_type, int, 0);
 
 /**
  *  s2io_init_nic - Initialization of the adapter .
@@ -5274,9 +5757,16 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
        mac_info_t *mac_control;
        struct config_param *config;
        int mode;
+       u8 dev_intr_type = intr_type;
 
 #ifdef CONFIG_S2IO_NAPI
-       DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
+       if (dev_intr_type != INTA) {
+               DBG_PRINT(ERR_DBG, "NAPI cannot be enabled when MSI/MSI-X \
+is enabled. Defaulting to INTA\n");
+               dev_intr_type = INTA;
+       }
+       else
+               DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
 #endif
 
        if ((ret = pci_enable_device(pdev))) {
@@ -5303,10 +5793,35 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
                return -ENOMEM;
        }
 
-       if (pci_request_regions(pdev, s2io_driver_name)) {
-               DBG_PRINT(ERR_DBG, "Request Regions failed\n"),
-                   pci_disable_device(pdev);
-               return -ENODEV;
+       if ((dev_intr_type == MSI_X) && 
+                       ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
+                       (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
+               DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. \
+Defaulting to INTA\n");
+               dev_intr_type = INTA;
+       }
+       if (dev_intr_type != MSI_X) {
+               if (pci_request_regions(pdev, s2io_driver_name)) {
+                       DBG_PRINT(ERR_DBG, "Request Regions failed\n"),
+                           pci_disable_device(pdev);
+                       return -ENODEV;
+               }
+       }
+       else {
+               if (!(request_mem_region(pci_resource_start(pdev, 0),
+                                pci_resource_len(pdev, 0), s2io_driver_name))) {
+                       DBG_PRINT(ERR_DBG, "bar0 Request Regions failed\n");
+                       pci_disable_device(pdev);
+                       return -ENODEV;
+               }
+               if (!(request_mem_region(pci_resource_start(pdev, 2),
+                                pci_resource_len(pdev, 2), s2io_driver_name))) {
+                       DBG_PRINT(ERR_DBG, "bar1 Request Regions failed\n");
+                       release_mem_region(pci_resource_start(pdev, 0),
+                                   pci_resource_len(pdev, 0));
+                       pci_disable_device(pdev);
+                       return -ENODEV;
+               }
        }
 
        dev = alloc_etherdev(sizeof(nic_t));
@@ -5329,6 +5844,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
        sp->pdev = pdev;
        sp->high_dma_flag = dma_flag;
        sp->device_enabled_once = FALSE;
+       sp->intr_type = dev_intr_type;
 
        if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
                (pdev->device == PCI_DEVICE_ID_HERC_UNI))
@@ -5336,6 +5852,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
        else
                sp->device_type = XFRAME_I_DEVICE;
 
+               
        /* Initialize some PCI/PCI-X fields of the NIC. */
        s2io_init_pci(sp);
 
@@ -5571,12 +6088,23 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
        if (sp->device_type & XFRAME_II_DEVICE) {
                DBG_PRINT(ERR_DBG, "%s: Neterion Xframe II 10GbE adapter ",
                          dev->name);
-               DBG_PRINT(ERR_DBG, "(rev %d), %s",
+               DBG_PRINT(ERR_DBG, "(rev %d), Version %s",
                                get_xena_rev_id(sp->pdev),
                                s2io_driver_version);
 #ifdef CONFIG_2BUFF_MODE
                DBG_PRINT(ERR_DBG, ", Buffer mode %d",2);
 #endif
+               switch(sp->intr_type) {
+                       case INTA:
+                               DBG_PRINT(ERR_DBG, ", Intr type INTA");
+                               break;
+                       case MSI:
+                               DBG_PRINT(ERR_DBG, ", Intr type MSI");
+                               break;
+                       case MSI_X:
+                               DBG_PRINT(ERR_DBG, ", Intr type MSI-X");
+                               break;
+               }
 
                DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n");
                DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
@@ -5595,12 +6123,23 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
        } else {
                DBG_PRINT(ERR_DBG, "%s: Neterion Xframe I 10GbE adapter ",
                          dev->name);
-               DBG_PRINT(ERR_DBG, "(rev %d), %s",
+               DBG_PRINT(ERR_DBG, "(rev %d), Version %s",
                                        get_xena_rev_id(sp->pdev),
                                        s2io_driver_version);
 #ifdef CONFIG_2BUFF_MODE
                DBG_PRINT(ERR_DBG, ", Buffer mode %d",2);
 #endif
+               switch(sp->intr_type) {
+                       case INTA:
+                               DBG_PRINT(ERR_DBG, ", Intr type INTA");
+                               break;
+                       case MSI:
+                               DBG_PRINT(ERR_DBG, ", Intr type MSI");
+                               break;
+                       case MSI_X:
+                               DBG_PRINT(ERR_DBG, ", Intr type MSI-X");
+                               break;
+               }
                DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n");
                DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
                          sp->def_mac_addr[0].mac_addr[0],
@@ -5644,7 +6183,14 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
       mem_alloc_failed:
        free_shared_mem(sp);
        pci_disable_device(pdev);
-       pci_release_regions(pdev);
+       if (dev_intr_type != MSI_X)
+               pci_release_regions(pdev);
+       else {
+               release_mem_region(pci_resource_start(pdev, 0),
+                       pci_resource_len(pdev, 0));
+               release_mem_region(pci_resource_start(pdev, 2),
+                       pci_resource_len(pdev, 2));
+       }
        pci_set_drvdata(pdev, NULL);
        free_netdev(dev);
 
@@ -5678,7 +6224,14 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev)
        iounmap(sp->bar0);
        iounmap(sp->bar1);
        pci_disable_device(pdev);
-       pci_release_regions(pdev);
+       if (sp->intr_type != MSI_X)
+               pci_release_regions(pdev);
+       else {
+               release_mem_region(pci_resource_start(pdev, 0),
+                       pci_resource_len(pdev, 0));
+               release_mem_region(pci_resource_start(pdev, 2),
+                       pci_resource_len(pdev, 2));
+       }
        pci_set_drvdata(pdev, NULL);
        free_netdev(dev);
 }
index 89151cb5218132414efae178eddb05f6f0515d26..1cc24b56760e1fe1773eb48cbcdecefdb27cc451 100644 (file)
@@ -652,6 +652,30 @@ typedef struct {
 #define SMALL_BLK_CNT  30
 #define LARGE_BLK_CNT  100
 
+/*
+ * Structure to keep track of the MSI-X vectors and the corresponding
+ * argument registered against each vector
+ */
+#define MAX_REQUESTED_MSI_X    17
+struct s2io_msix_entry
+{
+       u16 vector;
+       u16 entry;
+       void *arg;
+
+       u8 type;
+#define        MSIX_FIFO_TYPE  1
+#define        MSIX_RING_TYPE  2
+
+       u8 in_use;
+#define MSIX_REGISTERED_SUCCESS        0xAA
+};
+
+struct msix_info_st {
+       u64 addr;
+       u64 data;
+};
+
 /* Structure representing one instance of the NIC */
 struct s2io_nic {
 #ifdef CONFIG_S2IO_NAPI
@@ -719,13 +743,8 @@ struct s2io_nic {
         *  a schedule task that will set the correct Link state once the
         *  NIC's PHY has stabilized after a state change.
         */
-#ifdef INIT_TQUEUE
-       struct tq_struct rst_timer_task;
-       struct tq_struct set_link_task;
-#else
        struct work_struct rst_timer_task;
        struct work_struct set_link_task;
-#endif
 
        /* Flag that can be used to turn on or turn off the Rx checksum
         * offload feature.
@@ -748,10 +767,23 @@ struct s2io_nic {
        atomic_t card_state;
        volatile unsigned long link_state;
        struct vlan_group *vlgrp;
+#define MSIX_FLG                0xA5
+       struct msix_entry *entries;
+       struct s2io_msix_entry *s2io_entries;
+       char desc1[35];
+       char desc2[35];
+
+       struct msix_info_st msix_info[0x3f];
+
 #define XFRAME_I_DEVICE                1
 #define XFRAME_II_DEVICE       2
        u8 device_type;
 
+#define INTA   0
+#define MSI    1
+#define MSI_X  2
+       u8 intr_type;
+
        spinlock_t      rx_lock;
        atomic_t        isr_cnt;
 };
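The new intr_type field and the INTA/MSI/MSI_X defines above pair with the intr_type module parameter added to s2io.c. A hedged sketch of how such a load-time parameter can be sanity-checked at probe time (hypothetical helper, not part of the patch; the driver itself only overrides invalid combinations case by case):

/* Hypothetical range check for the new module parameter -- not in the patch */
static u8 example_pick_intr_type(int requested)
{
	switch (requested) {
	case MSI:
	case MSI_X:
		return requested;	/* caller may still fall back to INTA later */
	case INTA:
	default:
		return INTA;		/* unknown values default to legacy INTA */
	}
}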
@@ -886,6 +918,13 @@ static int s2io_poll(struct net_device *dev, int *budget);
 static void s2io_init_pci(nic_t * sp);
 int s2io_set_mac_addr(struct net_device *dev, u8 * addr);
 static void s2io_alarm_handle(unsigned long data);
+static int s2io_enable_msi(nic_t *nic);
+static irqreturn_t s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs);
+static irqreturn_t
+s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs);
+static irqreturn_t
+s2io_msix_fifo_handle(int irq, void *dev_id, struct pt_regs *regs);
+int s2io_enable_msi_x(nic_t *nic);
 static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs);
 static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag);
 static struct ethtool_ops netdev_ethtool_ops;
@@ -894,4 +933,5 @@ int s2io_set_swapper(nic_t * sp);
 static void s2io_card_down(nic_t *nic);
 static int s2io_card_up(nic_t *nic);
 int get_xena_rev_id(struct pci_dev *pdev);
+void restore_xmsi_data(nic_t *nic);
 #endif                         /* _S2IO_H */
index 7abd55a4fb21fecf9e0e6204fafb490117ca464a..aa4ca182175909c93f6b70eaa2b46e6370d8cbcd 100644 (file)
@@ -10,7 +10,7 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
@@ -118,8 +118,6 @@ MODULE_PARM_DESC(int_timeout, "Timeout value");
  ********************************************************************* */
 
 
-typedef unsigned long sbmac_port_t;
-
 typedef enum { sbmac_speed_auto, sbmac_speed_10,
               sbmac_speed_100, sbmac_speed_1000 } sbmac_speed_t;
 
@@ -129,7 +127,7 @@ typedef enum { sbmac_duplex_auto, sbmac_duplex_half,
 typedef enum { sbmac_fc_auto, sbmac_fc_disabled, sbmac_fc_frame,
               sbmac_fc_collision, sbmac_fc_carrier } sbmac_fc_t;
 
-typedef enum { sbmac_state_uninit, sbmac_state_off, sbmac_state_on, 
+typedef enum { sbmac_state_uninit, sbmac_state_off, sbmac_state_on,
               sbmac_state_broken } sbmac_state_t;
 
 
@@ -144,17 +142,13 @@ typedef enum { sbmac_state_uninit, sbmac_state_off, sbmac_state_on,
 
 #define NUMCACHEBLKS(x) (((x)+SMP_CACHE_BYTES-1)/SMP_CACHE_BYTES)
 
-#define SBMAC_READCSR(t)       __raw_readq((unsigned long)t)
-#define SBMAC_WRITECSR(t,v)    __raw_writeq(v, (unsigned long)t)
-
 #define SBMAC_MAX_TXDESCR      32
 #define SBMAC_MAX_RXDESCR      32
 
 #define ETHER_ALIGN    2
 #define ETHER_ADDR_LEN 6
-#define ENET_PACKET_SIZE       1518 
-/*#define ENET_PACKET_SIZE     9216 */ 
+#define ENET_PACKET_SIZE       1518
+/*#define ENET_PACKET_SIZE     9216 */
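The rest of this file follows the same mechanical conversion as the hunk above: register handles become volatile void __iomem * pointers, and the removed SBMAC_READCSR()/SBMAC_WRITECSR() wrappers give way to direct __raw_readq()/__raw_writeq() calls. Illustrated with hypothetical helpers (not part of the patch):

/* Illustration of the accessor conversion performed throughout this driver */
static inline uint64_t example_read_csr(volatile void __iomem *reg)
{
	return __raw_readq(reg);	/* was: SBMAC_READCSR(reg) */
}

static inline void example_write_csr(volatile void __iomem *reg, uint64_t val)
{
	__raw_writeq(val, reg);		/* was: SBMAC_WRITECSR(reg, val) -- note the swapped argument order */
}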
 
 /**********************************************************************
  *  DMA Descriptor structure
@@ -172,12 +166,12 @@ typedef unsigned long paddr_t;
  ********************************************************************* */
 
 typedef struct sbmacdma_s {
-       
-       /* 
+
+       /*
         * This stuff is used to identify the channel and the registers
         * associated with it.
         */
-       
+
        struct sbmac_softc *sbdma_eth;          /* back pointer to associated MAC */
        int              sbdma_channel; /* channel number */
        int              sbdma_txdir;       /* direction (1=transmit) */
@@ -187,21 +181,21 @@ typedef struct sbmacdma_s {
        int              sbdma_int_timeout; /* # usec rx/tx interrupt */
 #endif
 
-       sbmac_port_t     sbdma_config0; /* DMA config register 0 */
-       sbmac_port_t     sbdma_config1; /* DMA config register 1 */
-       sbmac_port_t     sbdma_dscrbase;        /* Descriptor base address */
-       sbmac_port_t     sbdma_dscrcnt;     /* Descriptor count register */
-       sbmac_port_t     sbdma_curdscr; /* current descriptor address */
-       
+       volatile void __iomem *sbdma_config0;   /* DMA config register 0 */
+       volatile void __iomem *sbdma_config1;   /* DMA config register 1 */
+       volatile void __iomem *sbdma_dscrbase;  /* Descriptor base address */
+       volatile void __iomem *sbdma_dscrcnt;     /* Descriptor count register */
+       volatile void __iomem *sbdma_curdscr;   /* current descriptor address */
+
        /*
         * This stuff is for maintenance of the ring
         */
-       
+
        sbdmadscr_t     *sbdma_dscrtable;       /* base of descriptor table */
        sbdmadscr_t     *sbdma_dscrtable_end; /* end of descriptor table */
-       
+
        struct sk_buff **sbdma_ctxtable;    /* context table, one per descr */
-       
+
        paddr_t          sbdma_dscrtable_phys; /* and also the phys addr */
        sbdmadscr_t     *sbdma_addptr;  /* next dscr for sw to add */
        sbdmadscr_t     *sbdma_remptr;  /* next dscr for sw to remove */
@@ -213,15 +207,15 @@ typedef struct sbmacdma_s {
  ********************************************************************* */
 
 struct sbmac_softc {
-       
+
        /*
         * Linux-specific things
         */
-       
+
        struct net_device *sbm_dev;             /* pointer to linux device */
        spinlock_t sbm_lock;            /* spin lock */
        struct timer_list sbm_timer;            /* for monitoring MII */
-       struct net_device_stats sbm_stats; 
+       struct net_device_stats sbm_stats;
        int sbm_devflags;                       /* current device flags */
 
        int          sbm_phy_oldbmsr;
@@ -229,31 +223,31 @@ struct sbmac_softc {
        int          sbm_phy_oldk1stsr;
        int          sbm_phy_oldlinkstat;
        int sbm_buffersize;
-       
+
        unsigned char sbm_phys[2];
-       
+
        /*
         * Controller-specific things
         */
-       
-       unsigned long   sbm_base;          /* MAC's base address */
+
+       volatile void __iomem *sbm_base;          /* MAC's base address */
        sbmac_state_t    sbm_state;         /* current state */
-       
-       sbmac_port_t     sbm_macenable; /* MAC Enable Register */
-       sbmac_port_t     sbm_maccfg;    /* MAC Configuration Register */
-       sbmac_port_t     sbm_fifocfg;   /* FIFO configuration register */
-       sbmac_port_t     sbm_framecfg;  /* Frame configuration register */
-       sbmac_port_t     sbm_rxfilter;  /* receive filter register */
-       sbmac_port_t     sbm_isr;               /* Interrupt status register */
-       sbmac_port_t     sbm_imr;               /* Interrupt mask register */
-       sbmac_port_t     sbm_mdio;              /* MDIO register */
-       
+
+       volatile void __iomem   *sbm_macenable; /* MAC Enable Register */
+       volatile void __iomem   *sbm_maccfg;    /* MAC Configuration Register */
+       volatile void __iomem   *sbm_fifocfg;   /* FIFO configuration register */
+       volatile void __iomem   *sbm_framecfg;  /* Frame configuration register */
+       volatile void __iomem   *sbm_rxfilter;  /* receive filter register */
+       volatile void __iomem   *sbm_isr;       /* Interrupt status register */
+       volatile void __iomem   *sbm_imr;       /* Interrupt mask register */
+       volatile void __iomem   *sbm_mdio;      /* MDIO register */
+
        sbmac_speed_t    sbm_speed;             /* current speed */
        sbmac_duplex_t   sbm_duplex;    /* current duplex */
        sbmac_fc_t       sbm_fc;                /* current flow control setting */
-       
+
        unsigned char    sbm_hwaddr[ETHER_ADDR_LEN];
-       
+
        sbmacdma_t       sbm_txdma;             /* for now, only use channel 0 */
        sbmacdma_t       sbm_rxdma;
        int              rx_hw_checksum;
@@ -302,6 +296,7 @@ static void sbmac_set_rx_mode(struct net_device *dev);
 static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static int sbmac_close(struct net_device *dev);
 static int sbmac_mii_poll(struct sbmac_softc *s,int noisy);
+static int sbmac_mii_probe(struct net_device *dev);
 
 static void sbmac_mii_sync(struct sbmac_softc *s);
 static void sbmac_mii_senddata(struct sbmac_softc *s,unsigned int data, int bitcnt);
@@ -439,6 +434,9 @@ static uint64_t sbmac_orig_hwaddr[MAX_UNITS];
 
 #define        MII_BMCR        0x00    /* Basic mode control register (rw) */
 #define        MII_BMSR        0x01    /* Basic mode status register (ro) */
+#define        MII_PHYIDR1     0x02
+#define        MII_PHYIDR2     0x03
+
 #define MII_K1STSR     0x0A    /* 1K Status Register (ro) */
 #define        MII_ANLPAR      0x05    /* Autonegotiation lnk partner abilities (rw) */
 
@@ -450,13 +448,13 @@ static uint64_t sbmac_orig_hwaddr[MAX_UNITS];
 
 /**********************************************************************
  *  SBMAC_MII_SYNC(s)
- *  
+ *
  *  Synchronize with the MII - send a pattern of bits to the MII
  *  that will guarantee that it is ready to accept a command.
- *  
- *  Input parameters: 
+ *
+ *  Input parameters:
  *        s - sbmac structure
- *        
+ *
  *  Return value:
  *        nothing
  ********************************************************************* */
@@ -467,25 +465,25 @@ static void sbmac_mii_sync(struct sbmac_softc *s)
        uint64_t bits;
        int mac_mdio_genc;
 
-       mac_mdio_genc = SBMAC_READCSR(s->sbm_mdio) & M_MAC_GENC;
-       
+       mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC;
+
        bits = M_MAC_MDIO_DIR_OUTPUT | M_MAC_MDIO_OUT;
-       
-       SBMAC_WRITECSR(s->sbm_mdio,bits | mac_mdio_genc);
-       
+
+       __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio);
+
        for (cnt = 0; cnt < 32; cnt++) {
-               SBMAC_WRITECSR(s->sbm_mdio,bits | M_MAC_MDC | mac_mdio_genc);
-               SBMAC_WRITECSR(s->sbm_mdio,bits | mac_mdio_genc);
+               __raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio);
+               __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio);
        }
 }
 
 /**********************************************************************
  *  SBMAC_MII_SENDDATA(s,data,bitcnt)
- *  
+ *
  *  Send some bits to the MII.  The bits to be sent are right-
  *  justified in the 'data' parameter.
- *  
- *  Input parameters: 
+ *
+ *  Input parameters:
  *        s - sbmac structure
  *        data - data to send
  *        bitcnt - number of bits to send
@@ -498,20 +496,20 @@ static void sbmac_mii_senddata(struct sbmac_softc *s,unsigned int data, int bitc
        unsigned int curmask;
        int mac_mdio_genc;
 
-       mac_mdio_genc = SBMAC_READCSR(s->sbm_mdio) & M_MAC_GENC;
-       
+       mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC;
+
        bits = M_MAC_MDIO_DIR_OUTPUT;
-       SBMAC_WRITECSR(s->sbm_mdio,bits | mac_mdio_genc);
-       
+       __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio);
+
        curmask = 1 << (bitcnt - 1);
-       
+
        for (i = 0; i < bitcnt; i++) {
                if (data & curmask)
                        bits |= M_MAC_MDIO_OUT;
                else bits &= ~M_MAC_MDIO_OUT;
-               SBMAC_WRITECSR(s->sbm_mdio,bits | mac_mdio_genc);
-               SBMAC_WRITECSR(s->sbm_mdio,bits | M_MAC_MDC | mac_mdio_genc);
-               SBMAC_WRITECSR(s->sbm_mdio,bits | mac_mdio_genc);
+               __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio);
+               __raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio);
+               __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio);
                curmask >>= 1;
        }
 }
@@ -520,14 +518,14 @@ static void sbmac_mii_senddata(struct sbmac_softc *s,unsigned int data, int bitc
 
 /**********************************************************************
  *  SBMAC_MII_READ(s,phyaddr,regidx)
- *  
+ *
  *  Read a PHY register.
- *  
- *  Input parameters: 
+ *
+ *  Input parameters:
  *        s - sbmac structure
  *        phyaddr - PHY's address
  *        regidx = index of register to read
- *        
+ *
  *  Return value:
  *        value read, or 0 if an error occurred.
  ********************************************************************* */
@@ -543,9 +541,9 @@ static unsigned int sbmac_mii_read(struct sbmac_softc *s,int phyaddr,int regidx)
         * Synchronize ourselves so that the PHY knows the next
         * thing coming down is a command
         */
-       
+
        sbmac_mii_sync(s);
-       
+
        /*
         * Send the data to the PHY.  The sequence is
         * a "start" command (2 bits)
@@ -553,59 +551,55 @@ static unsigned int sbmac_mii_read(struct sbmac_softc *s,int phyaddr,int regidx)
         * the PHY addr (5 bits)
         * the register index (5 bits)
         */
-       
+
        sbmac_mii_senddata(s,MII_COMMAND_START, 2);
        sbmac_mii_senddata(s,MII_COMMAND_READ, 2);
        sbmac_mii_senddata(s,phyaddr, 5);
        sbmac_mii_senddata(s,regidx, 5);
-       
-       mac_mdio_genc = SBMAC_READCSR(s->sbm_mdio) & M_MAC_GENC;
-       
-       /* 
+
+       mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC;
+
+       /*
         * Switch the port around without a clock transition.
         */
-       SBMAC_WRITECSR(s->sbm_mdio,M_MAC_MDIO_DIR_INPUT | mac_mdio_genc);
-       
+       __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio);
+
        /*
         * Send out a clock pulse to signal we want the status
         */
-       
-       SBMAC_WRITECSR(s->sbm_mdio,
-                      M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc);
-       SBMAC_WRITECSR(s->sbm_mdio,M_MAC_MDIO_DIR_INPUT | mac_mdio_genc);
-       
-       /* 
+
+       __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio);
+       __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio);
+
+       /*
         * If an error occurred, the PHY will signal '1' back
         */
-       error = SBMAC_READCSR(s->sbm_mdio) & M_MAC_MDIO_IN;
-       
-       /* 
+       error = __raw_readq(s->sbm_mdio) & M_MAC_MDIO_IN;
+
+       /*
         * Issue an 'idle' clock pulse, but keep the direction
         * the same.
         */
-       SBMAC_WRITECSR(s->sbm_mdio,
-                      M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc);
-       SBMAC_WRITECSR(s->sbm_mdio,M_MAC_MDIO_DIR_INPUT | mac_mdio_genc);
-       
+       __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio);
+       __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio);
+
        regval = 0;
-       
+
        for (idx = 0; idx < 16; idx++) {
                regval <<= 1;
-               
+
                if (error == 0) {
-                       if (SBMAC_READCSR(s->sbm_mdio) & M_MAC_MDIO_IN)
+                       if (__raw_readq(s->sbm_mdio) & M_MAC_MDIO_IN)
                                regval |= 1;
                }
-               
-               SBMAC_WRITECSR(s->sbm_mdio,
-                              M_MAC_MDIO_DIR_INPUT|M_MAC_MDC | mac_mdio_genc);
-               SBMAC_WRITECSR(s->sbm_mdio,
-                              M_MAC_MDIO_DIR_INPUT | mac_mdio_genc);
+
+               __raw_writeq(M_MAC_MDIO_DIR_INPUT|M_MAC_MDC | mac_mdio_genc, s->sbm_mdio);
+               __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio);
        }
-       
+
        /* Switch back to output */
-       SBMAC_WRITECSR(s->sbm_mdio,M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc);
-       
+       __raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, s->sbm_mdio);
+
        if (error == 0)
                return regval;
        return 0;
@@ -614,15 +608,15 @@ static unsigned int sbmac_mii_read(struct sbmac_softc *s,int phyaddr,int regidx)
 
 /**********************************************************************
  *  SBMAC_MII_WRITE(s,phyaddr,regidx,regval)
- *  
+ *
  *  Write a value to a PHY register.
- *  
- *  Input parameters: 
+ *
+ *  Input parameters:
  *        s - sbmac structure
  *        phyaddr - PHY to use
  *        regidx - register within the PHY
  *        regval - data to write to register
- *        
+ *
  *  Return value:
  *        nothing
  ********************************************************************* */
@@ -633,7 +627,7 @@ static void sbmac_mii_write(struct sbmac_softc *s,int phyaddr,int regidx,
        int mac_mdio_genc;
 
        sbmac_mii_sync(s);
-       
+
        sbmac_mii_senddata(s,MII_COMMAND_START,2);
        sbmac_mii_senddata(s,MII_COMMAND_WRITE,2);
        sbmac_mii_senddata(s,phyaddr, 5);
@@ -641,27 +635,27 @@ static void sbmac_mii_write(struct sbmac_softc *s,int phyaddr,int regidx,
        sbmac_mii_senddata(s,MII_COMMAND_ACK,2);
        sbmac_mii_senddata(s,regval,16);
 
-       mac_mdio_genc = SBMAC_READCSR(s->sbm_mdio) & M_MAC_GENC;
+       mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC;
 
-       SBMAC_WRITECSR(s->sbm_mdio,M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc);
+       __raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, s->sbm_mdio);
 }
 
 
 
 /**********************************************************************
  *  SBDMA_INITCTX(d,s,chan,txrx,maxdescr)
- *  
+ *
  *  Initialize a DMA channel context.  Since there are potentially
  *  eight DMA channels per MAC, it's nice to do this in a standard
- *  way.  
- *  
- *  Input parameters: 
+ *  way.
+ *
+ *  Input parameters:
  *        d - sbmacdma_t structure (DMA channel context)
  *        s - sbmac_softc structure (pointer to a MAC)
  *        chan - channel number (0..1 right now)
  *        txrx - Identifies DMA_TX or DMA_RX for channel direction
  *      maxdescr - number of descriptors
- *        
+ *
  *  Return value:
  *        nothing
  ********************************************************************* */
@@ -672,101 +666,87 @@ static void sbdma_initctx(sbmacdma_t *d,
                          int txrx,
                          int maxdescr)
 {
-       /* 
-        * Save away interesting stuff in the structure 
+       /*
+        * Save away interesting stuff in the structure
         */
-       
+
        d->sbdma_eth       = s;
        d->sbdma_channel   = chan;
        d->sbdma_txdir     = txrx;
-       
+
 #if 0
        /* RMON clearing */
        s->sbe_idx =(s->sbm_base - A_MAC_BASE_0)/MAC_SPACING;
 #endif
 
-       SBMAC_WRITECSR(IOADDR(
-       A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_BYTES)), 0);
-       SBMAC_WRITECSR(IOADDR(
-       A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_COLLISIONS)), 0);
-       SBMAC_WRITECSR(IOADDR(
-       A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_LATE_COL)), 0);
-       SBMAC_WRITECSR(IOADDR(
-       A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_EX_COL)), 0);
-       SBMAC_WRITECSR(IOADDR(
-       A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_FCS_ERROR)), 0);
-       SBMAC_WRITECSR(IOADDR(
-       A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_ABORT)), 0);
-       SBMAC_WRITECSR(IOADDR(
-       A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_BAD)), 0);
-       SBMAC_WRITECSR(IOADDR(
-       A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_GOOD)), 0);
-       SBMAC_WRITECSR(IOADDR(
-       A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_RUNT)), 0);
-       SBMAC_WRITECSR(IOADDR(
-       A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_OVERSIZE)), 0);
-       SBMAC_WRITECSR(IOADDR(
-       A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BYTES)), 0);
-       SBMAC_WRITECSR(IOADDR(
-       A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_MCAST)), 0);
-       SBMAC_WRITECSR(IOADDR(
-       A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BCAST)), 0);
-       SBMAC_WRITECSR(IOADDR(
-       A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BAD)), 0);
-       SBMAC_WRITECSR(IOADDR(
-       A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_GOOD)), 0);
-       SBMAC_WRITECSR(IOADDR(
-       A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_RUNT)), 0);
-       SBMAC_WRITECSR(IOADDR(
-       A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_OVERSIZE)), 0);
-       SBMAC_WRITECSR(IOADDR(
-       A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_FCS_ERROR)), 0);
-       SBMAC_WRITECSR(IOADDR(
-       A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_LENGTH_ERROR)), 0);
-       SBMAC_WRITECSR(IOADDR(
-       A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_CODE_ERROR)), 0);
-       SBMAC_WRITECSR(IOADDR(
-       A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_ALIGN_ERROR)), 0);
-
-       /* 
-        * initialize register pointers 
-        */
-       
-       d->sbdma_config0 = 
+       __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_BYTES)));
+       __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_COLLISIONS)));
+       __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_LATE_COL)));
+       __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_EX_COL)));
+       __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_FCS_ERROR)));
+       __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_ABORT)));
+       __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_BAD)));
+       __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_GOOD)));
+       __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_RUNT)));
+       __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_OVERSIZE)));
+       __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BYTES)));
+       __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_MCAST)));
+       __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BCAST)));
+       __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BAD)));
+       __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_GOOD)));
+       __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_RUNT)));
+       __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_OVERSIZE)));
+       __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_FCS_ERROR)));
+       __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_LENGTH_ERROR)));
+       __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_CODE_ERROR)));
+       __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_ALIGN_ERROR)));
+
+       /*
+        * initialize register pointers
+        */
+
+       d->sbdma_config0 =
                s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG0);
-       d->sbdma_config1 = 
+       d->sbdma_config1 =
                s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG1);
-       d->sbdma_dscrbase = 
+       d->sbdma_dscrbase =
                s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_BASE);
-       d->sbdma_dscrcnt = 
+       d->sbdma_dscrcnt =
                s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_CNT);
-       d->sbdma_curdscr =      
+       d->sbdma_curdscr =
                s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CUR_DSCRADDR);
-       
+
        /*
         * Allocate memory for the ring
         */
-       
+
        d->sbdma_maxdescr = maxdescr;
-       
-       d->sbdma_dscrtable = (sbdmadscr_t *) 
-               kmalloc(d->sbdma_maxdescr*sizeof(sbdmadscr_t), GFP_KERNEL);
-       
+
+       d->sbdma_dscrtable = (sbdmadscr_t *)
+               kmalloc((d->sbdma_maxdescr+1)*sizeof(sbdmadscr_t), GFP_KERNEL);
+
+       /*
+        * The descriptor table must be aligned to at least 16 bytes or the
+        * MAC will corrupt it.
+        */
+       d->sbdma_dscrtable = (sbdmadscr_t *)
+               ALIGN((unsigned long)d->sbdma_dscrtable, sizeof(sbdmadscr_t));
+
        memset(d->sbdma_dscrtable,0,d->sbdma_maxdescr*sizeof(sbdmadscr_t));
-       
+
        d->sbdma_dscrtable_end = d->sbdma_dscrtable + d->sbdma_maxdescr;
-       
+
        d->sbdma_dscrtable_phys = virt_to_phys(d->sbdma_dscrtable);
-       
+
        /*
         * And context table
         */
-       
-       d->sbdma_ctxtable = (struct sk_buff **) 
+
+       d->sbdma_ctxtable = (struct sk_buff **)
                kmalloc(d->sbdma_maxdescr*sizeof(struct sk_buff *), GFP_KERNEL);
-       
+
        memset(d->sbdma_ctxtable,0,d->sbdma_maxdescr*sizeof(struct sk_buff *));
-       
+
 #ifdef CONFIG_SBMAC_COALESCE
        /*
         * Setup Rx/Tx DMA coalescing defaults
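
The allocation above grows the kmalloc() by one spare descriptor and then rounds the table pointer up with ALIGN() so the ring starts on a 16-byte boundary, as the new comment requires. A minimal standalone sketch of that over-allocate-and-round-up arithmetic, with plain malloc() standing in for kmalloc() and a made-up descriptor layout:

/* Standalone sketch of the over-allocate-and-round-up pattern: allocate
 * one spare element, then align the pointer up to the element size
 * (16 bytes for a two-uint64_t descriptor).  Illustration only. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct dscr { uint64_t dscr_a, dscr_b; };        /* 16 bytes */

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

int main(void)
{
        size_t maxdescr = 256;
        void *raw = malloc((maxdescr + 1) * sizeof(struct dscr));
        struct dscr *table;

        if (!raw)
                return 1;

        /* Round up; the spare element guarantees the aligned table still
         * holds maxdescr entries. */
        table = (struct dscr *)ALIGN_UP((uintptr_t)raw, sizeof(struct dscr));

        printf("raw=%p aligned=%p\n", raw, (void *)table);
        free(raw);                               /* free the original pointer */
        return 0;
}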
@@ -777,7 +757,7 @@ static void sbdma_initctx(sbmacdma_t *d,
        } else {
                d->sbdma_int_pktcnt = 1;
        }
-       
+
        if ( int_timeout ) {
                d->sbdma_int_timeout = int_timeout;
        } else {
@@ -789,13 +769,13 @@ static void sbdma_initctx(sbmacdma_t *d,
 
 /**********************************************************************
  *  SBDMA_CHANNEL_START(d)
- *  
+ *
  *  Initialize the hardware registers for a DMA channel.
- *  
- *  Input parameters: 
+ *
+ *  Input parameters:
  *        d - DMA channel to init (context must be previously init'd)
  *        rxtx - DMA_RX or DMA_TX depending on the channel direction
- *        
+ *
  *  Return value:
  *        nothing
  ********************************************************************* */
@@ -805,24 +785,21 @@ static void sbdma_channel_start(sbmacdma_t *d, int rxtx )
        /*
         * Turn on the DMA channel
         */
-       
+
 #ifdef CONFIG_SBMAC_COALESCE
-       SBMAC_WRITECSR(d->sbdma_config1,
-                      V_DMA_INT_TIMEOUT(d->sbdma_int_timeout) |
-                      0);
-       SBMAC_WRITECSR(d->sbdma_config0,
-                      M_DMA_EOP_INT_EN |
+       __raw_writeq(V_DMA_INT_TIMEOUT(d->sbdma_int_timeout) |
+                      0, d->sbdma_config1);
+       __raw_writeq(M_DMA_EOP_INT_EN |
                       V_DMA_RINGSZ(d->sbdma_maxdescr) |
                       V_DMA_INT_PKTCNT(d->sbdma_int_pktcnt) |
-                      0);
+                      0, d->sbdma_config0);
 #else
-       SBMAC_WRITECSR(d->sbdma_config1,0);
-       SBMAC_WRITECSR(d->sbdma_config0,
-                      V_DMA_RINGSZ(d->sbdma_maxdescr) |
-                      0);
+       __raw_writeq(0, d->sbdma_config1);
+       __raw_writeq(V_DMA_RINGSZ(d->sbdma_maxdescr) |
+                      0, d->sbdma_config0);
 #endif
 
-       SBMAC_WRITECSR(d->sbdma_dscrbase,d->sbdma_dscrtable_phys);
+       __raw_writeq(d->sbdma_dscrtable_phys, d->sbdma_dscrbase);
 
        /*
         * Initialize ring pointers
@@ -834,12 +811,12 @@ static void sbdma_channel_start(sbmacdma_t *d, int rxtx )
 
 /**********************************************************************
  *  SBDMA_CHANNEL_STOP(d)
- *  
+ *
  *  Shut down the hardware registers for a DMA channel.
- *  
- *  Input parameters: 
+ *
+ *  Input parameters:
  *        d - DMA channel to stop (context must be previously init'd)
- *        
+ *
  *  Return value:
  *        nothing
  ********************************************************************* */
@@ -849,44 +826,44 @@ static void sbdma_channel_stop(sbmacdma_t *d)
        /*
         * Turn off the DMA channel
         */
-       
-       SBMAC_WRITECSR(d->sbdma_config1,0);
-       
-       SBMAC_WRITECSR(d->sbdma_dscrbase,0);
-       
-       SBMAC_WRITECSR(d->sbdma_config0,0);
-       
+
+       __raw_writeq(0, d->sbdma_config1);
+
+       __raw_writeq(0, d->sbdma_dscrbase);
+
+       __raw_writeq(0, d->sbdma_config0);
+
        /*
         * Zero ring pointers
         */
-       
-       d->sbdma_addptr = 0;
-       d->sbdma_remptr = 0;
+
+       d->sbdma_addptr = NULL;
+       d->sbdma_remptr = NULL;
 }
 
 static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset)
 {
        unsigned long addr;
        unsigned long newaddr;
-       
+
        addr = (unsigned long) skb->data;
-       
+
        newaddr = (addr + power2 - 1) & ~(power2 - 1);
-       
+
        skb_reserve(skb,newaddr-addr+offset);
 }
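
sbdma_align_skb() reserves just enough headroom that skb->data lands on the requested power-of-two boundary plus a small offset, so a receive buffer can start on a cache line while the IP header behind the 14-byte Ethernet header stays naturally aligned. A standalone sketch of the same arithmetic, with plain integers standing in for skb->data and skb_reserve() and made-up addresses:

/* Standalone sketch of the arithmetic in sbdma_align_skb(): round the
 * buffer start up to a power-of-two boundary, then add a small offset
 * (ETHER_ALIGN is typically 2) so the IP header ends up aligned. */
#include <stdio.h>

static unsigned long align_offset(unsigned long addr, unsigned long power2,
                                  unsigned long offset)
{
        unsigned long newaddr = (addr + power2 - 1) & ~(power2 - 1);

        return newaddr - addr + offset;   /* bytes to reserve at the front */
}

int main(void)
{
        unsigned long data = 0x1000a;     /* arbitrary unaligned start */

        /* Align to a 32-byte cache line, then offset by 2: reserves 24. */
        printf("reserve %lu bytes\n", align_offset(data, 32, 2));
        return 0;
}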
 
 
 /**********************************************************************
  *  SBDMA_ADD_RCVBUFFER(d,sb)
- *  
+ *
  *  Add a buffer to the specified DMA channel.   For receive channels,
  *  this queues a buffer for inbound packets.
- *  
- *  Input parameters: 
+ *
+ *  Input parameters:
  *        d - DMA channel descriptor
  *        sb - sk_buff to add, or NULL if we should allocate one
- *        
+ *
  *  Return value:
  *        0 if the buffer was added successfully
  *        -ENOSPC if the ring is full, -ENOBUFS if no sk_buff could be allocated
@@ -899,24 +876,24 @@ static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *sb)
        sbdmadscr_t *nextdsc;
        struct sk_buff *sb_new = NULL;
        int pktsize = ENET_PACKET_SIZE;
-       
+
        /* get pointer to our current place in the ring */
-       
+
        dsc = d->sbdma_addptr;
        nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr);
-       
+
        /*
         * figure out if the ring is full - if the next descriptor
         * is the same as the one that we're going to remove from
         * the ring, the ring is full
         */
-       
+
        if (nextdsc == d->sbdma_remptr) {
                return -ENOSPC;
        }
 
-       /* 
-        * Allocate a sk_buff if we don't already have one.  
+       /*
+        * Allocate a sk_buff if we don't already have one.
         * If we do have an sk_buff, reset it so that it's empty.
         *
         * Note: sk_buffs don't seem to be guaranteed to have any sort
@@ -925,7 +902,7 @@ static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *sb)
         *
         *    1. the data does not start in the middle of a cache line.
         *    2. The data does not end in the middle of a cache line
-        *    3. The buffer can be aligned such that the IP addresses are 
+        *    3. The buffer can be aligned such that the IP addresses are
         *       naturally aligned.
         *
         *  Remember, the SOCs MAC writes whole cache lines at a time,
@@ -933,7 +910,7 @@ static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *sb)
         *  data portion starts in the middle of a cache line, the SOC
         *  DMA will trash the beginning (and ending) portions.
         */
-       
+
        if (sb == NULL) {
                sb_new = dev_alloc_skb(ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN);
                if (sb_new == NULL) {
@@ -949,23 +926,22 @@ static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *sb)
        }
        else {
                sb_new = sb;
-               /* 
+               /*
                 * nothing special to reinit buffer, it's already aligned
                 * and sb->data already points to a good place.
                 */
        }
-       
+
        /*
-        * fill in the descriptor 
+        * fill in the descriptor
         */
-       
+
 #ifdef CONFIG_SBMAC_COALESCE
        /*
         * Do not interrupt per DMA transfer.
         */
        dsc->dscr_a = virt_to_phys(sb_new->data) |
-               V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) |
-               0;
+               V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) | 0;
 #else
        dsc->dscr_a = virt_to_phys(sb_new->data) |
                V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) |
@@ -974,38 +950,38 @@ static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *sb)
 
        /* receiving: no options */
        dsc->dscr_b = 0;
-       
+
        /*
-        * fill in the context 
+        * fill in the context
         */
-       
+
        d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb_new;
-       
-       /* 
-        * point at next packet 
+
+       /*
+        * point at next packet
         */
-       
+
        d->sbdma_addptr = nextdsc;
-       
-       /* 
+
+       /*
         * Give the buffer to the DMA engine.
         */
-       
-       SBMAC_WRITECSR(d->sbdma_dscrcnt,1);
-       
+
+       __raw_writeq(1, d->sbdma_dscrcnt);
+
        return 0;                                       /* we did it */
 }
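
Both sbdma_add_rcvbuffer() above and sbdma_add_txbuffer() below declare the ring full when the descriptor after the add pointer would collide with the remove pointer, which deliberately leaves one descriptor unused so that "full" and "empty" stay distinguishable. A minimal index-based sketch of that invariant (the driver itself compares descriptor pointers rather than indices):

/* Standalone sketch of the ring-full test used by the add/remove pointer
 * scheme: the ring is full when the slot after 'add' would equal 'rem',
 * so one descriptor always stays unused. */
#include <stdio.h>

#define RING_SIZE 8

static int ring_full(unsigned add, unsigned rem)
{
        return ((add + 1) % RING_SIZE) == rem;
}

static int ring_empty(unsigned add, unsigned rem)
{
        return add == rem;
}

int main(void)
{
        /* With rem == 3, add == 2 means 7 slots are in use: full. */
        printf("full=%d empty=%d\n", ring_full(2, 3), ring_empty(3, 3));
        return 0;
}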
 
 /**********************************************************************
  *  SBDMA_ADD_TXBUFFER(d,sb)
- *  
+ *
  *  Add a transmit buffer to the specified DMA channel, causing a
  *  transmit to start.
- *  
- *  Input parameters: 
+ *
+ *  Input parameters:
  *        d - DMA channel descriptor
  *        sb - sk_buff to add
- *        
+ *
  *  Return value:
  *        0 transmit queued successfully
  *        otherwise error code
@@ -1019,70 +995,70 @@ static int sbdma_add_txbuffer(sbmacdma_t *d,struct sk_buff *sb)
        uint64_t phys;
        uint64_t ncb;
        int length;
-       
+
        /* get pointer to our current place in the ring */
-       
+
        dsc = d->sbdma_addptr;
        nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr);
-       
+
        /*
         * figure out if the ring is full - if the next descriptor
         * is the same as the one that we're going to remove from
         * the ring, the ring is full
         */
-       
+
        if (nextdsc == d->sbdma_remptr) {
                return -ENOSPC;
        }
-       
+
        /*
         * Under Linux, it's not necessary to copy/coalesce buffers
         * like it is on NetBSD.  We think they're all contiguous,
         * but that may not be true for GBE.
         */
-       
+
        length = sb->len;
-       
+
        /*
         * fill in the descriptor.  Note that the number of cache
         * blocks in the descriptor is the number of blocks
         * *spanned*, so we need to add in the offset (if any)
         * while doing the calculation.
         */
-       
+
        phys = virt_to_phys(sb->data);
        ncb = NUMCACHEBLKS(length+(phys & (SMP_CACHE_BYTES - 1)));
 
-       dsc->dscr_a = phys | 
+       dsc->dscr_a = phys |
                V_DMA_DSCRA_A_SIZE(ncb) |
 #ifndef CONFIG_SBMAC_COALESCE
                M_DMA_DSCRA_INTERRUPT |
 #endif
                M_DMA_ETHTX_SOP;
-       
+
        /* transmitting: set outbound options and length */
 
        dsc->dscr_b = V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_APPENDCRC_APPENDPAD) |
                V_DMA_DSCRB_PKT_SIZE(length);
-       
+
        /*
-        * fill in the context 
+        * fill in the context
         */
-       
+
        d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb;
-       
-       /* 
-        * point at next packet 
+
+       /*
+        * point at next packet
         */
-       
+
        d->sbdma_addptr = nextdsc;
-       
-       /* 
+
+       /*
         * Give the buffer to the DMA engine.
         */
-       
-       SBMAC_WRITECSR(d->sbdma_dscrcnt,1);
-       
+
+       __raw_writeq(1, d->sbdma_dscrcnt);
+
        return 0;                                       /* we did it */
 }
 
@@ -1091,12 +1067,12 @@ static int sbdma_add_txbuffer(sbmacdma_t *d,struct sk_buff *sb)
 
 /**********************************************************************
  *  SBDMA_EMPTYRING(d)
- *  
+ *
  *  Free all allocated sk_buffs on the specified DMA channel.
- *  
- *  Input parameters: 
+ *
+ *  Input parameters:
  *        d  - DMA channel
- *        
+ *
  *  Return value:
  *        nothing
  ********************************************************************* */
@@ -1105,7 +1081,7 @@ static void sbdma_emptyring(sbmacdma_t *d)
 {
        int idx;
        struct sk_buff *sb;
-       
+
        for (idx = 0; idx < d->sbdma_maxdescr; idx++) {
                sb = d->sbdma_ctxtable[idx];
                if (sb) {
@@ -1118,13 +1094,13 @@ static void sbdma_emptyring(sbmacdma_t *d)
 
 /**********************************************************************
  *  SBDMA_FILLRING(d)
- *  
+ *
  *  Fill the specified DMA channel (must be receive channel)
  *  with sk_buffs
- *  
- *  Input parameters: 
+ *
+ *  Input parameters:
  *        d - DMA channel
- *        
+ *
  *  Return value:
  *        nothing
  ********************************************************************* */
@@ -1132,7 +1108,7 @@ static void sbdma_emptyring(sbmacdma_t *d)
 static void sbdma_fillring(sbmacdma_t *d)
 {
        int idx;
-       
+
        for (idx = 0; idx < SBMAC_MAX_RXDESCR-1; idx++) {
                if (sbdma_add_rcvbuffer(d,NULL) != 0)
                        break;
@@ -1142,16 +1118,16 @@ static void sbdma_fillring(sbmacdma_t *d)
 
 /**********************************************************************
  *  SBDMA_RX_PROCESS(sc,d)
- *  
- *  Process "completed" receive buffers on the specified DMA channel.  
+ *
+ *  Process "completed" receive buffers on the specified DMA channel.
  *  Note that this isn't really ideal for priority channels, since
- *  it processes all of the packets on a given channel before 
- *  returning. 
+ *  it processes all of the packets on a given channel before
+ *  returning.
  *
- *  Input parameters: 
+ *  Input parameters:
  *        sc - softc structure
  *        d - DMA channel context
- *        
+ *
  *  Return value:
  *        nothing
  ********************************************************************* */
@@ -1163,56 +1139,56 @@ static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d)
        sbdmadscr_t *dsc;
        struct sk_buff *sb;
        int len;
-       
+
        for (;;) {
-               /* 
+               /*
                 * figure out where we are (as an index) and where
                 * the hardware is (also as an index)
                 *
-                * This could be done faster if (for example) the 
+                * This could be done faster if (for example) the
                 * descriptor table was page-aligned and contiguous in
                 * both virtual and physical memory -- you could then
                 * just compare the low-order bits of the virtual address
                 * (sbdma_remptr) and the physical address (sbdma_curdscr CSR)
                 */
-               
+
                curidx = d->sbdma_remptr - d->sbdma_dscrtable;
-               hwidx = (int) (((SBMAC_READCSR(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
+               hwidx = (int) (((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
                                d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t));
-               
+
                /*
                 * If they're the same, that means we've processed all
                 * of the descriptors up to (but not including) the one that
                 * the hardware is working on right now.
                 */
-               
+
                if (curidx == hwidx)
                        break;
-               
+
                /*
                 * Otherwise, get the packet's sk_buff ptr back
                 */
-               
+
                dsc = &(d->sbdma_dscrtable[curidx]);
                sb = d->sbdma_ctxtable[curidx];
                d->sbdma_ctxtable[curidx] = NULL;
-               
+
                len = (int)G_DMA_DSCRB_PKT_SIZE(dsc->dscr_b) - 4;
-               
+
                /*
                 * Check packet status.  If good, process it.
                 * If not, silently drop it and put it back on the
                 * receive ring.
                 */
-               
+
                if (!(dsc->dscr_a & M_DMA_ETHRX_BAD)) {
-                       
+
                        /*
                         * Add a new buffer to replace the old one.  If we fail
                         * to allocate a buffer, we're going to drop this
                         * packet and put it right back on the receive ring.
                         */
-                       
+
                        if (sbdma_add_rcvbuffer(d,NULL) == -ENOBUFS) {
                                sc->sbm_stats.rx_dropped++;
                                sbdma_add_rcvbuffer(d,sb); /* re-add old buffer */
@@ -1221,7 +1197,7 @@ static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d)
                                 * Set length into the packet
                                 */
                                skb_put(sb,len);
-                               
+
                                /*
                                 * Buffer has been replaced on the
                                 * receive ring.  Pass the buffer to
@@ -1240,7 +1216,7 @@ static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d)
                                                sb->ip_summed = CHECKSUM_NONE;
                                        }
                                }
-                               
+
                                netif_rx(sb);
                        }
                } else {
@@ -1251,14 +1227,14 @@ static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d)
                        sc->sbm_stats.rx_errors++;
                        sbdma_add_rcvbuffer(d,sb);
                }
-               
-               
-               /* 
+
+
+               /*
                 * .. and advance to the next buffer.
                 */
-               
+
                d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
-               
+
        }
 }
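
The processing loop above converts the hardware's notion of progress, the physical address reported in the current-descriptor CSR, into the same kind of index it gets from subtracting descriptor pointers: subtract the table's physical base and divide by the descriptor size. A standalone sketch of that conversion with made-up addresses:

/* Standalone sketch of the index recovery in sbdma_rx_process() /
 * sbdma_tx_process().  The base and CSR values below are hypothetical. */
#include <stdint.h>
#include <stdio.h>

struct dscr { uint64_t dscr_a, dscr_b; };        /* 16 bytes */

int main(void)
{
        uint64_t table_phys = 0x0f000000;        /* hypothetical base */
        uint64_t curdscr    = 0x0f000130;        /* hypothetical CSR value */

        unsigned hwidx = (unsigned)((curdscr - table_phys) / sizeof(struct dscr));

        /* Descriptors 0 .. hwidx-1 are complete and safe to reclaim. */
        printf("hwidx = %u\n", hwidx);           /* 0x130 / 16 = 19 */
        return 0;
}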
 
@@ -1266,17 +1242,17 @@ static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d)
 
 /**********************************************************************
  *  SBDMA_TX_PROCESS(sc,d)
- *  
- *  Process "completed" transmit buffers on the specified DMA channel.  
+ *
+ *  Process "completed" transmit buffers on the specified DMA channel.
  *  This is normally called within the interrupt service routine.
  *  Note that this isn't really ideal for priority channels, since
- *  it processes all of the packets on a given channel before 
- *  returning. 
+ *  it processes all of the packets on a given channel before
+ *  returning.
  *
- *  Input parameters: 
+ *  Input parameters:
  *      sc - softc structure
  *        d - DMA channel context
- *        
+ *
  *  Return value:
  *        nothing
  ********************************************************************* */
@@ -1290,21 +1266,21 @@ static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d)
        unsigned long flags;
 
        spin_lock_irqsave(&(sc->sbm_lock), flags);
-       
+
        for (;;) {
-               /* 
+               /*
                 * figure out where we are (as an index) and where
                 * the hardware is (also as an index)
                 *
-                * This could be done faster if (for example) the 
+                * This could be done faster if (for example) the
                 * descriptor table was page-aligned and contiguous in
                 * both virtual and physical memory -- you could then
                 * just compare the low-order bits of the virtual address
                 * (sbdma_remptr) and the physical address (sbdma_curdscr CSR)
                 */
-               
+
                curidx = d->sbdma_remptr - d->sbdma_dscrtable;
-               hwidx = (int) (((SBMAC_READCSR(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
+               hwidx = (int) (((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
                                d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t));
 
                /*
@@ -1312,75 +1288,75 @@ static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d)
                 * of the descriptors up to (but not including) the one that
                 * the hardware is working on right now.
                 */
-               
+
                if (curidx == hwidx)
                        break;
-               
+
                /*
                 * Otherwise, get the packet's sk_buff ptr back
                 */
-               
+
                dsc = &(d->sbdma_dscrtable[curidx]);
                sb = d->sbdma_ctxtable[curidx];
                d->sbdma_ctxtable[curidx] = NULL;
-               
+
                /*
                 * Stats
                 */
-               
+
                sc->sbm_stats.tx_bytes += sb->len;
                sc->sbm_stats.tx_packets++;
-               
+
                /*
                 * for transmits, we just free buffers.
                 */
-               
+
                dev_kfree_skb_irq(sb);
-               
-               /* 
+
+               /*
                 * .. and advance to the next buffer.
                 */
 
                d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
-               
+
        }
-       
+
        /*
         * Decide if we should wake up the protocol or not.
         * Other drivers seem to do this when we reach a low
         * watermark on the transmit queue.
         */
-       
+
        netif_wake_queue(d->sbdma_eth->sbm_dev);
-       
+
        spin_unlock_irqrestore(&(sc->sbm_lock), flags);
-       
+
 }
 
 
 
 /**********************************************************************
  *  SBMAC_INITCTX(s)
- *  
+ *
  *  Initialize an Ethernet context structure - this is called
  *  once per MAC on the 1250.  Memory is allocated here, so don't
  *  call it again from inside the ioctl routines that bring the
  *  interface up/down
- *  
- *  Input parameters: 
+ *
+ *  Input parameters:
  *        s - sbmac context structure
- *        
+ *
  *  Return value:
  *        0
  ********************************************************************* */
 
 static int sbmac_initctx(struct sbmac_softc *s)
 {
-       
-       /* 
-        * figure out the addresses of some ports 
+
+       /*
+        * figure out the addresses of some ports
         */
-       
+
        s->sbm_macenable = s->sbm_base + R_MAC_ENABLE;
        s->sbm_maccfg    = s->sbm_base + R_MAC_CFG;
        s->sbm_fifocfg   = s->sbm_base + R_MAC_THRSH_CFG;
@@ -1397,29 +1373,29 @@ static int sbmac_initctx(struct sbmac_softc *s)
        s->sbm_phy_oldanlpar = 0;
        s->sbm_phy_oldk1stsr = 0;
        s->sbm_phy_oldlinkstat = 0;
-       
+
        /*
         * Initialize the DMA channels.  Right now, only one per MAC is used
         * Note: Only do this _once_, as it allocates memory from the kernel!
         */
-       
+
        sbdma_initctx(&(s->sbm_txdma),s,0,DMA_TX,SBMAC_MAX_TXDESCR);
        sbdma_initctx(&(s->sbm_rxdma),s,0,DMA_RX,SBMAC_MAX_RXDESCR);
-       
+
        /*
         * initial state is OFF
         */
-       
+
        s->sbm_state = sbmac_state_off;
-       
+
        /*
         * Initial speed is (XXX TEMP) 10MBit/s HDX no FC
         */
-       
+
        s->sbm_speed = sbmac_speed_10;
        s->sbm_duplex = sbmac_duplex_half;
        s->sbm_fc = sbmac_fc_disabled;
-       
+
        return 0;
 }
 
@@ -1430,7 +1406,7 @@ static void sbdma_uninitctx(struct sbmacdma_s *d)
                kfree(d->sbdma_dscrtable);
                d->sbdma_dscrtable = NULL;
        }
-       
+
        if (d->sbdma_ctxtable) {
                kfree(d->sbdma_ctxtable);
                d->sbdma_ctxtable = NULL;
@@ -1447,12 +1423,12 @@ static void sbmac_uninitctx(struct sbmac_softc *sc)
 
 /**********************************************************************
  *  SBMAC_CHANNEL_START(s)
- *  
+ *
  *  Start packet processing on this MAC.
- *  
- *  Input parameters: 
+ *
+ *  Input parameters:
  *        s - sbmac structure
- *        
+ *
  *  Return value:
  *        nothing
  ********************************************************************* */
@@ -1460,49 +1436,49 @@ static void sbmac_uninitctx(struct sbmac_softc *sc)
 static void sbmac_channel_start(struct sbmac_softc *s)
 {
        uint64_t reg;
-       sbmac_port_t port;
+       volatile void __iomem *port;
        uint64_t cfg,fifo,framecfg;
        int idx, th_value;
-       
+
        /*
         * Don't do this if running
         */
 
        if (s->sbm_state == sbmac_state_on)
                return;
-       
+
        /*
         * Bring the controller out of reset, but leave it off.
         */
-       
-       SBMAC_WRITECSR(s->sbm_macenable,0);
-       
+
+       __raw_writeq(0, s->sbm_macenable);
+
        /*
         * Ignore all received packets
         */
-       
-       SBMAC_WRITECSR(s->sbm_rxfilter,0);
-       
-       /* 
+
+       __raw_writeq(0, s->sbm_rxfilter);
+
+       /*
         * Calculate values for various control registers.
         */
-       
+
        cfg = M_MAC_RETRY_EN |
-               M_MAC_TX_HOLD_SOP_EN | 
+               M_MAC_TX_HOLD_SOP_EN |
                V_MAC_TX_PAUSE_CNT_16K |
                M_MAC_AP_STAT_EN |
                M_MAC_FAST_SYNC |
                M_MAC_SS_EN |
                0;
-       
-       /* 
+
+       /*
         * Be sure that RD_THRSH+WR_THRSH <= 32 for pass1 pars
         * and make sure that RD_THRSH + WR_THRSH <=128 for pass2 and above
         * Use a larger RD_THRSH for gigabit
         */
-       if (periph_rev >= 2) 
+       if (periph_rev >= 2)
                th_value = 64;
-       else 
+       else
                th_value = 28;
 
        fifo = V_MAC_TX_WR_THRSH(4) |   /* Must be '4' or '8' */
@@ -1520,51 +1496,51 @@ static void sbmac_channel_start(struct sbmac_softc *s)
                V_MAC_BACKOFF_SEL(1);
 
        /*
-        * Clear out the hash address map 
+        * Clear out the hash address map
         */
-       
+
        port = s->sbm_base + R_MAC_HASH_BASE;
        for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
-               SBMAC_WRITECSR(port,0);
+               __raw_writeq(0, port);
                port += sizeof(uint64_t);
        }
-       
+
        /*
         * Clear out the exact-match table
         */
-       
+
        port = s->sbm_base + R_MAC_ADDR_BASE;
        for (idx = 0; idx < MAC_ADDR_COUNT; idx++) {
-               SBMAC_WRITECSR(port,0);
+               __raw_writeq(0, port);
                port += sizeof(uint64_t);
        }
-       
+
        /*
         * Clear out the DMA Channel mapping table registers
         */
-       
+
        port = s->sbm_base + R_MAC_CHUP0_BASE;
        for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
-               SBMAC_WRITECSR(port,0);
+               __raw_writeq(0, port);
                port += sizeof(uint64_t);
        }
 
 
        port = s->sbm_base + R_MAC_CHLO0_BASE;
        for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
-               SBMAC_WRITECSR(port,0);
+               __raw_writeq(0, port);
                port += sizeof(uint64_t);
        }
-       
+
        /*
         * Program the hardware address.  It goes into the hardware-address
         * register as well as the first filter register.
         */
-       
+
        reg = sbmac_addr2reg(s->sbm_hwaddr);
-       
+
        port = s->sbm_base + R_MAC_ADDR_BASE;
-       SBMAC_WRITECSR(port,reg);
+       __raw_writeq(reg, port);
        port = s->sbm_base + R_MAC_ETHERNET_ADDR;
 
 #ifdef CONFIG_SB1_PASS_1_WORKAROUNDS
@@ -1573,108 +1549,105 @@ static void sbmac_channel_start(struct sbmac_softc *s)
         * destination address in the R_MAC_ETHERNET_ADDR register.
         * Set the value to zero.
         */
-       SBMAC_WRITECSR(port,0);
+       __raw_writeq(0, port);
 #else
-       SBMAC_WRITECSR(port,reg);
+       __raw_writeq(reg, port);
 #endif
-       
+
        /*
         * Set the receive filter for no packets, and write values
         * to the various config registers
         */
-       
-       SBMAC_WRITECSR(s->sbm_rxfilter,0);
-       SBMAC_WRITECSR(s->sbm_imr,0);
-       SBMAC_WRITECSR(s->sbm_framecfg,framecfg);
-       SBMAC_WRITECSR(s->sbm_fifocfg,fifo);
-       SBMAC_WRITECSR(s->sbm_maccfg,cfg);
-       
+
+       __raw_writeq(0, s->sbm_rxfilter);
+       __raw_writeq(0, s->sbm_imr);
+       __raw_writeq(framecfg, s->sbm_framecfg);
+       __raw_writeq(fifo, s->sbm_fifocfg);
+       __raw_writeq(cfg, s->sbm_maccfg);
+
        /*
         * Initialize DMA channels (rings should be ok now)
         */
-       
+
        sbdma_channel_start(&(s->sbm_rxdma), DMA_RX);
        sbdma_channel_start(&(s->sbm_txdma), DMA_TX);
-       
+
        /*
         * Configure the speed, duplex, and flow control
         */
 
        sbmac_set_speed(s,s->sbm_speed);
        sbmac_set_duplex(s,s->sbm_duplex,s->sbm_fc);
-       
+
        /*
         * Fill the receive ring
         */
-       
+
        sbdma_fillring(&(s->sbm_rxdma));
-       
-       /* 
+
+       /*
         * Turn on the rest of the bits in the enable register
-        */      
-       
-       SBMAC_WRITECSR(s->sbm_macenable,
-                      M_MAC_RXDMA_EN0 |
+        */
+
+       __raw_writeq(M_MAC_RXDMA_EN0 |
                       M_MAC_TXDMA_EN0 |
                       M_MAC_RX_ENABLE |
-                      M_MAC_TX_ENABLE);
-       
-       
+                      M_MAC_TX_ENABLE, s->sbm_macenable);
+
+
 
 
 #ifdef CONFIG_SBMAC_COALESCE
        /*
         * Accept any TX interrupt and EOP count/timer RX interrupts on ch 0
         */
-       SBMAC_WRITECSR(s->sbm_imr,
-                      ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
-                      ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0));
+       __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
+                      ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0), s->sbm_imr);
 #else
        /*
         * Accept any kind of interrupt on TX and RX DMA channel 0
         */
-       SBMAC_WRITECSR(s->sbm_imr,
-                      (M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
-                      (M_MAC_INT_CHANNEL << S_MAC_RX_CH0));
+       __raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
+                      (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), s->sbm_imr);
 #endif
-       
-       /* 
-        * Enable receiving unicasts and broadcasts 
+
+       /*
+        * Enable receiving unicasts and broadcasts
         */
-       
-       SBMAC_WRITECSR(s->sbm_rxfilter,M_MAC_UCAST_EN | M_MAC_BCAST_EN);
-       
+
+       __raw_writeq(M_MAC_UCAST_EN | M_MAC_BCAST_EN, s->sbm_rxfilter);
+
        /*
-        * we're running now. 
+        * we're running now.
         */
-       
+
        s->sbm_state = sbmac_state_on;
-       
-       /* 
-        * Program multicast addresses 
+
+       /*
+        * Program multicast addresses
         */
-       
+
        sbmac_setmulti(s);
-       
-       /* 
-        * If channel was in promiscuous mode before, turn that on 
+
+       /*
+        * If channel was in promiscuous mode before, turn that on
         */
-       
+
        if (s->sbm_devflags & IFF_PROMISC) {
                sbmac_promiscuous_mode(s,1);
        }
-       
+
 }
 
 
 /**********************************************************************
  *  SBMAC_CHANNEL_STOP(s)
- *  
+ *
  *  Stop packet processing on this MAC.
- *  
- *  Input parameters: 
+ *
+ *  Input parameters:
  *        s - sbmac structure
- *        
+ *
  *  Return value:
  *        nothing
  ********************************************************************* */
@@ -1682,49 +1655,49 @@ static void sbmac_channel_start(struct sbmac_softc *s)
 static void sbmac_channel_stop(struct sbmac_softc *s)
 {
        /* don't do this if already stopped */
-       
+
        if (s->sbm_state == sbmac_state_off)
                return;
-       
+
        /* don't accept any packets, disable all interrupts */
-       
-       SBMAC_WRITECSR(s->sbm_rxfilter,0);
-       SBMAC_WRITECSR(s->sbm_imr,0);
-       
+
+       __raw_writeq(0, s->sbm_rxfilter);
+       __raw_writeq(0, s->sbm_imr);
+
        /* Turn off ticker */
-       
+
        /* XXX */
-       
+
        /* turn off receiver and transmitter */
-       
-       SBMAC_WRITECSR(s->sbm_macenable,0);
-       
+
+       __raw_writeq(0, s->sbm_macenable);
+
        /* We're stopped now. */
-       
+
        s->sbm_state = sbmac_state_off;
-       
+
        /*
         * Stop DMA channels (rings should be ok now)
         */
-       
+
        sbdma_channel_stop(&(s->sbm_rxdma));
        sbdma_channel_stop(&(s->sbm_txdma));
-       
+
        /* Empty the receive and transmit rings */
-       
+
        sbdma_emptyring(&(s->sbm_rxdma));
        sbdma_emptyring(&(s->sbm_txdma));
-       
+
 }
 
 /**********************************************************************
  *  SBMAC_SET_CHANNEL_STATE(state)
- *  
+ *
  *  Set the channel's state ON or OFF
- *  
- *  Input parameters: 
+ *
+ *  Input parameters:
  *        state - new state
- *        
+ *
  *  Return value:
  *        old state
  ********************************************************************* */
@@ -1732,43 +1705,43 @@ static sbmac_state_t sbmac_set_channel_state(struct sbmac_softc *sc,
                                             sbmac_state_t state)
 {
        sbmac_state_t oldstate = sc->sbm_state;
-       
+
        /*
         * If same as previous state, return
         */
-       
+
        if (state == oldstate) {
                return oldstate;
        }
-       
+
        /*
-        * If new state is ON, turn channel on 
+        * If new state is ON, turn channel on
         */
-       
+
        if (state == sbmac_state_on) {
                sbmac_channel_start(sc);
        }
        else {
                sbmac_channel_stop(sc);
        }
-       
+
        /*
         * Return previous state
         */
-       
+
        return oldstate;
 }
 
 
 /**********************************************************************
  *  SBMAC_PROMISCUOUS_MODE(sc,onoff)
- *  
+ *
  *  Turn on or off promiscuous mode
- *  
- *  Input parameters: 
+ *
+ *  Input parameters:
  *        sc - softc
  *      onoff - 1 to turn on, 0 to turn off
- *        
+ *
  *  Return value:
  *        nothing
  ********************************************************************* */
@@ -1776,30 +1749,30 @@ static sbmac_state_t sbmac_set_channel_state(struct sbmac_softc *sc,
 static void sbmac_promiscuous_mode(struct sbmac_softc *sc,int onoff)
 {
        uint64_t reg;
-       
+
        if (sc->sbm_state != sbmac_state_on)
                return;
-       
+
        if (onoff) {
-               reg = SBMAC_READCSR(sc->sbm_rxfilter);
+               reg = __raw_readq(sc->sbm_rxfilter);
                reg |= M_MAC_ALLPKT_EN;
-               SBMAC_WRITECSR(sc->sbm_rxfilter,reg);
-       }       
+               __raw_writeq(reg, sc->sbm_rxfilter);
+       }
        else {
-               reg = SBMAC_READCSR(sc->sbm_rxfilter);
+               reg = __raw_readq(sc->sbm_rxfilter);
                reg &= ~M_MAC_ALLPKT_EN;
-               SBMAC_WRITECSR(sc->sbm_rxfilter,reg);
+               __raw_writeq(reg, sc->sbm_rxfilter);
        }
 }
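
Promiscuous mode is toggled with a plain read-modify-write of the receive-filter CSR: read the register, set or clear M_MAC_ALLPKT_EN, write it back. A standalone sketch of the pattern, with an ordinary variable standing in for the memory-mapped register and an assumed bit position:

/* Standalone sketch of the read-modify-write used above.  The bit
 * position of M_MAC_ALLPKT_EN is assumed for illustration only. */
#include <stdint.h>
#include <stdio.h>

#define M_MAC_ALLPKT_EN (1ULL << 11)    /* assumed bit, not the real value */

static uint64_t fake_csr = 0x5;          /* stands in for sc->sbm_rxfilter */

int main(void)
{
        uint64_t reg;

        reg = fake_csr;                  /* read  */
        reg |= M_MAC_ALLPKT_EN;          /* set the accept-all bit */
        fake_csr = reg;                  /* write */

        reg = fake_csr;
        reg &= ~M_MAC_ALLPKT_EN;         /* clear it again */
        fake_csr = reg;

        printf("0x%llx\n", (unsigned long long)fake_csr);   /* back to 0x5 */
        return 0;
}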
 
 /**********************************************************************
  *  SBMAC_SETIPHDR_OFFSET(sc,onoff)
- *  
+ *
  *  Set the iphdr offset to 15, assuming Ethernet encapsulation
- *  
- *  Input parameters: 
+ *
+ *  Input parameters:
  *        sc - softc
- *        
+ *
  *  Return value:
  *        nothing
  ********************************************************************* */
@@ -1807,12 +1780,12 @@ static void sbmac_promiscuous_mode(struct sbmac_softc *sc,int onoff)
 static void sbmac_set_iphdr_offset(struct sbmac_softc *sc)
 {
        uint64_t reg;
-       
+
        /* Hard code the offset to 15 for now */
-       reg = SBMAC_READCSR(sc->sbm_rxfilter);
+       reg = __raw_readq(sc->sbm_rxfilter);
        reg &= ~M_MAC_IPHDR_OFFSET | V_MAC_IPHDR_OFFSET(15);
-       SBMAC_WRITECSR(sc->sbm_rxfilter,reg);
-       
+       __raw_writeq(reg, sc->sbm_rxfilter);
+
        /* read system identification to determine revision */
        if (periph_rev >= 2) {
                sc->rx_hw_checksum = ENABLE;
@@ -1824,13 +1797,13 @@ static void sbmac_set_iphdr_offset(struct sbmac_softc *sc)
 
 /**********************************************************************
  *  SBMAC_ADDR2REG(ptr)
- *  
+ *
  *  Convert six bytes into the 64-bit register value that
  *  we typically write into the SBMAC's address/mcast registers
- *  
- *  Input parameters: 
+ *
+ *  Input parameters:
  *        ptr - pointer to 6 bytes
- *        
+ *
  *  Return value:
  *        register value
  ********************************************************************* */
@@ -1838,35 +1811,35 @@ static void sbmac_set_iphdr_offset(struct sbmac_softc *sc)
 static uint64_t sbmac_addr2reg(unsigned char *ptr)
 {
        uint64_t reg = 0;
-       
+
        ptr += 6;
-       
-       reg |= (uint64_t) *(--ptr); 
+
+       reg |= (uint64_t) *(--ptr);
        reg <<= 8;
-       reg |= (uint64_t) *(--ptr); 
+       reg |= (uint64_t) *(--ptr);
        reg <<= 8;
-       reg |= (uint64_t) *(--ptr); 
+       reg |= (uint64_t) *(--ptr);
        reg <<= 8;
-       reg |= (uint64_t) *(--ptr); 
+       reg |= (uint64_t) *(--ptr);
        reg <<= 8;
-       reg |= (uint64_t) *(--ptr); 
+       reg |= (uint64_t) *(--ptr);
        reg <<= 8;
-       reg |= (uint64_t) *(--ptr); 
-       
+       reg |= (uint64_t) *(--ptr);
+
        return reg;
 }
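
sbmac_addr2reg() walks the six address bytes backwards while shifting left, which is equivalent to placing the first byte of the MAC address in bits 7:0, the second in bits 15:8, and so on. A standalone sketch with a worked example (the address itself is made up):

/* Standalone sketch: the loop in sbmac_addr2reg() is equivalent to
 * packing byte 0 into bits 7:0, byte 1 into bits 15:8, etc. */
#include <stdint.h>
#include <stdio.h>

static uint64_t addr2reg(const unsigned char *p)
{
        uint64_t reg = 0;
        int i;

        for (i = 5; i >= 0; i--)
                reg = (reg << 8) | p[i];
        return reg;
}

int main(void)
{
        unsigned char mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

        /* Prints 0x554433221100: the first address byte lands in the low bits. */
        printf("0x%012llx\n", (unsigned long long)addr2reg(mac));
        return 0;
}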
 
 
 /**********************************************************************
  *  SBMAC_SET_SPEED(s,speed)
- *  
+ *
  *  Configure LAN speed for the specified MAC.
  *  Warning: must be called when MAC is off!
- *  
- *  Input parameters: 
+ *
+ *  Input parameters:
  *        s - sbmac structure
  *        speed - speed to set MAC to (see sbmac_speed_t enum)
- *        
+ *
  *  Return value:
  *        1 if successful
  *      0 indicates invalid parameters
@@ -1880,31 +1853,31 @@ static int sbmac_set_speed(struct sbmac_softc *s,sbmac_speed_t speed)
        /*
         * Save new current values
         */
-       
+
        s->sbm_speed = speed;
-       
+
        if (s->sbm_state == sbmac_state_on)
                return 0;       /* save for next restart */
 
        /*
-        * Read current register values 
+        * Read current register values
         */
-       
-       cfg = SBMAC_READCSR(s->sbm_maccfg);
-       framecfg = SBMAC_READCSR(s->sbm_framecfg);
-       
+
+       cfg = __raw_readq(s->sbm_maccfg);
+       framecfg = __raw_readq(s->sbm_framecfg);
+
        /*
         * Mask out the stuff we want to change
         */
-       
+
        cfg &= ~(M_MAC_BURST_EN | M_MAC_SPEED_SEL);
        framecfg &= ~(M_MAC_IFG_RX | M_MAC_IFG_TX | M_MAC_IFG_THRSH |
                      M_MAC_SLOT_SIZE);
-       
+
        /*
         * Now add in the new bits
         */
-       
+
        switch (speed) {
        case sbmac_speed_10:
                framecfg |= V_MAC_IFG_RX_10 |
@@ -1913,7 +1886,7 @@ static int sbmac_set_speed(struct sbmac_softc *s,sbmac_speed_t speed)
                        V_MAC_SLOT_SIZE_10;
                cfg |= V_MAC_SPEED_SEL_10MBPS;
                break;
-               
+
        case sbmac_speed_100:
                framecfg |= V_MAC_IFG_RX_100 |
                        V_MAC_IFG_TX_100 |
@@ -1921,7 +1894,7 @@ static int sbmac_set_speed(struct sbmac_softc *s,sbmac_speed_t speed)
                        V_MAC_SLOT_SIZE_100;
                cfg |= V_MAC_SPEED_SEL_100MBPS ;
                break;
-               
+
        case sbmac_speed_1000:
                framecfg |= V_MAC_IFG_RX_1000 |
                        V_MAC_IFG_TX_1000 |
@@ -1929,34 +1902,34 @@ static int sbmac_set_speed(struct sbmac_softc *s,sbmac_speed_t speed)
                        V_MAC_SLOT_SIZE_1000;
                cfg |= V_MAC_SPEED_SEL_1000MBPS | M_MAC_BURST_EN;
                break;
-               
+
        case sbmac_speed_auto:          /* XXX not implemented */
                /* fall through */
        default:
                return 0;
        }
-       
+
        /*
-        * Send the bits back to the hardware 
+        * Send the bits back to the hardware
         */
-       
-       SBMAC_WRITECSR(s->sbm_framecfg,framecfg);
-       SBMAC_WRITECSR(s->sbm_maccfg,cfg);
-       
+
+       __raw_writeq(framecfg, s->sbm_framecfg);
+       __raw_writeq(cfg, s->sbm_maccfg);
+
        return 1;
 }
 
 /**********************************************************************
  *  SBMAC_SET_DUPLEX(s,duplex,fc)
- *  
+ *
  *  Set Ethernet duplex and flow control options for this MAC
  *  Warning: must be called when MAC is off!
- *  
- *  Input parameters: 
+ *
+ *  Input parameters:
  *        s - sbmac structure
  *        duplex - duplex setting (see sbmac_duplex_t)
  *        fc - flow control setting (see sbmac_fc_t)
- *        
+ *
  *  Return value:
  *        1 if ok
  *        0 if an invalid parameter combination was specified
@@ -1965,67 +1938,67 @@ static int sbmac_set_speed(struct sbmac_softc *s,sbmac_speed_t speed)
 static int sbmac_set_duplex(struct sbmac_softc *s,sbmac_duplex_t duplex,sbmac_fc_t fc)
 {
        uint64_t cfg;
-       
+
        /*
         * Save new current values
         */
-       
+
        s->sbm_duplex = duplex;
        s->sbm_fc = fc;
-       
+
        if (s->sbm_state == sbmac_state_on)
                return 0;       /* save for next restart */
-       
+
        /*
-        * Read current register values 
+        * Read current register values
         */
-       
-       cfg = SBMAC_READCSR(s->sbm_maccfg);
-       
+
+       cfg = __raw_readq(s->sbm_maccfg);
+
        /*
         * Mask off the stuff we're about to change
         */
-       
+
        cfg &= ~(M_MAC_FC_SEL | M_MAC_FC_CMD | M_MAC_HDX_EN);
-       
-       
+
+
        switch (duplex) {
        case sbmac_duplex_half:
                switch (fc) {
                case sbmac_fc_disabled:
                        cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_DISABLED;
                        break;
-                       
+
                case sbmac_fc_collision:
                        cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENABLED;
                        break;
-                       
+
                case sbmac_fc_carrier:
                        cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENAB_FALSECARR;
                        break;
-                       
+
                case sbmac_fc_auto:             /* XXX not implemented */
-                       /* fall through */                                         
+                       /* fall through */
                case sbmac_fc_frame:            /* not valid in half duplex */
                default:                        /* invalid selection */
                        return 0;
                }
                break;
-               
+
        case sbmac_duplex_full:
                switch (fc) {
                case sbmac_fc_disabled:
                        cfg |= V_MAC_FC_CMD_DISABLED;
                        break;
-                       
+
                case sbmac_fc_frame:
                        cfg |= V_MAC_FC_CMD_ENABLED;
                        break;
-                       
+
                case sbmac_fc_collision:        /* not valid in full duplex */
                case sbmac_fc_carrier:          /* not valid in full duplex */
                case sbmac_fc_auto:             /* XXX not implemented */
-                       /* fall through */                                         
+                       /* fall through */
                default:
                        return 0;
                }
@@ -2034,13 +2007,13 @@ static int sbmac_set_duplex(struct sbmac_softc *s,sbmac_duplex_t duplex,sbmac_fc
                /* XXX not implemented */
                break;
        }
-       
+
        /*
-        * Send the bits back to the hardware 
+        * Send the bits back to the hardware
         */
-       
-       SBMAC_WRITECSR(s->sbm_maccfg,cfg);
-       
+
+       __raw_writeq(cfg, s->sbm_maccfg);
+
        return 1;
 }
 
@@ -2049,12 +2022,12 @@ static int sbmac_set_duplex(struct sbmac_softc *s,sbmac_duplex_t duplex,sbmac_fc
 
 /**********************************************************************
  *  SBMAC_INTR()
- *  
+ *
  *  Interrupt handler for MAC interrupts
- *  
- *  Input parameters: 
+ *
+ *  Input parameters:
  *        MAC structure
- *        
+ *
  *  Return value:
  *        nothing
  ********************************************************************* */
@@ -2066,27 +2039,27 @@ static irqreturn_t sbmac_intr(int irq,void *dev_instance,struct pt_regs *rgs)
        int handled = 0;
 
        for (;;) {
-               
+
                /*
                 * Read the ISR (this clears the bits in the real
                 * register, except for counter addr)
                 */
-               
-               isr = SBMAC_READCSR(sc->sbm_isr) & ~M_MAC_COUNTER_ADDR;
-               
+
+               isr = __raw_readq(sc->sbm_isr) & ~M_MAC_COUNTER_ADDR;
+
                if (isr == 0)
                        break;
 
                handled = 1;
-               
+
                /*
                 * Transmits on channel 0
                 */
-               
+
                if (isr & (M_MAC_INT_CHANNEL << S_MAC_TX_CH0)) {
                        sbdma_tx_process(sc,&(sc->sbm_txdma));
                }
-               
+
                /*
                 * Receives on channel 0
                 */
@@ -2106,8 +2079,8 @@ static irqreturn_t sbmac_intr(int irq,void *dev_instance,struct pt_regs *rgs)
                 * EOP_SEEN here takes care of this case.
                 * (EOP_SEEN is part of M_MAC_INT_CHANNEL << S_MAC_RX_CH0)
                 */
-                
-               
+
+
                if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) {
                        sbdma_rx_process(sc,&(sc->sbm_rxdma));
                }
@@ -2118,29 +2091,29 @@ static irqreturn_t sbmac_intr(int irq,void *dev_instance,struct pt_regs *rgs)
 
 /**********************************************************************
  *  SBMAC_START_TX(skb,dev)
- *  
- *  Start output on the specified interface.  Basically, we 
+ *
+ *  Start output on the specified interface.  Basically, we
  *  queue as many buffers as we can until the ring fills up, or
  *  we run off the end of the queue, whichever comes first.
- *  
- *  Input parameters: 
- *        
- *        
+ *
+ *  Input parameters:
+ *        skb - sk_buff to transmit
+ *        dev - net_device to transmit it on
  *  Return value:
  *        0 if the packet was queued, nonzero if the transmit ring was full
  ********************************************************************* */
 static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev)
 {
        struct sbmac_softc *sc = netdev_priv(dev);
-       
+
        /* lock eth irq */
        spin_lock_irq (&sc->sbm_lock);
-       
+
        /*
-        * Put the buffer on the transmit ring.  If we 
+        * Put the buffer on the transmit ring.  If we
         * don't have room, stop the queue.
         */
-       
+
        if (sbdma_add_txbuffer(&(sc->sbm_txdma),skb)) {
                /* XXX save skb that we could not send */
                netif_stop_queue(dev);
@@ -2148,24 +2121,24 @@ static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev)
 
                return 1;
        }
-       
+
        dev->trans_start = jiffies;
-       
+
        spin_unlock_irq (&sc->sbm_lock);
-       
+
        return 0;
 }
 
 /**********************************************************************
  *  SBMAC_SETMULTI(sc)
- *  
+ *
  *  Reprogram the multicast table into the hardware, given
  *  the list of multicasts associated with the interface
  *  structure.
- *  
- *  Input parameters: 
+ *
+ *  Input parameters:
  *        sc - softc
- *        
+ *
  *  Return value:
  *        nothing
  ********************************************************************* */
@@ -2173,75 +2146,75 @@ static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev)
 static void sbmac_setmulti(struct sbmac_softc *sc)
 {
        uint64_t reg;
-       sbmac_port_t port;
+       volatile void __iomem *port;
        int idx;
        struct dev_mc_list *mclist;
        struct net_device *dev = sc->sbm_dev;
-       
-       /* 
+
+       /*
         * Clear out entire multicast table.  We do this by nuking
         * the entire hash table and all the direct matches except
-        * the first one, which is used for our station address 
+        * the first one, which is used for our station address
         */
-       
+
        for (idx = 1; idx < MAC_ADDR_COUNT; idx++) {
                port = sc->sbm_base + R_MAC_ADDR_BASE+(idx*sizeof(uint64_t));
-               SBMAC_WRITECSR(port,0); 
+               __raw_writeq(0, port);
        }
-       
+
        for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
                port = sc->sbm_base + R_MAC_HASH_BASE+(idx*sizeof(uint64_t));
-               SBMAC_WRITECSR(port,0); 
+               __raw_writeq(0, port);
        }
-       
+
        /*
         * Clear the filter to say we don't want any multicasts.
         */
-       
-       reg = SBMAC_READCSR(sc->sbm_rxfilter);
+
+       reg = __raw_readq(sc->sbm_rxfilter);
        reg &= ~(M_MAC_MCAST_INV | M_MAC_MCAST_EN);
-       SBMAC_WRITECSR(sc->sbm_rxfilter,reg);
-       
+       __raw_writeq(reg, sc->sbm_rxfilter);
+
        if (dev->flags & IFF_ALLMULTI) {
-               /* 
-                * Enable ALL multicasts.  Do this by inverting the 
-                * multicast enable bit. 
+               /*
+                * Enable ALL multicasts.  Do this by inverting the
+                * multicast enable bit.
                 */
-               reg = SBMAC_READCSR(sc->sbm_rxfilter);
+               reg = __raw_readq(sc->sbm_rxfilter);
                reg |= (M_MAC_MCAST_INV | M_MAC_MCAST_EN);
-               SBMAC_WRITECSR(sc->sbm_rxfilter,reg);
+               __raw_writeq(reg, sc->sbm_rxfilter);
                return;
        }
-       
 
-       /* 
+
+       /*
         * Program new multicast entries.  For now, only use the
         * perfect filter.  In the future we'll need to use the
         * hash filter if the perfect filter overflows
         */
-       
+
        /* XXX only using perfect filter for now, need to use hash
         * XXX if the table overflows */
-       
+
        idx = 1;                /* skip station address */
        mclist = dev->mc_list;
        while (mclist && (idx < MAC_ADDR_COUNT)) {
                reg = sbmac_addr2reg(mclist->dmi_addr);
                port = sc->sbm_base + R_MAC_ADDR_BASE+(idx * sizeof(uint64_t));
-               SBMAC_WRITECSR(port,reg);
+               __raw_writeq(reg, port);
                idx++;
                mclist = mclist->next;
        }
-       
-       /*      
+
+       /*
         * Enable the "accept multicast bits" if we programmed at least one
-        * multicast. 
+        * multicast.
         */
-       
+
        if (idx > 1) {
-               reg = SBMAC_READCSR(sc->sbm_rxfilter);
+               reg = __raw_readq(sc->sbm_rxfilter);
                reg |= M_MAC_MCAST_EN;
-               SBMAC_WRITECSR(sc->sbm_rxfilter,reg);
+               __raw_writeq(reg, sc->sbm_rxfilter);
        }
 }
 
@@ -2250,12 +2223,12 @@ static void sbmac_setmulti(struct sbmac_softc *sc)
 #if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR)
 /**********************************************************************
  *  SBMAC_PARSE_XDIGIT(str)
- *  
+ *
  *  Parse a hex digit, returning its value
- *  
- *  Input parameters: 
+ *
+ *  Input parameters:
  *        str - character
- *        
+ *
  *  Return value:
  *        hex value, or -1 if invalid
  ********************************************************************* */
@@ -2263,7 +2236,7 @@ static void sbmac_setmulti(struct sbmac_softc *sc)
 static int sbmac_parse_xdigit(char str)
 {
        int digit;
-       
+
        if ((str >= '0') && (str <= '9'))
                digit = str - '0';
        else if ((str >= 'a') && (str <= 'f'))
@@ -2272,20 +2245,20 @@ static int sbmac_parse_xdigit(char str)
                digit = str - 'A' + 10;
        else
                return -1;
-       
+
        return digit;
 }
 
 /**********************************************************************
  *  SBMAC_PARSE_HWADDR(str,hwaddr)
- *  
+ *
  *  Convert a string in the form xx:xx:xx:xx:xx:xx into a 6-byte
  *  Ethernet address.
- *  
- *  Input parameters: 
+ *
+ *  Input parameters:
  *        str - string
  *        hwaddr - pointer to hardware address
- *        
+ *
  *  Return value:
  *        0 if ok, else -1
  ********************************************************************* */
@@ -2294,7 +2267,7 @@ static int sbmac_parse_hwaddr(char *str, unsigned char *hwaddr)
 {
        int digit1,digit2;
        int idx = 6;
-       
+
        while (*str && (idx > 0)) {
                digit1 = sbmac_parse_xdigit(*str);
                if (digit1 < 0)
@@ -2302,7 +2275,7 @@ static int sbmac_parse_hwaddr(char *str, unsigned char *hwaddr)
                str++;
                if (!*str)
                        return -1;
-               
+
                if ((*str == ':') || (*str == '-')) {
                        digit2 = digit1;
                        digit1 = 0;
@@ -2313,10 +2286,10 @@ static int sbmac_parse_hwaddr(char *str, unsigned char *hwaddr)
                                return -1;
                        str++;
                }
-               
+
                *hwaddr++ = (digit1 << 4) | digit2;
                idx--;
-               
+
                if (*str == '-')
                        str++;
                if (*str == ':')
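
sbmac_parse_hwaddr() walks the xx:xx:xx:xx:xx:xx string two hex digits at a time, using sbmac_parse_xdigit() for each nibble and accepting either ':' or '-' separators. A standalone, simplified user-space version of the same parse (it omits the driver's single-digit-group handling), for illustration only:

        #include <stdio.h>

        static int xdigit(char c)
        {
                if (c >= '0' && c <= '9') return c - '0';
                if (c >= 'a' && c <= 'f') return c - 'a' + 10;
                if (c >= 'A' && c <= 'F') return c - 'A' + 10;
                return -1;
        }

        static int parse_hwaddr(const char *str, unsigned char hwaddr[6])
        {
                for (int i = 0; i < 6; i++) {
                        int hi = xdigit(str[0]), lo = xdigit(str[1]);

                        if (hi < 0 || lo < 0)
                                return -1;
                        hwaddr[i] = (unsigned char)((hi << 4) | lo);
                        str += 2;
                        if (*str == ':' || *str == '-')
                                str++;
                }
                return 0;
        }

        int main(void)
        {
                unsigned char mac[6];

                if (parse_hwaddr("40:00:00:00:01:00", mac) == 0)
                        printf("%02x:%02x:%02x ...\n", mac[0], mac[1], mac[2]);
                return 0;
        }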
@@ -2337,12 +2310,12 @@ static int sb1250_change_mtu(struct net_device *_dev, int new_mtu)
 
 /**********************************************************************
  *  SBMAC_INIT(dev)
- *  
+ *
  *  Attach routine - init hardware and hook ourselves into linux
- *  
- *  Input parameters: 
+ *
+ *  Input parameters:
  *        dev - net_device structure
- *        
+ *
  *  Return value:
  *        status
  ********************************************************************* */
@@ -2354,53 +2327,53 @@ static int sbmac_init(struct net_device *dev, int idx)
        uint64_t ea_reg;
        int i;
        int err;
-       
+
        sc = netdev_priv(dev);
-       
+
        /* Determine controller base address */
-       
+
        sc->sbm_base = IOADDR(dev->base_addr);
        sc->sbm_dev = dev;
        sc->sbe_idx = idx;
-       
+
        eaddr = sc->sbm_hwaddr;
-       
-       /* 
+
+       /*
         * Read the ethernet address.  The firmware left this programmed
         * for us in the ethernet address register for each mac.
         */
-       
-       ea_reg = SBMAC_READCSR(sc->sbm_base + R_MAC_ETHERNET_ADDR);
-       SBMAC_WRITECSR(sc->sbm_base + R_MAC_ETHERNET_ADDR, 0);
+
+       ea_reg = __raw_readq(sc->sbm_base + R_MAC_ETHERNET_ADDR);
+       __raw_writeq(0, sc->sbm_base + R_MAC_ETHERNET_ADDR);
        for (i = 0; i < 6; i++) {
                eaddr[i] = (uint8_t) (ea_reg & 0xFF);
                ea_reg >>= 8;
        }
-       
+
        for (i = 0; i < 6; i++) {
                dev->dev_addr[i] = eaddr[i];
        }
-       
-       
+
+
        /*
-        * Init packet size 
+        * Init packet size
         */
-       
+
        sc->sbm_buffersize = ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN;
 
-       /* 
+       /*
         * Initialize context (get pointers to registers and stuff), then
         * allocate the memory for the descriptor tables.
         */
-       
+
        sbmac_initctx(sc);
-       
+
        /*
         * Set up Linux device callins
         */
-       
+
        spin_lock_init(&(sc->sbm_lock));
-       
+
        dev->open               = sbmac_open;
        dev->hard_start_xmit    = sbmac_start_tx;
        dev->stop               = sbmac_close;
@@ -2419,7 +2392,7 @@ static int sbmac_init(struct net_device *dev, int idx)
        if (err)
                goto out_uninit;
 
-       if (periph_rev >= 2) {
+       if (sc->rx_hw_checksum == ENABLE) {
                printk(KERN_INFO "%s: enabling TCP rcv checksum\n",
                        sc->sbm_dev->name);
        }
@@ -2430,10 +2403,10 @@ static int sbmac_init(struct net_device *dev, int idx)
         * was being displayed)
         */
        printk(KERN_INFO
-              "%s: SiByte Ethernet at 0x%08lX, address: %02X:%02X:%02X:%02X:%02X:%02X\n", 
+              "%s: SiByte Ethernet at 0x%08lX, address: %02X:%02X:%02X:%02X:%02X:%02X\n",
               dev->name, dev->base_addr,
               eaddr[0],eaddr[1],eaddr[2],eaddr[3],eaddr[4],eaddr[5]);
-       
+
 
        return 0;
 
@@ -2447,54 +2420,86 @@ out_uninit:
 static int sbmac_open(struct net_device *dev)
 {
        struct sbmac_softc *sc = netdev_priv(dev);
-       
+
        if (debug > 1) {
                printk(KERN_DEBUG "%s: sbmac_open() irq %d.\n", dev->name, dev->irq);
        }
-       
-       /* 
+
+       /*
         * map/route interrupt (clear status first, in case something
         * weird is pending; we haven't initialized the mac registers
         * yet)
         */
 
-       SBMAC_READCSR(sc->sbm_isr);
+       __raw_readq(sc->sbm_isr);
        if (request_irq(dev->irq, &sbmac_intr, SA_SHIRQ, dev->name, dev))
                return -EBUSY;
 
        /*
-        * Configure default speed 
+        * Probe phy address
+        */
+
+       if(sbmac_mii_probe(dev) == -1) {
+               printk("%s: failed to probe PHY.\n", dev->name);
+               return -EINVAL;
+       }
+
+       /*
+        * Configure default speed
         */
 
        sbmac_mii_poll(sc,noisy_mii);
-       
+
        /*
         * Turn on the channel
         */
 
        sbmac_set_channel_state(sc,sbmac_state_on);
-       
+
        /*
         * XXX Station address is in dev->dev_addr
         */
-       
+
        if (dev->if_port == 0)
-               dev->if_port = 0; 
-       
+               dev->if_port = 0;
+
        netif_start_queue(dev);
-       
+
        sbmac_set_rx_mode(dev);
-       
+
        /* Set the timer to check for link beat. */
        init_timer(&sc->sbm_timer);
        sc->sbm_timer.expires = jiffies + 2 * HZ/100;
        sc->sbm_timer.data = (unsigned long)dev;
        sc->sbm_timer.function = &sbmac_timer;
        add_timer(&sc->sbm_timer);
-       
+
        return 0;
 }
 
+static int sbmac_mii_probe(struct net_device *dev)
+{
+       int i;
+       struct sbmac_softc *s = netdev_priv(dev);
+       u16 bmsr, id1, id2;
+       u32 vendor, device;
+
+       for (i=1; i<31; i++) {
+               bmsr = sbmac_mii_read(s, i, MII_BMSR);
+               if (bmsr != 0) {
+                       s->sbm_phys[0] = i;
+                       id1 = sbmac_mii_read(s, i, MII_PHYIDR1);
+                       id2 = sbmac_mii_read(s, i, MII_PHYIDR2);
+                       vendor = ((u32)id1 << 6) | ((id2 >> 10) & 0x3f);
+                       device = (id2 >> 4) & 0x3f;
+
+                       printk(KERN_INFO "%s: found phy %d, vendor %06x part %02x\n",
+                               dev->name, i, vendor, device);
+                       return i;
+               }
+       }
+       return -1;
+}
 
 
 static int sbmac_mii_poll(struct sbmac_softc *s,int noisy)
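
The new sbmac_mii_probe() above treats a non-zero BMSR as "a PHY answers at this address" and then reads the two MII ID registers; its vendor value is the 22 OUI-derived bits of the 32-bit PHY identifier and its part value is the model field. A small standalone decode of the same registers (the register values below are hypothetical, not from this patch):

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                uint16_t id1 = 0x0040, id2 = 0x61e4;            /* hypothetical MII_PHYIDR1/2 */
                uint32_t phy_id = ((uint32_t)id1 << 16) | id2;  /* full 32-bit identifier */
                uint32_t vendor = phy_id >> 10;     /* same as (id1 << 6) | (id2 >> 10) */
                unsigned model  = (id2 >> 4) & 0x3f;
                unsigned rev    = id2 & 0x0f;

                printf("vendor %06x part %02x rev %x\n",
                       (unsigned)vendor, model, rev);
                return 0;
        }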
@@ -2609,20 +2614,20 @@ static void sbmac_timer(unsigned long data)
        int mii_status;
 
        spin_lock_irq (&sc->sbm_lock);
-       
+
        /* make IFF_RUNNING follow the MII status bit "Link established" */
        mii_status = sbmac_mii_read(sc, sc->sbm_phys[0], MII_BMSR);
-       
+
        if ( (mii_status & BMSR_LINKSTAT) != (sc->sbm_phy_oldlinkstat) ) {
                sc->sbm_phy_oldlinkstat = mii_status & BMSR_LINKSTAT;
                if (mii_status & BMSR_LINKSTAT) {
                        netif_carrier_on(dev);
                }
                else {
-                       netif_carrier_off(dev); 
+                       netif_carrier_off(dev);
                }
        }
-       
+
        /*
         * Poll the PHY to see what speed we should be running at
         */
@@ -2640,9 +2645,9 @@ static void sbmac_timer(unsigned long data)
                        sbmac_channel_start(sc);
                }
        }
-       
+
        spin_unlock_irq (&sc->sbm_lock);
-       
+
        sc->sbm_timer.expires = jiffies + next_tick;
        add_timer(&sc->sbm_timer);
 }
@@ -2651,13 +2656,13 @@ static void sbmac_timer(unsigned long data)
 static void sbmac_tx_timeout (struct net_device *dev)
 {
        struct sbmac_softc *sc = netdev_priv(dev);
-       
+
        spin_lock_irq (&sc->sbm_lock);
-       
-       
+
+
        dev->trans_start = jiffies;
        sc->sbm_stats.tx_errors++;
-       
+
        spin_unlock_irq (&sc->sbm_lock);
 
        printk (KERN_WARNING "%s: Transmit timed out\n",dev->name);
@@ -2670,13 +2675,13 @@ static struct net_device_stats *sbmac_get_stats(struct net_device *dev)
 {
        struct sbmac_softc *sc = netdev_priv(dev);
        unsigned long flags;
-       
+
        spin_lock_irqsave(&sc->sbm_lock, flags);
-       
+
        /* XXX update other stats here */
-       
+
        spin_unlock_irqrestore(&sc->sbm_lock, flags);
-       
+
        return &sc->sbm_stats;
 }
 
@@ -2693,8 +2698,8 @@ static void sbmac_set_rx_mode(struct net_device *dev)
                /*
                 * Promiscuous changed.
                 */
-               
-               if (dev->flags & IFF_PROMISC) { 
+
+               if (dev->flags & IFF_PROMISC) {
                        /* Unconditionally log net taps. */
                        msg_flag = 1;
                        sbmac_promiscuous_mode(sc,1);
@@ -2705,18 +2710,18 @@ static void sbmac_set_rx_mode(struct net_device *dev)
                }
        }
        spin_unlock_irqrestore(&sc->sbm_lock, flags);
-       
+
        if (msg_flag) {
                printk(KERN_NOTICE "%s: Promiscuous mode %sabled.\n",
                       dev->name,(msg_flag==1)?"en":"dis");
        }
-       
+
        /*
         * Program the multicasts.  Do this every time.
         */
-       
+
        sbmac_setmulti(sc);
-       
+
 }
 
 static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -2725,10 +2730,10 @@ static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
        u16 *data = (u16 *)&rq->ifr_ifru;
        unsigned long flags;
        int retval;
-       
+
        spin_lock_irqsave(&sc->sbm_lock, flags);
        retval = 0;
-       
+
        switch(cmd) {
        case SIOCDEVPRIVATE:            /* Get the address of the PHY in use. */
                data[0] = sc->sbm_phys[0] & 0x1f;
@@ -2750,7 +2755,7 @@ static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
        default:
                retval = -EOPNOTSUPP;
        }
-       
+
        spin_unlock_irqrestore(&sc->sbm_lock, flags);
        return retval;
 }
@@ -2781,7 +2786,7 @@ static int sbmac_close(struct net_device *dev)
 
        sbdma_emptyring(&(sc->sbm_txdma));
        sbdma_emptyring(&(sc->sbm_rxdma));
-       
+
        return 0;
 }
 
@@ -2793,13 +2798,13 @@ sbmac_setup_hwaddr(int chan,char *addr)
 {
        uint8_t eaddr[6];
        uint64_t val;
-       sbmac_port_t port;
+       unsigned long port;
 
        port = A_MAC_CHANNEL_BASE(chan);
        sbmac_parse_hwaddr(addr,eaddr);
        val = sbmac_addr2reg(eaddr);
-       SBMAC_WRITECSR(IOADDR(port+R_MAC_ETHERNET_ADDR),val);
-       val = SBMAC_READCSR(IOADDR(port+R_MAC_ETHERNET_ADDR));
+       __raw_writeq(val, IOADDR(port+R_MAC_ETHERNET_ADDR));
+       val = __raw_readq(IOADDR(port+R_MAC_ETHERNET_ADDR));
 }
 #endif
 
@@ -2810,9 +2815,9 @@ sbmac_init_module(void)
 {
        int idx;
        struct net_device *dev;
-       sbmac_port_t port;
+       unsigned long port;
        int chip_max_units;
-       
+
        /*
         * For bringup when not using the firmware, we can pre-fill
         * the MAC addresses using the environment variables
@@ -2858,13 +2863,13 @@ sbmac_init_module(void)
 
                port = A_MAC_CHANNEL_BASE(idx);
 
-               /*      
+               /*
                 * The R_MAC_ETHERNET_ADDR register will be set to some nonzero
                 * value for us by the firmware if we're going to use this MAC.
                 * If we find a zero, skip this MAC.
                 */
 
-               sbmac_orig_hwaddr[idx] = SBMAC_READCSR(IOADDR(port+R_MAC_ETHERNET_ADDR));
+               sbmac_orig_hwaddr[idx] = __raw_readq(IOADDR(port+R_MAC_ETHERNET_ADDR));
                if (sbmac_orig_hwaddr[idx] == 0) {
                        printk(KERN_DEBUG "sbmac: not configuring MAC at "
                               "%lx\n", port);
@@ -2876,7 +2881,7 @@ sbmac_init_module(void)
                 */
 
                dev = alloc_etherdev(sizeof(struct sbmac_softc));
-               if (!dev) 
+               if (!dev)
                        return -ENOMEM; /* return ENOMEM */
 
                printk(KERN_DEBUG "sbmac: configuring MAC at %lx\n", port);
@@ -2886,8 +2891,7 @@ sbmac_init_module(void)
                dev->mem_end = 0;
                if (sbmac_init(dev, idx)) {
                        port = A_MAC_CHANNEL_BASE(idx);
-                       SBMAC_WRITECSR(IOADDR(port+R_MAC_ETHERNET_ADDR),
-                                      sbmac_orig_hwaddr[idx]);
+                       __raw_writeq(sbmac_orig_hwaddr[idx], IOADDR(port+R_MAC_ETHERNET_ADDR));
                        free_netdev(dev);
                        continue;
                }
index 9bc3b1c0dd6a27e280650b5ff99d382baf5adc6f..a4614df38a903a53207507485eeec9a5486f50ae 100644 (file)
@@ -32,8 +32,6 @@
 
 #include "sgiseeq.h"
 
-static char *version = "sgiseeq.c: David S. Miller (dm@engr.sgi.com)\n";
-
 static char *sgiseeqstr = "SGI Seeq8003";
 
 /*
@@ -113,9 +111,9 @@ static struct net_device *root_sgiseeq_dev;
 
 static inline void hpc3_eth_reset(struct hpc3_ethregs *hregs)
 {
-       hregs->rx_reset = HPC3_ERXRST_CRESET | HPC3_ERXRST_CLRIRQ;
+       hregs->reset = HPC3_ERST_CRESET | HPC3_ERST_CLRIRQ;
        udelay(20);
-       hregs->rx_reset = 0;
+       hregs->reset = 0;
 }
 
 static inline void reset_hpc3_and_seeq(struct hpc3_ethregs *hregs,
@@ -252,7 +250,6 @@ void sgiseeq_dump_rings(void)
 
 #define TSTAT_INIT_SEEQ (SEEQ_TCMD_IPT|SEEQ_TCMD_I16|SEEQ_TCMD_IC|SEEQ_TCMD_IUF)
 #define TSTAT_INIT_EDLC ((TSTAT_INIT_SEEQ) | SEEQ_TCMD_RB2)
-#define RDMACFG_INIT    (HPC3_ERXDCFG_FRXDC | HPC3_ERXDCFG_FEOP | HPC3_ERXDCFG_FIRQ)
 
 static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp,
                     struct sgiseeq_regs *sregs)
@@ -274,8 +271,6 @@ static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp,
                sregs->tstat = TSTAT_INIT_SEEQ;
        }
 
-       hregs->rx_dconfig |= RDMACFG_INIT;
-
        hregs->rx_ndptr = CPHYSADDR(sp->rx_desc);
        hregs->tx_ndptr = CPHYSADDR(sp->tx_desc);
 
@@ -446,7 +441,7 @@ static irqreturn_t sgiseeq_interrupt(int irq, void *dev_id, struct pt_regs *regs
        spin_lock(&sp->tx_lock);
 
        /* Ack the IRQ and set software state. */
-       hregs->rx_reset = HPC3_ERXRST_CLRIRQ;
+       hregs->reset = HPC3_ERST_CLRIRQ;
 
        /* Always check for received packets. */
        sgiseeq_rx(dev, sp, hregs, sregs);
@@ -493,11 +488,13 @@ static int sgiseeq_close(struct net_device *dev)
 {
        struct sgiseeq_private *sp = netdev_priv(dev);
        struct sgiseeq_regs *sregs = sp->sregs;
+       unsigned int irq = dev->irq;
 
        netif_stop_queue(dev);
 
        /* Shutdown the Seeq. */
        reset_hpc3_and_seeq(sp->hregs, sregs);
+       free_irq(irq, dev);
 
        return 0;
 }
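
This hunk pairs free_irq() with the request made in sgiseeq_open() instead of leaving it to module unload (the matching removal appears in sgiseeq_exit() further down). A minimal open/close sketch of that pairing, using the interrupt handler signature of this kernel generation; all names are placeholders:

        #include <linux/interrupt.h>
        #include <linux/netdevice.h>

        static irqreturn_t demo_intr(int irq, void *dev_id, struct pt_regs *regs)
        {
                return IRQ_HANDLED;
        }

        static int demo_open(struct net_device *dev)
        {
                if (request_irq(dev->irq, demo_intr, 0, dev->name, dev))
                        return -EAGAIN;          /* could not claim the line */
                netif_start_queue(dev);
                return 0;
        }

        static int demo_close(struct net_device *dev)
        {
                netif_stop_queue(dev);
                free_irq(dev->irq, dev);         /* undo exactly what open() did */
                return 0;
        }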
@@ -644,7 +641,7 @@ static inline void setup_rx_ring(struct sgiseeq_rx_desc *buf, int nbufs)
 
 #define ALIGNED(x)  ((((unsigned long)(x)) + 0xf) & ~(0xf))
 
-static int sgiseeq_init(struct hpc3_regs* regs, int irq)
+static int sgiseeq_init(struct hpc3_regs* hpcregs, int irq)
 {
        struct sgiseeq_init_block *sr;
        struct sgiseeq_private *sp;
@@ -680,8 +677,8 @@ static int sgiseeq_init(struct hpc3_regs* regs, int irq)
        gpriv = sp;
        gdev = dev;
 #endif
-       sp->sregs = (struct sgiseeq_regs *) &hpc3c0->eth_ext[0];
-       sp->hregs = &hpc3c0->ethregs;
+       sp->sregs = (struct sgiseeq_regs *) &hpcregs->eth_ext[0];
+       sp->hregs = &hpcregs->ethregs;
        sp->name = sgiseeqstr;
        sp->mode = SEEQ_RCMD_RBCAST;
 
@@ -698,6 +695,11 @@ static int sgiseeq_init(struct hpc3_regs* regs, int irq)
        setup_rx_ring(sp->rx_desc, SEEQ_RX_BUFFERS);
        setup_tx_ring(sp->tx_desc, SEEQ_TX_BUFFERS);
 
+       /* Setup PIO and DMA transfer timing */
+       sp->hregs->pconfig = 0x161;
+       sp->hregs->dconfig = HPC3_EDCFG_FIRQ | HPC3_EDCFG_FEOP |
+                            HPC3_EDCFG_FRXDC | HPC3_EDCFG_PTO | 0x026;
+
        /* Reset the chip. */
        hpc3_eth_reset(sp->hregs);
 
@@ -724,7 +726,7 @@ static int sgiseeq_init(struct hpc3_regs* regs, int irq)
                goto err_out_free_page;
        }
 
-       printk(KERN_INFO "%s: SGI Seeq8003 ", dev->name);
+       printk(KERN_INFO "%s: %s ", dev->name, sgiseeqstr);
        for (i = 0; i < 6; i++)
                printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':');
 
@@ -734,7 +736,7 @@ static int sgiseeq_init(struct hpc3_regs* regs, int irq)
        return 0;
 
 err_out_free_page:
-       free_page((unsigned long) sp);
+       free_page((unsigned long) sp->srings);
 err_out_free_dev:
        kfree(dev);
 
@@ -744,8 +746,6 @@ err_out:
 
 static int __init sgiseeq_probe(void)
 {
-       printk(version);
-
        /* On board adapter on 1st HPC is always present */
        return sgiseeq_init(hpc3c0, SGI_ENET_IRQ);
 }
@@ -754,15 +754,12 @@ static void __exit sgiseeq_exit(void)
 {
        struct net_device *next, *dev;
        struct sgiseeq_private *sp;
-       int irq;
 
        for (dev = root_sgiseeq_dev; dev; dev = next) {
                sp = (struct sgiseeq_private *) netdev_priv(dev);
                next = sp->next_module;
-               irq = dev->irq;
                unregister_netdev(dev);
-               free_irq(irq, dev);
-               free_page((unsigned long) sp);
+               free_page((unsigned long) sp->srings);
                free_netdev(dev);
        }
 }
@@ -770,4 +767,6 @@ static void __exit sgiseeq_exit(void)
 module_init(sgiseeq_probe);
 module_exit(sgiseeq_exit);
 
+MODULE_DESCRIPTION("SGI Seeq 8003 driver");
+MODULE_AUTHOR("Linux/MIPS Mailing List <linux-mips@linux-mips.org>");
 MODULE_LICENSE("GPL");
index c2e6484ef138f6e0c8d7548fd340a61028c85c81..572f121b1f4edaf29533eba3d3c0bd7c7a4e959b 100644 (file)
@@ -730,6 +730,7 @@ static struct ethtool_ops skge_ethtool_ops = {
        .phys_id        = skge_phys_id,
        .get_stats_count = skge_get_stats_count,
        .get_ethtool_stats = skge_get_ethtool_stats,
+       .get_perm_addr  = ethtool_op_get_perm_addr,
 };
 
 /*
@@ -3096,6 +3097,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
 
        /* read the mac address */
        memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
+       memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 
        /* device is off until link detection */
        netif_carrier_off(dev);
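
Several drivers in this merge (skge here, sundance and via-rhine below) gain ETHTOOL_GPERMADDR support: the factory MAC read from hardware is copied into dev->perm_addr at probe time and the generic ethtool_op_get_perm_addr() helper reports it. A sketch of the pattern with placeholder names:

        #include <linux/etherdevice.h>
        #include <linux/ethtool.h>
        #include <linux/string.h>

        static void demo_record_perm_addr(struct net_device *dev, const u8 *hw_mac)
        {
                memcpy(dev->dev_addr, hw_mac, ETH_ALEN);
                /* remember the factory address before userspace can change dev_addr */
                memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
        }

        static struct ethtool_ops demo_ethtool_ops = {
                .get_perm_addr = ethtool_op_get_perm_addr,
        };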
index f88f5e32b7145164f916a58e2d23efe3d20fca76..cfaf47c63c58dd1fbeb6c3c5f59290fa49fad804 100644 (file)
@@ -214,7 +214,8 @@ static void bigmac_init_rings(struct bigmac *bp, int from_irq)
 {
        struct bmac_init_block *bb = bp->bmac_block;
        struct net_device *dev = bp->dev;
-       int i, gfp_flags = GFP_KERNEL;
+       int i;
+       gfp_t gfp_flags = GFP_KERNEL;
 
        if (from_irq || in_interrupt())
                gfp_flags = GFP_ATOMIC;
index 5674003fc38aa70b73e4b7937ec811e3c0515851..b0dbc5187143690b4fcab25070bd3703fca142dd 100644 (file)
@@ -339,7 +339,7 @@ struct bigmac {
 #define ALIGNED_RX_SKB_ADDR(addr) \
         ((((unsigned long)(addr) + (64 - 1)) & ~(64 - 1)) - (unsigned long)(addr))
 
-static inline struct sk_buff *big_mac_alloc_skb(unsigned int length, int gfp_flags)
+static inline struct sk_buff *big_mac_alloc_skb(unsigned int length, gfp_t gfp_flags)
 {
        struct sk_buff *skb;
 
index d500a5771dbc5c05f5f03428b71f58e21b96530f..5de0554fd7c67e7503d75d150bed0dc73348c553 100644 (file)
@@ -518,6 +518,7 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
 #else
        int bar = 1;
 #endif
+       int phy, phy_idx = 0;
 
 
 /* when built into the kernel, we only print version if device is found */
@@ -549,6 +550,7 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
        for (i = 0; i < 3; i++)
                ((u16 *)dev->dev_addr)[i] =
                        le16_to_cpu(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
+       memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 
        dev->base_addr = (unsigned long)ioaddr;
        dev->irq = irq;
@@ -605,33 +607,31 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
                        printk("%2.2x:", dev->dev_addr[i]);
        printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
 
-       if (1) {
-               int phy, phy_idx = 0;
-               np->phys[0] = 1;                /* Default setting */
-               np->mii_preamble_required++;
-               for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
-                       int mii_status = mdio_read(dev, phy, MII_BMSR);
-                       if (mii_status != 0xffff  &&  mii_status != 0x0000) {
-                               np->phys[phy_idx++] = phy;
-                               np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
-                               if ((mii_status & 0x0040) == 0)
-                                       np->mii_preamble_required++;
-                               printk(KERN_INFO "%s: MII PHY found at address %d, status "
-                                          "0x%4.4x advertising %4.4x.\n",
-                                          dev->name, phy, mii_status, np->mii_if.advertising);
-                       }
-               }
-               np->mii_preamble_required--;
-
-               if (phy_idx == 0) {
-                       printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
-                                  dev->name, ioread32(ioaddr + ASICCtrl));
-                       goto err_out_unregister;
+       np->phys[0] = 1;                /* Default setting */
+       np->mii_preamble_required++;
+       for (phy = 1; phy <= 32 && phy_idx < MII_CNT; phy++) {
+               int mii_status = mdio_read(dev, phy, MII_BMSR);
+               int phyx = phy & 0x1f;
+               if (mii_status != 0xffff  &&  mii_status != 0x0000) {
+                       np->phys[phy_idx++] = phyx;
+                       np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
+                       if ((mii_status & 0x0040) == 0)
+                               np->mii_preamble_required++;
+                       printk(KERN_INFO "%s: MII PHY found at address %d, status "
+                                  "0x%4.4x advertising %4.4x.\n",
+                                  dev->name, phyx, mii_status, np->mii_if.advertising);
                }
+       }
+       np->mii_preamble_required--;
 
-               np->mii_if.phy_id = np->phys[0];
+       if (phy_idx == 0) {
+               printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
+                          dev->name, ioread32(ioaddr + ASICCtrl));
+               goto err_out_unregister;
        }
 
+       np->mii_if.phy_id = np->phys[0];
+
        /* Parse override configuration */
        np->an_enable = 1;
        if (card_idx < MAX_UNITS) {
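
The rewritten probe loop above is more than de-indentation: it now runs phy from 1 to 32 and masks the address with 0x1f, so MII address 0 is probed last (32 & 0x1f == 0) rather than skipped. A quick standalone check of that wrap:

        #include <stdio.h>

        int main(void)
        {
                for (int phy = 1; phy <= 32; phy++)
                        printf("%d ", phy & 0x1f);   /* prints 1 .. 31 and finally 0 */
                printf("\n");
                return 0;
        }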
@@ -692,7 +692,7 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
        /* Reset the chip to erase previous misconfiguration. */
        if (netif_msg_hw(np))
                printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
-       iowrite16(0x007f, ioaddr + ASICCtrl + 2);
+       iowrite16(0x00ff, ioaddr + ASICCtrl + 2);
        if (netif_msg_hw(np))
                printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
 
@@ -1619,6 +1619,7 @@ static struct ethtool_ops ethtool_ops = {
        .get_link = get_link,
        .get_msglevel = get_msglevel,
        .set_msglevel = set_msglevel,
+       .get_perm_addr = ethtool_op_get_perm_addr,
 };
 
 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
index 32057e65808bdafe5a9ed826800eb1f7e1ff747e..9f491563944e917efc754b3d9c59645cefbc24a6 100644 (file)
@@ -318,7 +318,7 @@ static void ibmtr_cleanup_card(struct net_device *dev)
        if (dev->base_addr) {
                outb(0,dev->base_addr+ADAPTRESET);
                
-               schedule_timeout(TR_RST_TIME); /* wait 50ms */
+               schedule_timeout_uninterruptible(TR_RST_TIME); /* wait 50ms */
 
                outb(0,dev->base_addr+ADAPTRESETREL);
        }
@@ -854,8 +854,7 @@ static int tok_init_card(struct net_device *dev)
        writeb(~INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN);
        outb(0, PIOaddr + ADAPTRESET);
 
-       current->state=TASK_UNINTERRUPTIBLE;
-       schedule_timeout(TR_RST_TIME); /* wait 50ms */
+       schedule_timeout_uninterruptible(TR_RST_TIME); /* wait 50ms */
 
        outb(0, PIOaddr + ADAPTRESETREL);
 #ifdef ENABLE_PAGING
@@ -903,8 +902,8 @@ static int tok_open(struct net_device *dev)
                        DPRINTK("Adapter is up and running\n");
                        return 0;
                }
-               current->state=TASK_INTERRUPTIBLE;
-               i=schedule_timeout(TR_RETRY_INTERVAL); /* wait 30 seconds */
+               i=schedule_timeout_interruptible(TR_RETRY_INTERVAL);
+                                                       /* wait 30 seconds */
                if(i!=0) break; /*prob. a signal, like the i>24*HZ case above */
        }
        outb(0, dev->base_addr + ADAPTRESET);/* kill pending interrupts*/
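
Here and in several later hunks (olympic, tms380tr, typhoon, cosa, dscc4, farsync), the open-coded pair of set_current_state() plus schedule_timeout() is replaced by the schedule_timeout_interruptible()/_uninterruptible() helpers, which set the task state and sleep in one call. A sketch of the two spellings of a 50 ms sleep in process context (illustrative only):

        #include <linux/jiffies.h>
        #include <linux/sched.h>

        static void demo_sleep_50ms_twice(void)
        {
                /* old, open-coded form */
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(msecs_to_jiffies(50));

                /* new helper: same effect, one call */
                schedule_timeout_uninterruptible(msecs_to_jiffies(50));
        }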
index 9e7923192a49f0f01e6f7ffef375da1351938203..05477d24fd49c5359d9989062ad2bd4688d228c1 100644 (file)
@@ -1101,7 +1101,7 @@ static int olympic_close(struct net_device *dev)
 
        while(olympic_priv->srb_queued) {
 
-               t = schedule_timeout(60*HZ); 
+               t = schedule_timeout_interruptible(60*HZ);
 
                if(signal_pending(current))     {            
                        printk(KERN_WARNING "%s: SRB timed out.\n",dev->name);
index 2e39bf1f74620f88cf5d03fce1a71ac0f1e107fb..c1925590a0e110e431a0434901349f47d27f1384 100644 (file)
@@ -1243,8 +1243,7 @@ void tms380tr_wait(unsigned long time)
        
        tmp = jiffies + time/(1000000/HZ);
        do {
-               current->state          = TASK_INTERRUPTIBLE;
-               tmp = schedule_timeout(tmp);
+               tmp = schedule_timeout_interruptible(tmp);
        } while(time_after(tmp, jiffies));
 #else
        udelay(time);
index a22d00198e4d6b5cde5b46ebff6cb1e569e6f68e..6b8eee8f7bfd1ddc611498812a59ac7eb8f1976b 100644 (file)
@@ -1787,10 +1787,15 @@ static void __init de21041_get_srom_info (struct de_private *de)
        /* DEC now has a specification but early board makers
           just put the address in the first EEPROM locations. */
        /* This does  memcmp(eedata, eedata+16, 8) */
+
+#ifndef CONFIG_MIPS_COBALT
+
        for (i = 0; i < 8; i ++)
                if (ee_data[i] != ee_data[16+i])
                        sa_offset = 20;
 
+#endif
+
        /* store MAC address */
        for (i = 0; i < 6; i ++)
                de->dev->dev_addr[i] = ee_data[i + sa_offset];
index ecfa6f8805ce0ef050448e777e68364b7d50a32e..4c76cb794bfbb48ad0a5c4954cd85ee8324156ee 100644 (file)
@@ -419,10 +419,9 @@ typhoon_reset(void __iomem *ioaddr, int wait_type)
                           TYPHOON_STATUS_WAITING_FOR_HOST)
                                goto out;
 
-                       if(wait_type == WaitSleep) {
-                               set_current_state(TASK_UNINTERRUPTIBLE);
-                               schedule_timeout(1);
-                       } else
+                       if(wait_type == WaitSleep)
+                               schedule_timeout_uninterruptible(1);
+                       else
                                udelay(TYPHOON_UDELAY);
                }
 
index fc7738ffbfffeb1ffd17c7724426169b65531372..2418715892833608e91147ffd169dde1a2f86566 100644 (file)
@@ -490,6 +490,8 @@ struct rhine_private {
        u8 tx_thresh, rx_thresh;
 
        struct mii_if_info mii_if;
+       struct work_struct tx_timeout_task;
+       struct work_struct check_media_task;
        void __iomem *base;
 };
 
@@ -497,6 +499,8 @@ static int  mdio_read(struct net_device *dev, int phy_id, int location);
 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
 static int  rhine_open(struct net_device *dev);
 static void rhine_tx_timeout(struct net_device *dev);
+static void rhine_tx_timeout_task(struct net_device *dev);
+static void rhine_check_media_task(struct net_device *dev);
 static int  rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
 static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
 static void rhine_tx(struct net_device *dev);
@@ -814,8 +818,9 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
 
        for (i = 0; i < 6; i++)
                dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
+       memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 
-       if (!is_valid_ether_addr(dev->dev_addr)) {
+       if (!is_valid_ether_addr(dev->perm_addr)) {
                rc = -EIO;
                printk(KERN_ERR "Invalid MAC address\n");
                goto err_out_unmap;
@@ -850,6 +855,12 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
        if (rp->quirks & rqRhineI)
                dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
 
+       INIT_WORK(&rp->tx_timeout_task,
+                 (void (*)(void *))rhine_tx_timeout_task, dev);
+
+       INIT_WORK(&rp->check_media_task,
+                 (void (*)(void *))rhine_check_media_task, dev);
+
        /* dev->name not defined before register_netdev()! */
        rc = register_netdev(dev);
        if (rc)
@@ -1076,6 +1087,11 @@ static void rhine_check_media(struct net_device *dev, unsigned int init_media)
                   ioaddr + ChipCmd1);
 }
 
+static void rhine_check_media_task(struct net_device *dev)
+{
+       rhine_check_media(dev, 0);
+}
+
 static void init_registers(struct net_device *dev)
 {
        struct rhine_private *rp = netdev_priv(dev);
@@ -1129,8 +1145,8 @@ static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
        if (quirks & rqRhineI) {
                iowrite8(0x01, ioaddr + MIIRegAddr);    // MII_BMSR
 
-               /* Can be called from ISR. Evil. */
-               mdelay(1);
+               /* Do not call from ISR! */
+               msleep(1);
 
                /* 0x80 must be set immediately before turning it off */
                iowrite8(0x80, ioaddr + MIICmd);
@@ -1218,6 +1234,16 @@ static int rhine_open(struct net_device *dev)
 }
 
 static void rhine_tx_timeout(struct net_device *dev)
+{
+       struct rhine_private *rp = netdev_priv(dev);
+
+       /*
+        * Move bulk of work outside of interrupt context
+        */
+       schedule_work(&rp->tx_timeout_task);
+}
+
+static void rhine_tx_timeout_task(struct net_device *dev)
 {
        struct rhine_private *rp = netdev_priv(dev);
        void __iomem *ioaddr = rp->base;
@@ -1625,7 +1651,7 @@ static void rhine_error(struct net_device *dev, int intr_status)
        spin_lock(&rp->lock);
 
        if (intr_status & IntrLinkChange)
-               rhine_check_media(dev, 0);
+               schedule_work(&rp->check_media_task);
        if (intr_status & IntrStatsMax) {
                rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
                rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
@@ -1829,6 +1855,7 @@ static struct ethtool_ops netdev_ethtool_ops = {
        .set_wol                = rhine_set_wol,
        .get_sg                 = ethtool_op_get_sg,
        .get_tx_csum            = ethtool_op_get_tx_csum,
+       .get_perm_addr          = ethtool_op_get_perm_addr,
 };
 
 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -1872,6 +1899,9 @@ static int rhine_close(struct net_device *dev)
        spin_unlock_irq(&rp->lock);
 
        free_irq(rp->pdev->irq, dev);
+
+       flush_scheduled_work();
+
        free_rbufs(dev);
        free_tbufs(dev);
        free_ring(dev);
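
The via-rhine changes above move the heavy parts of the tx-timeout and link-change handling out of interrupt context: the IRQ paths only schedule_work(), the real work runs later from a work queue, and rhine_close() flushes anything still pending before freeing resources. A sketch of that deferral pattern using the same three-argument INIT_WORK() form as the patch; all names are placeholders:

        #include <linux/netdevice.h>
        #include <linux/workqueue.h>

        struct demo_priv {
                struct work_struct heavy_task;
        };

        static void demo_heavy_task(struct net_device *dev)
        {
                /* runs in process context: may msleep(), take mutexes, etc. */
        }

        static void demo_init(struct net_device *dev, struct demo_priv *p)
        {
                INIT_WORK(&p->heavy_task, (void (*)(void *))demo_heavy_task, dev);
        }

        static void demo_from_irq(struct demo_priv *p)
        {
                schedule_work(&p->heavy_task);   /* cheap and safe in IRQ context */
        }

        static void demo_close(void)
        {
                flush_scheduled_work();          /* wait for queued work before teardown */
        }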
index 7ff814fd65d0826596261f76448911b4579efe2e..ae9e897c255ec04576d1c58b28fde673bdc4be55 100644 (file)
@@ -1617,8 +1617,7 @@ static int get_wait_data(struct cosa_data *cosa)
                        return r;
                }
                /* sleep if not ready to read */
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(1);
+               schedule_timeout_interruptible(1);
        }
        printk(KERN_INFO "cosa: timeout in get_wait_data (status 0x%x)\n",
                cosa_getstatus(cosa));
@@ -1644,8 +1643,7 @@ static int put_wait_data(struct cosa_data *cosa, int data)
                }
 #if 0
                /* sleep if not ready to read */
-               current->state = TASK_INTERRUPTIBLE;
-               schedule_timeout(1);
+               schedule_timeout_interruptible(1);
 #endif
        }
        printk(KERN_INFO "cosa%d: timeout in put_wait_data (status 0x%x)\n",
index 9e56fc346ba4f72b53e0031167803165bf0dde1d..e6d005726aadea2875fca2e747cee73aaa9225db 100644 (file)
@@ -109,7 +109,7 @@ static long cycx_2x_irq_options[]  = { 7, 3, 5, 9, 10, 11, 12, 15 };
  *             < 0     error.
  * Context:    process */
 
-int __init cycx_drv_init(void)
+static int __init cycx_drv_init(void)
 {
        printk(KERN_INFO "%s v%u.%u %s\n", fullname, MOD_VERSION, MOD_RELEASE,
                         copyright);
@@ -119,7 +119,7 @@ int __init cycx_drv_init(void)
 
 /* Module 'remove' entry point.
  * o release all remaining system resources */
-void cycx_drv_cleanup(void)
+static void cycx_drv_cleanup(void)
 {
 }
 
@@ -184,8 +184,7 @@ int cycx_down(struct cycx_hw *hw)
 }
 
 /* Enable interrupt generation.  */
-EXPORT_SYMBOL(cycx_inten);
-void cycx_inten(struct cycx_hw *hw)
+static void cycx_inten(struct cycx_hw *hw)
 {
        writeb(0, hw->dpmbase);
 }
index 7b48064364dc7896440ab70b6d61d498789a57ce..430b1f630fb4a71147992411b3ad0eb71e997ad6 100644 (file)
@@ -103,7 +103,7 @@ static struct cycx_device *cycx_card_array; /* adapter data space */
  *             < 0     error.
  * Context:    process
  */
-int __init cycx_init(void)
+static int __init cycx_init(void)
 {
        int cnt, err = -ENOMEM;
 
index 02d57c0b4243ac8ab61e4db9a3eb3e6e90d66d82..a631d1c2fa148a7191ac775baab2f2c4a82fe12b 100644 (file)
@@ -78,6 +78,7 @@
 
 #define CYCLOMX_X25_DEBUG 1
 
+#include <linux/ctype.h>       /* isdigit() */
 #include <linux/errno.h>       /* return codes */
 #include <linux/if_arp.h>       /* ARPHRD_HWX25 */
 #include <linux/kernel.h>      /* printk(), and other useful stuff */
@@ -418,7 +419,7 @@ static int cycx_wan_new_if(struct wan_device *wandev, struct net_device *dev,
 
                /* Set channel timeouts (default if not specified) */
                chan->idle_tmout = conf->idle_timeout ? conf->idle_timeout : 90;
-       } else if (is_digit(conf->addr[0])) {   /* PVC */
+       } else if (isdigit(conf->addr[0])) {    /* PVC */
                s16 lcn = dec_to_uint(conf->addr, 0);
 
                if (lcn >= card->u.x.lo_pvc && lcn <= card->u.x.hi_pvc)
@@ -1531,7 +1532,7 @@ static unsigned dec_to_uint(u8 *str, int len)
        if (!len)
                len = strlen(str);
 
-       for (; len && is_digit(*str); ++str, --len)
+       for (; len && isdigit(*str); ++str, --len)
                val = (val * 10) + (*str - (unsigned) '0');
 
        return val;
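
The cycx hunks above drop a private is_digit() macro in favour of isdigit() from <linux/ctype.h>; dec_to_uint() itself is unchanged and simply accumulates decimal digits. A standalone version of that accumulation, for illustration:

        #include <ctype.h>
        #include <stdio.h>

        static unsigned dec_to_uint(const char *str)
        {
                unsigned val = 0;

                for (; *str && isdigit((unsigned char)*str); str++)
                        val = val * 10 + (*str - '0');
                return val;
        }

        int main(void)
        {
                printf("%u\n", dec_to_uint("4095"));   /* prints 4095 */
                return 0;
        }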
index 520a77a798e25482ee52852fa97d17edfc7e2191..2f61a47b4716947d331dca4a5e2ee2d311c05d15 100644 (file)
@@ -446,8 +446,8 @@ static inline unsigned int dscc4_tx_quiescent(struct dscc4_dev_priv *dpriv,
        return readl(dpriv->base_addr + CH0FTDA + dpriv->dev_id*4) == dpriv->ltda;
 }
 
-int state_check(u32 state, struct dscc4_dev_priv *dpriv, struct net_device *dev,
-               const char *msg)
+static int state_check(u32 state, struct dscc4_dev_priv *dpriv,
+                      struct net_device *dev, const char *msg)
 {
        int ret = 0;
 
@@ -466,8 +466,9 @@ int state_check(u32 state, struct dscc4_dev_priv *dpriv, struct net_device *dev,
        return ret;
 }
 
-void dscc4_tx_print(struct net_device *dev, struct dscc4_dev_priv *dpriv,
-                   char *msg)
+static void dscc4_tx_print(struct net_device *dev,
+                          struct dscc4_dev_priv *dpriv,
+                          char *msg)
 {
        printk(KERN_DEBUG "%s: tx_current=%02d tx_dirty=%02d (%s)\n",
               dev->name, dpriv->tx_current, dpriv->tx_dirty, msg);
@@ -507,7 +508,8 @@ static void dscc4_release_ring(struct dscc4_dev_priv *dpriv)
        }
 }
 
-inline int try_get_rx_skb(struct dscc4_dev_priv *dpriv, struct net_device *dev)
+static inline int try_get_rx_skb(struct dscc4_dev_priv *dpriv,
+                                struct net_device *dev)
 {
        unsigned int dirty = dpriv->rx_dirty%RX_RING_SIZE;
        struct RxFD *rx_fd = dpriv->rx_fd + dirty;
@@ -542,8 +544,7 @@ static int dscc4_wait_ack_cec(struct dscc4_dev_priv *dpriv,
                               msg, i);
                        goto done;
                }
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(10);
+               schedule_timeout_uninterruptible(10);
                rmb();
        } while (++i > 0);
        printk(KERN_ERR "%s: %s timeout\n", dev->name, msg);
@@ -588,8 +589,7 @@ static inline int dscc4_xpr_ack(struct dscc4_dev_priv *dpriv)
                    (dpriv->iqtx[cur] & Xpr))
                        break;
                smp_rmb();
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(10);
+               schedule_timeout_uninterruptible(10);
        } while (++i > 0);
 
        return (i >= 0 ) ? i : -EAGAIN;
@@ -1035,8 +1035,7 @@ static void dscc4_pci_reset(struct pci_dev *pdev, void __iomem *ioaddr)
        /* Flush posted writes */
        readl(ioaddr + GSTAR);
 
-       set_current_state(TASK_UNINTERRUPTIBLE);
-       schedule_timeout(10);
+       schedule_timeout_uninterruptible(10);
 
        for (i = 0; i < 16; i++)
                pci_write_config_dword(pdev, i << 2, dscc4_pci_config_store[i]);
@@ -1894,7 +1893,7 @@ try:
  * It failed and locked solid. Thus the introduction of a dummy skb.
  * Problem is acknowledged in errata sheet DS5. Joy :o/
  */
-struct sk_buff *dscc4_init_dummy_skb(struct dscc4_dev_priv *dpriv)
+static struct sk_buff *dscc4_init_dummy_skb(struct dscc4_dev_priv *dpriv)
 {
        struct sk_buff *skb;
 
index 2c83cca34b8698cc269a0feda4def2b5a5d00c9d..7981a2c7906e0522aa3a34c4cd9ba7f6c84637fc 100644 (file)
@@ -74,11 +74,11 @@ MODULE_LICENSE("GPL");
 /*
  * Module parameters and associated variables
  */
-int fst_txq_low = FST_LOW_WATER_MARK;
-int fst_txq_high = FST_HIGH_WATER_MARK;
-int fst_max_reads = 7;
-int fst_excluded_cards = 0;
-int fst_excluded_list[FST_MAX_CARDS];
+static int fst_txq_low = FST_LOW_WATER_MARK;
+static int fst_txq_high = FST_HIGH_WATER_MARK;
+static int fst_max_reads = 7;
+static int fst_excluded_cards = 0;
+static int fst_excluded_list[FST_MAX_CARDS];
 
 module_param(fst_txq_low, int, 0);
 module_param(fst_txq_high, int, 0);
@@ -572,13 +572,13 @@ static void do_bottom_half_rx(struct fst_card_info *card);
 static void fst_process_tx_work_q(unsigned long work_q);
 static void fst_process_int_work_q(unsigned long work_q);
 
-DECLARE_TASKLET(fst_tx_task, fst_process_tx_work_q, 0);
-DECLARE_TASKLET(fst_int_task, fst_process_int_work_q, 0);
+static DECLARE_TASKLET(fst_tx_task, fst_process_tx_work_q, 0);
+static DECLARE_TASKLET(fst_int_task, fst_process_int_work_q, 0);
 
-struct fst_card_info *fst_card_array[FST_MAX_CARDS];
-spinlock_t fst_work_q_lock;
-u64 fst_work_txq;
-u64 fst_work_intq;
+static struct fst_card_info *fst_card_array[FST_MAX_CARDS];
+static spinlock_t fst_work_q_lock;
+static u64 fst_work_txq;
+static u64 fst_work_intq;
 
 static void
 fst_q_work_item(u64 * queue, int card_index)
@@ -980,8 +980,7 @@ fst_issue_cmd(struct fst_port_info *port, unsigned short cmd)
        /* Wait for any previous command to complete */
        while (mbval > NAK) {
                spin_unlock_irqrestore(&card->card_lock, flags);
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(1);
+               schedule_timeout_uninterruptible(1);
                spin_lock_irqsave(&card->card_lock, flags);
 
                if (++safety > 2000) {
@@ -1498,7 +1497,7 @@ do_bottom_half_rx(struct fst_card_info *card)
  *      The interrupt service routine
  *      Dev_id is our fst_card_info pointer
  */
-irqreturn_t
+static irqreturn_t
 fst_intr(int irq, void *dev_id, struct pt_regs *regs)
 {
        struct fst_card_info *card;
index a5d6891c9d4c3959fcb895d8dc1a2500a8eb6200..e1601d35dceddf133379a57155ca9472294b2eac 100644 (file)
@@ -330,7 +330,7 @@ static int pvc_close(struct net_device *dev)
 
 
 
-int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
        pvc_device *pvc = dev_to_pvc(dev);
        fr_proto_pvc_info info;
index 9dccd9546a17b829b0816866d627d9d25c7bd0c5..3b94352b0d03178b31cdf5d364beaffd062c0390 100644 (file)
@@ -8,10 +8,10 @@
 /*
  * Prints out len, max to 80 octets using printk, 20 per line
  */
-void lmcConsoleLog(char *type, unsigned char *ucData, int iLen)
-{
 #ifdef DEBUG
 #ifdef LMC_PACKET_LOG
+void lmcConsoleLog(char *type, unsigned char *ucData, int iLen)
+{
   int iNewLine = 1;
   char str[80], *pstr;
   
@@ -43,26 +43,24 @@ void lmcConsoleLog(char *type, unsigned char *ucData, int iLen)
     }
   sprintf(pstr, "\n");
   printk(str);
+}
 #endif
 #endif
-}
 
 #ifdef DEBUG
 u_int32_t lmcEventLogIndex = 0;
 u_int32_t lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS];
-#endif
 
 void lmcEventLog (u_int32_t EventNum, u_int32_t arg2, u_int32_t arg3)
 {
-#ifdef DEBUG
   lmcEventLogBuf[lmcEventLogIndex++] = EventNum;
   lmcEventLogBuf[lmcEventLogIndex++] = arg2;
   lmcEventLogBuf[lmcEventLogIndex++] = arg3;
   lmcEventLogBuf[lmcEventLogIndex++] = jiffies;
 
   lmcEventLogIndex &= (LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS) - 1;
-#endif
 }
+#endif  /*  DEBUG  */
 
 void lmc_trace(struct net_device *dev, char *msg){
 #ifdef LMC_TRACE
index f55ce76b00edd4545fe826f6844b338acd1cc46d..af8b55fdd9d98008bfe040b895ae734bfafb088b 100644 (file)
   * of the GNU General Public License version 2, incorporated herein by reference.
   */
 
-/*
- * For lack of a better place, put the SSI cable stuff here.
- */
-char *lmc_t1_cables[] = {
-  "V.10/RS423", "EIA530A", "reserved", "X.21", "V.35",
-  "EIA449/EIA530/V.36", "V.28/EIA232", "none", NULL
-};
-
 /*
  * protocol independent method.
  */
index 73401b0f01517ca8bb533acdfae0b05d6a175a70..2024b26b99e6e2728945af0c553455cb1bdaef9a 100644 (file)
@@ -472,24 +472,8 @@ enum pc300_loopback_cmds {
 
 #ifdef __KERNEL__
 /* Function Prototypes */
-int dma_buf_write(pc300_t *, int, ucchar *, int);
-int dma_buf_read(pc300_t *, int, struct sk_buff *);
 void tx_dma_start(pc300_t *, int);
-void rx_dma_start(pc300_t *, int);
-void tx_dma_stop(pc300_t *, int);
-void rx_dma_stop(pc300_t *, int);
-int cpc_queue_xmit(struct sk_buff *, struct net_device *);
-void cpc_net_rx(struct net_device *);
-void cpc_sca_status(pc300_t *, int);
-int cpc_change_mtu(struct net_device *, int);
-int cpc_ioctl(struct net_device *, struct ifreq *, int);
-int ch_config(pc300dev_t *);
-int rx_config(pc300dev_t *);
-int tx_config(pc300dev_t *);
-void cpc_opench(pc300dev_t *);
-void cpc_closech(pc300dev_t *);
 int cpc_open(struct net_device *dev);
-int cpc_close(struct net_device *dev);
 int cpc_set_media(hdlc_device *, int);
 #endif /* __KERNEL__ */
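
The pc300.h hunk above removes prototypes for helpers that are only used inside the driver; the pc300_drv.c hunks that follow give those same functions static linkage, as the cycx, dscc4 and farsync changes earlier did for their internals. A trivial before/after sketch with placeholder names:

        #include <stdio.h>

        /* before: int demo_helper(int x); was declared in a shared header,
         * making the symbol visible to the rest of the kernel */

        /* after: private to this translation unit, no header prototype needed */
        static int demo_helper(int x)
        {
                return x * 2;
        }

        int main(void)
        {
                printf("%d\n", demo_helper(21));   /* prints 42 */
                return 0;
        }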
 
index 3e7753b1071759f147851c69ed38e0317d13f994..a3e65d1bc19bbcddda71bd74f2ec707a6a0bf5f1 100644 (file)
@@ -291,6 +291,7 @@ static uclong detect_ram(pc300_t *);
 static void plx_init(pc300_t *);
 static void cpc_trace(struct net_device *, struct sk_buff *, char);
 static int cpc_attach(struct net_device *, unsigned short, unsigned short);
+static int cpc_close(struct net_device *dev);
 
 #ifdef CONFIG_PC300_MLPPP
 void cpc_tty_init(pc300dev_t * dev);
@@ -437,7 +438,7 @@ static void rx_dma_buf_check(pc300_t * card, int ch)
        printk("\n");
 }
 
-int dma_get_rx_frame_size(pc300_t * card, int ch)
+static int dma_get_rx_frame_size(pc300_t * card, int ch)
 {
        volatile pcsca_bd_t __iomem *ptdescr;
        ucshort first_bd = card->chan[ch].rx_first_bd;
@@ -462,7 +463,7 @@ int dma_get_rx_frame_size(pc300_t * card, int ch)
  * dma_buf_write: writes a frame to the Tx DMA buffers
  * NOTE: this function writes one frame at a time.
  */
-int dma_buf_write(pc300_t * card, int ch, ucchar * ptdata, int len)
+static int dma_buf_write(pc300_t * card, int ch, ucchar * ptdata, int len)
 {
        int i, nchar;
        volatile pcsca_bd_t __iomem *ptdescr;
@@ -503,7 +504,7 @@ int dma_buf_write(pc300_t * card, int ch, ucchar * ptdata, int len)
  * dma_buf_read: reads a frame from the Rx DMA buffers
  * NOTE: this function reads one frame at a time.
  */
-int dma_buf_read(pc300_t * card, int ch, struct sk_buff *skb)
+static int dma_buf_read(pc300_t * card, int ch, struct sk_buff *skb)
 {
        int nchar;
        pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
@@ -560,7 +561,7 @@ int dma_buf_read(pc300_t * card, int ch, struct sk_buff *skb)
        return (rcvd);
 }
 
-void tx_dma_stop(pc300_t * card, int ch)
+static void tx_dma_stop(pc300_t * card, int ch)
 {
        void __iomem *scabase = card->hw.scabase;
        ucchar drr_ena_bit = 1 << (5 + 2 * ch);
@@ -571,7 +572,7 @@ void tx_dma_stop(pc300_t * card, int ch)
        cpc_writeb(scabase + DRR, drr_rst_bit & ~drr_ena_bit);
 }
 
-void rx_dma_stop(pc300_t * card, int ch)
+static void rx_dma_stop(pc300_t * card, int ch)
 {
        void __iomem *scabase = card->hw.scabase;
        ucchar drr_ena_bit = 1 << (4 + 2 * ch);
@@ -582,7 +583,7 @@ void rx_dma_stop(pc300_t * card, int ch)
        cpc_writeb(scabase + DRR, drr_rst_bit & ~drr_ena_bit);
 }
 
-void rx_dma_start(pc300_t * card, int ch)
+static void rx_dma_start(pc300_t * card, int ch)
 {
        void __iomem *scabase = card->hw.scabase;
        pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
@@ -607,7 +608,7 @@ void rx_dma_start(pc300_t * card, int ch)
 /*************************/
 /***   FALC Routines   ***/
 /*************************/
-void falc_issue_cmd(pc300_t * card, int ch, ucchar cmd)
+static void falc_issue_cmd(pc300_t * card, int ch, ucchar cmd)
 {
        void __iomem *falcbase = card->hw.falcbase;
        unsigned long i = 0;
@@ -622,7 +623,7 @@ void falc_issue_cmd(pc300_t * card, int ch, ucchar cmd)
        cpc_writeb(falcbase + F_REG(CMDR, ch), cmd);
 }
 
-void falc_intr_enable(pc300_t * card, int ch)
+static void falc_intr_enable(pc300_t * card, int ch)
 {
        pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
        pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -672,7 +673,7 @@ void falc_intr_enable(pc300_t * card, int ch)
        }
 }
 
-void falc_open_timeslot(pc300_t * card, int ch, int timeslot)
+static void falc_open_timeslot(pc300_t * card, int ch, int timeslot)
 {
        void __iomem *falcbase = card->hw.falcbase;
        ucchar tshf = card->chan[ch].falc.offset;
@@ -688,7 +689,7 @@ void falc_open_timeslot(pc300_t * card, int ch, int timeslot)
                        (0x80 >> (timeslot & 0x07)));
 }
 
-void falc_close_timeslot(pc300_t * card, int ch, int timeslot)
+static void falc_close_timeslot(pc300_t * card, int ch, int timeslot)
 {
        void __iomem *falcbase = card->hw.falcbase;
        ucchar tshf = card->chan[ch].falc.offset;
@@ -704,7 +705,7 @@ void falc_close_timeslot(pc300_t * card, int ch, int timeslot)
                   ~(0x80 >> (timeslot & 0x07)));
 }
 
-void falc_close_all_timeslots(pc300_t * card, int ch)
+static void falc_close_all_timeslots(pc300_t * card, int ch)
 {
        pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
        pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -726,7 +727,7 @@ void falc_close_all_timeslots(pc300_t * card, int ch)
        }
 }
 
-void falc_open_all_timeslots(pc300_t * card, int ch)
+static void falc_open_all_timeslots(pc300_t * card, int ch)
 {
        pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
        pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -758,7 +759,7 @@ void falc_open_all_timeslots(pc300_t * card, int ch)
        }
 }
 
-void falc_init_timeslot(pc300_t * card, int ch)
+static void falc_init_timeslot(pc300_t * card, int ch)
 {
        pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
        pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -776,7 +777,7 @@ void falc_init_timeslot(pc300_t * card, int ch)
        }
 }
 
-void falc_enable_comm(pc300_t * card, int ch)
+static void falc_enable_comm(pc300_t * card, int ch)
 {
        pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
        falc_t *pfalc = (falc_t *) & chan->falc;
@@ -792,7 +793,7 @@ void falc_enable_comm(pc300_t * card, int ch)
                   ~((CPLD_REG1_FALC_DCD | CPLD_REG1_FALC_CTS) << (2 * ch)));
 }
 
-void falc_disable_comm(pc300_t * card, int ch)
+static void falc_disable_comm(pc300_t * card, int ch)
 {
        pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
        falc_t *pfalc = (falc_t *) & chan->falc;
@@ -806,7 +807,7 @@ void falc_disable_comm(pc300_t * card, int ch)
                   ((CPLD_REG1_FALC_DCD | CPLD_REG1_FALC_CTS) << (2 * ch)));
 }
 
-void falc_init_t1(pc300_t * card, int ch)
+static void falc_init_t1(pc300_t * card, int ch)
 {
        pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
        pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -975,7 +976,7 @@ void falc_init_t1(pc300_t * card, int ch)
        falc_close_all_timeslots(card, ch);
 }
 
-void falc_init_e1(pc300_t * card, int ch)
+static void falc_init_e1(pc300_t * card, int ch)
 {
        pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
        pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -1155,7 +1156,7 @@ void falc_init_e1(pc300_t * card, int ch)
        falc_close_all_timeslots(card, ch);
 }
 
-void falc_init_hdlc(pc300_t * card, int ch)
+static void falc_init_hdlc(pc300_t * card, int ch)
 {
        void __iomem *falcbase = card->hw.falcbase;
        pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
@@ -1181,7 +1182,7 @@ void falc_init_hdlc(pc300_t * card, int ch)
        falc_intr_enable(card, ch);
 }
 
-void te_config(pc300_t * card, int ch)
+static void te_config(pc300_t * card, int ch)
 {
        pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
        pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -1241,7 +1242,7 @@ void te_config(pc300_t * card, int ch)
        CPC_UNLOCK(card, flags);
 }
 
-void falc_check_status(pc300_t * card, int ch, unsigned char frs0)
+static void falc_check_status(pc300_t * card, int ch, unsigned char frs0)
 {
        pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
        pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -1397,7 +1398,7 @@ void falc_check_status(pc300_t * card, int ch, unsigned char frs0)
        }
 }
 
-void falc_update_stats(pc300_t * card, int ch)
+static void falc_update_stats(pc300_t * card, int ch)
 {
        pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
        pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -1450,7 +1451,7 @@ void falc_update_stats(pc300_t * card, int ch)
  *             the synchronizer and then sent to the system interface.
  *----------------------------------------------------------------------------
  */
-void falc_remote_loop(pc300_t * card, int ch, int loop_on)
+static void falc_remote_loop(pc300_t * card, int ch, int loop_on)
 {
        pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
        pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -1495,7 +1496,7 @@ void falc_remote_loop(pc300_t * card, int ch, int loop_on)
  *             coding must be identical.
  *----------------------------------------------------------------------------
  */
-void falc_local_loop(pc300_t * card, int ch, int loop_on)
+static void falc_local_loop(pc300_t * card, int ch, int loop_on)
 {
        pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
        falc_t *pfalc = (falc_t *) & chan->falc;
@@ -1522,7 +1523,7 @@ void falc_local_loop(pc300_t * card, int ch, int loop_on)
  *             looped. They are originated by the FALC-LH transmitter.
  *----------------------------------------------------------------------------
  */
-void falc_payload_loop(pc300_t * card, int ch, int loop_on)
+static void falc_payload_loop(pc300_t * card, int ch, int loop_on)
 {
        pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
        pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -1576,7 +1577,7 @@ void falc_payload_loop(pc300_t * card, int ch, int loop_on)
  * Description:        Turns XLU bit off in the proper register
  *----------------------------------------------------------------------------
  */
-void turn_off_xlu(pc300_t * card, int ch)
+static void turn_off_xlu(pc300_t * card, int ch)
 {
        pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
        pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -1597,7 +1598,7 @@ void turn_off_xlu(pc300_t * card, int ch)
  * Description: Turns XLD bit off in the proper register
  *----------------------------------------------------------------------------
  */
-void turn_off_xld(pc300_t * card, int ch)
+static void turn_off_xld(pc300_t * card, int ch)
 {
        pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
        pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -1619,7 +1620,7 @@ void turn_off_xld(pc300_t * card, int ch)
  *             to generate a LOOP activation code over a T1/E1 line.
  *----------------------------------------------------------------------------
  */
-void falc_generate_loop_up_code(pc300_t * card, int ch)
+static void falc_generate_loop_up_code(pc300_t * card, int ch)
 {
        pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
        pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -1652,7 +1653,7 @@ void falc_generate_loop_up_code(pc300_t * card, int ch)
  *             to generate a LOOP deactivation code over a T1/E1 line.
  *----------------------------------------------------------------------------
  */
-void falc_generate_loop_down_code(pc300_t * card, int ch)
+static void falc_generate_loop_down_code(pc300_t * card, int ch)
 {
        pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
        pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -1682,7 +1683,7 @@ void falc_generate_loop_down_code(pc300_t * card, int ch)
  *             it on the reception side.
  *----------------------------------------------------------------------------
  */
-void falc_pattern_test(pc300_t * card, int ch, unsigned int activate)
+static void falc_pattern_test(pc300_t * card, int ch, unsigned int activate)
 {
        pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
        pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -1729,7 +1730,7 @@ void falc_pattern_test(pc300_t * card, int ch, unsigned int activate)
  * Description:        This routine returns the bit error counter value
  *----------------------------------------------------------------------------
  */
-ucshort falc_pattern_test_error(pc300_t * card, int ch)
+static ucshort falc_pattern_test_error(pc300_t * card, int ch)
 {
        pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
        falc_t *pfalc = (falc_t *) & chan->falc;
@@ -1769,7 +1770,7 @@ cpc_trace(struct net_device *dev, struct sk_buff *skb_main, char rx_tx)
        netif_rx(skb);
 }
 
-void cpc_tx_timeout(struct net_device *dev)
+static void cpc_tx_timeout(struct net_device *dev)
 {
        pc300dev_t *d = (pc300dev_t *) dev->priv;
        pc300ch_t *chan = (pc300ch_t *) d->chan;
@@ -1797,7 +1798,7 @@ void cpc_tx_timeout(struct net_device *dev)
        netif_wake_queue(dev);
 }
 
-int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev)
+static int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        pc300dev_t *d = (pc300dev_t *) dev->priv;
        pc300ch_t *chan = (pc300ch_t *) d->chan;
@@ -1880,7 +1881,7 @@ int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev)
        return 0;
 }
 
-void cpc_net_rx(struct net_device *dev)
+static void cpc_net_rx(struct net_device *dev)
 {
        pc300dev_t *d = (pc300dev_t *) dev->priv;
        pc300ch_t *chan = (pc300ch_t *) d->chan;
@@ -2403,7 +2404,7 @@ static irqreturn_t cpc_intr(int irq, void *dev_id, struct pt_regs *regs)
        return IRQ_HANDLED;
 }
 
-void cpc_sca_status(pc300_t * card, int ch)
+static void cpc_sca_status(pc300_t * card, int ch)
 {
        ucchar ilar;
        void __iomem *scabase = card->hw.scabase;
@@ -2495,7 +2496,7 @@ void cpc_sca_status(pc300_t * card, int ch)
        }
 }
 
-void cpc_falc_status(pc300_t * card, int ch)
+static void cpc_falc_status(pc300_t * card, int ch)
 {
        pc300ch_t *chan = &card->chan[ch];
        falc_t *pfalc = (falc_t *) & chan->falc;
@@ -2523,7 +2524,7 @@ void cpc_falc_status(pc300_t * card, int ch)
        CPC_UNLOCK(card, flags);
 }
 
-int cpc_change_mtu(struct net_device *dev, int new_mtu)
+static int cpc_change_mtu(struct net_device *dev, int new_mtu)
 {
        if ((new_mtu < 128) || (new_mtu > PC300_DEF_MTU))
                return -EINVAL;
@@ -2531,7 +2532,7 @@ int cpc_change_mtu(struct net_device *dev, int new_mtu)
        return 0;
 }
 
-int cpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int cpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
        hdlc_device *hdlc = dev_to_hdlc(dev);
        pc300dev_t *d = (pc300dev_t *) dev->priv;
@@ -2856,7 +2857,7 @@ static int clock_rate_calc(uclong rate, uclong clock, int *br_io)
        }
 }
 
-int ch_config(pc300dev_t * d)
+static int ch_config(pc300dev_t * d)
 {
        pc300ch_t *chan = (pc300ch_t *) d->chan;
        pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -3004,7 +3005,7 @@ int ch_config(pc300dev_t * d)
        return 0;
 }
 
-int rx_config(pc300dev_t * d)
+static int rx_config(pc300dev_t * d)
 {
        pc300ch_t *chan = (pc300ch_t *) d->chan;
        pc300_t *card = (pc300_t *) chan->card;
@@ -3035,7 +3036,7 @@ int rx_config(pc300dev_t * d)
        return 0;
 }
 
-int tx_config(pc300dev_t * d)
+static int tx_config(pc300dev_t * d)
 {
        pc300ch_t *chan = (pc300ch_t *) d->chan;
        pc300_t *card = (pc300_t *) chan->card;
@@ -3098,7 +3099,7 @@ static int cpc_attach(struct net_device *dev, unsigned short encoding,
        return 0;
 }
 
-void cpc_opench(pc300dev_t * d)
+static void cpc_opench(pc300dev_t * d)
 {
        pc300ch_t *chan = (pc300ch_t *) d->chan;
        pc300_t *card = (pc300_t *) chan->card;
@@ -3116,7 +3117,7 @@ void cpc_opench(pc300dev_t * d)
                   cpc_readb(scabase + M_REG(CTL, ch)) & ~(CTL_RTS | CTL_DTR));
 }
 
-void cpc_closech(pc300dev_t * d)
+static void cpc_closech(pc300dev_t * d)
 {
        pc300ch_t *chan = (pc300ch_t *) d->chan;
        pc300_t *card = (pc300_t *) chan->card;
@@ -3173,7 +3174,7 @@ int cpc_open(struct net_device *dev)
        return 0;
 }
 
-int cpc_close(struct net_device *dev)
+static int cpc_close(struct net_device *dev)
 {
        hdlc_device *hdlc = dev_to_hdlc(dev);
        pc300dev_t *d = (pc300dev_t *) dev->priv;
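
The pc300 hunks above all apply one mechanical change: helpers that are only called from inside this driver are marked static, giving them internal linkage so they no longer land in the kernel's global symbol table and so the compiler can warn when one goes unused. A minimal, self-contained sketch of the pattern in plain C, with hypothetical names that are not taken from the driver:

#include <stdio.h>

/* Internal helper: "static" keeps the symbol private to this file.
 * Without it the function has external linkage and can collide with,
 * or be silently resolved against, symbols in other objects. */
static int frame_checksum(const unsigned char *buf, int len)
{
        int i, sum = 0;

        for (i = 0; i < len; i++)
                sum += buf[i];
        return sum & 0xff;
}

int main(void)
{
        unsigned char frame[4] = { 0x01, 0x02, 0x03, 0x04 };

        printf("checksum=%d\n", frame_checksum(frame, 4));
        return 0;
}

The same rule drives the tty, SDLA and syncppp hunks further down: functions and file-scope variables with no users outside their own file gain static, and the now-unneeded EXPORT_SYMBOL() lines are dropped.
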
index 8454bf6caaa70136f6106869a30a8f61899c0002..52f26b9c69d245e9aa090e374964f04d5895f7b0 100644 (file)
@@ -112,10 +112,10 @@ typedef   struct _st_cpc_tty_area {
 static struct tty_driver serial_drv;
 
 /* local variables */
-st_cpc_tty_area        cpc_tty_area[CPC_TTY_NPORTS];
+static st_cpc_tty_area cpc_tty_area[CPC_TTY_NPORTS];
 
-int cpc_tty_cnt=0;     /* number of intrfaces configured with MLPPP */
-int cpc_tty_unreg_flag = 0;
+static int cpc_tty_cnt = 0;    /* number of intrfaces configured with MLPPP */
+static int cpc_tty_unreg_flag = 0;
 
 /* TTY functions prototype */
 static int cpc_tty_open(struct tty_struct *tty, struct file *flip);
@@ -132,9 +132,9 @@ static void cpc_tty_trace(pc300dev_t *dev, char* buf, int len, char rxtx);
 static void cpc_tty_signal_off(pc300dev_t *pc300dev, unsigned char);
 static void cpc_tty_signal_on(pc300dev_t *pc300dev, unsigned char);
 
-int pc300_tiocmset(struct tty_struct *, struct file *,
-                       unsigned int, unsigned int);
-int pc300_tiocmget(struct tty_struct *, struct file *);
+static int pc300_tiocmset(struct tty_struct *, struct file *,
+                         unsigned int, unsigned int);
+static int pc300_tiocmget(struct tty_struct *, struct file *);
 
 /* functions called by PC300 driver */
 void cpc_tty_init(pc300dev_t *dev);
@@ -538,8 +538,8 @@ static int cpc_tty_chars_in_buffer(struct tty_struct *tty)
        return(0); 
 } 
 
-int pc300_tiocmset(struct tty_struct *tty, struct file *file,
-                       unsigned int set, unsigned int clear)
+static int pc300_tiocmset(struct tty_struct *tty, struct file *file,
+                         unsigned int set, unsigned int clear)
 {
        st_cpc_tty_area    *cpc_tty; 
 
@@ -565,7 +565,7 @@ int pc300_tiocmset(struct tty_struct *tty, struct file *file,
        return 0;
 }
 
-int pc300_tiocmget(struct tty_struct *tty, struct file *file)
+static int pc300_tiocmget(struct tty_struct *tty, struct file *file)
 {
        unsigned int result;
        unsigned char status;
index 3ac9a45b20fae7da4de35a3a4ae8055f90ee2501..036adc4f8ba7e85dab072fb7595cbe8f57e1e8d5 100644 (file)
@@ -182,7 +182,7 @@ static char sdla_byte(struct net_device *dev, int addr)
        return(byte);
 }
 
-void sdla_stop(struct net_device *dev)
+static void sdla_stop(struct net_device *dev)
 {
        struct frad_local *flp;
 
@@ -209,7 +209,7 @@ void sdla_stop(struct net_device *dev)
        }
 }
 
-void sdla_start(struct net_device *dev)
+static void sdla_start(struct net_device *dev)
 {
        struct frad_local *flp;
 
@@ -247,7 +247,7 @@ void sdla_start(struct net_device *dev)
  *
  ***************************************************/
 
-int sdla_z80_poll(struct net_device *dev, int z80_addr, int jiffs, char resp1, char resp2)
+static int sdla_z80_poll(struct net_device *dev, int z80_addr, int jiffs, char resp1, char resp2)
 {
        unsigned long start, done, now;
        char          resp, *temp;
@@ -505,7 +505,7 @@ static int sdla_cmd(struct net_device *dev, int cmd, short dlci, short flags,
 
 static int sdla_reconfig(struct net_device *dev);
 
-int sdla_activate(struct net_device *slave, struct net_device *master)
+static int sdla_activate(struct net_device *slave, struct net_device *master)
 {
        struct frad_local *flp;
        int i;
@@ -527,7 +527,7 @@ int sdla_activate(struct net_device *slave, struct net_device *master)
        return(0);
 }
 
-int sdla_deactivate(struct net_device *slave, struct net_device *master)
+static int sdla_deactivate(struct net_device *slave, struct net_device *master)
 {
        struct frad_local *flp;
        int               i;
@@ -549,7 +549,7 @@ int sdla_deactivate(struct net_device *slave, struct net_device *master)
        return(0);
 }
 
-int sdla_assoc(struct net_device *slave, struct net_device *master)
+static int sdla_assoc(struct net_device *slave, struct net_device *master)
 {
        struct frad_local *flp;
        int               i;
@@ -585,7 +585,7 @@ int sdla_assoc(struct net_device *slave, struct net_device *master)
        return(0);
 }
 
-int sdla_deassoc(struct net_device *slave, struct net_device *master)
+static int sdla_deassoc(struct net_device *slave, struct net_device *master)
 {
        struct frad_local *flp;
        int               i;
@@ -613,7 +613,7 @@ int sdla_deassoc(struct net_device *slave, struct net_device *master)
        return(0);
 }
 
-int sdla_dlci_conf(struct net_device *slave, struct net_device *master, int get)
+static int sdla_dlci_conf(struct net_device *slave, struct net_device *master, int get)
 {
        struct frad_local *flp;
        struct dlci_local *dlp;
@@ -1324,7 +1324,7 @@ NOTE:  This is rather a useless action right now, as the
        return(0);
 }
 
-int sdla_change_mtu(struct net_device *dev, int new_mtu)
+static int sdla_change_mtu(struct net_device *dev, int new_mtu)
 {
        struct frad_local *flp;
 
@@ -1337,7 +1337,7 @@ int sdla_change_mtu(struct net_device *dev, int new_mtu)
        return(-EOPNOTSUPP);
 }
 
-int sdla_set_config(struct net_device *dev, struct ifmap *map)
+static int sdla_set_config(struct net_device *dev, struct ifmap *map)
 {
        struct frad_local *flp;
        int               i;
index 0497dbdb8631fe379c7f952dc2c50ee7c21bd814..7f1ce9d4333e8fc440b957b532f96ca0318ed4d8 100644 (file)
@@ -822,7 +822,7 @@ static int new_if(struct wan_device* wandev, struct net_device* dev,
        chan->card = card;
 
        /* verify media address */
-       if (is_digit(conf->addr[0])) {
+       if (isdigit(conf->addr[0])) {
 
                dlci = dec_to_uint(conf->addr, 0);
 
@@ -3456,7 +3456,7 @@ static unsigned int dec_to_uint (unsigned char* str, int len)
        if (!len) 
                len = strlen(str);
 
-       for (val = 0; len && is_digit(*str); ++str, --len)
+       for (val = 0; len && isdigit(*str); ++str, --len)
                val = (val * 10) + (*str - (unsigned)'0');
 
        return val;
index 8a95d61a2f8ff0fa3b980990c13f2f32be00dd25..63f846d6f3a6dd87592db1ac4fd09e0a67a22f83 100644 (file)
@@ -957,7 +957,7 @@ static int new_if(struct wan_device* wandev, struct net_device* dev,
                chan->hold_timeout = (conf->hold_timeout) ? 
                                        conf->hold_timeout : 10;
 
-       }else if (is_digit(conf->addr[0])){     /* PVC */
+       }else if (isdigit(conf->addr[0])){      /* PVC */
                int lcn = dec_to_uint(conf->addr, 0);
 
                if ((lcn >= card->u.x.lo_pvc) && (lcn <= card->u.x.hi_pvc)){
@@ -3875,7 +3875,7 @@ static unsigned int dec_to_uint (unsigned char* str, int len)
        if (!len) 
                len = strlen(str);
 
-       for (val = 0; len && is_digit(*str); ++str, --len)
+       for (val = 0; len && isdigit(*str); ++str, --len)
                val = (val * 10) + (*str - (unsigned)'0');
        
        return val;
@@ -3896,9 +3896,9 @@ static unsigned int hex_to_uint (unsigned char* str, int len)
        for (val = 0; len; ++str, --len)
        {
                ch = *str;
-               if (is_digit(ch))
+               if (isdigit(ch))
                        val = (val << 4) + (ch - (unsigned)'0');
-               else if (is_hex_digit(ch))
+               else if (isxdigit(ch))
                        val = (val << 4) + ((ch & 0xDF) - (unsigned)'A' + 10);
                else break;
        }
index c8bc6da57a418b2b62fb18c9ccfa3c06dcea362c..7c2cf2e76300c4f7fce69777653da0742bfdb8d7 100644 (file)
@@ -642,9 +642,7 @@ int sdla_mapmem (sdlahw_t* hw, unsigned long addr)
  * Enable interrupt generation.
  */
 
-EXPORT_SYMBOL(sdla_inten);
-
-int sdla_inten (sdlahw_t* hw)
+static int sdla_inten (sdlahw_t* hw)
 {
        unsigned port = hw->port;
        int tmp, i;
@@ -698,8 +696,7 @@ int sdla_inten (sdlahw_t* hw)
  * Disable interrupt generation.
  */
 
-EXPORT_SYMBOL(sdla_intde);
-
+#if 0
 int sdla_intde (sdlahw_t* hw)
 {
        unsigned port = hw->port;
@@ -748,14 +745,13 @@ int sdla_intde (sdlahw_t* hw)
        }
        return 0;
 }
+#endif  /*  0  */
 
 /*============================================================================
  * Acknowledge SDLA hardware interrupt.
  */
 
-EXPORT_SYMBOL(sdla_intack);
-
-int sdla_intack (sdlahw_t* hw)
+static int sdla_intack (sdlahw_t* hw)
 {
        unsigned port = hw->port;
        int tmp;
@@ -827,8 +823,7 @@ void read_S514_int_stat (sdlahw_t* hw, u32* int_status)
  * Generate an interrupt to adapter's CPU.
  */
 
-EXPORT_SYMBOL(sdla_intr);
-
+#if 0
 int sdla_intr (sdlahw_t* hw)
 {
        unsigned port = hw->port;
@@ -863,6 +858,7 @@ int sdla_intr (sdlahw_t* hw)
        }
        return 0;
 }
+#endif  /*  0  */
 
 /*============================================================================
  * Execute Adapter Command.
index a6d3b55013a5106b379bd5bddf47b4e89f1ad3f4..2d1bba06a08512d5d0a5af5bb8283ee9f9493922 100644 (file)
@@ -221,7 +221,7 @@ static void sppp_clear_timeout(struct sppp *p)
  *     here.
  */
  
-void sppp_input (struct net_device *dev, struct sk_buff *skb)
+static void sppp_input (struct net_device *dev, struct sk_buff *skb)
 {
        struct ppp_header *h;
        struct sppp *sp = (struct sppp *)sppp_of(dev);
@@ -355,8 +355,6 @@ done:
        return;
 }
 
-EXPORT_SYMBOL(sppp_input);
-
 /*
  *     Handle transmit packets.
  */
@@ -990,7 +988,7 @@ EXPORT_SYMBOL(sppp_reopen);
  *     the mtu is out of range.
  */
  
-int sppp_change_mtu(struct net_device *dev, int new_mtu)
+static int sppp_change_mtu(struct net_device *dev, int new_mtu)
 {
        if(new_mtu<128||new_mtu>PPP_MTU||(dev->flags&IFF_UP))
                return -EINVAL;
@@ -998,8 +996,6 @@ int sppp_change_mtu(struct net_device *dev, int new_mtu)
        return 0;
 }
 
-EXPORT_SYMBOL(sppp_change_mtu);
-
 /**
  *     sppp_do_ioctl - Ioctl handler for ppp/hdlc
  *     @dev: Device subject to ioctl
@@ -1456,7 +1452,7 @@ static int sppp_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_t
        return 0;
 }
 
-struct packet_type sppp_packet_type = {
+static struct packet_type sppp_packet_type = {
        .type   = __constant_htons(ETH_P_WAN_PPP),
        .func   = sppp_rcv,
 };
index 06998c2240d9f167096426a582ec588bebbce7fc..cb429e783749ab2e02f14af9e4cdcdae63c1e6b6 100644 (file)
@@ -1046,7 +1046,6 @@ static WifiCtlHdr wifictlhdr8023 = {
        }
 };
 
-#ifdef WIRELESS_EXT
 // Frequency list (map channels to frequencies)
 static const long frequency_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442,
                                2447, 2452, 2457, 2462, 2467, 2472, 2484 };
@@ -1067,7 +1066,6 @@ typedef struct wep_key_t {
 
 /* List of Wireless Handlers (new API) */
 static const struct iw_handler_def     airo_handler_def;
-#endif /* WIRELESS_EXT */
 
 static const char version[] = "airo.c 0.6 (Ben Reed & Javier Achirica)";
 
@@ -1110,10 +1108,8 @@ static irqreturn_t airo_interrupt( int irq, void* dev_id, struct pt_regs
 static int airo_thread(void *data);
 static void timer_func( struct net_device *dev );
 static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-#ifdef WIRELESS_EXT
 static struct iw_statistics *airo_get_wireless_stats (struct net_device *dev);
 static void airo_read_wireless_stats (struct airo_info *local);
-#endif /* WIRELESS_EXT */
 #ifdef CISCO_EXT
 static int readrids(struct net_device *dev, aironet_ioctl *comp);
 static int writerids(struct net_device *dev, aironet_ioctl *comp);
@@ -1187,12 +1183,10 @@ struct airo_info {
                int fid;
        } xmit, xmit11;
        struct net_device *wifidev;
-#ifdef WIRELESS_EXT
        struct iw_statistics    wstats;         // wireless stats
        unsigned long           scan_timestamp; /* Time started to scan */
        struct iw_spy_data      spy_data;
        struct iw_public_data   wireless_data;
-#endif /* WIRELESS_EXT */
 #ifdef MICSUPPORT
        /* MIC stuff */
        struct crypto_tfm       *tfm;
@@ -2527,7 +2521,8 @@ static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci,
        unsigned long mem_start, mem_len, aux_start, aux_len;
        int rc = -1;
        int i;
-       unsigned char *busaddroff,*vpackoff;
+       dma_addr_t busaddroff;
+       unsigned char *vpackoff;
        unsigned char __iomem *pciaddroff;
 
        mem_start = pci_resource_start(pci, 1);
@@ -2570,7 +2565,7 @@ static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci,
        /*
         * Setup descriptor RX, TX, CONFIG
         */
-       busaddroff = (unsigned char *)ai->shared_dma;
+       busaddroff = ai->shared_dma;
        pciaddroff = ai->pciaux + AUX_OFFSET;
        vpackoff   = ai->shared;
 
@@ -2579,7 +2574,7 @@ static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci,
                ai->rxfids[i].pending = 0;
                ai->rxfids[i].card_ram_off = pciaddroff;
                ai->rxfids[i].virtual_host_addr = vpackoff;
-               ai->rxfids[i].rx_desc.host_addr = (dma_addr_t) busaddroff;
+               ai->rxfids[i].rx_desc.host_addr = busaddroff;
                ai->rxfids[i].rx_desc.valid = 1;
                ai->rxfids[i].rx_desc.len = PKTSIZE;
                ai->rxfids[i].rx_desc.rdy = 0;
@@ -2594,7 +2589,7 @@ static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci,
                ai->txfids[i].card_ram_off = pciaddroff;
                ai->txfids[i].virtual_host_addr = vpackoff;
                ai->txfids[i].tx_desc.valid = 1;
-               ai->txfids[i].tx_desc.host_addr = (dma_addr_t) busaddroff;
+               ai->txfids[i].tx_desc.host_addr = busaddroff;
                memcpy(ai->txfids[i].virtual_host_addr,
                        &wifictlhdr8023, sizeof(wifictlhdr8023));
 
@@ -2607,8 +2602,8 @@ static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci,
        /* Rid descriptor setup */
        ai->config_desc.card_ram_off = pciaddroff;
        ai->config_desc.virtual_host_addr = vpackoff;
-       ai->config_desc.rid_desc.host_addr = (dma_addr_t) busaddroff;
-       ai->ridbus = (dma_addr_t)busaddroff;
+       ai->config_desc.rid_desc.host_addr = busaddroff;
+       ai->ridbus = busaddroff;
        ai->config_desc.rid_desc.rid = 0;
        ai->config_desc.rid_desc.len = RIDSIZE;
        ai->config_desc.rid_desc.valid = 1;
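
The mpi_map_card() hunks above stop round-tripping the shared-buffer bus address through an unsigned char * and keep it in dma_addr_t from allocation through descriptor programming, so the value cannot be truncated on configurations where dma_addr_t is wider than a pointer. A kernel-style sketch of the pattern using the pci_alloc_consistent() API of this era; the descriptor layout and function names below are hypothetical:

#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical RX descriptor: the device wants a bus address, so the
 * field is dma_addr_t, never a kernel virtual pointer. */
struct demo_rx_desc {
        dma_addr_t host_addr;
        u16 len;
        u16 valid;
};

static int demo_setup_ring(struct pci_dev *pdev, struct demo_rx_desc *desc,
                           int nr, u16 pktsize, void **cpu_ret)
{
        dma_addr_t busaddr;     /* bus address stays a dma_addr_t ...    */
        void *cpu;              /* ... the CPU-side view stays a pointer */
        int i;

        cpu = pci_alloc_consistent(pdev, (size_t)nr * pktsize, &busaddr);
        if (!cpu)
                return -ENOMEM;

        for (i = 0; i < nr; i++) {
                desc[i].host_addr = busaddr + i * pktsize;  /* no casts */
                desc[i].len = pktsize;
                desc[i].valid = 1;
        }
        *cpu_ret = cpu;
        return 0;
}
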
@@ -2647,9 +2642,7 @@ static void wifi_setup(struct net_device *dev)
        dev->get_stats = &airo_get_stats;
        dev->set_mac_address = &airo_set_mac_address;
        dev->do_ioctl = &airo_ioctl;
-#ifdef WIRELESS_EXT
        dev->wireless_handlers = &airo_handler_def;
-#endif /* WIRELESS_EXT */
        dev->change_mtu = &airo_change_mtu;
        dev->open = &airo_open;
        dev->stop = &airo_close;
@@ -2675,9 +2668,7 @@ static struct net_device *init_wifidev(struct airo_info *ai,
        dev->priv = ethdev->priv;
        dev->irq = ethdev->irq;
        dev->base_addr = ethdev->base_addr;
-#ifdef WIRELESS_EXT
        dev->wireless_data = ethdev->wireless_data;
-#endif /* WIRELESS_EXT */
        memcpy(dev->dev_addr, ethdev->dev_addr, dev->addr_len);
        err = register_netdev(dev);
        if (err<0) {
@@ -2755,11 +2746,9 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
        dev->set_multicast_list = &airo_set_multicast_list;
        dev->set_mac_address = &airo_set_mac_address;
        dev->do_ioctl = &airo_ioctl;
-#ifdef WIRELESS_EXT
        dev->wireless_handlers = &airo_handler_def;
        ai->wireless_data.spy_data = &ai->spy_data;
        dev->wireless_data = &ai->wireless_data;
-#endif /* WIRELESS_EXT */
        dev->change_mtu = &airo_change_mtu;
        dev->open = &airo_open;
        dev->stop = &airo_close;
@@ -5515,12 +5504,13 @@ static int airo_pci_resume(struct pci_dev *pdev)
        struct net_device *dev = pci_get_drvdata(pdev);
        struct airo_info *ai = dev->priv;
        Resp rsp;
+       pci_power_t prev_state = pdev->current_state;
 
-       pci_set_power_state(pdev, 0);
+       pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
-       pci_enable_wake(pdev, pci_choose_state(pdev, ai->power), 0);
+       pci_enable_wake(pdev, PCI_D0, 0);
 
-       if (ai->power.event > 1) {
+       if (prev_state != PCI_D1) {
                reset_card(dev, 0);
                mpi_init_descriptors(ai);
                setup_card(ai, dev->dev_addr, 0);
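
The resume hunk above latches pdev->current_state before touching the device, moves it to D0 using the symbolic PCI_D0 instead of a bare 0, and only skips the full re-initialisation when the device is coming back from the shallow D1 state. A minimal sketch of that resume shape; the re-init helper stands in for the driver's reset_card()/mpi_init_descriptors()/setup_card() sequence and is hypothetical:

#include <linux/pci.h>

/* Hypothetical stand-in for a device-specific full re-initialisation. */
static void demo_reinit(struct pci_dev *pdev)
{
}

static int demo_pci_resume(struct pci_dev *pdev)
{
        pci_power_t prev_state = pdev->current_state;

        pci_set_power_state(pdev, PCI_D0);    /* symbolic state, not 0  */
        pci_restore_state(pdev);
        pci_enable_wake(pdev, PCI_D0, 0);     /* disable wake-up events */

        if (prev_state != PCI_D1)
                demo_reinit(pdev);            /* deeper states lose context */

        return 0;
}
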
@@ -5598,7 +5588,6 @@ static void __exit airo_cleanup_module( void )
        remove_proc_entry("aironet", proc_root_driver);
 }
 
-#ifdef WIRELESS_EXT
 /*
  * Initial Wireless Extension code for Aironet driver by :
  *     Jean Tourrilhes <jt@hpl.hp.com> - HPL - 17 November 00
@@ -7107,8 +7096,6 @@ static const struct iw_handler_def        airo_handler_def =
        .get_wireless_stats = airo_get_wireless_stats,
 };
 
-#endif /* WIRELESS_EXT */
-
 /*
  * This defines the configuration part of the Wireless Extensions
  * Note : irq and spinlock protection will occur in the subroutines
@@ -7187,7 +7174,6 @@ static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
        return rc;
 }
 
-#ifdef WIRELESS_EXT
 /*
  * Get the Wireless stats out of the driver
  * Note : irq and spinlock protection will occur in the subroutines
@@ -7260,7 +7246,6 @@ static struct iw_statistics *airo_get_wireless_stats(struct net_device *dev)
 
        return &local->wstats;
 }
-#endif /* WIRELESS_EXT */
 
 #ifdef CISCO_EXT
 /*
index 9d496703c4650dec63a23689bfc44e814436ec50..7b321f7cf358d05b3656dfce2e918daa373ce39b 100644 (file)
 #define PFX DRIVER_NAME ": "
 
 #include <linux/config.h>
-
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/ioport.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/etherdevice.h>
-#include <linux/wireless.h>
-
-#include <asm/io.h>
-#include <asm/system.h>
-#include <asm/current.h>
-#include <asm/prom.h>
-#include <asm/machdep.h>
+#include <linux/delay.h>
 #include <asm/pmac_feature.h>
-#include <asm/irq.h>
-#include <asm/uaccess.h>
 
 #include "orinoco.h"
 
index 587869d86eeea1ea195137619db5ec316bd4f774..d57011028b7279ef576191c006fd392027c16802 100644 (file)
@@ -618,12 +618,12 @@ static int atmel_lock_mac(struct atmel_private *priv);
 static void atmel_wmem32(struct atmel_private *priv, u16 pos, u32 data);
 static void atmel_command_irq(struct atmel_private *priv);
 static int atmel_validate_channel(struct atmel_private *priv, int channel);
-static void atmel_management_frame(struct atmel_private *priv, struct ieee80211_hdr *header, 
+static void atmel_management_frame(struct atmel_private *priv, struct ieee80211_hdr_4addr *header, 
                                   u16 frame_len, u8 rssi);
 static void atmel_management_timer(u_long a);
 static void atmel_send_command(struct atmel_private *priv, int command, void *cmd, int cmd_size);
 static int atmel_send_command_wait(struct atmel_private *priv, int command, void *cmd, int cmd_size);
-static void atmel_transmit_management_frame(struct atmel_private *priv, struct ieee80211_hdr *header,
+static void atmel_transmit_management_frame(struct atmel_private *priv, struct ieee80211_hdr_4addr *header,
                                            u8 *body, int body_len);
 
 static u8 atmel_get_mib8(struct atmel_private *priv, u8 type, u8 index);
@@ -827,7 +827,7 @@ static void tx_update_descriptor(struct atmel_private *priv, int is_bcast, u16 l
 static int start_tx (struct sk_buff *skb, struct net_device *dev)
 {
        struct atmel_private *priv = netdev_priv(dev);
-       struct ieee80211_hdr header;
+       struct ieee80211_hdr_4addr header;
        unsigned long flags;
        u16 buff, frame_ctl, len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;
        u8 SNAP_RFC1024[6] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
@@ -902,7 +902,7 @@ static int start_tx (struct sk_buff *skb, struct net_device *dev)
 }
 
 static void atmel_transmit_management_frame(struct atmel_private *priv, 
-                                           struct ieee80211_hdr *header,
+                                           struct ieee80211_hdr_4addr *header,
                                            u8 *body, int body_len)
 {
        u16 buff;
@@ -917,7 +917,7 @@ static void atmel_transmit_management_frame(struct atmel_private *priv,
        tx_update_descriptor(priv, header->addr1[0] & 0x01, len, buff, TX_PACKET_TYPE_MGMT);
 }
        
-static void fast_rx_path(struct atmel_private *priv, struct ieee80211_hdr *header, 
+static void fast_rx_path(struct atmel_private *priv, struct ieee80211_hdr_4addr *header, 
                         u16 msdu_size, u16 rx_packet_loc, u32 crc)
 {
        /* fast path: unfragmented packet copy directly into skbuf */
@@ -990,7 +990,7 @@ static int probe_crc(struct atmel_private *priv, u16 packet_loc, u16 msdu_size)
        return (crc ^ 0xffffffff) == netcrc;
 }
 
-static void frag_rx_path(struct atmel_private *priv, struct ieee80211_hdr *header, 
+static void frag_rx_path(struct atmel_private *priv, struct ieee80211_hdr_4addr *header, 
                         u16 msdu_size, u16 rx_packet_loc, u32 crc, u16 seq_no, u8 frag_no, int more_frags)
 {
        u8 mac4[6]; 
@@ -1082,7 +1082,7 @@ static void frag_rx_path(struct atmel_private *priv, struct ieee80211_hdr *heade
 static void rx_done_irq(struct atmel_private *priv)
 {
        int i;
-       struct ieee80211_hdr header;
+       struct ieee80211_hdr_4addr header;
        
        for (i = 0; 
             atmel_rmem8(priv, atmel_rx(priv, RX_DESC_FLAGS_OFFSET, priv->rx_desc_head)) == RX_DESC_FLAG_VALID &&
@@ -2650,7 +2650,7 @@ static void handle_beacon_probe(struct atmel_private *priv, u16 capability, u8 c
  
 static void send_authentication_request(struct atmel_private *priv, u8 *challenge, int challenge_len)
 {
-       struct ieee80211_hdr header;
+       struct ieee80211_hdr_4addr header;
        struct auth_body auth;
        
        header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH); 
@@ -2688,7 +2688,7 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
 {
        u8 *ssid_el_p;
        int bodysize;
-       struct ieee80211_hdr header;
+       struct ieee80211_hdr_4addr header;
        struct ass_req_format {
                u16 capability;
                u16 listen_interval; 
@@ -2738,7 +2738,7 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
        atmel_transmit_management_frame(priv, &header, (void *)&body, bodysize);
 }
 
-static int is_frame_from_current_bss(struct atmel_private *priv, struct ieee80211_hdr *header)
+static int is_frame_from_current_bss(struct atmel_private *priv, struct ieee80211_hdr_4addr *header)
 {
        if (le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_FROMDS)
                return memcmp(header->addr3, priv->CurrentBSSID, 6) == 0;
@@ -2788,7 +2788,7 @@ static int retrieve_bss(struct atmel_private *priv)
 }
 
 
-static void store_bss_info(struct atmel_private *priv, struct ieee80211_hdr *header,
+static void store_bss_info(struct atmel_private *priv, struct ieee80211_hdr_4addr *header,
                           u16 capability, u16 beacon_period, u8 channel, u8 rssi, 
                           u8 ssid_len, u8 *ssid, int is_beacon)
 {
@@ -3072,7 +3072,7 @@ static void atmel_smooth_qual(struct atmel_private *priv)
 }
 
 /* deals with incoming managment frames. */
-static void atmel_management_frame(struct atmel_private *priv, struct ieee80211_hdr *header, 
+static void atmel_management_frame(struct atmel_private *priv, struct ieee80211_hdr_4addr *header, 
                      u16 frame_len, u8 rssi)
 {
        u16 subtype;
index 21c3d0d227e62e9572cdcf56ae3c6527688974fa..eba0d9d2b7c53015bc96c4f909e6e728faf2e52e 100644 (file)
  */
 
 #include <linux/config.h>
-
 #include <linux/module.h>
-#include <linux/types.h>
-#include <linux/threads.h>
-#include <linux/smp.h>
-#include <asm/io.h>
-#include <linux/delay.h>
-#include <linux/init.h>
 #include <linux/kernel.h>
-#include <linux/net.h>
-#include <asm/errno.h>
+#include <linux/init.h>
+#include <linux/delay.h>
 
 #include "hermes.h"
 
index 8c9e874c9118c494a74a42979e45c1c8f68030c0..ad28e32943608af5351fc805fb10e5aa3254ed27 100644 (file)
@@ -30,9 +30,8 @@
  * access to the hermes_t structure, and to the hardware
 */
 
-#include <linux/delay.h>
 #include <linux/if_ether.h>
-#include <asm/byteorder.h>
+#include <asm/io.h>
 
 /*
  * Limits and constants
 #define        HERMES_RXSTAT_WMP               (0x6000)        /* Wavelan-II Management Protocol frame */
 
 struct hermes_tx_descriptor {
-       u16 status;
-       u16 reserved1;
-       u16 reserved2;
-       u32 sw_support;
+       __le16 status;
+       __le16 reserved1;
+       __le16 reserved2;
+       __le32 sw_support;
        u8 retry_count;
        u8 tx_rate;
-       u16 tx_control; 
+       __le16 tx_control;      
 } __attribute__ ((packed));
 
 #define HERMES_TXSTAT_RETRYERR         (0x0001)
@@ -222,60 +221,60 @@ struct hermes_tx_descriptor {
 #define HERMES_INQ_SEC_STAT_AGERE      (0xF202)
 
 struct hermes_tallies_frame {
-       u16 TxUnicastFrames;
-       u16 TxMulticastFrames;
-       u16 TxFragments;
-       u16 TxUnicastOctets;
-       u16 TxMulticastOctets;
-       u16 TxDeferredTransmissions;
-       u16 TxSingleRetryFrames;
-       u16 TxMultipleRetryFrames;
-       u16 TxRetryLimitExceeded;
-       u16 TxDiscards;
-       u16 RxUnicastFrames;
-       u16 RxMulticastFrames;
-       u16 RxFragments;
-       u16 RxUnicastOctets;
-       u16 RxMulticastOctets;
-       u16 RxFCSErrors;
-       u16 RxDiscards_NoBuffer;
-       u16 TxDiscardsWrongSA;
-       u16 RxWEPUndecryptable;
-       u16 RxMsgInMsgFragments;
-       u16 RxMsgInBadMsgFragments;
+       __le16 TxUnicastFrames;
+       __le16 TxMulticastFrames;
+       __le16 TxFragments;
+       __le16 TxUnicastOctets;
+       __le16 TxMulticastOctets;
+       __le16 TxDeferredTransmissions;
+       __le16 TxSingleRetryFrames;
+       __le16 TxMultipleRetryFrames;
+       __le16 TxRetryLimitExceeded;
+       __le16 TxDiscards;
+       __le16 RxUnicastFrames;
+       __le16 RxMulticastFrames;
+       __le16 RxFragments;
+       __le16 RxUnicastOctets;
+       __le16 RxMulticastOctets;
+       __le16 RxFCSErrors;
+       __le16 RxDiscards_NoBuffer;
+       __le16 TxDiscardsWrongSA;
+       __le16 RxWEPUndecryptable;
+       __le16 RxMsgInMsgFragments;
+       __le16 RxMsgInBadMsgFragments;
        /* Those last are probably not available in very old firmwares */
-       u16 RxDiscards_WEPICVError;
-       u16 RxDiscards_WEPExcluded;
+       __le16 RxDiscards_WEPICVError;
+       __le16 RxDiscards_WEPExcluded;
 } __attribute__ ((packed));
 
 /* Grabbed from wlan-ng - Thanks Mark... - Jean II
  * This is the result of a scan inquiry command */
 /* Structure describing info about an Access Point */
 struct prism2_scan_apinfo {
-       u16 channel;            /* Channel where the AP sits */
-       u16 noise;              /* Noise level */
-       u16 level;              /* Signal level */
+       __le16 channel;         /* Channel where the AP sits */
+       __le16 noise;           /* Noise level */
+       __le16 level;           /* Signal level */
        u8 bssid[ETH_ALEN];     /* MAC address of the Access Point */
-       u16 beacon_interv;      /* Beacon interval */
-       u16 capabilities;       /* Capabilities */
-       u16 essid_len;          /* ESSID length */
+       __le16 beacon_interv;   /* Beacon interval */
+       __le16 capabilities;    /* Capabilities */
+       __le16 essid_len;       /* ESSID length */
        u8 essid[32];           /* ESSID of the network */
        u8 rates[10];           /* Bit rate supported */
-       u16 proberesp_rate;     /* Data rate of the response frame */
-       u16 atim;               /* ATIM window time, Kus (hostscan only) */
+       __le16 proberesp_rate;  /* Data rate of the response frame */
+       __le16 atim;            /* ATIM window time, Kus (hostscan only) */
 } __attribute__ ((packed));
 
 /* Same stuff for the Lucent/Agere card.
  * Thanks to h1kari <h1kari AT dachb0den.com> - Jean II */
 struct agere_scan_apinfo {
-       u16 channel;            /* Channel where the AP sits */
-       u16 noise;              /* Noise level */
-       u16 level;              /* Signal level */
+       __le16 channel;         /* Channel where the AP sits */
+       __le16 noise;           /* Noise level */
+       __le16 level;           /* Signal level */
        u8 bssid[ETH_ALEN];     /* MAC address of the Access Point */
-       u16 beacon_interv;      /* Beacon interval */
-       u16 capabilities;       /* Capabilities */
+       __le16 beacon_interv;   /* Beacon interval */
+       __le16 capabilities;    /* Capabilities */
        /* bits: 0-ess, 1-ibss, 4-privacy [wep] */
-       u16 essid_len;          /* ESSID length */
+       __le16 essid_len;       /* ESSID length */
        u8 essid[32];           /* ESSID of the network */
 } __attribute__ ((packed));
 
@@ -283,16 +282,16 @@ struct agere_scan_apinfo {
 struct symbol_scan_apinfo {
        u8 channel;             /* Channel where the AP sits */
        u8 unknown1;            /* 8 in 2.9x and 3.9x f/w, 0 otherwise */
-       u16 noise;              /* Noise level */
-       u16 level;              /* Signal level */
+       __le16 noise;           /* Noise level */
+       __le16 level;           /* Signal level */
        u8 bssid[ETH_ALEN];     /* MAC address of the Access Point */
-       u16 beacon_interv;      /* Beacon interval */
-       u16 capabilities;       /* Capabilities */
+       __le16 beacon_interv;   /* Beacon interval */
+       __le16 capabilities;    /* Capabilities */
        /* bits: 0-ess, 1-ibss, 4-privacy [wep] */
-       u16 essid_len;          /* ESSID length */
+       __le16 essid_len;       /* ESSID length */
        u8 essid[32];           /* ESSID of the network */
-       u16 rates[5];           /* Bit rate supported */
-       u16 basic_rates;        /* Basic rates bitmask */
+       __le16 rates[5];        /* Bit rate supported */
+       __le16 basic_rates;     /* Basic rates bitmask */
        u8 unknown2[6];         /* Always FF:FF:FF:FF:00:00 */
        u8 unknown3[8];         /* Always 0, appeared in f/w 3.91-68 */
 } __attribute__ ((packed));
@@ -312,7 +311,7 @@ union hermes_scan_info {
 #define HERMES_LINKSTATUS_ASSOC_FAILED    (0x0006)
   
 struct hermes_linkstatus {
-       u16 linkstatus;         /* Link status */
+       __le16 linkstatus;         /* Link status */
 } __attribute__ ((packed));
 
 struct hermes_response {
@@ -321,8 +320,8 @@ struct hermes_response {
 
 /* "ID" structure - used for ESSID and station nickname */
 struct hermes_idstring {
-       u16 len;
-       u16 val[16];
+       __le16 len;
+       __le16 val[16];
 } __attribute__ ((packed));
 
 struct hermes_multicast {
@@ -447,7 +446,7 @@ static inline void hermes_clear_words(struct hermes *hw, int off, unsigned count
 
 static inline int hermes_read_wordrec(hermes_t *hw, int bap, u16 rid, u16 *word)
 {
-       u16 rec;
+       __le16 rec;
        int err;
 
        err = HERMES_READ_RECORD(hw, bap, rid, &rec);
@@ -457,7 +456,7 @@ static inline int hermes_read_wordrec(hermes_t *hw, int bap, u16 rid, u16 *word)
 
 static inline int hermes_write_wordrec(hermes_t *hw, int bap, u16 rid, u16 word)
 {
-       u16 rec = cpu_to_le16(word);
+       __le16 rec = cpu_to_le16(word);
        return HERMES_WRITE_RECORD(hw, bap, rid, &rec);
 }
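
The hermes.h hunks above retype every field that crosses the hardware interface in little-endian order from u16/u32 to the annotated __le16/__le32 types. The in-memory layout is unchanged; the gain is that sparse can now flag any access that bypasses le16_to_cpu()/cpu_to_le16(), the pairing already visible in hermes_read_wordrec() and hermes_write_wordrec(). A short sketch of the convention with a hypothetical record:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Wire-format record: __le16 fields are little-endian on every
 * architecture, and sparse warns if they are used as plain integers. */
struct demo_scan_entry {
        __le16 channel;
        __le16 level;
} __attribute__ ((packed));

static u16 demo_get_channel(const struct demo_scan_entry *e)
{
        return le16_to_cpu(e->channel);         /* explicit conversion in */
}

static void demo_set_level(struct demo_scan_entry *e, u16 level)
{
        e->level = cpu_to_le16(level);          /* explicit conversion out */
}

Big-endian hosts pay a byte swap in these accessors and little-endian hosts compile it away, so the annotation costs nothing at run time.
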
 
index e7f5821b49429c3e05470a0c377d0aca788a87de..6a96cd9f2685c7d8d962ba7f9ef5de47c4896d23 100644 (file)
@@ -716,9 +716,6 @@ static int prism2_close(struct net_device *dev)
                hostap_deauth_all_stas(dev, local->ap, 1);
 #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
 
-       if (local->func->dev_close && local->func->dev_close(local))
-               return 0;
-
        if (dev == local->dev) {
                local->func->hw_shutdown(dev, HOSTAP_HW_ENABLE_CMDCOMPL);
        }
@@ -766,9 +763,6 @@ static int prism2_open(struct net_device *dev)
            local->hw_downloading)
                return -ENODEV;
 
-       if (local->func->dev_open && local->func->dev_open(local))
-               return 1;
-
        if (!try_module_get(local->hw_module))
                return -ENODEV;
        local->num_dev_open++;
index b0501243b175f99090f2b7321cb15ba77be4b8bd..ffac50899454353b324d52a2593fbb281ab05d23 100644 (file)
@@ -6,10 +6,10 @@
 void hostap_dump_rx_80211(const char *name, struct sk_buff *skb,
                          struct hostap_80211_rx_status *rx_stats)
 {
-       struct ieee80211_hdr *hdr;
+       struct ieee80211_hdr_4addr *hdr;
        u16 fc;
 
-       hdr = (struct ieee80211_hdr *) skb->data;
+       hdr = (struct ieee80211_hdr_4addr *) skb->data;
 
        printk(KERN_DEBUG "%s: RX signal=%d noise=%d rate=%d len=%d "
               "jiffies=%ld\n",
@@ -51,7 +51,7 @@ int prism2_rx_80211(struct net_device *dev, struct sk_buff *skb,
        int hdrlen, phdrlen, head_need, tail_need;
        u16 fc;
        int prism_header, ret;
-       struct ieee80211_hdr *hdr;
+       struct ieee80211_hdr_4addr *hdr;
 
        iface = netdev_priv(dev);
        local = iface->local;
@@ -70,7 +70,7 @@ int prism2_rx_80211(struct net_device *dev, struct sk_buff *skb,
                phdrlen = 0;
        }
 
-       hdr = (struct ieee80211_hdr *) skb->data;
+       hdr = (struct ieee80211_hdr_4addr *) skb->data;
        fc = le16_to_cpu(hdr->frame_ctl);
 
        if (type == PRISM2_RX_MGMT && (fc & IEEE80211_FCTL_VERS)) {
@@ -215,7 +215,7 @@ prism2_frag_cache_find(local_info_t *local, unsigned int seq,
 
 /* Called only as a tasklet (software IRQ) */
 static struct sk_buff *
-prism2_frag_cache_get(local_info_t *local, struct ieee80211_hdr *hdr)
+prism2_frag_cache_get(local_info_t *local, struct ieee80211_hdr_4addr *hdr)
 {
        struct sk_buff *skb = NULL;
        u16 sc;
@@ -229,7 +229,7 @@ prism2_frag_cache_get(local_info_t *local, struct ieee80211_hdr *hdr)
        if (frag == 0) {
                /* Reserve enough space to fit maximum frame length */
                skb = dev_alloc_skb(local->dev->mtu +
-                                   sizeof(struct ieee80211_hdr) +
+                                   sizeof(struct ieee80211_hdr_4addr) +
                                    8 /* LLC */ +
                                    2 /* alignment */ +
                                    8 /* WEP */ + ETH_ALEN /* WDS */);
@@ -267,7 +267,7 @@ prism2_frag_cache_get(local_info_t *local, struct ieee80211_hdr *hdr)
 
 /* Called only as a tasklet (software IRQ) */
 static int prism2_frag_cache_invalidate(local_info_t *local,
-                                       struct ieee80211_hdr *hdr)
+                                       struct ieee80211_hdr_4addr *hdr)
 {
        u16 sc;
        unsigned int seq;
@@ -441,7 +441,7 @@ hostap_rx_frame_mgmt(local_info_t *local, struct sk_buff *skb,
                     u16 stype)
 {
        if (local->iw_mode == IW_MODE_MASTER) {
-               hostap_update_sta_ps(local, (struct ieee80211_hdr *)
+               hostap_update_sta_ps(local, (struct ieee80211_hdr_4addr *)
                                     skb->data);
        }
 
@@ -520,7 +520,7 @@ static inline struct net_device *prism2_rx_get_wds(local_info_t *local,
 
 
 static inline int
-hostap_rx_frame_wds(local_info_t *local, struct ieee80211_hdr *hdr,
+hostap_rx_frame_wds(local_info_t *local, struct ieee80211_hdr_4addr *hdr,
                    u16 fc, struct net_device **wds)
 {
        /* FIX: is this really supposed to accept WDS frames only in Master
@@ -579,13 +579,13 @@ static int hostap_is_eapol_frame(local_info_t *local, struct sk_buff *skb)
 {
        struct net_device *dev = local->dev;
        u16 fc, ethertype;
-       struct ieee80211_hdr *hdr;
+       struct ieee80211_hdr_4addr *hdr;
        u8 *pos;
 
        if (skb->len < 24)
                return 0;
 
-       hdr = (struct ieee80211_hdr *) skb->data;
+       hdr = (struct ieee80211_hdr_4addr *) skb->data;
        fc = le16_to_cpu(hdr->frame_ctl);
 
        /* check that the frame is unicast frame to us */
@@ -619,13 +619,13 @@ static inline int
 hostap_rx_frame_decrypt(local_info_t *local, struct sk_buff *skb,
                        struct ieee80211_crypt_data *crypt)
 {
-       struct ieee80211_hdr *hdr;
+       struct ieee80211_hdr_4addr *hdr;
        int res, hdrlen;
 
        if (crypt == NULL || crypt->ops->decrypt_mpdu == NULL)
                return 0;
 
-       hdr = (struct ieee80211_hdr *) skb->data;
+       hdr = (struct ieee80211_hdr_4addr *) skb->data;
        hdrlen = hostap_80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
 
        if (local->tkip_countermeasures &&
@@ -658,13 +658,13 @@ static inline int
 hostap_rx_frame_decrypt_msdu(local_info_t *local, struct sk_buff *skb,
                             int keyidx, struct ieee80211_crypt_data *crypt)
 {
-       struct ieee80211_hdr *hdr;
+       struct ieee80211_hdr_4addr *hdr;
        int res, hdrlen;
 
        if (crypt == NULL || crypt->ops->decrypt_msdu == NULL)
                return 0;
 
-       hdr = (struct ieee80211_hdr *) skb->data;
+       hdr = (struct ieee80211_hdr_4addr *) skb->data;
        hdrlen = hostap_80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
 
        atomic_inc(&crypt->refcnt);
@@ -689,7 +689,7 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
 {
        struct hostap_interface *iface;
        local_info_t *local;
-       struct ieee80211_hdr *hdr;
+       struct ieee80211_hdr_4addr *hdr;
        size_t hdrlen;
        u16 fc, type, stype, sc;
        struct net_device *wds = NULL;
@@ -716,7 +716,7 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
        dev = local->ddev;
        iface = netdev_priv(dev);
 
-       hdr = (struct ieee80211_hdr *) skb->data;
+       hdr = (struct ieee80211_hdr_4addr *) skb->data;
        stats = hostap_get_stats(dev);
 
        if (skb->len < 10)
@@ -737,7 +737,8 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
                struct iw_quality wstats;
                wstats.level = rx_stats->signal;
                wstats.noise = rx_stats->noise;
-               wstats.updated = 6;     /* No qual value */
+               wstats.updated = IW_QUAL_LEVEL_UPDATED | IW_QUAL_NOISE_UPDATED
+                       | IW_QUAL_QUAL_INVALID | IW_QUAL_DBM;
                /* Update spy records */
                wireless_spy_update(dev, hdr->addr2, &wstats);
        }
@@ -889,7 +890,7 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
        if (local->host_decrypt && (fc & IEEE80211_FCTL_PROTECTED) &&
            (keyidx = hostap_rx_frame_decrypt(local, skb, crypt)) < 0)
                goto rx_dropped;
-       hdr = (struct ieee80211_hdr *) skb->data;
+       hdr = (struct ieee80211_hdr_4addr *) skb->data;
 
        /* skb: hdr + (possibly fragmented) plaintext payload */
 
@@ -941,7 +942,7 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
                /* this was the last fragment and the frame will be
                 * delivered, so remove skb from fragment cache */
                skb = frag_skb;
-               hdr = (struct ieee80211_hdr *) skb->data;
+               hdr = (struct ieee80211_hdr_4addr *) skb->data;
                prism2_frag_cache_invalidate(local, hdr);
        }
 
@@ -952,7 +953,7 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
            hostap_rx_frame_decrypt_msdu(local, skb, keyidx, crypt))
                goto rx_dropped;
 
-       hdr = (struct ieee80211_hdr *) skb->data;
+       hdr = (struct ieee80211_hdr_4addr *) skb->data;
        if (crypt && !(fc & IEEE80211_FCTL_PROTECTED) && !local->open_wep) {
                if (local->ieee_802_1x &&
                    hostap_is_eapol_frame(local, skb)) {
index 6358015f65260d8fc9921ab185d42e9e5aa82f2b..9d24f8a38ac525843772b41143db514a9f686ecc 100644 (file)
@@ -1,9 +1,9 @@
 void hostap_dump_tx_80211(const char *name, struct sk_buff *skb)
 {
-       struct ieee80211_hdr *hdr;
+       struct ieee80211_hdr_4addr *hdr;
        u16 fc;
 
-       hdr = (struct ieee80211_hdr *) skb->data;
+       hdr = (struct ieee80211_hdr_4addr *) skb->data;
 
        printk(KERN_DEBUG "%s: TX len=%d jiffies=%ld\n",
               name, skb->len, jiffies);
@@ -41,7 +41,7 @@ int hostap_data_start_xmit(struct sk_buff *skb, struct net_device *dev)
        struct hostap_interface *iface;
        local_info_t *local;
        int need_headroom, need_tailroom = 0;
-       struct ieee80211_hdr hdr;
+       struct ieee80211_hdr_4addr hdr;
        u16 fc, ethertype = 0;
        enum {
                WDS_NO = 0, WDS_OWN_FRAME, WDS_COMPLIANT_FRAME
@@ -244,7 +244,7 @@ int hostap_mgmt_start_xmit(struct sk_buff *skb, struct net_device *dev)
        struct hostap_interface *iface;
        local_info_t *local;
        struct hostap_skb_tx_data *meta;
-       struct ieee80211_hdr *hdr;
+       struct ieee80211_hdr_4addr *hdr;
        u16 fc;
 
        iface = netdev_priv(dev);
@@ -266,7 +266,7 @@ int hostap_mgmt_start_xmit(struct sk_buff *skb, struct net_device *dev)
        meta->iface = iface;
 
        if (skb->len >= IEEE80211_DATA_HDR3_LEN + sizeof(rfc1042_header) + 2) {
-               hdr = (struct ieee80211_hdr *) skb->data;
+               hdr = (struct ieee80211_hdr_4addr *) skb->data;
                fc = le16_to_cpu(hdr->frame_ctl);
                if (WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA &&
                    WLAN_FC_GET_STYPE(fc) == IEEE80211_STYPE_DATA) {
@@ -289,7 +289,7 @@ struct sk_buff * hostap_tx_encrypt(struct sk_buff *skb,
 {
        struct hostap_interface *iface;
        local_info_t *local;
-       struct ieee80211_hdr *hdr;
+       struct ieee80211_hdr_4addr *hdr;
        u16 fc;
        int hdr_len, res;
 
@@ -303,7 +303,7 @@ struct sk_buff * hostap_tx_encrypt(struct sk_buff *skb,
 
        if (local->tkip_countermeasures &&
            crypt && crypt->ops && strcmp(crypt->ops->name, "TKIP") == 0) {
-               hdr = (struct ieee80211_hdr *) skb->data;
+               hdr = (struct ieee80211_hdr_4addr *) skb->data;
                if (net_ratelimit()) {
                        printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
                               "TX packet to " MACSTR "\n",
@@ -317,15 +317,15 @@ struct sk_buff * hostap_tx_encrypt(struct sk_buff *skb,
        if (skb == NULL)
                return NULL;
 
-       if ((skb_headroom(skb) < crypt->ops->extra_prefix_len ||
-            skb_tailroom(skb) < crypt->ops->extra_postfix_len) &&
-           pskb_expand_head(skb, crypt->ops->extra_prefix_len,
-                            crypt->ops->extra_postfix_len, GFP_ATOMIC)) {
+       if ((skb_headroom(skb) < crypt->ops->extra_mpdu_prefix_len ||
+            skb_tailroom(skb) < crypt->ops->extra_mpdu_postfix_len) &&
+           pskb_expand_head(skb, crypt->ops->extra_mpdu_prefix_len,
+                            crypt->ops->extra_mpdu_postfix_len, GFP_ATOMIC)) {
                kfree_skb(skb);
                return NULL;
        }
 
-       hdr = (struct ieee80211_hdr *) skb->data;
+       hdr = (struct ieee80211_hdr_4addr *) skb->data;
        fc = le16_to_cpu(hdr->frame_ctl);
        hdr_len = hostap_80211_get_hdrlen(fc);
 
@@ -360,7 +360,7 @@ int hostap_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
        ap_tx_ret tx_ret;
        struct hostap_skb_tx_data *meta;
        int no_encrypt = 0;
-       struct ieee80211_hdr *hdr;
+       struct ieee80211_hdr_4addr *hdr;
 
        iface = netdev_priv(dev);
        local = iface->local;
@@ -403,7 +403,7 @@ int hostap_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
        tx_ret = hostap_handle_sta_tx(local, &tx);
        skb = tx.skb;
        meta = (struct hostap_skb_tx_data *) skb->cb;
-       hdr = (struct ieee80211_hdr *) skb->data;
+       hdr = (struct ieee80211_hdr_4addr *) skb->data;
        fc = le16_to_cpu(hdr->frame_ctl);
        switch (tx_ret) {
        case AP_TX_CONTINUE:
index 930cef8367f2a2ffa983bf9c10fa28a56ec6377d..9da94ab7f05f87e24eec2816ca5094e894906d01 100644 (file)
@@ -591,14 +591,14 @@ static void hostap_ap_tx_cb(struct sk_buff *skb, int ok, void *data)
 {
        struct ap_data *ap = data;
        u16 fc;
-       struct ieee80211_hdr *hdr;
+       struct ieee80211_hdr_4addr *hdr;
 
        if (!ap->local->hostapd || !ap->local->apdev) {
                dev_kfree_skb(skb);
                return;
        }
 
-       hdr = (struct ieee80211_hdr *) skb->data;
+       hdr = (struct ieee80211_hdr_4addr *) skb->data;
        fc = le16_to_cpu(hdr->frame_ctl);
 
        /* Pass the TX callback frame to the hostapd; use 802.11 header version
@@ -623,7 +623,7 @@ static void hostap_ap_tx_cb_auth(struct sk_buff *skb, int ok, void *data)
 {
        struct ap_data *ap = data;
        struct net_device *dev = ap->local->dev;
-       struct ieee80211_hdr *hdr;
+       struct ieee80211_hdr_4addr *hdr;
        u16 fc, *pos, auth_alg, auth_transaction, status;
        struct sta_info *sta = NULL;
        char *txt = NULL;
@@ -633,7 +633,7 @@ static void hostap_ap_tx_cb_auth(struct sk_buff *skb, int ok, void *data)
                return;
        }
 
-       hdr = (struct ieee80211_hdr *) skb->data;
+       hdr = (struct ieee80211_hdr_4addr *) skb->data;
        fc = le16_to_cpu(hdr->frame_ctl);
        if (WLAN_FC_GET_TYPE(fc) != IEEE80211_FTYPE_MGMT ||
            WLAN_FC_GET_STYPE(fc) != IEEE80211_STYPE_AUTH ||
@@ -692,7 +692,7 @@ static void hostap_ap_tx_cb_assoc(struct sk_buff *skb, int ok, void *data)
 {
        struct ap_data *ap = data;
        struct net_device *dev = ap->local->dev;
-       struct ieee80211_hdr *hdr;
+       struct ieee80211_hdr_4addr *hdr;
        u16 fc, *pos, status;
        struct sta_info *sta = NULL;
        char *txt = NULL;
@@ -702,7 +702,7 @@ static void hostap_ap_tx_cb_assoc(struct sk_buff *skb, int ok, void *data)
                return;
        }
 
-       hdr = (struct ieee80211_hdr *) skb->data;
+       hdr = (struct ieee80211_hdr_4addr *) skb->data;
        fc = le16_to_cpu(hdr->frame_ctl);
        if (WLAN_FC_GET_TYPE(fc) != IEEE80211_FTYPE_MGMT ||
            (WLAN_FC_GET_STYPE(fc) != IEEE80211_STYPE_ASSOC_RESP &&
@@ -757,12 +757,12 @@ static void hostap_ap_tx_cb_assoc(struct sk_buff *skb, int ok, void *data)
 static void hostap_ap_tx_cb_poll(struct sk_buff *skb, int ok, void *data)
 {
        struct ap_data *ap = data;
-       struct ieee80211_hdr *hdr;
+       struct ieee80211_hdr_4addr *hdr;
        struct sta_info *sta;
 
        if (skb->len < 24)
                goto fail;
-       hdr = (struct ieee80211_hdr *) skb->data;
+       hdr = (struct ieee80211_hdr_4addr *) skb->data;
        if (ok) {
                spin_lock(&ap->sta_table_lock);
                sta = ap_get_sta(ap, hdr->addr1);
@@ -918,7 +918,7 @@ static void prism2_send_mgmt(struct net_device *dev,
 {
        struct hostap_interface *iface;
        local_info_t *local;
-       struct ieee80211_hdr *hdr;
+       struct ieee80211_hdr_4addr *hdr;
        u16 fc;
        struct sk_buff *skb;
        struct hostap_skb_tx_data *meta;
@@ -944,7 +944,7 @@ static void prism2_send_mgmt(struct net_device *dev,
 
        fc = type_subtype;
        hdrlen = hostap_80211_get_hdrlen(fc);
-       hdr = (struct ieee80211_hdr *) skb_put(skb, hdrlen);
+       hdr = (struct ieee80211_hdr_4addr *) skb_put(skb, hdrlen);
        if (body)
                memcpy(skb_put(skb, body_len), body, body_len);
 
@@ -1256,14 +1256,14 @@ static char * ap_auth_make_challenge(struct ap_data *ap)
        }
 
        skb = dev_alloc_skb(WLAN_AUTH_CHALLENGE_LEN +
-                           ap->crypt->extra_prefix_len +
-                           ap->crypt->extra_postfix_len);
+                           ap->crypt->extra_mpdu_prefix_len +
+                           ap->crypt->extra_mpdu_postfix_len);
        if (skb == NULL) {
                kfree(tmpbuf);
                return NULL;
        }
 
-       skb_reserve(skb, ap->crypt->extra_prefix_len);
+       skb_reserve(skb, ap->crypt->extra_mpdu_prefix_len);
        memset(skb_put(skb, WLAN_AUTH_CHALLENGE_LEN), 0,
               WLAN_AUTH_CHALLENGE_LEN);
        if (ap->crypt->encrypt_mpdu(skb, 0, ap->crypt_priv)) {
@@ -1272,7 +1272,7 @@ static char * ap_auth_make_challenge(struct ap_data *ap)
                return NULL;
        }
 
-       memcpy(tmpbuf, skb->data + ap->crypt->extra_prefix_len,
+       memcpy(tmpbuf, skb->data + ap->crypt->extra_mpdu_prefix_len,
               WLAN_AUTH_CHALLENGE_LEN);
        dev_kfree_skb(skb);
 
@@ -1285,7 +1285,7 @@ static void handle_authen(local_info_t *local, struct sk_buff *skb,
                          struct hostap_80211_rx_status *rx_stats)
 {
        struct net_device *dev = local->dev;
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+       struct ieee80211_hdr_4addr *hdr = (struct ieee80211_hdr_4addr *) skb->data;
        size_t hdrlen;
        struct ap_data *ap = local->ap;
        char body[8 + WLAN_AUTH_CHALLENGE_LEN], *challenge = NULL;
@@ -1498,7 +1498,7 @@ static void handle_assoc(local_info_t *local, struct sk_buff *skb,
                         struct hostap_80211_rx_status *rx_stats, int reassoc)
 {
        struct net_device *dev = local->dev;
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+       struct ieee80211_hdr_4addr *hdr = (struct ieee80211_hdr_4addr *) skb->data;
        char body[12], *p, *lpos;
        int len, left;
        u16 *pos;
@@ -1705,7 +1705,7 @@ static void handle_deauth(local_info_t *local, struct sk_buff *skb,
                          struct hostap_80211_rx_status *rx_stats)
 {
        struct net_device *dev = local->dev;
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+       struct ieee80211_hdr_4addr *hdr = (struct ieee80211_hdr_4addr *) skb->data;
        char *body = (char *) (skb->data + IEEE80211_MGMT_HDR_LEN);
        int len;
        u16 reason_code, *pos;
@@ -1746,7 +1746,7 @@ static void handle_disassoc(local_info_t *local, struct sk_buff *skb,
                            struct hostap_80211_rx_status *rx_stats)
 {
        struct net_device *dev = local->dev;
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+       struct ieee80211_hdr_4addr *hdr = (struct ieee80211_hdr_4addr *) skb->data;
        char *body = skb->data + IEEE80211_MGMT_HDR_LEN;
        int len;
        u16 reason_code, *pos;
@@ -1784,7 +1784,7 @@ static void handle_disassoc(local_info_t *local, struct sk_buff *skb,
 
 /* Called only as a scheduled task for pending AP frames. */
 static void ap_handle_data_nullfunc(local_info_t *local,
-                                   struct ieee80211_hdr *hdr)
+                                   struct ieee80211_hdr_4addr *hdr)
 {
        struct net_device *dev = local->dev;
 
@@ -1801,7 +1801,7 @@ static void ap_handle_data_nullfunc(local_info_t *local,
 
 /* Called only as a scheduled task for pending AP frames. */
 static void ap_handle_dropped_data(local_info_t *local,
-                                  struct ieee80211_hdr *hdr)
+                                  struct ieee80211_hdr_4addr *hdr)
 {
        struct net_device *dev = local->dev;
        struct sta_info *sta;
@@ -1860,7 +1860,7 @@ static void pspoll_send_buffered(local_info_t *local, struct sta_info *sta,
 
 /* Called only as a scheduled task for pending AP frames. */
 static void handle_pspoll(local_info_t *local,
-                         struct ieee80211_hdr *hdr,
+                         struct ieee80211_hdr_4addr *hdr,
                          struct hostap_80211_rx_status *rx_stats)
 {
        struct net_device *dev = local->dev;
@@ -1979,7 +1979,7 @@ static void handle_wds_oper_queue(void *data)
 static void handle_beacon(local_info_t *local, struct sk_buff *skb,
                          struct hostap_80211_rx_status *rx_stats)
 {
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+       struct ieee80211_hdr_4addr *hdr = (struct ieee80211_hdr_4addr *) skb->data;
        char *body = skb->data + IEEE80211_MGMT_HDR_LEN;
        int len, left;
        u16 *pos, beacon_int, capability;
@@ -2137,11 +2137,11 @@ static void handle_ap_item(local_info_t *local, struct sk_buff *skb,
        struct net_device *dev = local->dev;
 #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
        u16 fc, type, stype;
-       struct ieee80211_hdr *hdr;
+       struct ieee80211_hdr_4addr *hdr;
 
        /* FIX: should give skb->len to handler functions and check that the
         * buffer is long enough */
-       hdr = (struct ieee80211_hdr *) skb->data;
+       hdr = (struct ieee80211_hdr_4addr *) skb->data;
        fc = le16_to_cpu(hdr->frame_ctl);
        type = WLAN_FC_GET_TYPE(fc);
        stype = WLAN_FC_GET_STYPE(fc);
@@ -2258,7 +2258,7 @@ void hostap_rx(struct net_device *dev, struct sk_buff *skb,
        struct hostap_interface *iface;
        local_info_t *local;
        u16 fc;
-       struct ieee80211_hdr *hdr;
+       struct ieee80211_hdr_4addr *hdr;
 
        iface = netdev_priv(dev);
        local = iface->local;
@@ -2268,7 +2268,7 @@ void hostap_rx(struct net_device *dev, struct sk_buff *skb,
 
        local->stats.rx_packets++;
 
-       hdr = (struct ieee80211_hdr *) skb->data;
+       hdr = (struct ieee80211_hdr_4addr *) skb->data;
        fc = le16_to_cpu(hdr->frame_ctl);
 
        if (local->ap->ap_policy == AP_OTHER_AP_SKIP_ALL &&
@@ -2289,7 +2289,7 @@ void hostap_rx(struct net_device *dev, struct sk_buff *skb,
 static void schedule_packet_send(local_info_t *local, struct sta_info *sta)
 {
        struct sk_buff *skb;
-       struct ieee80211_hdr *hdr;
+       struct ieee80211_hdr_4addr *hdr;
        struct hostap_80211_rx_status rx_stats;
 
        if (skb_queue_empty(&sta->tx_buf))
@@ -2302,7 +2302,7 @@ static void schedule_packet_send(local_info_t *local, struct sta_info *sta)
                return;
        }
 
-       hdr = (struct ieee80211_hdr *) skb_put(skb, 16);
+       hdr = (struct ieee80211_hdr_4addr *) skb_put(skb, 16);
 
        /* Generate a fake pspoll frame to start packet delivery */
        hdr->frame_ctl = __constant_cpu_to_le16(
@@ -2349,7 +2349,7 @@ static int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
                qual[count].noise = HFA384X_LEVEL_TO_dBm(sta->last_rx_silence);
                qual[count].updated = sta->last_rx_updated;
 
-               sta->last_rx_updated = 0;
+               sta->last_rx_updated = IW_QUAL_DBM;
 
                count++;
                if (count >= buf_size)
@@ -2467,7 +2467,7 @@ static int prism2_ap_translate_scan(struct net_device *dev, char *buffer)
                }
 #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
 
-               sta->last_rx_updated = 0;
+               sta->last_rx_updated = IW_QUAL_DBM;
 
                /* To be continued, we should make good use of IWEVCUSTOM */
        }
@@ -2685,7 +2685,7 @@ ap_tx_ret hostap_handle_sta_tx(local_info_t *local, struct hostap_tx_data *tx)
        struct sta_info *sta = NULL;
        struct sk_buff *skb = tx->skb;
        int set_tim, ret;
-       struct ieee80211_hdr *hdr;
+       struct ieee80211_hdr_4addr *hdr;
        struct hostap_skb_tx_data *meta;
 
        meta = (struct hostap_skb_tx_data *) skb->cb;
@@ -2694,7 +2694,7 @@ ap_tx_ret hostap_handle_sta_tx(local_info_t *local, struct hostap_tx_data *tx)
            meta->iface->type == HOSTAP_INTERFACE_STA)
                goto out;
 
-       hdr = (struct ieee80211_hdr *) skb->data;
+       hdr = (struct ieee80211_hdr_4addr *) skb->data;
 
        if (hdr->addr1[0] & 0x01) {
                /* broadcast/multicast frame - no AP related processing */
@@ -2821,10 +2821,10 @@ void hostap_handle_sta_release(void *ptr)
 void hostap_handle_sta_tx_exc(local_info_t *local, struct sk_buff *skb)
 {
        struct sta_info *sta;
-       struct ieee80211_hdr *hdr;
+       struct ieee80211_hdr_4addr *hdr;
        struct hostap_skb_tx_data *meta;
 
-       hdr = (struct ieee80211_hdr *) skb->data;
+       hdr = (struct ieee80211_hdr_4addr *) skb->data;
        meta = (struct hostap_skb_tx_data *) skb->cb;
 
        spin_lock(&local->ap->sta_table_lock);
@@ -2892,7 +2892,7 @@ static void hostap_update_sta_ps2(local_info_t *local, struct sta_info *sta,
 
 /* Called only as a tasklet (software IRQ). Called for each RX frame to update
  * STA power saving state. pwrmgt is a flag from 802.11 frame_ctl field. */
-int hostap_update_sta_ps(local_info_t *local, struct ieee80211_hdr *hdr)
+int hostap_update_sta_ps(local_info_t *local, struct ieee80211_hdr_4addr *hdr)
 {
        struct sta_info *sta;
        u16 fc;
@@ -2925,12 +2925,12 @@ ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev,
        int ret;
        struct sta_info *sta;
        u16 fc, type, stype;
-       struct ieee80211_hdr *hdr;
+       struct ieee80211_hdr_4addr *hdr;
 
        if (local->ap == NULL)
                return AP_RX_CONTINUE;
 
-       hdr = (struct ieee80211_hdr *) skb->data;
+       hdr = (struct ieee80211_hdr_4addr *) skb->data;
 
        fc = le16_to_cpu(hdr->frame_ctl);
        type = WLAN_FC_GET_TYPE(fc);
@@ -3058,7 +3058,7 @@ ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev,
 
 /* Called only as a tasklet (software IRQ) */
 int hostap_handle_sta_crypto(local_info_t *local,
-                            struct ieee80211_hdr *hdr,
+                            struct ieee80211_hdr_4addr *hdr,
                             struct ieee80211_crypt_data **crypt,
                             void **sta_ptr)
 {
@@ -3160,7 +3160,7 @@ int hostap_add_sta(struct ap_data *ap, u8 *sta_addr)
 
 /* Called only as a tasklet (software IRQ) */
 int hostap_update_rx_stats(struct ap_data *ap,
-                          struct ieee80211_hdr *hdr,
+                          struct ieee80211_hdr_4addr *hdr,
                           struct hostap_80211_rx_status *rx_stats)
 {
        struct sta_info *sta;
@@ -3174,7 +3174,7 @@ int hostap_update_rx_stats(struct ap_data *ap,
                sta->last_rx_silence = rx_stats->noise;
                sta->last_rx_signal = rx_stats->signal;
                sta->last_rx_rate = rx_stats->rate;
-               sta->last_rx_updated = 7;
+               sta->last_rx_updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
                if (rx_stats->rate == 10)
                        sta->rx_count[0]++;
                else if (rx_stats->rate == 20)
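
The magic values written to last_rx_updated (7 and 0) are replaced by the named wireless-extensions flags, which also advertise that the reported levels are in dBm. A minimal illustration, not taken from the patch, of how the flags compose when filling a struct iw_quality entry (HFA384X_LEVEL_TO_dBm is the driver macro already used above):

        static void report_quality(struct iw_quality *q, u8 signal, u8 silence)
        {
                q->level   = HFA384X_LEVEL_TO_dBm(signal);
                q->noise   = HFA384X_LEVEL_TO_dBm(silence);
                q->qual    = signal > silence ? signal - silence : 0;
                q->updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM; /* previously the literal 7 */
        }
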
index 816a52bcea8f7adc71078a1a0bca50a18b40f29d..6d00df69c2e3e9f57f28af33a8af725aebb8bae9 100644 (file)
@@ -233,7 +233,7 @@ struct hostap_tx_data {
 ap_tx_ret hostap_handle_sta_tx(local_info_t *local, struct hostap_tx_data *tx);
 void hostap_handle_sta_release(void *ptr);
 void hostap_handle_sta_tx_exc(local_info_t *local, struct sk_buff *skb);
-int hostap_update_sta_ps(local_info_t *local, struct ieee80211_hdr *hdr);
+int hostap_update_sta_ps(local_info_t *local, struct ieee80211_hdr_4addr *hdr);
 typedef enum {
        AP_RX_CONTINUE, AP_RX_DROP, AP_RX_EXIT, AP_RX_CONTINUE_NOT_AUTHORIZED
 } ap_rx_ret;
@@ -241,13 +241,13 @@ ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev,
                               struct sk_buff *skb,
                               struct hostap_80211_rx_status *rx_stats,
                               int wds);
-int hostap_handle_sta_crypto(local_info_t *local, struct ieee80211_hdr *hdr,
+int hostap_handle_sta_crypto(local_info_t *local, struct ieee80211_hdr_4addr *hdr,
                             struct ieee80211_crypt_data **crypt,
                             void **sta_ptr);
 int hostap_is_sta_assoc(struct ap_data *ap, u8 *sta_addr);
 int hostap_is_sta_authorized(struct ap_data *ap, u8 *sta_addr);
 int hostap_add_sta(struct ap_data *ap, u8 *sta_addr);
-int hostap_update_rx_stats(struct ap_data *ap, struct ieee80211_hdr *hdr,
+int hostap_update_rx_stats(struct ap_data *ap, struct ieee80211_hdr_4addr *hdr,
                           struct hostap_80211_rx_status *rx_stats);
 void hostap_update_rates(local_info_t *local);
 void hostap_add_wds_links(local_info_t *local);
index faa83badf0a1b5e5897566b2be057583ca4d26a9..2643976a66775f559936b4fd0ce84672d96342d2 100644 (file)
@@ -492,42 +492,10 @@ static void prism2_pccard_genesis_reset(local_info_t *local, int hcr)
 }
 
 
-static int prism2_pccard_dev_open(local_info_t *local)
-{
-       struct hostap_cs_priv *hw_priv = local->hw_priv;
-       hw_priv->link->open++;
-       return 0;
-}
-
-
-static int prism2_pccard_dev_close(local_info_t *local)
-{
-       struct hostap_cs_priv *hw_priv;
-
-       if (local == NULL || local->hw_priv == NULL)
-               return 1;
-       hw_priv = local->hw_priv;
-       if (hw_priv->link == NULL)
-               return 1;
-
-       if (!hw_priv->link->open) {
-               printk(KERN_WARNING "%s: prism2_pccard_dev_close(): "
-                      "link not open?!\n", local->dev->name);
-               return 1;
-       }
-
-       hw_priv->link->open--;
-
-       return 0;
-}
-
-
 static struct prism2_helper_functions prism2_pccard_funcs =
 {
        .card_present   = prism2_pccard_card_present,
        .cor_sreset     = prism2_pccard_cor_sreset,
-       .dev_open       = prism2_pccard_dev_open,
-       .dev_close      = prism2_pccard_dev_close,
        .genesis_reset  = prism2_pccard_genesis_reset,
        .hw_type        = HOSTAP_HW_PCCARD,
 };
@@ -597,13 +565,14 @@ static void prism2_detach(dev_link_t *link)
        *linkp = link->next;
        /* release net devices */
        if (link->priv) {
+               struct hostap_cs_priv *hw_priv;
                struct net_device *dev;
                struct hostap_interface *iface;
                dev = link->priv;
                iface = netdev_priv(dev);
-               kfree(iface->local->hw_priv);
-               iface->local->hw_priv = NULL;
+               hw_priv = iface->local->hw_priv;
                prism2_free_local_data(dev);
+               kfree(hw_priv);
        }
        kfree(link);
 }
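
prism2_detach() used to free local->hw_priv before tearing down the local data; because the teardown path can still reach the hardware-private data, that left a use-after-free window. The hunk saves the pointer first and frees it only after prism2_free_local_data() returns, the same pattern the PCI and PLX probe/remove paths adopt below:

        /* Teardown ordering the whole patch converges on (sketch): */
        hw_priv = iface->local->hw_priv;        /* 1. stash the pointer              */
        prism2_free_local_data(dev);            /* 2. teardown may still use it      */
        kfree(hw_priv);                         /* 3. free once nothing can reach it */
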
@@ -883,6 +852,13 @@ static int prism2_event(event_t event, int priority,
 {
        dev_link_t *link = args->client_data;
        struct net_device *dev = (struct net_device *) link->priv;
+       int dev_open = 0;
+
+       if (link->state & DEV_CONFIG) {
+               struct hostap_interface *iface = netdev_priv(dev);
+               if (iface && iface->local)
+                       dev_open = iface->local->num_dev_open > 0;
+       }
 
        switch (event) {
        case CS_EVENT_CARD_INSERTION:
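
With the dev_open/dev_close helper hooks removed from struct prism2_helper_functions (see the hunk above and the hostap_wlan.h change below), the PCMCIA event handler can no longer consult link->open. It now computes dev_open from local->num_dev_open while the socket is in DEV_CONFIG, and the suspend/resume cases below key off that instead. The counter is assumed to be maintained by the hostap core's netdev open/stop handlers, roughly:

        /* Sketch of the assumed bookkeeping elsewhere in the hostap core: */
        static int prism2_open_sketch(struct net_device *dev)
        {
                struct hostap_interface *iface = netdev_priv(dev);
                iface->local->num_dev_open++;   /* decremented again in the stop handler */
                return 0;
        }
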
@@ -911,7 +887,7 @@ static int prism2_event(event_t event, int priority,
        case CS_EVENT_RESET_PHYSICAL:
                PDEBUG(DEBUG_EXTRA, "%s: CS_EVENT_RESET_PHYSICAL\n", dev_info);
                if (link->state & DEV_CONFIG) {
-                       if (link->open) {
+                       if (dev_open) {
                                netif_stop_queue(dev);
                                netif_device_detach(dev);
                        }
@@ -931,8 +907,8 @@ static int prism2_event(event_t event, int priority,
                        pcmcia_request_configuration(link->handle,
                                                     &link->conf);
                        prism2_hw_shutdown(dev, 1);
-                       prism2_hw_config(dev, link->open ? 0 : 1);
-                       if (link->open) {
+                       prism2_hw_config(dev, dev_open ? 0 : 1);
+                       if (dev_open) {
                                netif_device_attach(dev);
                                netif_start_queue(dev);
                        }
index e533a663deda5ebc3fd00df476237d9dda5311ce..59fc15572395cc50a4e8f56b2643afd6800f43d1 100644 (file)
@@ -3322,6 +3322,18 @@ static void prism2_free_local_data(struct net_device *dev)
        iface = netdev_priv(dev);
        local = iface->local;
 
+       /* Unregister all netdevs before freeing local data. */
+       list_for_each_safe(ptr, n, &local->hostap_interfaces) {
+               iface = list_entry(ptr, struct hostap_interface, list);
+               if (iface->type == HOSTAP_INTERFACE_MASTER) {
+                       /* special handling for this interface below */
+                       continue;
+               }
+               hostap_remove_interface(iface->dev, 0, 1);
+       }
+
+       unregister_netdev(local->dev);
+
        flush_scheduled_work();
 
        if (timer_pending(&local->crypt_deinit_timer))
@@ -3382,15 +3394,6 @@ static void prism2_free_local_data(struct net_device *dev)
        prism2_download_free_data(local->dl_sec);
 #endif /* PRISM2_DOWNLOAD_SUPPORT */
 
-       list_for_each_safe(ptr, n, &local->hostap_interfaces) {
-               iface = list_entry(ptr, struct hostap_interface, list);
-               if (iface->type == HOSTAP_INTERFACE_MASTER) {
-                       /* special handling for this interface below */
-                       continue;
-               }
-               hostap_remove_interface(iface->dev, 0, 1);
-       }
-
        prism2_clear_set_tim_queue(local);
 
        list_for_each_safe(ptr, n, &local->bss_list) {
@@ -3403,7 +3406,6 @@ static void prism2_free_local_data(struct net_device *dev)
        kfree(local->last_scan_results);
        kfree(local->generic_elem);
 
-       unregister_netdev(local->dev);
        free_netdev(local->dev);
 }
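
prism2_free_local_data() now removes every non-master interface and unregisters the master netdev before any per-device state is released; previously the interfaces were only torn down near the end, after crypto data, timers and queues had already been freed, leaving a window where a still-registered netdev could be driven against freed state. The resulting order, in outline:

        /* Teardown order after the change (outline, not verbatim code):
         *   1. hostap_remove_interface() for each non-master interface
         *   2. unregister_netdev(local->dev)            - master device
         *   3. flush_scheduled_work(), delete pending timers
         *   4. free crypto contexts, BSS list, scan results, ...
         *   5. free_netdev(local->dev)
         */
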
 
index e720369a3515595b1a35129b92be19f0a578325c..53f5246c40aa3bb7bc00eaf80e015a54a7718ec4 100644 (file)
@@ -50,7 +50,8 @@ static struct iw_statistics *hostap_get_wireless_stats(struct net_device *dev)
 #endif /* in_atomic */
 
                if (update && prism2_update_comms_qual(dev) == 0)
-                       wstats->qual.updated = 7;
+                       wstats->qual.updated = IW_QUAL_ALL_UPDATED |
+                               IW_QUAL_DBM;
 
                wstats->qual.qual = local->comms_qual;
                wstats->qual.level = local->avg_signal;
@@ -59,7 +60,7 @@ static struct iw_statistics *hostap_get_wireless_stats(struct net_device *dev)
                wstats->qual.qual = 0;
                wstats->qual.level = 0;
                wstats->qual.noise = 0;
-               wstats->qual.updated = 0;
+               wstats->qual.updated = IW_QUAL_ALL_INVALID;
        }
 
        return wstats;
@@ -1827,13 +1828,6 @@ static char * __prism2_translate_scan(local_info_t *local,
        iwe.cmd = SIOCGIWAP;
        iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
        memcpy(iwe.u.ap_addr.sa_data, bssid, ETH_ALEN);
-       /* FIX:
-        * I do not know how this is possible, but iwe_stream_add_event
-        * seems to re-order memcpy execution so that len is set only
-        * after copying.. Pre-setting len here "fixes" this, but real
-        * problems should be solved (after which these iwe.len
-        * settings could be removed from this function). */
-       iwe.len = IW_EV_ADDR_LEN;
        current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
                                          IW_EV_ADDR_LEN);
 
@@ -1843,7 +1837,6 @@ static char * __prism2_translate_scan(local_info_t *local,
        iwe.cmd = SIOCGIWESSID;
        iwe.u.data.length = ssid_len;
        iwe.u.data.flags = 1;
-       iwe.len = IW_EV_POINT_LEN + iwe.u.data.length;
        current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, ssid);
 
        memset(&iwe, 0, sizeof(iwe));
@@ -1859,7 +1852,6 @@ static char * __prism2_translate_scan(local_info_t *local,
                        iwe.u.mode = IW_MODE_MASTER;
                else
                        iwe.u.mode = IW_MODE_ADHOC;
-               iwe.len = IW_EV_UINT_LEN;
                current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
                                                  IW_EV_UINT_LEN);
        }
@@ -1877,7 +1869,6 @@ static char * __prism2_translate_scan(local_info_t *local,
        if (chan > 0) {
                iwe.u.freq.m = freq_list[le16_to_cpu(chan - 1)] * 100000;
                iwe.u.freq.e = 1;
-               iwe.len = IW_EV_FREQ_LEN;
                current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
                                                  IW_EV_FREQ_LEN);
        }
@@ -1894,7 +1885,10 @@ static char * __prism2_translate_scan(local_info_t *local,
                        iwe.u.qual.noise =
                                HFA384X_LEVEL_TO_dBm(le16_to_cpu(scan->anl));
                }
-               iwe.len = IW_EV_QUAL_LEN;
+               iwe.u.qual.updated = IW_QUAL_LEVEL_UPDATED
+                       | IW_QUAL_NOISE_UPDATED
+                       | IW_QUAL_QUAL_INVALID
+                       | IW_QUAL_DBM;
                current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
                                                  IW_EV_QUAL_LEN);
        }
@@ -1906,7 +1900,6 @@ static char * __prism2_translate_scan(local_info_t *local,
        else
                iwe.u.data.flags = IW_ENCODE_DISABLED;
        iwe.u.data.length = 0;
-       iwe.len = IW_EV_POINT_LEN + iwe.u.data.length;
        current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, "");
 
        /* TODO: add SuppRates into BSS table */
@@ -1930,7 +1923,7 @@ static char * __prism2_translate_scan(local_info_t *local,
        }
 
        /* TODO: add BeaconInt,resp_rate,atim into BSS table */
-       buf = kmalloc(MAX_WPA_IE_LEN * 2 + 30, GFP_KERNEL);
+       buf = kmalloc(MAX_WPA_IE_LEN * 2 + 30, GFP_ATOMIC);
        if (buf && scan) {
                memset(&iwe, 0, sizeof(iwe));
                iwe.cmd = IWEVCUSTOM;
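
The temporary IWEVCUSTOM buffer switches from GFP_KERNEL to GFP_ATOMIC: GFP_KERNEL may sleep, and __prism2_translate_scan() can be reached from a context that holds a spinlock (which lock exactly is an assumption about the caller, not visible in this hunk). The general rule the one-line change follows:

        spin_lock_bh(&local->lock);                             /* assumed caller context */
        buf = kmalloc(MAX_WPA_IE_LEN * 2 + 30, GFP_ATOMIC);     /* must not sleep here    */
        if (buf) {
                /* format the custom event into buf */
        }
        spin_unlock_bh(&local->lock);
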
index 025f8cdb55663758d5329163084965c0403ed0db..da0c80fb941cab99dac9d2c93cf0105010f90abd 100644 (file)
@@ -59,11 +59,13 @@ static struct pci_device_id prism2_pci_id_table[] __devinitdata = {
 static inline void hfa384x_outb_debug(struct net_device *dev, int a, u8 v)
 {
        struct hostap_interface *iface;
+       struct hostap_pci_priv *hw_priv;
        local_info_t *local;
        unsigned long flags;
 
        iface = netdev_priv(dev);
        local = iface->local;
+       hw_priv = local->hw_priv;
 
        spin_lock_irqsave(&local->lock, flags);
        prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTB, a, v);
@@ -74,12 +76,14 @@ static inline void hfa384x_outb_debug(struct net_device *dev, int a, u8 v)
 static inline u8 hfa384x_inb_debug(struct net_device *dev, int a)
 {
        struct hostap_interface *iface;
+       struct hostap_pci_priv *hw_priv;
        local_info_t *local;
        unsigned long flags;
        u8 v;
 
        iface = netdev_priv(dev);
        local = iface->local;
+       hw_priv = local->hw_priv;
 
        spin_lock_irqsave(&local->lock, flags);
        v = readb(hw_priv->mem_start + a);
@@ -91,11 +95,13 @@ static inline u8 hfa384x_inb_debug(struct net_device *dev, int a)
 static inline void hfa384x_outw_debug(struct net_device *dev, int a, u16 v)
 {
        struct hostap_interface *iface;
+       struct hostap_pci_priv *hw_priv;
        local_info_t *local;
        unsigned long flags;
 
        iface = netdev_priv(dev);
        local = iface->local;
+       hw_priv = local->hw_priv;
 
        spin_lock_irqsave(&local->lock, flags);
        prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTW, a, v);
@@ -106,12 +112,14 @@ static inline void hfa384x_outw_debug(struct net_device *dev, int a, u16 v)
 static inline u16 hfa384x_inw_debug(struct net_device *dev, int a)
 {
        struct hostap_interface *iface;
+       struct hostap_pci_priv *hw_priv;
        local_info_t *local;
        unsigned long flags;
        u16 v;
 
        iface = netdev_priv(dev);
        local = iface->local;
+       hw_priv = local->hw_priv;
 
        spin_lock_irqsave(&local->lock, flags);
        v = readw(hw_priv->mem_start + a);
@@ -277,8 +285,6 @@ static struct prism2_helper_functions prism2_pci_funcs =
 {
        .card_present   = NULL,
        .cor_sreset     = prism2_pci_cor_sreset,
-       .dev_open       = NULL,
-       .dev_close      = NULL,
        .genesis_reset  = prism2_pci_genesis_reset,
        .hw_type        = HOSTAP_HW_PCI,
 };
@@ -352,8 +358,6 @@ static int prism2_pci_probe(struct pci_dev *pdev,
        return hostap_hw_ready(dev);
 
  fail:
-       kfree(hw_priv);
-
        if (irq_registered && dev)
                free_irq(dev->irq, dev);
 
@@ -364,10 +368,8 @@ static int prism2_pci_probe(struct pci_dev *pdev,
 
  err_out_disable:
        pci_disable_device(pdev);
-       kfree(hw_priv);
-       if (local)
-               local->hw_priv = NULL;
        prism2_free_local_data(dev);
+       kfree(hw_priv);
 
        return -ENODEV;
 }
@@ -392,9 +394,8 @@ static void prism2_pci_remove(struct pci_dev *pdev)
                free_irq(dev->irq, dev);
 
        mem_start = hw_priv->mem_start;
-       kfree(hw_priv);
-       iface->local->hw_priv = NULL;
        prism2_free_local_data(dev);
+       kfree(hw_priv);
 
        iounmap(mem_start);
 
@@ -441,7 +442,7 @@ static int prism2_pci_resume(struct pci_dev *pdev)
 MODULE_DEVICE_TABLE(pci, prism2_pci_id_table);
 
 static struct pci_driver prism2_pci_drv_id = {
-       .name           = "prism2_pci",
+       .name           = "hostap_pci",
        .id_table       = prism2_pci_id_table,
        .probe          = prism2_pci_probe,
        .remove         = prism2_pci_remove,
index 474ef83d813e876cbe1fa026ac9d851f828475e7..78d67b408b2f9423080bd276ba4bfc78644164a7 100644 (file)
@@ -328,8 +328,6 @@ static struct prism2_helper_functions prism2_plx_funcs =
 {
        .card_present   = NULL,
        .cor_sreset     = prism2_plx_cor_sreset,
-       .dev_open       = NULL,
-       .dev_close      = NULL,
        .genesis_reset  = prism2_plx_genesis_reset,
        .hw_type        = HOSTAP_HW_PLX,
 };
@@ -570,10 +568,8 @@ static int prism2_plx_probe(struct pci_dev *pdev,
        return hostap_hw_ready(dev);
 
  fail:
-       kfree(hw_priv);
-       if (local)
-               local->hw_priv = NULL;
        prism2_free_local_data(dev);
+       kfree(hw_priv);
 
        if (irq_registered && dev)
                free_irq(dev->irq, dev);
@@ -606,9 +602,8 @@ static void prism2_plx_remove(struct pci_dev *pdev)
        if (dev->irq)
                free_irq(dev->irq, dev);
 
-       kfree(iface->local->hw_priv);
-       iface->local->hw_priv = NULL;
        prism2_free_local_data(dev);
+       kfree(hw_priv);
        pci_disable_device(pdev);
 }
 
@@ -616,7 +611,7 @@ static void prism2_plx_remove(struct pci_dev *pdev)
 MODULE_DEVICE_TABLE(pci, prism2_plx_id_table);
 
 static struct pci_driver prism2_plx_drv_id = {
-       .name           = "prism2_plx",
+       .name           = "hostap_plx",
        .id_table       = prism2_plx_id_table,
        .probe          = prism2_plx_probe,
        .remove         = prism2_plx_remove,
index cc061e1560d39c7150f4770f063f89207f752f14..cfd8015594921a849dd00215d4190ca381b8cd91 100644 (file)
@@ -552,8 +552,6 @@ struct prism2_helper_functions {
         * (hostap_{cs,plx,pci}.c */
        int (*card_present)(local_info_t *local);
        void (*cor_sreset)(local_info_t *local);
-       int (*dev_open)(local_info_t *local);
-       int (*dev_close)(local_info_t *local);
        void (*genesis_reset)(local_info_t *local, int hcr);
 
        /* the following functions are from hostap_hw.c, but they may have some
index 2414e6493aa5639ad88aaa829395976c404fa972..ad7f8cd76db9041376b19a024a74dc38ac5854b4 100644 (file)
@@ -800,8 +800,7 @@ static int ipw2100_hw_send_command(struct ipw2100_priv *priv,
         * doesn't seem to have as many firmware restart cycles...
         *
         * As a test, we're sticking in a 1/100s delay here */
-       set_current_state(TASK_UNINTERRUPTIBLE);
-       schedule_timeout(HZ / 100);
+       schedule_timeout_uninterruptible(msecs_to_jiffies(10));
 
        return 0;
 
@@ -1256,8 +1255,7 @@ static int ipw2100_start_adapter(struct ipw2100_priv *priv)
        IPW_DEBUG_FW("Waiting for f/w initialization to complete...\n");
        i = 5000;
        do {
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(40 * HZ / 1000);
+               schedule_timeout_uninterruptible(msecs_to_jiffies(40));
                /* Todo... wait for sync command ... */
 
                read_register(priv->net_dev, IPW_REG_INTA, &inta);
@@ -1411,8 +1409,7 @@ static int ipw2100_hw_phy_off(struct ipw2100_priv *priv)
                    (val2 & IPW2100_COMMAND_PHY_OFF))
                        return 0;
 
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(HW_PHY_OFF_LOOP_DELAY);
+               schedule_timeout_uninterruptible(HW_PHY_OFF_LOOP_DELAY);
        }
 
        return -EIO;
@@ -1466,7 +1463,7 @@ fail_up:
 
 static int ipw2100_hw_stop_adapter(struct ipw2100_priv *priv)
 {
-#define HW_POWER_DOWN_DELAY (HZ / 10)
+#define HW_POWER_DOWN_DELAY (msecs_to_jiffies(100))
 
        struct host_command cmd = {
                .host_command = HOST_PRE_POWER_DOWN,
@@ -1520,10 +1517,8 @@ static int ipw2100_hw_stop_adapter(struct ipw2100_priv *priv)
                        printk(KERN_WARNING DRV_NAME ": "
                               "%s: Power down command failed: Error %d\n",
                               priv->net_dev->name, err);
-               else {
-                       set_current_state(TASK_UNINTERRUPTIBLE);
-                       schedule_timeout(HW_POWER_DOWN_DELAY);
-               }
+               else
+                       schedule_timeout_uninterruptible(HW_POWER_DOWN_DELAY);
        }
 
        priv->status &= ~STATUS_ENABLED;
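
All of the open-coded sleeps in ipw2100 collapse into schedule_timeout_uninterruptible(), which sets the task state itself, and the delays are expressed with msecs_to_jiffies() so they no longer depend on the kernel's HZ setting. The equivalence being relied on, for a 10 ms delay:

        /* old: two steps, and HZ/100 rounds differently depending on HZ */
        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule_timeout(HZ / 100);

        /* new: one call, delay stated in milliseconds */
        schedule_timeout_uninterruptible(msecs_to_jiffies(10));
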
@@ -2953,7 +2948,7 @@ static void ipw2100_tx_send_data(struct ipw2100_priv *priv)
        int next = txq->next;
         int i = 0;
        struct ipw2100_data_header *ipw_hdr;
-       struct ieee80211_hdr *hdr;
+       struct ieee80211_hdr_3addr *hdr;
 
        while (!list_empty(&priv->tx_pend_list)) {
                /* if there isn't enough space in TBD queue, then
@@ -2989,7 +2984,7 @@ static void ipw2100_tx_send_data(struct ipw2100_priv *priv)
                packet->index = txq->next;
 
                ipw_hdr = packet->info.d_struct.data;
-               hdr = (struct ieee80211_hdr *)packet->info.d_struct.txb->
+               hdr = (struct ieee80211_hdr_3addr *)packet->info.d_struct.txb->
                        fragments[0]->data;
 
                if (priv->ieee->iw_mode == IW_MODE_INFRA) {
@@ -3274,7 +3269,8 @@ static irqreturn_t ipw2100_interrupt(int irq, void *data,
        return IRQ_NONE;
 }
 
-static int ipw2100_tx(struct ieee80211_txb *txb, struct net_device *dev)
+static int ipw2100_tx(struct ieee80211_txb *txb, struct net_device *dev,
+                     int pri)
 {
        struct ipw2100_priv *priv = ieee80211_priv(dev);
        struct list_head *element;
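
ipw2100_tx() (and ipw_net_hard_start_xmit() in ipw2200 below) gains a third priority argument so its prototype keeps matching the transmit callback of the in-kernel ieee80211 layer; the value is simply ignored here. A sketch of the wiring this has to satisfy (the member name is assumed from net/ieee80211.h of this period):

        priv->ieee->hard_start_xmit = ipw2100_tx;       /* int (*)(struct ieee80211_txb *, struct net_device *, int) */
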
index 2a3cdbd50168aada6c620aac39eb6eb65b4992f0..c9e99ce15d66f45b107845baae368292f3b4f86d 100644 (file)
@@ -808,7 +808,7 @@ struct ipw2100_priv {
 struct ipw2100_rx {
        union {
                unsigned char payload[IPW_RX_NIC_BUFFER_LENGTH];
-               struct ieee80211_hdr header;
+               struct ieee80211_hdr_4addr header;
                u32 status;
                struct ipw2100_notification notification;
                struct ipw2100_cmd_header command;
index b7f275c00de3cecc592368e90183bb96a3067563..de4e6c23e4b8693a23da434d66d6269b49519974 100644 (file)
@@ -4904,7 +4904,7 @@ static void ipw_rx(struct ipw_priv *priv)
 {
        struct ipw_rx_mem_buffer *rxb;
        struct ipw_rx_packet *pkt;
-       struct ieee80211_hdr *header;
+       struct ieee80211_hdr_4addr *header;
        u32 r, w, i;
        u8 network_packet;
 
@@ -4967,8 +4967,9 @@ static void ipw_rx(struct ipw_priv *priv)
 #endif
 
                                header =
-                                   (struct ieee80211_hdr *)(rxb->skb->data +
-                                                            IPW_RX_FRAME_SIZE);
+                                   (struct ieee80211_hdr_4addr *)(rxb->skb->
+                                                                  data +
+                                                                  IPW_RX_FRAME_SIZE);
                                /* TODO: Check Ad-Hoc dest/source and make sure
                                 * that we are actually parsing these packets
                                 * correctly -- we should probably use the
@@ -5317,8 +5318,6 @@ static int ipw_wx_set_freq(struct net_device *dev,
 
        IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
        return ipw_set_channel(priv, (u8) fwrq->m);
-
-       return 0;
 }
 
 static int ipw_wx_get_freq(struct net_device *dev,
@@ -6010,12 +6009,12 @@ static int ipw_wx_set_wireless_mode(struct net_device *dev,
        }
 
        if (priv->adapter == IPW_2915ABG) {
-               priv->ieee->abg_ture = 1;
+               priv->ieee->abg_true = 1;
                if (mode & IEEE_A) {
                        band |= IEEE80211_52GHZ_BAND;
                        modulation |= IEEE80211_OFDM_MODULATION;
                } else
-                       priv->ieee->abg_ture = 0;
+                       priv->ieee->abg_true = 0;
        } else {
                if (mode & IEEE_A) {
                        IPW_WARNING("Attempt to set 2200BG into "
@@ -6023,20 +6022,20 @@ static int ipw_wx_set_wireless_mode(struct net_device *dev,
                        return -EINVAL;
                }
 
-               priv->ieee->abg_ture = 0;
+               priv->ieee->abg_true = 0;
        }
 
        if (mode & IEEE_B) {
                band |= IEEE80211_24GHZ_BAND;
                modulation |= IEEE80211_CCK_MODULATION;
        } else
-               priv->ieee->abg_ture = 0;
+               priv->ieee->abg_true = 0;
 
        if (mode & IEEE_G) {
                band |= IEEE80211_24GHZ_BAND;
                modulation |= IEEE80211_OFDM_MODULATION;
        } else
-               priv->ieee->abg_ture = 0;
+               priv->ieee->abg_true = 0;
 
        priv->ieee->mode = mode;
        priv->ieee->freq_band = band;
@@ -6325,7 +6324,7 @@ we need to heavily modify the ieee80211_skb_to_txb.
 
 static inline void ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb)
 {
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)
+       struct ieee80211_hdr_3addr *hdr = (struct ieee80211_hdr_3addr *)
            txb->fragments[0]->data;
        int i = 0;
        struct tfd_frame *tfd;
@@ -6448,7 +6447,7 @@ static inline void ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb)
 }
 
 static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
-                                  struct net_device *dev)
+                                  struct net_device *dev, int pri)
 {
        struct ipw_priv *priv = ieee80211_priv(dev);
        unsigned long flags;
@@ -7108,7 +7107,7 @@ static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                printk(KERN_INFO DRV_NAME
                       ": Detected Intel PRO/Wireless 2915ABG Network "
                       "Connection\n");
-               priv->ieee->abg_ture = 1;
+               priv->ieee->abg_true = 1;
                band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND;
                modulation = IEEE80211_OFDM_MODULATION |
                    IEEE80211_CCK_MODULATION;
@@ -7124,7 +7123,7 @@ static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                               ": Detected Intel PRO/Wireless 2200BG Network "
                               "Connection\n");
 
-               priv->ieee->abg_ture = 0;
+               priv->ieee->abg_true = 0;
                band = IEEE80211_24GHZ_BAND;
                modulation = IEEE80211_OFDM_MODULATION |
                    IEEE80211_CCK_MODULATION;
index 5b00882133f919740cbe7e098a500e270ff8a5ff..e9cf32bf3e31741936882932b24ce289bf28f982 100644 (file)
@@ -1654,12 +1654,12 @@ static const long ipw_frequencies[] = {
 
 #define IPW_MAX_CONFIG_RETRIES 10
 
-static inline u32 frame_hdr_len(struct ieee80211_hdr *hdr)
+static inline u32 frame_hdr_len(struct ieee80211_hdr_4addr *hdr)
 {
        u32 retval;
        u16 fc;
 
-       retval = sizeof(struct ieee80211_hdr);
+       retval = sizeof(struct ieee80211_hdr_3addr);
        fc = le16_to_cpu(hdr->frame_ctl);
 
        /*
index ca6c03c89926ca89605c5b881a2ccabc0cb81411..92793b958e327d0777ea76291e614460bbe5bb40 100644 (file)
@@ -57,9 +57,7 @@
 #include <linux/bitops.h>
 #ifdef CONFIG_NET_RADIO
 #include <linux/wireless.h>
-#if WIRELESS_EXT > 12
 #include <net/iw_handler.h>
-#endif /* WIRELESS_EXT > 12 */
 #endif
 
 #include <pcmcia/cs_types.h>
@@ -225,10 +223,7 @@ static void update_stats(struct net_device *dev);
 static struct net_device_stats *netwave_get_stats(struct net_device *dev);
 
 /* Wireless extensions */
-#ifdef WIRELESS_EXT
 static struct iw_statistics* netwave_get_wireless_stats(struct net_device *dev);
-#endif
-static int netwave_ioctl(struct net_device *, struct ifreq *, int);
 
 static void set_multicast_list(struct net_device *dev);
 
@@ -260,26 +255,7 @@ static dev_link_t *dev_list;
    because they generally can't be allocated dynamically.
 */
 
-#if WIRELESS_EXT <= 12
-/* Wireless extensions backward compatibility */
-
-/* Part of iw_handler prototype we need */
-struct iw_request_info
-{
-       __u16           cmd;            /* Wireless Extension command */
-       __u16           flags;          /* More to come ;-) */
-};
-
-/* Wireless Extension Backward compatibility - Jean II
- * If the new wireless device private ioctl range is not defined,
- * default to standard device private ioctl range */
-#ifndef SIOCIWFIRSTPRIV
-#define SIOCIWFIRSTPRIV        SIOCDEVPRIVATE
-#endif /* SIOCIWFIRSTPRIV */
-
-#else  /* WIRELESS_EXT <= 12 */
 static const struct iw_handler_def     netwave_handler_def;
-#endif /* WIRELESS_EXT <= 12 */
 
 #define SIOCGIPSNAP    SIOCIWFIRSTPRIV + 1     /* Site Survey Snapshot */
 
@@ -319,9 +295,7 @@ typedef struct netwave_private {
     struct timer_list      watchdog;   /* To avoid blocking state */
     struct site_survey     nss;
     struct net_device_stats stats;
-#ifdef WIRELESS_EXT
     struct iw_statistics   iw_stats;    /* Wireless stats */
-#endif
 } netwave_private;
 
 #ifdef NETWAVE_STATS
@@ -353,7 +327,6 @@ static inline void wait_WOC(unsigned int iobase)
     while ((inb(iobase + NETWAVE_REG_ASR) & 0x8) != 0x8) ; 
 }
 
-#ifdef WIRELESS_EXT
 static void netwave_snapshot(netwave_private *priv, u_char __iomem *ramBase, 
                             kio_addr_t iobase) {
     u_short resultBuffer;
@@ -376,9 +349,7 @@ static void netwave_snapshot(netwave_private *priv, u_char __iomem *ramBase,
                      sizeof(struct site_survey)); 
     } 
 }
-#endif
 
-#ifdef WIRELESS_EXT
 /*
  * Function netwave_get_wireless_stats (dev)
  *
@@ -411,7 +382,6 @@ static struct iw_statistics *netwave_get_wireless_stats(struct net_device *dev)
     
     return &priv->iw_stats;
 }
-#endif
 
 /*
  * Function netwave_attach (void)
@@ -471,13 +441,7 @@ static dev_link_t *netwave_attach(void)
     dev->get_stats  = &netwave_get_stats;
     dev->set_multicast_list = &set_multicast_list;
     /* wireless extensions */
-#if WIRELESS_EXT <= 16
-    dev->get_wireless_stats = &netwave_get_wireless_stats;
-#endif /* WIRELESS_EXT <= 16 */
-#if WIRELESS_EXT > 12
     dev->wireless_handlers = (struct iw_handler_def *)&netwave_handler_def;
-#endif /* WIRELESS_EXT > 12 */
-    dev->do_ioctl = &netwave_ioctl;
 
     dev->tx_timeout = &netwave_watchdog;
     dev->watchdog_timeo = TX_TIMEOUT;
@@ -576,13 +540,8 @@ static int netwave_set_nwid(struct net_device *dev,
        /* Disable interrupts & save flags */
        spin_lock_irqsave(&priv->spinlock, flags);
 
-#if WIRELESS_EXT > 8
        if(!wrqu->nwid.disabled) {
            domain = wrqu->nwid.value;
-#else  /* WIRELESS_EXT > 8 */
-       if(wrqu->nwid.on) {
-           domain = wrqu->nwid.nwid;
-#endif /* WIRELESS_EXT > 8 */
            printk( KERN_DEBUG "Setting domain to 0x%x%02x\n", 
                    (domain >> 8) & 0x01, domain & 0xff);
            wait_WOC(iobase);
@@ -606,15 +565,9 @@ static int netwave_get_nwid(struct net_device *dev,
                            union iwreq_data *wrqu,
                            char *extra)
 {
-#if WIRELESS_EXT > 8
        wrqu->nwid.value = domain;
        wrqu->nwid.disabled = 0;
        wrqu->nwid.fixed = 1;
-#else  /* WIRELESS_EXT > 8 */
-       wrqu->nwid.nwid = domain;
-       wrqu->nwid.on = 1;
-#endif /* WIRELESS_EXT > 8 */
-
        return 0;
 }
 
@@ -657,17 +610,11 @@ static int netwave_get_scramble(struct net_device *dev,
 {
        key[1] = scramble_key & 0xff;
        key[0] = (scramble_key>>8) & 0xff;
-#if WIRELESS_EXT > 8
        wrqu->encoding.flags = IW_ENCODE_ENABLED;
        wrqu->encoding.length = 2;
-#else /* WIRELESS_EXT > 8 */
-       wrqu->encoding.method = 1;
-#endif /* WIRELESS_EXT > 8 */
-
        return 0;
 }
 
-#if WIRELESS_EXT > 8
 /*
  * Wireless Handler : get mode
  */
@@ -683,7 +630,6 @@ static int netwave_get_mode(struct net_device *dev,
 
        return 0;
 }
-#endif /* WIRELESS_EXT > 8 */
 
 /*
  * Wireless Handler : get range info
@@ -702,11 +648,9 @@ static int netwave_get_range(struct net_device *dev,
        /* Set all the info we don't care or don't know about to zero */
        memset(range, 0, sizeof(struct iw_range));
 
-#if WIRELESS_EXT > 10
        /* Set the Wireless Extension versions */
        range->we_version_compiled = WIRELESS_EXT;
        range->we_version_source = 9;   /* Nothing for us in v10 and v11 */
-#endif /* WIRELESS_EXT > 10 */
                   
        /* Set information in the range struct */
        range->throughput = 450 * 1000; /* don't argue on this ! */
@@ -720,16 +664,12 @@ static int netwave_get_range(struct net_device *dev,
        range->max_qual.level = 255;
        range->max_qual.noise = 0;
                   
-#if WIRELESS_EXT > 7
        range->num_bitrates = 1;
        range->bitrate[0] = 1000000;    /* 1 Mb/s */
-#endif /* WIRELESS_EXT > 7 */
 
-#if WIRELESS_EXT > 8
        range->encoding_size[0] = 2;            /* 16 bits scrambling */
        range->num_encoding_sizes = 1;
        range->max_encoding_tokens = 1; /* Only one key possible */
-#endif /* WIRELESS_EXT > 8 */
 
        return ret;
 }
@@ -775,8 +715,6 @@ static const struct iw_priv_args netwave_private_args[] = {
     "getsitesurvey" },
 };
 
-#if WIRELESS_EXT > 12
-
 static const iw_handler                netwave_handler[] =
 {
        NULL,                           /* SIOCSIWNAME */
@@ -839,131 +777,8 @@ static const struct iw_handler_def        netwave_handler_def =
        .standard       = (iw_handler *) netwave_handler,
        .private        = (iw_handler *) netwave_private_handler,
        .private_args   = (struct iw_priv_args *) netwave_private_args,
-#if WIRELESS_EXT > 16
        .get_wireless_stats = netwave_get_wireless_stats,
-#endif /* WIRELESS_EXT > 16 */
 };
-#endif /* WIRELESS_EXT > 12 */
-
-/*
- * Function netwave_ioctl (dev, rq, cmd)
- *
- *     Perform ioctl : config & info stuff
- *     This is the stuff that are treated the wireless extensions (iwconfig)
- *
- */
-static int netwave_ioctl(struct net_device *dev, /* ioctl device */
-                        struct ifreq *rq,       /* Data passed */
-                        int    cmd)         /* Ioctl number */
-{
-    int                        ret = 0;
-#ifdef WIRELESS_EXT
-#if WIRELESS_EXT <= 12
-    struct iwreq *wrq = (struct iwreq *) rq;
-#endif
-#endif
-       
-    DEBUG(0, "%s: ->netwave_ioctl(cmd=0x%X)\n", dev->name, cmd);
-       
-    /* Look what is the request */
-    switch(cmd) {
-       /* --------------- WIRELESS EXTENSIONS --------------- */
-#ifdef WIRELESS_EXT
-#if WIRELESS_EXT <= 12
-    case SIOCGIWNAME:
-       netwave_get_name(dev, NULL, &(wrq->u), NULL);
-       break;
-    case SIOCSIWNWID:
-       ret = netwave_set_nwid(dev, NULL, &(wrq->u), NULL);
-       break;
-    case SIOCGIWNWID:
-       ret = netwave_get_nwid(dev, NULL, &(wrq->u), NULL);
-       break;
-#if WIRELESS_EXT > 8   /* Note : The API did change... */
-    case SIOCGIWENCODE:
-       /* Get scramble key */
-       if(wrq->u.encoding.pointer != (caddr_t) 0)
-         {
-           char        key[2];
-           ret = netwave_get_scramble(dev, NULL, &(wrq->u), key);
-           if(copy_to_user(wrq->u.encoding.pointer, key, 2))
-             ret = -EFAULT;
-         }
-       break;
-    case SIOCSIWENCODE:
-       /* Set  scramble key */
-       if(wrq->u.encoding.pointer != (caddr_t) 0)
-         {
-           char        key[2];
-           if(copy_from_user(key, wrq->u.encoding.pointer, 2))
-             {
-               ret = -EFAULT;
-               break;
-             }
-           ret = netwave_set_scramble(dev, NULL, &(wrq->u), key);
-         }
-       break;
-    case SIOCGIWMODE:
-       /* Mode of operation */
-       ret = netwave_get_mode(dev, NULL, &(wrq->u), NULL);
-       break;
-#else /* WIRELESS_EXT > 8 */
-    case SIOCGIWENCODE:
-       /* Get scramble key */
-       ret = netwave_get_scramble(dev, NULL, &(wrq->u),
-                                  (char *) &wrq->u.encoding.code);
-       break;
-    case SIOCSIWENCODE:
-       /* Set  scramble key */
-       ret = netwave_set_scramble(dev, NULL, &(wrq->u),
-                                  (char *) &wrq->u.encoding.code);
-       break;
-#endif /* WIRELESS_EXT > 8 */
-   case SIOCGIWRANGE:
-       /* Basic checking... */
-       if(wrq->u.data.pointer != (caddr_t) 0) {
-           struct iw_range range;
-          ret = netwave_get_range(dev, NULL, &(wrq->u), (char *) &range);
-          if (copy_to_user(wrq->u.data.pointer, &range,
-                           sizeof(struct iw_range)))
-              ret = -EFAULT;
-       }
-       break;
-    case SIOCGIWPRIV:
-       /* Basic checking... */
-       if(wrq->u.data.pointer != (caddr_t) 0) {
-           /* Set the number of ioctl available */
-           wrq->u.data.length = sizeof(netwave_private_args) / sizeof(netwave_private_args[0]);
-                       
-           /* Copy structure to the user buffer */
-           if(copy_to_user(wrq->u.data.pointer,
-                           (u_char *) netwave_private_args,
-                           sizeof(netwave_private_args)))
-             ret = -EFAULT;
-       } 
-       break;
-    case SIOCGIPSNAP:
-       if(wrq->u.data.pointer != (caddr_t) 0) {
-           char buffer[sizeof( struct site_survey)];
-           ret = netwave_get_snap(dev, NULL, &(wrq->u), buffer);
-           /* Copy structure to the user buffer */
-           if(copy_to_user(wrq->u.data.pointer, 
-                           buffer,
-                           sizeof( struct site_survey)))
-             {
-               printk(KERN_DEBUG "Bad buffer!\n");
-               break;
-             }
-       }
-       break;
-#endif /* WIRELESS_EXT <= 12 */
-#endif /* WIRELESS_EXT */
-    default:
-       ret = -EOPNOTSUPP;
-    }
-       
-    return ret;
-}
 
 /*
  * Function netwave_pcmcia_config (link)
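
Everything the removed netwave_ioctl() switch did by hand (dispatching on the ioctl number, copying data to and from user space, the pre-WE-13 compatibility structs) is handled by the wireless-extensions core once the handler table is registered, so the driver's only remaining obligation is the line that already exists in netwave_attach():

    dev->wireless_handlers = (struct iw_handler_def *)&netwave_handler_def;
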
index 15ceaf615756a2ceb397993c384b81ef117354a5..d3d4ec9e242e311a16127e635c37585d8fc1d61e 100644 (file)
 #define DRIVER_NAME "orinoco"
 
 #include <linux/config.h>
-
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/ioport.h>
 #include <linux/netdevice.h>
-#include <linux/if_arp.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
 #include <linux/wireless.h>
 #include <net/iw_handler.h>
 #include <net/ieee80211.h>
 
-#include <net/ieee80211.h>
-
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/system.h>
-
-#include "hermes.h"
 #include "hermes_rid.h"
 #include "orinoco.h"
 
@@ -137,7 +123,7 @@ MODULE_PARM_DESC(force_monitor, "Allow monitor mode for all firmware versions");
 
 /* We do this this way to avoid ifdefs in the actual code */
 #ifdef WIRELESS_SPY
-#define SPY_NUMBER(priv)       (priv->spy_number)
+#define SPY_NUMBER(priv)       (priv->spy_data.spy_number)
 #else
 #define SPY_NUMBER(priv)       0
 #endif /* WIRELESS_SPY */
@@ -216,31 +202,32 @@ static struct {
 /********************************************************************/
 
 /* Used in Event handling.
- * We avoid nested structres as they break on ARM -- Moustafa */
+ * We avoid nested structures as they break on ARM -- Moustafa */
 struct hermes_tx_descriptor_802_11 {
        /* hermes_tx_descriptor */
-       u16 status;
-       u16 reserved1;
-       u16 reserved2;
-       u32 sw_support;
+       __le16 status;
+       __le16 reserved1;
+       __le16 reserved2;
+       __le32 sw_support;
        u8 retry_count;
        u8 tx_rate;
-       u16 tx_control;
+       __le16 tx_control;
 
-       /* ieee802_11_hdr */
-       u16 frame_ctl;
-       u16 duration_id;
+       /* ieee80211_hdr */
+       __le16 frame_ctl;
+       __le16 duration_id;
        u8 addr1[ETH_ALEN];
        u8 addr2[ETH_ALEN];
        u8 addr3[ETH_ALEN];
-       u16 seq_ctl;
+       __le16 seq_ctl;
        u8 addr4[ETH_ALEN];
-       u16 data_len;
+
+       __le16 data_len;
 
        /* ethhdr */
-       unsigned char   h_dest[ETH_ALEN];       /* destination eth addr */
-       unsigned char   h_source[ETH_ALEN];     /* source ether addr    */
-       unsigned short  h_proto;                /* packet type ID field */
+       u8 h_dest[ETH_ALEN];    /* destination eth addr */
+       u8 h_source[ETH_ALEN];  /* source ether addr    */
+       __be16 h_proto;         /* packet type ID field */
 
        /* p8022_hdr */
        u8 dsap;
@@ -248,31 +235,31 @@ struct hermes_tx_descriptor_802_11 {
        u8 ctrl;
        u8 oui[3];
 
-       u16 ethertype;
+       __be16 ethertype;
 } __attribute__ ((packed));
 
 /* Rx frame header except compatibility 802.3 header */
 struct hermes_rx_descriptor {
        /* Control */
-       u16 status;
-       u32 time;
+       __le16 status;
+       __le32 time;
        u8 silence;
        u8 signal;
        u8 rate;
        u8 rxflow;
-       u32 reserved;
+       __le32 reserved;
 
        /* 802.11 header */
-       u16 frame_ctl;
-       u16 duration_id;
+       __le16 frame_ctl;
+       __le16 duration_id;
        u8 addr1[ETH_ALEN];
        u8 addr2[ETH_ALEN];
        u8 addr3[ETH_ALEN];
-       u16 seq_ctl;
+       __le16 seq_ctl;
        u8 addr4[ETH_ALEN];
 
        /* Data length */
-       u16 data_len;
+       __le16 data_len;
 } __attribute__ ((packed));
 
 /********************************************************************/
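
The fields of the TX and RX descriptors that come straight off the hardware are re-typed with the sparse endianness annotations (__le16/__le32 for little-endian wire data, __be16 for the Ethernet protocol field). This documents the byte order and lets an endian-aware sparse run flag any access that forgets the conversion; the access itself stays explicit:

        /* desc is hypothetical here; any struct hermes_rx_descriptor pointer works */
        u16 status = le16_to_cpu(desc->status);
        int len    = le16_to_cpu(desc->data_len);
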
@@ -396,14 +383,14 @@ static struct iw_statistics *orinoco_get_wireless_stats(struct net_device *dev)
                /* If a spy address is defined, we report stats of the
                 * first spy address - Jean II */
                if (SPY_NUMBER(priv)) {
-                       wstats->qual.qual = priv->spy_stat[0].qual;
-                       wstats->qual.level = priv->spy_stat[0].level;
-                       wstats->qual.noise = priv->spy_stat[0].noise;
-                       wstats->qual.updated = priv->spy_stat[0].updated;
+                       wstats->qual.qual = priv->spy_data.spy_stat[0].qual;
+                       wstats->qual.level = priv->spy_data.spy_stat[0].level;
+                       wstats->qual.noise = priv->spy_data.spy_stat[0].noise;
+                       wstats->qual.updated = priv->spy_data.spy_stat[0].updated;
                }
        } else {
                struct {
-                       u16 qual, signal, noise;
+                       __le16 qual, signal, noise;
                } __attribute__ ((packed)) cq;
 
                err = HERMES_READ_RECORD(hw, USER_BAP,
@@ -505,11 +492,9 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
 
        /* Check packet length, pad short packets, round up odd length */
        len = max_t(int, ALIGN(skb->len, 2), ETH_ZLEN);
-       if (skb->len < len) {
-               skb = skb_padto(skb, len);
-               if (skb == NULL)
-                       goto fail;
-       }
+       skb = skb_padto(skb, len);
+       if (skb == NULL)
+               goto fail;
        len -= ETH_HLEN;
 
        eh = (struct ethhdr *)skb->data;
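
The explicit length check around skb_padto() is dropped because the helper already returns the skb untouched when it is long enough; only when padding (and possibly reallocation) is needed can it return NULL. In outline:

        /* what the unconditional call relies on (sketch of skb_padto behaviour):
         *      if (skb->len >= len)
         *              return skb;                     - nothing to do
         *      return skb_pad(skb, len - skb->len);    - may fail and return NULL
         */
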
@@ -634,16 +619,17 @@ static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw)
        struct orinoco_private *priv = netdev_priv(dev);
        struct net_device_stats *stats = &priv->stats;
        u16 fid = hermes_read_regn(hw, TXCOMPLFID);
+       u16 status;
        struct hermes_tx_descriptor_802_11 hdr;
        int err = 0;
 
        if (fid == DUMMY_FID)
                return; /* Nothing's really happened */
 
-       /* Read the frame header */
+       /* Read part of the frame header - we need status and addr1 */
        err = hermes_bap_pread(hw, IRQ_BAP, &hdr,
-                              sizeof(struct hermes_tx_descriptor) +
-                              sizeof(struct ieee80211_hdr),
+                              offsetof(struct hermes_tx_descriptor_802_11,
+                                       addr2),
                               fid, 0);
 
        hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID);
@@ -663,8 +649,8 @@ static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw)
         * exceeded, because that's the only status that really mean
         * that this particular node went away.
         * Other errors means that *we* screwed up. - Jean II */
-       hdr.status = le16_to_cpu(hdr.status);
-       if (hdr.status & (HERMES_TXSTAT_RETRYERR | HERMES_TXSTAT_AGEDERR)) {
+       status = le16_to_cpu(hdr.status);
+       if (status & (HERMES_TXSTAT_RETRYERR | HERMES_TXSTAT_AGEDERR)) {
                union iwreq_data        wrqu;
 
                /* Copy 802.11 dest address.
@@ -723,18 +709,13 @@ static inline int is_ethersnap(void *_hdr)
 static inline void orinoco_spy_gather(struct net_device *dev, u_char *mac,
                                      int level, int noise)
 {
-       struct orinoco_private *priv = netdev_priv(dev);
-       int i;
-
-       /* Gather wireless spy statistics: for each packet, compare the
-        * source address with out list, and if match, get the stats... */
-       for (i = 0; i < priv->spy_number; i++)
-               if (!memcmp(mac, priv->spy_address[i], ETH_ALEN)) {
-                       priv->spy_stat[i].level = level - 0x95;
-                       priv->spy_stat[i].noise = noise - 0x95;
-                       priv->spy_stat[i].qual = (level > noise) ? (level - noise) : 0;
-                       priv->spy_stat[i].updated = 7;
-               }
+       struct iw_quality wstats;
+       wstats.level = level - 0x95;
+       wstats.noise = noise - 0x95;
+       wstats.qual = (level > noise) ? (level - noise) : 0;
+       wstats.updated = 7;
+       /* Update spy records */
+       wireless_spy_update(dev, mac, &wstats);
 }
 
 static void orinoco_stat_gather(struct net_device *dev,
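
The hand-rolled spy table walk is replaced by wireless_spy_update(), the shared helper from net/iw_handler.h; together with the handler-table change further down (SIOCSIWSPY and friends now pointing at iw_handler_set_spy() and related handlers), the driver only has to embed a struct iw_spy_data and expose it, which this patch does in alloc_orinocodev():

        priv->wireless_data.spy_data = &priv->spy_data;
        dev->wireless_data = &priv->wireless_data;
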
@@ -1055,7 +1036,7 @@ static void orinoco_join_ap(struct net_device *dev)
        unsigned long flags;
        struct join_req {
                u8 bssid[ETH_ALEN];
-               u16 channel;
+               __le16 channel;
        } __attribute__ ((packed)) req;
        const int atom_len = offsetof(struct prism2_scan_apinfo, atim);
        struct prism2_scan_apinfo *atom = NULL;
@@ -1070,7 +1051,7 @@ static void orinoco_join_ap(struct net_device *dev)
                return;
 
        if (orinoco_lock(priv, &flags) != 0)
-               goto out;
+               goto fail_lock;
 
        /* Sanity checks in case user changed something in the meantime */
        if (! priv->bssid_fixed)
@@ -1115,8 +1096,10 @@ static void orinoco_join_ap(struct net_device *dev)
                printk(KERN_ERR "%s: Error issuing join request\n", dev->name);
 
  out:
-       kfree(buf);
        orinoco_unlock(priv, &flags);
+
+ fail_lock:
+       kfree(buf);
 }
 
 /* Send new BSSID to userspace */
@@ -1134,12 +1117,14 @@ static void orinoco_send_wevents(struct net_device *dev)
        err = hermes_read_ltv(hw, IRQ_BAP, HERMES_RID_CURRENTBSSID,
                              ETH_ALEN, NULL, wrqu.ap_addr.sa_data);
        if (err != 0)
-               return;
+               goto out;
 
        wrqu.ap_addr.sa_family = ARPHRD_ETHER;
 
        /* Send event to user space */
        wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
+
+ out:
        orinoco_unlock(priv, &flags);
 }
 
@@ -1148,8 +1133,8 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
        struct orinoco_private *priv = netdev_priv(dev);
        u16 infofid;
        struct {
-               u16 len;
-               u16 type;
+               __le16 len;
+               __le16 type;
        } __attribute__ ((packed)) info;
        int len, type;
        int err;
@@ -2464,6 +2449,10 @@ struct net_device *alloc_orinocodev(int sizeof_card,
        dev->get_stats = orinoco_get_stats;
        dev->ethtool_ops = &orinoco_ethtool_ops;
        dev->wireless_handlers = (struct iw_handler_def *)&orinoco_handler_def;
+#ifdef WIRELESS_SPY
+       priv->wireless_data.spy_data = &priv->spy_data;
+       dev->wireless_data = &priv->wireless_data;
+#endif
        dev->change_mtu = orinoco_change_mtu;
        dev->set_multicast_list = orinoco_set_multicast_list;
        /* we use the default eth_mac_addr for setting the MAC addr */
@@ -2835,7 +2824,7 @@ static int orinoco_ioctl_getiwrange(struct net_device *dev,
                }
        }
 
-       if ((priv->iw_mode == IW_MODE_ADHOC) && (priv->spy_number == 0)){
+       if ((priv->iw_mode == IW_MODE_ADHOC) && (!SPY_NUMBER(priv))){
                /* Quality stats meaningless in ad-hoc mode */
        } else {
                range->max_qual.qual = 0x8b - 0x2f;
@@ -2882,6 +2871,14 @@ static int orinoco_ioctl_getiwrange(struct net_device *dev,
        range->min_r_time = 0;
        range->max_r_time = 65535 * 1000;       /* ??? */
 
+       /* Event capability (kernel) */
+       IW_EVENT_CAPA_SET_KERNEL(range->event_capa);
+       /* Event capability (driver) */
+       IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWTHRSPY);
+       IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP);
+       IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN);
+       IW_EVENT_CAPA_SET(range->event_capa, IWEVTXDROP);
+
        TRACE_EXIT(dev->name);
 
        return 0;
@@ -3841,92 +3838,6 @@ static int orinoco_ioctl_getrid(struct net_device *dev,
        return err;
 }
 
-/* Spy is used for link quality/strength measurements in Ad-Hoc mode
- * Jean II */
-static int orinoco_ioctl_setspy(struct net_device *dev,
-                               struct iw_request_info *info,
-                               struct iw_point *srq,
-                               char *extra)
-
-{
-       struct orinoco_private *priv = netdev_priv(dev);
-       struct sockaddr *address = (struct sockaddr *) extra;
-       int number = srq->length;
-       int i;
-       unsigned long flags;
-
-       /* Make sure nobody mess with the structure while we do */
-       if (orinoco_lock(priv, &flags) != 0)
-               return -EBUSY;
-
-       /* orinoco_lock() doesn't disable interrupts, so make sure the
-        * interrupt rx path don't get confused while we copy */
-       priv->spy_number = 0;
-
-       if (number > 0) {
-               /* Extract the addresses */
-               for (i = 0; i < number; i++)
-                       memcpy(priv->spy_address[i], address[i].sa_data,
-                              ETH_ALEN);
-               /* Reset stats */
-               memset(priv->spy_stat, 0,
-                      sizeof(struct iw_quality) * IW_MAX_SPY);
-               /* Set number of addresses */
-               priv->spy_number = number;
-       }
-
-       /* Now, let the others play */
-       orinoco_unlock(priv, &flags);
-
-       /* Do NOT call commit handler */
-       return 0;
-}
-
-static int orinoco_ioctl_getspy(struct net_device *dev,
-                               struct iw_request_info *info,
-                               struct iw_point *srq,
-                               char *extra)
-{
-       struct orinoco_private *priv = netdev_priv(dev);
-       struct sockaddr *address = (struct sockaddr *) extra;
-       int number;
-       int i;
-       unsigned long flags;
-
-       if (orinoco_lock(priv, &flags) != 0)
-               return -EBUSY;
-
-       number = priv->spy_number;
-       /* Create address struct */
-       for (i = 0; i < number; i++) {
-               memcpy(address[i].sa_data, priv->spy_address[i], ETH_ALEN);
-               address[i].sa_family = AF_UNIX;
-       }
-       if (number > 0) {
-               /* Create address struct */
-               for (i = 0; i < number; i++) {
-                       memcpy(address[i].sa_data, priv->spy_address[i],
-                              ETH_ALEN);
-                       address[i].sa_family = AF_UNIX;
-               }
-               /* Copy stats */
-               /* In theory, we should disable irqs while copying the stats
-                * because the rx path might update it in the middle...
-                * Bah, who care ? - Jean II */
-               memcpy(extra  + (sizeof(struct sockaddr) * number),
-                      priv->spy_stat, sizeof(struct iw_quality) * number);
-       }
-       /* Reset updated flags. */
-       for (i = 0; i < number; i++)
-               priv->spy_stat[i].updated = 0;
-
-       orinoco_unlock(priv, &flags);
-
-       srq->length = number;
-
-       return 0;
-}
-
 /* Trigger a scan (look for other cells in the vicinity */
 static int orinoco_ioctl_setscan(struct net_device *dev,
                                 struct iw_request_info *info,
@@ -3999,7 +3910,7 @@ static int orinoco_ioctl_setscan(struct net_device *dev,
                                                   HERMES_HOSTSCAN_SYMBOL_BCAST);
                        break;
                case FIRMWARE_TYPE_INTERSIL: {
-                       u16 req[3];
+                       __le16 req[3];
 
                        req[0] = cpu_to_le16(0x3fff);   /* All channels */
                        req[1] = cpu_to_le16(0x0001);   /* rate 1 Mbps */
@@ -4073,7 +3984,7 @@ static inline int orinoco_translate_scan(struct net_device *dev,
        case FIRMWARE_TYPE_INTERSIL:
                offset = 4;
                if (priv->has_hostscan) {
-                       atom_len = le16_to_cpup((u16 *)scan);
+                       atom_len = le16_to_cpup((__le16 *)scan);
                        /* Sanity check for atom_len */
                        if (atom_len < sizeof(struct prism2_scan_apinfo)) {
                                printk(KERN_ERR "%s: Invalid atom_len in scan data: %d\n",
@@ -4357,8 +4268,10 @@ static const iw_handler  orinoco_handler[] = {
        [SIOCSIWSENS  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setsens,
        [SIOCGIWSENS  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getsens,
        [SIOCGIWRANGE -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getiwrange,
-       [SIOCSIWSPY   -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setspy,
-       [SIOCGIWSPY   -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getspy,
+       [SIOCSIWSPY   -SIOCIWFIRST] = (iw_handler) iw_handler_set_spy,
+       [SIOCGIWSPY   -SIOCIWFIRST] = (iw_handler) iw_handler_get_spy,
+       [SIOCSIWTHRSPY-SIOCIWFIRST] = (iw_handler) iw_handler_set_thrspy,
+       [SIOCGIWTHRSPY-SIOCIWFIRST] = (iw_handler) iw_handler_get_thrspy,
        [SIOCSIWAP    -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setwap,
        [SIOCGIWAP    -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getwap,
        [SIOCSIWSCAN  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setscan,
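
The handler-table hunk above switches SIOCSIWSPY/SIOCGIWSPY from driver-private routines to the generic iw_handler_set_spy()/iw_handler_get_spy() helpers and adds the threshold variants. For reference, a minimal sketch of what a driver provides so those helpers work — an embedded struct iw_spy_data plus a struct iw_public_data hooked to the net_device; "example_priv" is a made-up name, the types come from net/iw_handler.h:

/* Illustrative sketch, not part of this commit.  The generic iwspy
 * handlers keep their state in a struct iw_spy_data reachable through
 * dev->wireless_data. */
#include <linux/netdevice.h>
#include <net/iw_handler.h>

struct example_priv {
        struct iw_spy_data      spy_data;       /* state used by iw_handler_{set,get}_spy() */
        struct iw_public_data   wireless_data;  /* exported to the wireless core */
};

static void example_setup_iwspy(struct net_device *dev, struct example_priv *priv)
{
        /* Let the generic handlers (and the receive-path spy update) find
         * the spy data through the net_device. */
        priv->wireless_data.spy_data = &priv->spy_data;
        dev->wireless_data = &priv->wireless_data;
}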
index 2f213a7103fe70cee7ca41ca66f0994928471f85..7a17bb31fc896da55e1943c021aaa2ad7c125c51 100644 (file)
@@ -7,12 +7,11 @@
 #ifndef _ORINOCO_H
 #define _ORINOCO_H
 
-#define DRIVER_VERSION "0.15rc2"
+#define DRIVER_VERSION "0.15rc3"
 
-#include <linux/types.h>
-#include <linux/spinlock.h>
 #include <linux/netdevice.h>
 #include <linux/wireless.h>
+#include <net/iw_handler.h>
 #include <linux/version.h>
 
 #include "hermes.h"
@@ -28,7 +27,7 @@
 #define ORINOCO_MAX_KEYS       4
 
 struct orinoco_key {
-       u16 len;        /* always stored as little-endian */
+       __le16 len;     /* always stored as little-endian */
        char data[ORINOCO_MAX_KEY_SIZE];
 } __attribute__ ((packed));
 
@@ -36,14 +35,14 @@ struct header_struct {
        /* 802.3 */
        u8 dest[ETH_ALEN];
        u8 src[ETH_ALEN];
-       u16 len;
+       __be16 len;
        /* 802.2 */
        u8 dsap;
        u8 ssap;
        u8 ctrl;
        /* SNAP */
        u8 oui[3];
-       u16 ethertype;
+       unsigned short ethertype;
 } __attribute__ ((packed));
 
 typedef enum {
@@ -112,9 +111,8 @@ struct orinoco_private {
        u16 pm_on, pm_mcast, pm_period, pm_timeout;
        u16 preamble;
 #ifdef WIRELESS_SPY
-       int                     spy_number;
-       u_char                  spy_address[IW_MAX_SPY][ETH_ALEN];
-       struct iw_quality       spy_stat[IW_MAX_SPY];
+       struct iw_spy_data spy_data; /* iwspy support */
+       struct iw_public_data   wireless_data;
 #endif
 
        /* Configuration dependent variables */
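
The u16 to __le16/__be16 changes above are sparse endianness annotations: the stored layout is unchanged, but use of a fixed-endian field without a byte-order conversion can now be flagged by sparse (make C=1). A small sketch of the idiom, with a made-up struct name:

/* Illustrative sketch only; struct example_key is made up.  __le16 fields
 * keep their on-card layout but must go through the byte-order helpers,
 * otherwise sparse warns about restricted-type misuse. */
#include <linux/types.h>
#include <asm/byteorder.h>

struct example_key {
        __le16 len;                     /* always stored little-endian */
        char   data[16];
};

static void example_set_len(struct example_key *key, u16 host_len)
{
        key->len = cpu_to_le16(host_len);       /* host order -> little-endian */
}

static u16 example_get_len(const struct example_key *key)
{
        return le16_to_cpu(key->len);           /* little-endian -> host order */
}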
index bedd7f9f23e48cd37201e79f6a3b4c3b35697bc6..dc1128a009719811c39a426ef132a8dd88c94ca7 100644 (file)
 #define PFX DRIVER_NAME ": "
 
 #include <linux/config.h>
-#ifdef  __IN_PCMCIA_PACKAGE__
-#include <pcmcia/k_compat.h>
-#endif /* __IN_PCMCIA_PACKAGE__ */
-
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/ioport.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/etherdevice.h>
-#include <linux/wireless.h>
-
+#include <linux/delay.h>
 #include <pcmcia/cs_types.h>
 #include <pcmcia/cs.h>
 #include <pcmcia/cistpl.h>
 #include <pcmcia/cisreg.h>
 #include <pcmcia/ds.h>
 
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/system.h>
-
 #include "orinoco.h"
 
 /********************************************************************/
@@ -97,17 +80,8 @@ static dev_link_t *dev_list; /* = NULL */
 /* Function prototypes                                             */
 /********************************************************************/
 
-/* device methods */
-static int orinoco_cs_hard_reset(struct orinoco_private *priv);
-
-/* PCMCIA gumpf */
-static void orinoco_cs_config(dev_link_t * link);
-static void orinoco_cs_release(dev_link_t * link);
-static int orinoco_cs_event(event_t event, int priority,
-                           event_callback_args_t * args);
-
-static dev_link_t *orinoco_cs_attach(void);
-static void orinoco_cs_detach(dev_link_t *);
+static void orinoco_cs_release(dev_link_t *link);
+static void orinoco_cs_detach(dev_link_t *link);
 
 /********************************************************************/
 /* Device methods                                                  */
@@ -603,49 +577,85 @@ static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
        "Pavel Roskin <proski@gnu.org>, et al)";
 
 static struct pcmcia_device_id orinoco_cs_ids[] = {
-       PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7300),
-       PCMCIA_DEVICE_MANF_CARD(0x0138, 0x0002),
-       PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0002),
-       PCMCIA_DEVICE_MANF_CARD(0x01eb, 0x080a),
-       PCMCIA_DEVICE_MANF_CARD(0x0261, 0x0002),
-       PCMCIA_DEVICE_MANF_CARD(0x0268, 0x0001),
-       PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0305),
-       PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1613),
-       PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0002),
-       PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0673),
-       PCMCIA_DEVICE_MANF_CARD(0x02aa, 0x0002),
-       PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x0002),
-       PCMCIA_DEVICE_MANF_CARD(0x14ea, 0xb001),
-       PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300),
-       PCMCIA_DEVICE_MANF_CARD(0x9005, 0x0021),
-       PCMCIA_DEVICE_MANF_CARD(0xc250, 0x0002),
-       PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0002),
-       PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0005),
+       PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7100), /* SonicWALL Long Range Wireless Card */
+       PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7300), /* Sohoware NCP110, Philips 802.11b */
+       PCMCIA_DEVICE_MANF_CARD(0x0089, 0x0002), /* AnyPoint(TM) Wireless II PC Card */
+       PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0777), /* 3Com AirConnect PCI 777A */
+       PCMCIA_DEVICE_MANF_CARD(0x0126, 0x8000), /* PROXIM RangeLAN-DS/LAN PC CARD */
+       PCMCIA_DEVICE_MANF_CARD(0x0138, 0x0002), /* Compaq WL100 11 Mbps Wireless Adapter */
+       PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0002), /* Lucent Orinoco and old Intersil */
+       PCMCIA_DEVICE_MANF_CARD(0x016b, 0x0001), /* Ericsson WLAN Card C11 */
+       PCMCIA_DEVICE_MANF_CARD(0x01eb, 0x080a), /* Nortel Networks eMobility 802.11 Wireless Adapter */
+       PCMCIA_DEVICE_MANF_CARD(0x01ff, 0x0008), /* Intermec MobileLAN 11Mbps 802.11b WLAN Card */
+       PCMCIA_DEVICE_MANF_CARD(0x0250, 0x0002), /* Samsung SWL2000-N 11Mb/s WLAN Card */
+       PCMCIA_DEVICE_MANF_CARD(0x0261, 0x0002), /* AirWay 802.11 Adapter (PCMCIA) */
+       PCMCIA_DEVICE_MANF_CARD(0x0268, 0x0001), /* ARtem Onair */
+       PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0305), /* Buffalo WLI-PCM-S11 */
+       PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1612), /* Linksys WPC11 Version 2.5 */
+       PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1613), /* Linksys WPC11 Version 3 */
+       PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0002), /* Compaq HNW-100 11 Mbps Wireless Adapter */
+       PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0673), /* Linksys WCF12 Wireless CompactFlash Card */
+       PCMCIA_DEVICE_MANF_CARD(0x02aa, 0x0002), /* ASUS SpaceLink WL-100 */
+       PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x0002), /* SpeedStream SS1021 Wireless Adapter */
+       PCMCIA_DEVICE_MANF_CARD(0x14ea, 0xb001), /* PLANEX RoadLannerWave GW-NS11H */
+       PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300), /* Airvast WN-100 */
+       PCMCIA_DEVICE_MANF_CARD(0x9005, 0x0021), /* Adaptec Ultra Wireless ANW-8030 */
+       PCMCIA_DEVICE_MANF_CARD(0xc001, 0x0008), /* CONTEC FLEXSCAN/FX-DDS110-PCC */
+       PCMCIA_DEVICE_MANF_CARD(0xc250, 0x0002), /* Conceptronic CON11Cpro, EMTAC A2424i */
+       PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0002), /* Safeway 802.11b, ZCOMAX AirRunner/XI-300 */
+       PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0005), /* D-Link DCF660, Sandisk Connect SDWCFB-000 */
+       PCMCIA_DEVICE_PROD_ID12(" ", "IEEE 802.11 Wireless LAN/PC Card", 0x3b6e20c8, 0xefccafe9),
        PCMCIA_DEVICE_PROD_ID12("3Com", "3CRWE737A AirConnect Wireless LAN PC Card", 0x41240e5b, 0x56010af3),
-       PCMCIA_DEVICE_PROD_ID123("Instant Wireless ", " Network PC CARD", "Version 01.02", 0x11d901af, 0x6e9bd926, 0x4b74baa0),
        PCMCIA_DEVICE_PROD_ID12("ACTIONTEC", "PRISM Wireless LAN PC Card", 0x393089da, 0xa71e69d5),
+       PCMCIA_DEVICE_PROD_ID12("Addtron", "AWP-100 Wireless PCMCIA", 0xe6ec52ce, 0x08649af2),
+       PCMCIA_DEVICE_PROD_ID123("AIRVAST", "IEEE 802.11b Wireless PCMCIA Card", "HFA3863", 0xea569531, 0x4bcb9645, 0x355cb092),
+       PCMCIA_DEVICE_PROD_ID12("Allied Telesyn", "AT-WCL452 Wireless PCMCIA Radio", 0x5cd01705, 0x4271660f),
+       PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11b_PC_CARD_25", 0x78fc06ee, 0xdb9aa842),
+       PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11B_CF_CARD_25", 0x78fc06ee, 0x45a50c1e),
        PCMCIA_DEVICE_PROD_ID12("Avaya Communication", "Avaya Wireless PC Card", 0xd8a43b78, 0x0d341169),
+       PCMCIA_DEVICE_PROD_ID12("BENQ", "AWL100 PCMCIA ADAPTER", 0x35dadc74, 0x01f7fedb),
        PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-PCM-L11G", 0x2decece3, 0xf57ca4b3),
+       PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-CF-S11G", 0x2decece3, 0x82067c18),
        PCMCIA_DEVICE_PROD_ID12("Cabletron", "RoamAbout 802.11 DS", 0x32d445f5, 0xedeffd90),
+       PCMCIA_DEVICE_PROD_ID12("Compaq", "WL200_11Mbps_Wireless_PCI_Card", 0x54f7c49c, 0x15a75e5b),
+       PCMCIA_DEVICE_PROD_ID123("corega", "WL PCCL-11", "ISL37300P", 0x0a21501a, 0x59868926, 0xc9049a39),
        PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCC-11", 0x5261440f, 0xa6405584),
        PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCCA-11", 0x5261440f, 0xdf6115f9),
        PCMCIA_DEVICE_PROD_ID12("corega_K.K.", "Wireless_LAN_PCCB-11", 0x29e33311, 0xee7a27ae),
        PCMCIA_DEVICE_PROD_ID12("D", "Link DRC-650 11Mbps WLAN Card", 0x71b18589, 0xf144e3ac),
        PCMCIA_DEVICE_PROD_ID12("D", "Link DWL-650 11Mbps WLAN Card", 0x71b18589, 0xb6f1b0ab),
+       PCMCIA_DEVICE_PROD_ID12("D-Link Corporation", "D-Link DWL-650H 11Mbps WLAN Adapter", 0xef544d24, 0xcd8ea916),
+       PCMCIA_DEVICE_PROD_ID12("Digital Data Communications", "WPC-0100", 0xfdd73470, 0xe0b6f146),
        PCMCIA_DEVICE_PROD_ID12("ELSA", "AirLancer MC-11", 0x4507a33a, 0xef54f0e3),
        PCMCIA_DEVICE_PROD_ID12("HyperLink", "Wireless PC Card 11Mbps", 0x56cc3f1a, 0x0bcf220c),
+       PCMCIA_DEVICE_PROD_ID123("Instant Wireless ", " Network PC CARD", "Version 01.02", 0x11d901af, 0x6e9bd926, 0x4b74baa0),
+       PCMCIA_DEVICE_PROD_ID12("Intel", "PRO/Wireless 2011 LAN PC Card", 0x816cc815, 0x07f58077),
        PCMCIA_DEVICE_PROD_ID12("INTERSIL", "HFA384x/IEEE", 0x74c5e40d, 0xdb472a18),
+       PCMCIA_DEVICE_PROD_ID12("INTERSIL", "I-GATE 11M PC Card / PC Card plus", 0x74c5e40d, 0x8304ff77),
+       PCMCIA_DEVICE_PROD_ID12("Intersil", "PRISM 2_5 PCMCIA ADAPTER", 0x4b801a17, 0x6345a0bf),
+       PCMCIA_DEVICE_PROD_ID123("Intersil", "PRISM Freedom PCMCIA Adapter", "ISL37100P", 0x4b801a17, 0xf222ec2d, 0x630d52b2),
+       PCMCIA_DEVICE_PROD_ID12("LeArtery", "SYNCBYAIR 11Mbps Wireless LAN PC Card", 0x7e3b326a, 0x49893e92),
+       PCMCIA_DEVICE_PROD_ID12("Linksys", "Wireless CompactFlash Card", 0x0733cc81, 0x0c52f395),
        PCMCIA_DEVICE_PROD_ID12("Lucent Technologies", "WaveLAN/IEEE", 0x23eb9949, 0xc562e72a),
        PCMCIA_DEVICE_PROD_ID12("MELCO", "WLI-PCM-L11", 0x481e0094, 0x7360e410),
        PCMCIA_DEVICE_PROD_ID12("MELCO", "WLI-PCM-L11G", 0x481e0094, 0xf57ca4b3),
        PCMCIA_DEVICE_PROD_ID12("Microsoft", "Wireless Notebook Adapter MN-520", 0x5961bf85, 0x6eec8c01),
        PCMCIA_DEVICE_PROD_ID12("NCR", "WaveLAN/IEEE", 0x24358cd4, 0xc562e72a),
+       PCMCIA_DEVICE_PROD_ID12("NETGEAR MA401 Wireless PC", "Card", 0xa37434e9, 0x9762e8f1),
        PCMCIA_DEVICE_PROD_ID12("NETGEAR MA401RA Wireless PC", "Card", 0x0306467f, 0x9762e8f1),
+       PCMCIA_DEVICE_PROD_ID12("Nortel Networks", "emobility 802.11 Wireless LAN PC Card", 0x2d617ea0, 0x88cd5767),
+       PCMCIA_DEVICE_PROD_ID12("OEM", "PRISM2 IEEE 802.11 PC-Card", 0xfea54c90, 0x48f2bdd6),
+       PCMCIA_DEVICE_PROD_ID12("OTC", "Wireless AirEZY 2411-PCC WLAN Card", 0x4ac44287, 0x235a6bed),
+       PCMCIA_DEVICE_PROD_ID123("PCMCIA", "11M WLAN Card v2.5", "ISL37300P", 0x281f1c5d, 0x6e440487, 0xc9049a39),
        PCMCIA_DEVICE_PROD_ID12("PLANEX", "GeoWave/GW-CF110", 0x209f40ab, 0xd9715264),
+       PCMCIA_DEVICE_PROD_ID12("PLANEX", "GeoWave/GW-NS110", 0x209f40ab, 0x46263178),
        PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PC CARD HARMONY 80211B", 0xc6536a5e, 0x090c3cd9),
        PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PCI CARD HARMONY 80211B", 0xc6536a5e, 0x9f494e26),
        PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "11Mbps WLAN Card", 0x43d74cb4, 0x579bd91b),
-       PCMCIA_DEVICE_PROD_ID1("Symbol Technologies", 0x3f02b4d6),
+       PCMCIA_DEVICE_PROD_ID12("SMC", "SMC2632W", 0xc4f8b18b, 0x474a1f2a),
+       PCMCIA_DEVICE_PROD_ID12("Symbol Technologies", "LA4111 Spectrum24 Wireless LAN PC Card", 0x3f02b4d6, 0x3663cb0e),
+       PCMCIA_DEVICE_PROD_ID123("The Linksys Group, Inc.", "Instant Wireless Network PC Card", "ISL37300P", 0xa5f472c2, 0x590eb502, 0xc9049a39),
+       PCMCIA_DEVICE_PROD_ID12("ZoomAir 11Mbps High", "Rate wireless Networking", 0x273fe3db, 0x32a1eaee),
        PCMCIA_DEVICE_NULL,
 };
 MODULE_DEVICE_TABLE(pcmcia, orinoco_cs_ids);
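
The expanded table above matches cards either on numeric manufacturer/card codes or on hashed product-ID strings. As a rough sketch (the IDs and hashes below are hypothetical, not entries from this table), the general shape of such a table and its export for module autoloading is:

/* Illustrative sketch only -- hypothetical IDs and hashes. */
#include <linux/module.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/ds.h>

static struct pcmcia_device_id example_cs_ids[] = {
        /* match on the 16-bit manufacturer/card codes from CISTPL_MANFID */
        PCMCIA_DEVICE_MANF_CARD(0x1234, 0x5678),
        /* match on hashed CISTPL_VERS_1 product strings */
        PCMCIA_DEVICE_PROD_ID12("Vendor", "Example 802.11b Card",
                                0x00000000, 0x00000000),
        PCMCIA_DEVICE_NULL,
};
MODULE_DEVICE_TABLE(pcmcia, example_cs_ids);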
@@ -656,8 +666,8 @@ static struct pcmcia_driver orinoco_driver = {
                .name   = DRIVER_NAME,
        },
        .attach         = orinoco_cs_attach,
-       .event          = orinoco_cs_event,
        .detach         = orinoco_cs_detach,
+       .event          = orinoco_cs_event,
        .id_table       = orinoco_cs_ids,
 };
 
index 86fa58e5cfac87279c28f729c8c840c6f446a9d7..d8afd51ff8a59010dd89f26dddcc38ff2e20496c 100644 (file)
 #define PFX DRIVER_NAME ": "
 
 #include <linux/config.h>
-
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/ioport.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/system.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/etherdevice.h>
-#include <linux/list.h>
+#include <linux/delay.h>
 #include <linux/pci.h>
-#include <linux/fcntl.h>
-
 #include <pcmcia/cisreg.h>
 
-#include "hermes.h"
 #include "orinoco.h"
 
 #define COR_OFFSET    (0xe0)   /* COR attribute offset of Prism2 PC card */
@@ -108,7 +92,7 @@ static int nortel_pci_cor_reset(struct orinoco_private *priv)
        return 0;
 }
 
-int nortel_pci_hw_init(struct nortel_pci_card *card)
+static int nortel_pci_hw_init(struct nortel_pci_card *card)
 {
        int i;
        u32 reg;
index 42e03438291b8e3ad7967ea93198c996211434ed..5362c214fc8e299da8bd0003f5d2bd76ab9fea6f 100644 (file)
 #define PFX DRIVER_NAME ": "
 
 #include <linux/config.h>
-
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/ioport.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/etherdevice.h>
-#include <linux/list.h>
+#include <linux/delay.h>
 #include <linux/pci.h>
-#include <linux/fcntl.h>
-
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/system.h>
 
-#include "hermes.h"
 #include "orinoco.h"
 
 /* All the magic there is from wlan-ng */
index 7ab05b89fb3f6176405cf1244c78061afe633980..210e73776545d6750e0e99289ff7f023acf2b69a 100644 (file)
 #define PFX DRIVER_NAME ": "
 
 #include <linux/config.h>
-
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/ioport.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/system.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/etherdevice.h>
-#include <linux/list.h>
+#include <linux/delay.h>
 #include <linux/pci.h>
-#include <linux/fcntl.h>
-
 #include <pcmcia/cisreg.h>
 
-#include "hermes.h"
 #include "orinoco.h"
 
 #define COR_OFFSET     (0x3e0) /* COR attribute offset of Prism2 PC card */
index 85893f42445be7e227c7fd3627747d66e984b656..5e68b7026186bc9f497c1abbf9d14259e4a96bca 100644 (file)
 #define PFX DRIVER_NAME ": "
 
 #include <linux/config.h>
-
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/ioport.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/system.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/etherdevice.h>
-#include <linux/list.h>
+#include <linux/delay.h>
 #include <linux/pci.h>
-#include <linux/fcntl.h>
-
 #include <pcmcia/cisreg.h>
 
-#include "hermes.h"
 #include "orinoco.h"
 
 #define COR_VALUE      (COR_LEVEL_REQ | COR_FUNC_ENA) /* Enable PC card with interrupt in level trigger */
index 9a8790e3580c648d33f948717c38bc0d94a29728..5c1a1adf1ff8b2421d93549f765c071c2bc9e0fb 100644 (file)
@@ -462,14 +462,12 @@ prism54_get_range(struct net_device *ndev, struct iw_request_info *info,
        /* txpower is supported in dBm's */
        range->txpower_capa = IW_TXPOW_DBM;
 
-#if WIRELESS_EXT > 16
        /* Event capability (kernel + driver) */
        range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
        IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
        IW_EVENT_CAPA_MASK(SIOCGIWAP));
        range->event_capa[1] = IW_EVENT_CAPA_K_1;
        range->event_capa[4] = IW_EVENT_CAPA_MASK(IWEVCUSTOM);
-#endif /* WIRELESS_EXT > 16 */
 
        if (islpci_get_state(priv) < PRV_STATE_INIT)
                return 0;
@@ -693,14 +691,13 @@ prism54_get_scan(struct net_device *ndev, struct iw_request_info *info,
                                                   extra + dwrq->length,
                                                   &(bsslist->bsslist[i]),
                                                   noise);
-#if WIRELESS_EXT > 16
+
                /* Check if there is space for one more entry */
                if((extra + dwrq->length - current_ev) <= IW_EV_ADDR_LEN) {
                        /* Ask user space to try again with a bigger buffer */
                        rvalue = -E2BIG;
                        break;
                }
-#endif /* WIRELESS_EXT > 16 */
        }
 
        kfree(bsslist);
@@ -2727,12 +2724,7 @@ const struct iw_handler_def prism54_handler_def = {
        .standard = (iw_handler *) prism54_handler,
        .private = (iw_handler *) prism54_private_handler,
        .private_args = (struct iw_priv_args *) prism54_private_args,
-#if WIRELESS_EXT > 16
        .get_wireless_stats = prism54_get_wireless_stats,
-#endif /* WIRELESS_EXT > 16 */
-#if WIRELESS_EXT == 16
-       .spy_offset = offsetof(islpci_private, spy_data),
-#endif /* WIRELESS_EXT == 16 */
 };
 
 /* For wpa_supplicant */
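
With the #if WIRELESS_EXT > 16 guards removed, the driver always advertises its wireless-event capabilities and always reports scan-buffer overflow with -E2BIG. A minimal sketch of the event-capability block kept above, using only the masks visible in the hunk (example_fill_event_capa is a made-up helper):

/* Illustrative sketch only.  Declares which wireless events the driver
 * may generate, in a SIOCGIWRANGE handler. */
#include <linux/wireless.h>
#include <net/iw_handler.h>

static void example_fill_event_capa(struct iw_range *range)
{
        range->event_capa[0] = IW_EVENT_CAPA_K_0 |                /* kernel events, word 0 */
                               IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
                               IW_EVENT_CAPA_MASK(SIOCGIWAP);
        range->event_capa[1] = IW_EVENT_CAPA_K_1;                 /* kernel events, word 1 */
        range->event_capa[4] = IW_EVENT_CAPA_MASK(IWEVCUSTOM);    /* driver-generated events */
}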
index 6f13d4a8e2d33805b93952d1663459afb7cd8995..6c9584a9f284dc1b357af18804d6856b47260156 100644 (file)
@@ -439,8 +439,7 @@ prism54_bring_down(islpci_private *priv)
        wmb();
 
        /* wait a while for the device to reset */
-       set_current_state(TASK_UNINTERRUPTIBLE);
-       schedule_timeout(50*HZ/1000);
+       schedule_timeout_uninterruptible(msecs_to_jiffies(50));
 
        return 0;
 }
@@ -491,8 +490,7 @@ islpci_reset_if(islpci_private *priv)
                /* The software reset acknowledge needs about 220 msec here.
                 * Be conservative and wait for up to one second. */
        
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               remaining = schedule_timeout(HZ);
+               remaining = schedule_timeout_uninterruptible(HZ);
 
                if(remaining > 0) {
                        result = 0;
@@ -839,13 +837,9 @@ islpci_setup(struct pci_dev *pdev)
        priv->ndev->type = (priv->iw_mode == IW_MODE_MONITOR) ?
                priv->monitor_type : ARPHRD_ETHER;
 
-#if WIRELESS_EXT > 16
        /* Add pointers to enable iwspy support. */
        priv->wireless_data.spy_data = &priv->spy_data;
        ndev->wireless_data = &priv->wireless_data;
-#else  /* WIRELESS_EXT > 16 */
-       ndev->get_wireless_stats = &prism54_get_wireless_stats;
-#endif /* WIRELESS_EXT > 16 */
 
        /* save the start and end address of the PCI memory area */
        ndev->mem_start = (unsigned long) priv->device_base;
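
The first two hunks of this file replace the set_current_state()/schedule_timeout() pairs with schedule_timeout_uninterruptible() and compute the delay with msecs_to_jiffies() instead of HZ arithmetic. A short sketch of the resulting idiom (hypothetical helper, not from this commit):

/* Illustrative sketch only; example_settle_delay is a made-up helper. */
#include <linux/jiffies.h>
#include <linux/sched.h>

static void example_settle_delay(void)
{
        /* Same effect as set_current_state(TASK_UNINTERRUPTIBLE) followed by
         * schedule_timeout(50 * HZ / 1000), but HZ-independent and shorter. */
        schedule_timeout_uninterruptible(msecs_to_jiffies(50));
}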
index 32a1019f1b363ef9c8486eba3138c159f5dee784..efbed439795111ac4e9021f56b60c4fc00ac2dad 100644 (file)
@@ -100,9 +100,7 @@ typedef struct {
 
        struct iw_spy_data spy_data; /* iwspy support */
 
-#if WIRELESS_EXT > 16
        struct iw_public_data wireless_data;
-#endif /* WIRELESS_EXT > 16 */
 
        int monitor_type; /* ARPHRD_IEEE80211 or ARPHRD_IEEE80211_PRISM */
 
index b6f2e5a223be623e56f4f5a156cf96a3ea8d5abf..4937a5ad4b2cee319d63eabf5ee423b2e8aa955c 100644 (file)
@@ -455,7 +455,7 @@ islpci_mgt_transaction(struct net_device *ndev,
                       struct islpci_mgmtframe **recvframe)
 {
        islpci_private *priv = netdev_priv(ndev);
-       const long wait_cycle_jiffies = (ISL38XX_WAIT_CYCLE * 10 * HZ) / 1000;
+       const long wait_cycle_jiffies = msecs_to_jiffies(ISL38XX_WAIT_CYCLE * 10);
        long timeout_left = ISL38XX_MAX_WAIT_CYCLES * wait_cycle_jiffies;
        int err;
        DEFINE_WAIT(wait);
@@ -475,8 +475,7 @@ islpci_mgt_transaction(struct net_device *ndev,
                int timeleft;
                struct islpci_mgmtframe *frame;
 
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               timeleft = schedule_timeout(wait_cycle_jiffies);
+               timeleft = schedule_timeout_uninterruptible(wait_cycle_jiffies);
                frame = xchg(&priv->mgmt_received, NULL);
                if (frame) {
                        if (frame->header->oid == oid) {
index e9c5ea0f5535dada4e4416fdad26d81caa6de948..70fd6fd8feb9bd5fb856043e8e195ddfccea84e6 100644 (file)
@@ -1649,28 +1649,28 @@ static iw_stats * ray_get_wireless_stats(struct net_device *    dev)
  */
 
 static const iw_handler        ray_handler[] = {
-       [SIOCSIWCOMMIT-SIOCIWFIRST] (iw_handler) ray_commit,
-       [SIOCGIWNAME  -SIOCIWFIRST] (iw_handler) ray_get_name,
-       [SIOCSIWFREQ  -SIOCIWFIRST] (iw_handler) ray_set_freq,
-       [SIOCGIWFREQ  -SIOCIWFIRST] (iw_handler) ray_get_freq,
-       [SIOCSIWMODE  -SIOCIWFIRST] (iw_handler) ray_set_mode,
-       [SIOCGIWMODE  -SIOCIWFIRST] (iw_handler) ray_get_mode,
-       [SIOCGIWRANGE -SIOCIWFIRST] (iw_handler) ray_get_range,
+       [SIOCSIWCOMMIT-SIOCIWFIRST] (iw_handler) ray_commit,
+       [SIOCGIWNAME  -SIOCIWFIRST] (iw_handler) ray_get_name,
+       [SIOCSIWFREQ  -SIOCIWFIRST] (iw_handler) ray_set_freq,
+       [SIOCGIWFREQ  -SIOCIWFIRST] (iw_handler) ray_get_freq,
+       [SIOCSIWMODE  -SIOCIWFIRST] (iw_handler) ray_set_mode,
+       [SIOCGIWMODE  -SIOCIWFIRST] (iw_handler) ray_get_mode,
+       [SIOCGIWRANGE -SIOCIWFIRST] (iw_handler) ray_get_range,
 #ifdef WIRELESS_SPY
-       [SIOCSIWSPY   -SIOCIWFIRST] (iw_handler) iw_handler_set_spy,
-       [SIOCGIWSPY   -SIOCIWFIRST] (iw_handler) iw_handler_get_spy,
-       [SIOCSIWTHRSPY-SIOCIWFIRST] (iw_handler) iw_handler_set_thrspy,
-       [SIOCGIWTHRSPY-SIOCIWFIRST] (iw_handler) iw_handler_get_thrspy,
+       [SIOCSIWSPY   -SIOCIWFIRST] (iw_handler) iw_handler_set_spy,
+       [SIOCGIWSPY   -SIOCIWFIRST] (iw_handler) iw_handler_get_spy,
+       [SIOCSIWTHRSPY-SIOCIWFIRST] (iw_handler) iw_handler_set_thrspy,
+       [SIOCGIWTHRSPY-SIOCIWFIRST] (iw_handler) iw_handler_get_thrspy,
 #endif /* WIRELESS_SPY */
-       [SIOCGIWAP    -SIOCIWFIRST] (iw_handler) ray_get_wap,
-       [SIOCSIWESSID -SIOCIWFIRST] (iw_handler) ray_set_essid,
-       [SIOCGIWESSID -SIOCIWFIRST] (iw_handler) ray_get_essid,
-       [SIOCSIWRATE  -SIOCIWFIRST] (iw_handler) ray_set_rate,
-       [SIOCGIWRATE  -SIOCIWFIRST] (iw_handler) ray_get_rate,
-       [SIOCSIWRTS   -SIOCIWFIRST] (iw_handler) ray_set_rts,
-       [SIOCGIWRTS   -SIOCIWFIRST] (iw_handler) ray_get_rts,
-       [SIOCSIWFRAG  -SIOCIWFIRST] (iw_handler) ray_set_frag,
-       [SIOCGIWFRAG  -SIOCIWFIRST] (iw_handler) ray_get_frag,
+       [SIOCGIWAP    -SIOCIWFIRST] (iw_handler) ray_get_wap,
+       [SIOCSIWESSID -SIOCIWFIRST] (iw_handler) ray_set_essid,
+       [SIOCGIWESSID -SIOCIWFIRST] (iw_handler) ray_get_essid,
+       [SIOCSIWRATE  -SIOCIWFIRST] (iw_handler) ray_set_rate,
+       [SIOCGIWRATE  -SIOCIWFIRST] (iw_handler) ray_get_rate,
+       [SIOCSIWRTS   -SIOCIWFIRST] (iw_handler) ray_set_rts,
+       [SIOCGIWRTS   -SIOCIWFIRST] (iw_handler) ray_get_rts,
+       [SIOCSIWFRAG  -SIOCIWFIRST] (iw_handler) ray_set_frag,
+       [SIOCGIWFRAG  -SIOCIWFIRST] (iw_handler) ray_get_frag,
 };
 
 #define SIOCSIPFRAMING SIOCIWFIRSTPRIV         /* Set framing mode */
@@ -1678,9 +1678,9 @@ static const iw_handler   ray_handler[] = {
 #define SIOCGIPCOUNTRY SIOCIWFIRSTPRIV + 3     /* Get country code */
 
 static const iw_handler        ray_private_handler[] = {
-       [0] (iw_handler) ray_set_framing,
-       [1] (iw_handler) ray_get_framing,
-       [3] (iw_handler) ray_get_country,
+       [0] (iw_handler) ray_set_framing,
+       [1] (iw_handler) ray_get_framing,
+       [3] (iw_handler) ray_get_country,
 };
 
 static const struct iw_priv_args       ray_private_args[] = {
index 39c6cdf7f3f736707c968008bdd11a0e7952fbed..b1bbc8e8e91f432e9e46cb44b969cbc70e550bc1 100644 (file)
 #define PFX DRIVER_NAME ": "
 
 #include <linux/config.h>
-#ifdef  __IN_PCMCIA_PACKAGE__
-#include <pcmcia/k_compat.h>
-#endif /* __IN_PCMCIA_PACKAGE__ */
-
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/ioport.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/etherdevice.h>
-#include <linux/wireless.h>
-
+#include <linux/delay.h>
+#include <linux/firmware.h>
 #include <pcmcia/cs_types.h>
 #include <pcmcia/cs.h>
 #include <pcmcia/cistpl.h>
 #include <pcmcia/cisreg.h>
 #include <pcmcia/ds.h>
 
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/system.h>
-
 #include "orinoco.h"
 
-/*
- * If SPECTRUM_FW_INCLUDED is defined, the firmware is hardcoded into
- * the driver.  Use get_symbol_fw script to generate spectrum_fw.h and
- * copy it to the same directory as spectrum_cs.c.
- *
- * If SPECTRUM_FW_INCLUDED is not defined, the firmware is loaded at the
- * runtime using hotplug.  Use the same get_symbol_fw script to generate
- * files symbol_sp24t_prim_fw symbol_sp24t_sec_fw, copy them to the
- * hotplug firmware directory (typically /usr/lib/hotplug/firmware) and
- * make sure that you have hotplug installed and enabled in the kernel.
- */
-/* #define SPECTRUM_FW_INCLUDED 1 */
-
-#ifdef SPECTRUM_FW_INCLUDED
-/* Header with the firmware */
-#include "spectrum_fw.h"
-#else  /* !SPECTRUM_FW_INCLUDED */
-#include <linux/firmware.h>
 static unsigned char *primsym;
 static unsigned char *secsym;
 static const char primary_fw_name[] = "symbol_sp24t_prim_fw";
 static const char secondary_fw_name[] = "symbol_sp24t_sec_fw";
-#endif /* !SPECTRUM_FW_INCLUDED */
 
 /********************************************************************/
 /* Module stuff                                                            */
@@ -124,17 +89,8 @@ static dev_link_t *dev_list; /* = NULL */
 /* Function prototypes                                             */
 /********************************************************************/
 
-/* device methods */
-static int spectrum_cs_hard_reset(struct orinoco_private *priv);
-
-/* PCMCIA gumpf */
-static void spectrum_cs_config(dev_link_t * link);
-static void spectrum_cs_release(dev_link_t * link);
-static int spectrum_cs_event(event_t event, int priority,
-                           event_callback_args_t * args);
-
-static dev_link_t *spectrum_cs_attach(void);
-static void spectrum_cs_detach(dev_link_t *);
+static void spectrum_cs_release(dev_link_t *link);
+static void spectrum_cs_detach(dev_link_t *link);
 
 /********************************************************************/
 /* Firmware downloader                                             */
@@ -182,8 +138,8 @@ static void spectrum_cs_detach(dev_link_t *);
  * Each block has the following structure.
  */
 struct dblock {
-       u32 _addr;              /* adapter address where to write the block */
-       u16 _len;               /* length of the data only, in bytes */
+       __le32 _addr;           /* adapter address where to write the block */
+       __le16 _len;            /* length of the data only, in bytes */
        char data[0];           /* data to be written */
 } __attribute__ ((packed));
 
@@ -193,9 +149,9 @@ struct dblock {
  * items with matching ID should be written.
  */
 struct pdr {
-       u32 _id;                /* record ID */
-       u32 _addr;              /* adapter address where to write the data */
-       u32 _len;               /* expected length of the data, in bytes */
+       __le32 _id;             /* record ID */
+       __le32 _addr;           /* adapter address where to write the data */
+       __le32 _len;            /* expected length of the data, in bytes */
        char next[0];           /* next PDR starts here */
 } __attribute__ ((packed));
 
@@ -206,8 +162,8 @@ struct pdr {
  * be plugged into the secondary firmware.
  */
 struct pdi {
-       u16 _len;               /* length of ID and data, in words */
-       u16 _id;                /* record ID */
+       __le16 _len;            /* length of ID and data, in words */
+       __le16 _id;             /* record ID */
        char data[0];           /* plug data */
 } __attribute__ ((packed));;
 
@@ -414,7 +370,7 @@ spectrum_plug_pdi(hermes_t *hw, struct pdr *first_pdr, struct pdi *pdi)
 
 /* Read PDA from the adapter */
 static int
-spectrum_read_pda(hermes_t *hw, u16 *pda, int pda_len)
+spectrum_read_pda(hermes_t *hw, __le16 *pda, int pda_len)
 {
        int ret;
        int pda_size;
@@ -445,7 +401,7 @@ spectrum_read_pda(hermes_t *hw, u16 *pda, int pda_len)
 /* Parse PDA and write the records into the adapter */
 static int
 spectrum_apply_pda(hermes_t *hw, const struct dblock *first_block,
-                  u16 *pda)
+                  __le16 *pda)
 {
        int ret;
        struct pdi *pdi;
@@ -511,7 +467,7 @@ spectrum_dl_image(hermes_t *hw, dev_link_t *link,
        const struct dblock *first_block;
 
        /* Plug Data Area (PDA) */
-       u16 pda[PDA_WORDS];
+       __le16 pda[PDA_WORDS];
 
        /* Binary block begins after the 0x1A marker */
        ptr = image;
@@ -571,8 +527,6 @@ spectrum_dl_firmware(hermes_t *hw, dev_link_t *link)
 {
        int ret;
        client_handle_t handle = link->handle;
-
-#ifndef SPECTRUM_FW_INCLUDED
        const struct firmware *fw_entry;
 
        if (request_firmware(&fw_entry, primary_fw_name,
@@ -592,7 +546,6 @@ spectrum_dl_firmware(hermes_t *hw, dev_link_t *link)
                       secondary_fw_name);
                return -ENOENT;
        }
-#endif
 
        /* Load primary firmware */
        ret = spectrum_dl_image(hw, link, primsym);
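
With the compiled-in-firmware option gone, spectrum_cs always pulls its images through the firmware loader at runtime. A rough sketch of that API using the same firmware name as above (example_load_firmware is a made-up helper; error handling trimmed to the essentials):

/* Illustrative sketch only.  Fetch a firmware image via the hotplug
 * firmware loader and release it when done. */
#include <linux/device.h>
#include <linux/firmware.h>

static int example_load_firmware(struct device *dev)
{
        const struct firmware *fw;
        int err;

        err = request_firmware(&fw, "symbol_sp24t_prim_fw", dev);
        if (err) {
                dev_err(dev, "firmware not available (error %d)\n", err);
                return err;
        }

        /* fw->data / fw->size now hold the image; download it to the card here */

        release_firmware(fw);
        return 0;
}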
@@ -1085,7 +1038,7 @@ static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
 static struct pcmcia_device_id spectrum_cs_ids[] = {
        PCMCIA_DEVICE_MANF_CARD(0x026c, 0x0001), /* Symbol Spectrum24 LA4100 */
        PCMCIA_DEVICE_MANF_CARD(0x0104, 0x0001), /* Socket Communications CF */
-       PCMCIA_DEVICE_MANF_CARD(0x0089, 0x0001), /* Intel PRO/Wireless 2011B */
+       PCMCIA_DEVICE_PROD_ID12("Intel", "PRO/Wireless LAN PC Card", 0x816cc815, 0x6fbf459a), /* 2011B, not 2011 */
        PCMCIA_DEVICE_NULL,
 };
 MODULE_DEVICE_TABLE(pcmcia, spectrum_cs_ids);
@@ -1096,8 +1049,8 @@ static struct pcmcia_driver orinoco_driver = {
                .name   = DRIVER_NAME,
        },
        .attach         = spectrum_cs_attach,
-       .event          = spectrum_cs_event,
        .detach         = spectrum_cs_detach,
+       .event          = spectrum_cs_event,
        .id_table       = spectrum_cs_ids,
 };
 
index 7a5e20a17890c81316c8bac8dcefd0963c29288e..b0d8b5b03152769974b382809418b47d1bd6fb08 100644 (file)
@@ -430,7 +430,6 @@ static void fee_read(unsigned long ioaddr,  /* I/O port of the card */
        }
 }
 
-#ifdef WIRELESS_EXT            /* if the wireless extension exists in the kernel */
 
 /*------------------------------------------------------------------*/
 /*
@@ -514,7 +513,6 @@ static void fee_write(unsigned long ioaddr, /* I/O port of the card */
        fee_wait(ioaddr, 10, 100);
 #endif                         /* EEPROM_IS_PROTECTED */
 }
-#endif                         /* WIRELESS_EXT */
 
 /************************ I82586 SUBROUTINES *************************/
 /*
@@ -973,11 +971,9 @@ static void wv_mmc_show(struct net_device * dev)
        mmc_read(ioaddr, 0, (u8 *) & m, sizeof(m));
        mmc_out(ioaddr, mmwoff(0, mmw_freeze), 0);
 
-#ifdef WIRELESS_EXT            /* if wireless extension exists in the kernel */
        /* Don't forget to update statistics */
        lp->wstats.discard.nwid +=
            (m.mmr_wrong_nwid_h << 8) | m.mmr_wrong_nwid_l;
-#endif                         /* WIRELESS_EXT */
 
        printk(KERN_DEBUG "##### WaveLAN modem status registers: #####\n");
 #ifdef DEBUG_SHOW_UNUSED
@@ -1499,7 +1495,6 @@ static int wavelan_set_mac_address(struct net_device * dev, void *addr)
 }
 #endif                         /* SET_MAC_ADDRESS */
 
-#ifdef WIRELESS_EXT            /* if wireless extensions exist in the kernel */
 
 /*------------------------------------------------------------------*/
 /*
@@ -2473,7 +2468,6 @@ static iw_stats *wavelan_get_wireless_stats(struct net_device * dev)
 #endif
        return &lp->wstats;
 }
-#endif                         /* WIRELESS_EXT */
 
 /************************* PACKET RECEPTION *************************/
 /*
@@ -4194,11 +4188,9 @@ static int __init wavelan_config(struct net_device *dev, unsigned short ioaddr)
        dev->set_mac_address = &wavelan_set_mac_address;
 #endif                         /* SET_MAC_ADDRESS */
 
-#ifdef WIRELESS_EXT            /* if wireless extension exists in the kernel */
        dev->wireless_handlers = &wavelan_handler_def;
        lp->wireless_data.spy_data = &lp->spy_data;
        dev->wireless_data = &lp->wireless_data;
-#endif
 
        dev->mtu = WAVELAN_MTU;
 
index 509ff22a6caa674f5bc41fe41780611d45362146..166e28b9a4f7fca1464b5b500cd1759a0b2a6e34 100644 (file)
 #define MULTICAST_AVOID                /* Avoid extra multicast (I'm sceptical). */
 #undef SET_MAC_ADDRESS         /* Experimental */
 
-#ifdef WIRELESS_EXT    /* If wireless extensions exist in the kernel */
 /* Warning:  this stuff will slow down the driver. */
 #define WIRELESS_SPY           /* Enable spying addresses. */
 #undef HISTOGRAM               /* Enable histogram of signal level. */
-#endif
 
 /****************************** DEBUG ******************************/
 
@@ -506,12 +504,10 @@ struct net_local
   u_short      tx_first_free;
   u_short      tx_first_in_use;
 
-#ifdef WIRELESS_EXT
   iw_stats     wstats;         /* Wireless-specific statistics */
 
   struct iw_spy_data   spy_data;
   struct iw_public_data        wireless_data;
-#endif
 
 #ifdef HISTOGRAM
   int          his_number;             /* number of intervals */
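
The wavelan hunks above drop the #ifdef WIRELESS_EXT guards, so the wireless statistics and iwspy fields always exist in the driver's private data. A small sketch of the matching get_wireless_stats callback (struct example_local stands in for the real private struct):

/* Illustrative sketch only.  With the guards gone, the statistics block is
 * always present and the callback simply returns it. */
#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <net/iw_handler.h>

struct example_local {
        struct iw_statistics    wstats;         /* reported via iwconfig */
        struct iw_spy_data      spy_data;       /* iwspy state */
        struct iw_public_data   wireless_data;
};

static struct iw_statistics *example_get_wireless_stats(struct net_device *dev)
{
        struct example_local *lp = netdev_priv(dev);

        return &lp->wstats;
}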
index 183c4732ef65ca0eb24e99c79d1b43197a3f8930..4b3c98f5c564fba27800d496cd42695419924c65 100644 (file)
@@ -415,7 +415,6 @@ fee_read(u_long             base,   /* i/o port of the card */
     }
 }
 
-#ifdef WIRELESS_EXT    /* If wireless extension exist in the kernel */
 
 /*------------------------------------------------------------------*/
 /*
@@ -500,7 +499,6 @@ fee_write(u_long    base,   /* i/o port of the card */
   fee_wait(base, 10, 100);
 #endif /* EEPROM_IS_PROTECTED */
 }
-#endif /* WIRELESS_EXT */
 
 /******************* WaveLAN Roaming routines... ********************/
 
@@ -1161,10 +1159,8 @@ wv_mmc_show(struct net_device *  dev)
   mmc_read(base, 0, (u_char *)&m, sizeof(m));
   mmc_out(base, mmwoff(0, mmw_freeze), 0);
 
-#ifdef WIRELESS_EXT    /* If wireless extension exist in the kernel */
   /* Don't forget to update statistics */
   lp->wstats.discard.nwid += (m.mmr_wrong_nwid_h << 8) | m.mmr_wrong_nwid_l;
-#endif /* WIRELESS_EXT */
 
   spin_unlock_irqrestore(&lp->spinlock, flags);
 
@@ -1550,7 +1546,6 @@ wavelan_set_mac_address(struct net_device *       dev,
 }
 #endif /* SET_MAC_ADDRESS */
 
-#ifdef WIRELESS_EXT    /* If wireless extension exist in the kernel */
 
 /*------------------------------------------------------------------*/
 /*
@@ -2793,7 +2788,6 @@ wavelan_get_wireless_stats(struct net_device *    dev)
 #endif
   return &lp->wstats;
 }
-#endif /* WIRELESS_EXT */
 
 /************************* PACKET RECEPTION *************************/
 /*
@@ -4679,11 +4673,9 @@ wavelan_attach(void)
   dev->watchdog_timeo  = WATCHDOG_JIFFIES;
   SET_ETHTOOL_OPS(dev, &ops);
 
-#ifdef WIRELESS_EXT    /* If wireless extension exist in the kernel */
   dev->wireless_handlers = &wavelan_handler_def;
   lp->wireless_data.spy_data = &lp->spy_data;
   dev->wireless_data = &lp->wireless_data;
-#endif
 
   /* Other specific data */
   dev->mtu = WAVELAN_MTU;
index 01d882be8790c1b9c8d2b81d71fcc64248210dad..724a715089c996f79b7e59e04ebf049bd3b0b182 100644 (file)
 #define MULTICAST_AVOID                /* Avoid extra multicast (I'm sceptical) */
 #undef SET_MAC_ADDRESS         /* Experimental */
 
-#ifdef WIRELESS_EXT    /* If wireless extension exist in the kernel */
 /* Warning : these stuff will slow down the driver... */
 #define WIRELESS_SPY           /* Enable spying addresses */
 #undef HISTOGRAM               /* Enable histogram of sig level... */
-#endif
 
 /****************************** DEBUG ******************************/
 
@@ -624,12 +622,10 @@ struct net_local
   int          rfp;            /* Last DMA machine receive pointer */
   int          overrunning;    /* Receiver overrun flag */
 
-#ifdef WIRELESS_EXT
   iw_stats     wstats;         /* Wireless specific stats */
 
   struct iw_spy_data   spy_data;
   struct iw_public_data        wireless_data;
-#endif
 
 #ifdef HISTOGRAM
   int          his_number;             /* Number of intervals */
index 7fcbe589c3f2bf4808284e1dfc595c8127fb6589..4303c50c2ab619eec386f2ec3f4b4bdc43b768f3 100644 (file)
@@ -548,7 +548,7 @@ struct wl3501_80211_tx_plcp_hdr {
 
 struct wl3501_80211_tx_hdr {
        struct wl3501_80211_tx_plcp_hdr pclp_hdr;
-       struct ieee80211_hdr            mac_hdr;
+       struct ieee80211_hdr_4addr              mac_hdr;
 } __attribute__ ((packed));
 
 /*
index 0e98a9d9834cb5f7c32a0a4317b1fdedc98d6e7b..a3bd91a618274672e31b30ff7913772ae34187f9 100644 (file)
@@ -836,7 +836,7 @@ ccio_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
  * This function implements the pci_alloc_consistent function.
  */
 static void * 
-ccio_alloc_consistent(struct device *dev, size_t size, dma_addr_t *dma_handle, int flag)
+ccio_alloc_consistent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag)
 {
       void *ret;
 #if 0
index 82ea68b55df4f435a35ed1d69f12d7a62f14c132..bd8b3e5a5cd76ebf79e6056fc852555f8ee6f03a 100644 (file)
@@ -986,7 +986,7 @@ sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
  * See Documentation/DMA-mapping.txt
  */
 static void *sba_alloc_consistent(struct device *hwdev, size_t size,
-                                       dma_addr_t *dma_handle, int gfp)
+                                       dma_addr_t *dma_handle, gfp_t gfp)
 {
        void *ret;
 
index bb90a1448a53d0995b3067c07cd10015c8e3754a..81ded52c8959aadc6efc6673de0604f0d11a84a1 100644 (file)
@@ -122,7 +122,7 @@ void sa1111_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
 
 static int pcmcia_probe(struct sa1111_dev *dev)
 {
-       char *base;
+       void __iomem *base;
 
        if (!request_mem_region(dev->res.start, 512,
                                SA1111_DRIVER_NAME(dev)))
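
The sa1111 hunk retypes the mapped register base from char * to void __iomem *, the annotation carried by pointers returned from ioremap() and dereferenced only through the MMIO accessors. A small sketch (the device and the 0x10 register offset are made up):

/* Illustrative sketch only. */
#include <linux/errno.h>
#include <linux/types.h>
#include <asm/io.h>

static int example_probe_mmio(unsigned long phys, unsigned long len)
{
        void __iomem *base;
        u32 status;

        base = ioremap(phys, len);
        if (!base)
                return -ENOMEM;

        status = readl(base + 0x10);    /* hypothetical status register */
        (void)status;

        iounmap(base);
        return 0;
}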
index fa09440d82e549e59f0700ca09da9fed7fb31543..38f50b7129a24fce668493909b15d385de086db3 100644 (file)
@@ -16,7 +16,7 @@ MODULE_LICENSE("GPL");
 
 fsm_instance *
 init_fsm(char *name, const char **state_names, const char **event_names, int nr_states,
-               int nr_events, const fsm_node *tmpl, int tmpl_len, int order)
+               int nr_events, const fsm_node *tmpl, int tmpl_len, gfp_t order)
 {
        int i;
        fsm_instance *this;
index f9a011001eb693fe90f8b816fe23e3690b10db1e..1b8a7e7c34f3cd9f9d95857b335999eced5ddf3f 100644 (file)
@@ -110,7 +110,7 @@ extern fsm_instance *
 init_fsm(char *name, const char **state_names,
         const char **event_names,
         int nr_states, int nr_events, const fsm_node *tmpl,
-        int tmpl_len, int order);
+        int tmpl_len, gfp_t order);
 
 /**
  * Releases an FSM
index 9963479ba89f71df304cdb87ab552da5ea834eaf..38a2441564d7d85bc94d2b931f4d641f619a7f0a 100644 (file)
@@ -275,6 +275,10 @@ qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, enum qeth_ipa_funcs func)
        QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT, \
        QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT, \
        QETH_MAX_QUEUES,0x103}, \
+       {0x1731,0x06,0x1732,0x06,QETH_CARD_TYPE_OSN,0, \
+       QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT, \
+       QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT, \
+       QETH_MAX_QUEUES,0}, \
        {0,0,0,0,0,0,0,0,0}}
 
 #define QETH_REAL_CARD         1
@@ -363,10 +367,22 @@ struct qeth_hdr_layer2 {
        __u8 reserved2[16];
 } __attribute__ ((packed));
 
+struct qeth_hdr_osn {
+       __u8 id;
+       __u8 reserved;
+       __u16 seq_no;
+       __u16 reserved2;
+       __u16 control_flags;
+       __u16 pdu_length;
+       __u8 reserved3[18];
+       __u32 ccid;
+} __attribute__ ((packed));
+                                           
 struct qeth_hdr {
        union {
                struct qeth_hdr_layer2 l2;
                struct qeth_hdr_layer3 l3;
+               struct qeth_hdr_osn    osn;
        } hdr;
 } __attribute__ ((packed));
 
@@ -413,6 +429,7 @@ enum qeth_header_ids {
        QETH_HEADER_TYPE_LAYER3 = 0x01,
        QETH_HEADER_TYPE_LAYER2 = 0x02,
        QETH_HEADER_TYPE_TSO    = 0x03,
+       QETH_HEADER_TYPE_OSN    = 0x04,
 };
 /* flags for qeth_hdr.ext_flags */
 #define QETH_HDR_EXT_VLAN_FRAME       0x01
@@ -582,7 +599,6 @@ enum qeth_card_states {
  * Protocol versions
  */
 enum qeth_prot_versions {
-       QETH_PROT_SNA  = 0x0001,
        QETH_PROT_IPV4 = 0x0004,
        QETH_PROT_IPV6 = 0x0006,
 };
@@ -761,6 +777,11 @@ enum qeth_threads {
        QETH_RECOVER_THREAD = 2,
 };
 
+struct qeth_osn_info {
+       int (*assist_cb)(struct net_device *dev, void *data);
+       int (*data_cb)(struct sk_buff *skb);
+};
+
 struct qeth_card {
        struct list_head list;
        enum qeth_card_states state;
@@ -803,6 +824,7 @@ struct qeth_card {
        int use_hard_stop;
        int (*orig_hard_header)(struct sk_buff *,struct net_device *,
                                unsigned short,void *,void *,unsigned);
+       struct qeth_osn_info osn_info; 
 };
 
 struct qeth_card_list_struct {
@@ -916,10 +938,12 @@ qeth_get_hlen(__u8 link_type)
 static inline unsigned short
 qeth_get_netdev_flags(struct qeth_card *card)
 {
-       if (card->options.layer2)
+       if (card->options.layer2 &&
+          (card->info.type == QETH_CARD_TYPE_OSAE))
                return 0;
        switch (card->info.type) {
        case QETH_CARD_TYPE_IQD:
+       case QETH_CARD_TYPE_OSN:        
                return IFF_NOARP;
 #ifdef CONFIG_QETH_IPV6
        default:
@@ -956,9 +980,10 @@ static inline int
 qeth_get_max_mtu_for_card(int cardtype)
 {
        switch (cardtype) {
+               
        case QETH_CARD_TYPE_UNKNOWN:
-               return 61440;
        case QETH_CARD_TYPE_OSAE:
+       case QETH_CARD_TYPE_OSN:
                return 61440;
        case QETH_CARD_TYPE_IQD:
                return 57344;
@@ -1004,6 +1029,7 @@ qeth_mtu_is_valid(struct qeth_card * card, int mtu)
        case QETH_CARD_TYPE_IQD:
                return ((mtu >= 576) &&
                        (mtu <= card->info.max_mtu + 4096 - 32));
+       case QETH_CARD_TYPE_OSN:
        case QETH_CARD_TYPE_UNKNOWN:
        default:
                return 1;
@@ -1015,6 +1041,7 @@ qeth_get_arphdr_type(int cardtype, int linktype)
 {
        switch (cardtype) {
        case QETH_CARD_TYPE_OSAE:
+       case QETH_CARD_TYPE_OSN:
                switch (linktype) {
                case QETH_LINK_TYPE_LANE_TR:
                case QETH_LINK_TYPE_HSTR:
@@ -1182,4 +1209,16 @@ qeth_fill_header(struct qeth_card *, struct qeth_hdr *,
 extern void
 qeth_flush_buffers(struct qeth_qdio_out_q *, int, int, int);
 
+extern int
+qeth_osn_assist(struct net_device *, void *, int);
+
+extern int
+qeth_osn_register(unsigned char *read_dev_no,
+                 struct net_device **,
+                 int (*assist_cb)(struct net_device *, void *),
+                 int (*data_cb)(struct sk_buff *));
+
+extern void
+qeth_osn_deregister(struct net_device *);
+               
 #endif /* __QETH_H__ */
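
The qeth.h additions above introduce the OSN card type together with qeth_osn_register()/qeth_osn_assist()/qeth_osn_deregister() for an exploiting module. A hedged sketch of how such a module might use the interface, with made-up callback names and no claim about the real exploiter:

/* Illustrative sketch only -- a hypothetical user of the OSN interface
 * declared above. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include "qeth.h"

static int example_assist_cb(struct net_device *dev, void *data)
{
        /* called with the IPA command data when an assist reply arrives */
        return 0;
}

static int example_data_cb(struct sk_buff *skb)
{
        /* called for inbound OSN data frames */
        dev_kfree_skb(skb);
        return 0;
}

static int example_bind_osn(unsigned char *read_dev_no)
{
        struct net_device *dev;
        int rc;

        rc = qeth_osn_register(read_dev_no, &dev,
                               example_assist_cb, example_data_cb);
        if (rc)
                return rc;
        /* control commands would go out via qeth_osn_assist(dev, buf, len) */
        qeth_osn_deregister(dev);
        return 0;
}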
index 5c9a51ce91b6287301364fc7e3179d3024a3bfac..c0b4c8d82c45e3108927eb6c2d27c61147199db0 100644 (file)
@@ -12,7 +12,7 @@
 #ifndef __QETH_FS_H__
 #define __QETH_FS_H__
 
-#define VERSION_QETH_FS_H "$Revision: 1.9 $"
+#define VERSION_QETH_FS_H "$Revision: 1.10 $"
 
 extern const char *VERSION_QETH_PROC_C;
 extern const char *VERSION_QETH_SYS_C;
@@ -42,6 +42,12 @@ qeth_create_device_attributes(struct device *dev);
 extern void
 qeth_remove_device_attributes(struct device *dev);
 
+extern int
+qeth_create_device_attributes_osn(struct device *dev);
+
+extern void
+qeth_remove_device_attributes_osn(struct device *dev);
+                   
 extern int
 qeth_create_driver_attributes(void);
 
@@ -108,6 +114,8 @@ qeth_get_cardname(struct qeth_card *card)
                        return " OSD Express";
                case QETH_CARD_TYPE_IQD:
                        return " HiperSockets";
+               case QETH_CARD_TYPE_OSN:
+                       return " OSN QDIO";
                default:
                        return " unknown";
                }
@@ -153,6 +161,8 @@ qeth_get_cardname_short(struct qeth_card *card)
                        }
                case QETH_CARD_TYPE_IQD:
                        return "HiperSockets";
+               case QETH_CARD_TYPE_OSN:
+                       return "OSN";
                default:
                        return "unknown";
                }
index bd28e2438d7f5168272a1ee94a4e94e46295ac81..692003c9f896fd3dc54ea8265f8c834e00a80533 100644 (file)
@@ -196,7 +196,6 @@ qeth_notifier_register(struct task_struct *p, int signum)
 {
        struct qeth_notify_list_struct *n_entry;
 
-
        /*check first if entry already exists*/
        spin_lock(&qeth_notify_lock);
        list_for_each_entry(n_entry, &qeth_notify_list, list) {
@@ -1024,7 +1023,10 @@ qeth_set_intial_options(struct qeth_card *card)
        card->options.fake_broadcast = 0;
        card->options.add_hhlen = DEFAULT_ADD_HHLEN;
        card->options.fake_ll = 0;
-       card->options.layer2 = 0;
+       if (card->info.type == QETH_CARD_TYPE_OSN)
+               card->options.layer2 = 1;
+       else
+               card->options.layer2 = 0;
 }
 
 /**
@@ -1113,19 +1115,20 @@ qeth_determine_card_type(struct qeth_card *card)
 
        QETH_DBF_TEXT(setup, 2, "detcdtyp");
 
+       card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
+       card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
        while (known_devices[i][4]) {
                if ((CARD_RDEV(card)->id.dev_type == known_devices[i][2]) &&
                    (CARD_RDEV(card)->id.dev_model == known_devices[i][3])) {
                        card->info.type = known_devices[i][4];
+                       card->qdio.no_out_queues = known_devices[i][8];
+                       card->info.is_multicast_different = known_devices[i][9];
                        if (is_1920_device(card)) {
                                PRINT_INFO("Priority Queueing not able "
                                           "due to hardware limitations!\n");
                                card->qdio.no_out_queues = 1;
                                card->qdio.default_out_queue = 0;
-                       } else {
-                               card->qdio.no_out_queues = known_devices[i][8];
-                       }
-                       card->info.is_multicast_different = known_devices[i][9];
+                       } 
                        return 0;
                }
                i++;
@@ -1149,6 +1152,8 @@ qeth_probe_device(struct ccwgroup_device *gdev)
        if (!get_device(dev))
                return -ENODEV;
 
+       QETH_DBF_TEXT_(setup, 2, "%s", gdev->dev.bus_id);
+       
        card = qeth_alloc_card();
        if (!card) {
                put_device(dev);
@@ -1158,28 +1163,27 @@ qeth_probe_device(struct ccwgroup_device *gdev)
        card->read.ccwdev  = gdev->cdev[0];
        card->write.ccwdev = gdev->cdev[1];
        card->data.ccwdev  = gdev->cdev[2];
-
-       if ((rc = qeth_setup_card(card))){
-               QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
-               put_device(dev);
-               qeth_free_card(card);
-               return rc;
-       }
        gdev->dev.driver_data = card;
        card->gdev = gdev;
        gdev->cdev[0]->handler = qeth_irq;
        gdev->cdev[1]->handler = qeth_irq;
        gdev->cdev[2]->handler = qeth_irq;
 
-       rc = qeth_create_device_attributes(dev);
-       if (rc) {
+       if ((rc = qeth_determine_card_type(card))){
+               PRINT_WARN("%s: not a valid card type\n", __func__);
+               QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
+               put_device(dev);
+               qeth_free_card(card);
+               return rc;
+       }                           
+       if ((rc = qeth_setup_card(card))){
+               QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
                put_device(dev);
                qeth_free_card(card);
                return rc;
        }
-       if ((rc = qeth_determine_card_type(card))){
-               PRINT_WARN("%s: not a valid card type\n", __func__);
-               QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
+       rc = qeth_create_device_attributes(dev);
+       if (rc) {
                put_device(dev);
                qeth_free_card(card);
                return rc;
@@ -1660,6 +1664,8 @@ qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob)
                                netif_carrier_on(card->dev);
                                qeth_schedule_recovery(card);
                                return NULL;
+                       case IPA_CMD_MODCCID:
+                               return cmd;
                        case IPA_CMD_REGISTER_LOCAL_ADDR:
                                QETH_DBF_TEXT(trace,3, "irla");
                                break;
@@ -1721,6 +1727,14 @@ qeth_send_control_data_cb(struct qeth_channel *channel,
        cmd = qeth_check_ipa_data(card, iob);
        if ((cmd == NULL) && (card->state != CARD_STATE_DOWN))
                goto out;
+       /*in case of OSN : check if cmd is set */
+       if (card->info.type == QETH_CARD_TYPE_OSN &&
+           cmd &&
+           cmd->hdr.command != IPA_CMD_STARTLAN &&
+           card->osn_info.assist_cb != NULL) {
+               card->osn_info.assist_cb(card->dev, cmd);
+               goto out;
+       }
 
        spin_lock_irqsave(&card->lock, flags);
        list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
@@ -1737,8 +1751,7 @@ qeth_send_control_data_cb(struct qeth_channel *channel,
                                        keep_reply = reply->callback(card,
                                                        reply,
                                                        (unsigned long)cmd);
-                               }
-                               else
+                               } else
                                        keep_reply = reply->callback(card,
                                                        reply,
                                                        (unsigned long)iob);
@@ -1768,6 +1781,24 @@ out:
        qeth_release_buffer(channel,iob);
 }
 
+static inline void
+qeth_prepare_control_data(struct qeth_card *card, int len,
+struct qeth_cmd_buffer *iob)
+{
+       qeth_setup_ccw(&card->write,iob->data,len);
+       iob->callback = qeth_release_buffer;
+
+       memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
+              &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
+       card->seqno.trans_hdr++;
+       memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
+              &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
+       card->seqno.pdu_hdr++;
+       memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
+              &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
+       QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN);
+}
+                                                   
 static int
 qeth_send_control_data(struct qeth_card *card, int len,
                       struct qeth_cmd_buffer *iob,
@@ -1778,24 +1809,11 @@ qeth_send_control_data(struct qeth_card *card, int len,
 {
        int rc;
        unsigned long flags;
-       struct qeth_reply *reply;
+       struct qeth_reply *reply = NULL;
        struct timer_list timer;
 
        QETH_DBF_TEXT(trace, 2, "sendctl");
 
-       qeth_setup_ccw(&card->write,iob->data,len);
-
-       memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
-              &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
-       card->seqno.trans_hdr++;
-
-       memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
-              &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
-       card->seqno.pdu_hdr++;
-       memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
-              &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
-       iob->callback = qeth_release_buffer;
-
        reply = qeth_alloc_reply(card);
        if (!reply) {
                PRINT_WARN("Could no alloc qeth_reply!\n");
@@ -1810,10 +1828,6 @@ qeth_send_control_data(struct qeth_card *card, int len,
        init_timer(&timer);
        timer.function = qeth_cmd_timeout;
        timer.data = (unsigned long) reply;
-       if (IS_IPA(iob->data))
-               timer.expires = jiffies + QETH_IPA_TIMEOUT;
-       else
-               timer.expires = jiffies + QETH_TIMEOUT;
        init_waitqueue_head(&reply->wait_q);
        spin_lock_irqsave(&card->lock, flags);
        list_add_tail(&reply->list, &card->cmd_waiter_list);
@@ -1821,6 +1835,11 @@ qeth_send_control_data(struct qeth_card *card, int len,
        QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN);
        wait_event(card->wait_q,
                   atomic_compare_and_swap(0,1,&card->write.irq_pending) == 0);
+       qeth_prepare_control_data(card, len, iob);
+       if (IS_IPA(iob->data))
+               timer.expires = jiffies + QETH_IPA_TIMEOUT;
+       else
+               timer.expires = jiffies + QETH_TIMEOUT;
        QETH_DBF_TEXT(trace, 6, "noirqpnd");
        spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
        rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
@@ -1847,6 +1866,62 @@ qeth_send_control_data(struct qeth_card *card, int len,
        return rc;
 }
 
+static int
+qeth_osn_send_control_data(struct qeth_card *card, int len,
+                          struct qeth_cmd_buffer *iob)
+{
+       unsigned long flags;
+       int rc = 0;
+
+       QETH_DBF_TEXT(trace, 5, "osndctrd");
+
+       wait_event(card->wait_q,
+                  atomic_compare_and_swap(0,1,&card->write.irq_pending) == 0);
+       qeth_prepare_control_data(card, len, iob);
+       QETH_DBF_TEXT(trace, 6, "osnoirqp");
+       spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
+       rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
+                             (addr_t) iob, 0, 0);
+       spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
+       if (rc){
+               PRINT_WARN("qeth_osn_send_control_data: "
+                          "ccw_device_start rc = %i\n", rc);
+               QETH_DBF_TEXT_(trace, 2, " err%d", rc);
+               qeth_release_buffer(iob->channel, iob);
+               atomic_set(&card->write.irq_pending, 0);
+               wake_up(&card->wait_q);
+       }
+       return rc;
+}                                      
+
+static inline void
+qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
+                    char prot_type)
+{
+       memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
+       memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data),&prot_type,1);
+       memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
+              &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
+}
+
+static int
+qeth_osn_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
+                     int data_len)
+{
+       u16 s1, s2;
+
+       QETH_DBF_TEXT(trace, 4, "osndipa");
+
+       qeth_prepare_ipa_cmd(card, iob, QETH_PROT_OSN2);
+       s1 = (u16)(IPA_PDU_HEADER_SIZE + data_len);
+       s2 = (u16)data_len;
+       memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
+       memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
+       memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
+       memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
+       return qeth_osn_send_control_data(card, s1, iob);
+}
+
 static int
 qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
                  int (*reply_cb)
@@ -1858,17 +1933,14 @@ qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
 
        QETH_DBF_TEXT(trace,4,"sendipa");
 
-       memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
-
        if (card->options.layer2)
-               prot_type = QETH_PROT_LAYER2;
+               if (card->info.type == QETH_CARD_TYPE_OSN)
+                       prot_type = QETH_PROT_OSN2;
+               else
+                       prot_type = QETH_PROT_LAYER2;
        else
                prot_type = QETH_PROT_TCPIP;
-
-       memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data),&prot_type,1);
-       memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
-              &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
-
+       qeth_prepare_ipa_cmd(card,iob,prot_type);
        rc = qeth_send_control_data(card, IPA_CMD_LENGTH, iob,
                                    reply_cb, reply_param);
        return rc;
@@ -2010,7 +2082,10 @@ qeth_ulp_enable(struct qeth_card *card)
        *(QETH_ULP_ENABLE_LINKNUM(iob->data)) =
                (__u8) card->info.portno;
        if (card->options.layer2)
-               prot_type = QETH_PROT_LAYER2;
+               if (card->info.type == QETH_CARD_TYPE_OSN)
+                       prot_type = QETH_PROT_OSN2;
+               else
+                       prot_type = QETH_PROT_LAYER2;
        else
                prot_type = QETH_PROT_TCPIP;
 
@@ -2100,15 +2175,21 @@ qeth_check_for_inbound_error(struct qeth_qdio_buffer *buf,
 }
 
 static inline struct sk_buff *
-qeth_get_skb(unsigned int length)
+qeth_get_skb(unsigned int length, struct qeth_hdr *hdr)
 {
        struct sk_buff* skb;
+       int add_len;
+
+       add_len = 0;
+       if (hdr->hdr.osn.id == QETH_HEADER_TYPE_OSN)
+               add_len = sizeof(struct qeth_hdr);
 #ifdef CONFIG_QETH_VLAN
-       if ((skb = dev_alloc_skb(length + VLAN_HLEN)))
-               skb_reserve(skb, VLAN_HLEN);
-#else
-       skb = dev_alloc_skb(length);
+       else
+               add_len = VLAN_HLEN;
 #endif
+       skb = dev_alloc_skb(length + add_len);
+       if (skb && add_len)
+               skb_reserve(skb, add_len);
        return skb;
 }
 
@@ -2138,7 +2219,10 @@ qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer,
 
        offset += sizeof(struct qeth_hdr);
        if (card->options.layer2)
-               skb_len = (*hdr)->hdr.l2.pkt_length;
+               if (card->info.type == QETH_CARD_TYPE_OSN)
+                       skb_len = (*hdr)->hdr.osn.pdu_length;
+               else
+                       skb_len = (*hdr)->hdr.l2.pkt_length;
        else
                skb_len = (*hdr)->hdr.l3.length;
 
@@ -2146,15 +2230,15 @@ qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer,
                return NULL;
        if (card->options.fake_ll){
                if(card->dev->type == ARPHRD_IEEE802_TR){
-                       if (!(skb = qeth_get_skb(skb_len+QETH_FAKE_LL_LEN_TR)))
+                       if (!(skb = qeth_get_skb(skb_len+QETH_FAKE_LL_LEN_TR, *hdr)))
                                goto no_mem;
                        skb_reserve(skb,QETH_FAKE_LL_LEN_TR);
                } else {
-                       if (!(skb = qeth_get_skb(skb_len+QETH_FAKE_LL_LEN_ETH)))
+                       if (!(skb = qeth_get_skb(skb_len+QETH_FAKE_LL_LEN_ETH, *hdr)))
                                goto no_mem;
                        skb_reserve(skb,QETH_FAKE_LL_LEN_ETH);
                }
-       } else if (!(skb = qeth_get_skb(skb_len)))
+       } else if (!(skb = qeth_get_skb(skb_len, *hdr)))
                goto no_mem;
        data_ptr = element->addr + offset;
        while (skb_len) {
@@ -2453,8 +2537,12 @@ qeth_process_inbound_buffer(struct qeth_card *card,
                skb->dev = card->dev;
                if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
                        vlan_tag = qeth_layer2_rebuild_skb(card, skb, hdr);
-               else
+               else if (hdr->hdr.l3.id == QETH_HEADER_TYPE_LAYER3)     
                        qeth_rebuild_skb(card, skb, hdr);
+               else { /*in case of OSN*/
+                       skb_push(skb, sizeof(struct qeth_hdr));
+                       memcpy(skb->data, hdr, sizeof(struct qeth_hdr));
+               }
                /* is device UP ? */
                if (!(card->dev->flags & IFF_UP)){
                        dev_kfree_skb_any(skb);
@@ -2465,7 +2553,10 @@ qeth_process_inbound_buffer(struct qeth_card *card,
                        vlan_hwaccel_rx(skb, card->vlangrp, vlan_tag);
                else
 #endif
-               rxrc = netif_rx(skb);
+               if (card->info.type == QETH_CARD_TYPE_OSN)
+                       rxrc = card->osn_info.data_cb(skb);
+               else
+                       rxrc = netif_rx(skb);
                card->dev->last_rx = jiffies;
                card->stats.rx_packets++;
                card->stats.rx_bytes += skb->len;
@@ -3150,8 +3241,6 @@ qeth_init_qdio_info(struct qeth_card *card)
        INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
        INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
        /* outbound */
-       card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
-       card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
 }
 
 static int
@@ -3466,7 +3555,7 @@ qeth_mpc_initialize(struct qeth_card *card)
 
        return 0;
 out_qdio:
-       qeth_qdio_clear_card(card, card->info.type==QETH_CARD_TYPE_OSAE);
+       qeth_qdio_clear_card(card, card->info.type!=QETH_CARD_TYPE_IQD);
        return rc;
 }
 
@@ -3491,6 +3580,9 @@ qeth_get_netdevice(enum qeth_card_types type, enum qeth_link_types linktype)
        case QETH_CARD_TYPE_IQD:
                dev = alloc_netdev(0, "hsi%d", ether_setup);
                break;
+       case QETH_CARD_TYPE_OSN:
+               dev = alloc_netdev(0, "osn%d", ether_setup);
+               break;
        default:
                dev = alloc_etherdev(0);
        }
@@ -3655,7 +3747,8 @@ qeth_open(struct net_device *dev)
        if (card->state != CARD_STATE_SOFTSETUP)
                return -ENODEV;
 
-       if ( (card->options.layer2) &&
+       if ( (card->info.type != QETH_CARD_TYPE_OSN) &&
+            (card->options.layer2) &&
             (!card->info.layer2_mac_registered)) {
                QETH_DBF_TEXT(trace,4,"nomacadr");
                return -EPERM;
@@ -3693,6 +3786,9 @@ qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
 {
        int cast_type = RTN_UNSPEC;
 
+       if (card->info.type == QETH_CARD_TYPE_OSN)
+               return cast_type;
+
        if (skb->dst && skb->dst->neighbour){
                cast_type = skb->dst->neighbour->type;
                if ((cast_type == RTN_BROADCAST) ||
@@ -3782,13 +3878,16 @@ static inline int
 qeth_prepare_skb(struct qeth_card *card, struct sk_buff **skb,
                 struct qeth_hdr **hdr, int ipv)
 {
-       int rc;
+       int rc = 0;
 #ifdef CONFIG_QETH_VLAN
        u16 *tag;
 #endif
 
        QETH_DBF_TEXT(trace, 6, "prepskb");
-
+       if (card->info.type == QETH_CARD_TYPE_OSN) {
+               *hdr = (struct qeth_hdr *)(*skb)->data;
+               return rc;
+       }
         rc = qeth_realloc_headroom(card, skb, sizeof(struct qeth_hdr));
         if (rc)
                 return rc;
@@ -4291,8 +4390,14 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
                        }
                }
        }
+       if ((card->info.type == QETH_CARD_TYPE_OSN) &&
+               (skb->protocol == htons(ETH_P_IPV6))) {
+               dev_kfree_skb_any(skb);
+               return 0;
+       }
        cast_type = qeth_get_cast_type(card, skb);
-       if ((cast_type == RTN_BROADCAST) && (card->info.broadcast_capable == 0)){
+       if ((cast_type == RTN_BROADCAST) && 
+           (card->info.broadcast_capable == 0)){
                card->stats.tx_dropped++;
                card->stats.tx_errors++;
                dev_kfree_skb_any(skb);
@@ -4320,7 +4425,8 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
                        QETH_DBF_TEXT_(trace, 4, "pskbe%d", rc);
                        return rc;
                }
-               qeth_fill_header(card, hdr, skb, ipv, cast_type);
+               if (card->info.type != QETH_CARD_TYPE_OSN)
+                       qeth_fill_header(card, hdr, skb, ipv, cast_type);
        }
 
        if (large_send == QETH_LARGE_SEND_EDDP) {
@@ -4381,6 +4487,7 @@ qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
        case MII_BMCR: /* Basic mode control register */
                rc = BMCR_FULLDPLX;
                if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH)&&
+                   (card->info.link_type != QETH_LINK_TYPE_OSN) &&
                    (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH))
                        rc |= BMCR_SPEED100;
                break;
@@ -5004,6 +5111,9 @@ qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
             (card->state != CARD_STATE_SOFTSETUP))
                return -ENODEV;
 
+       if (card->info.type == QETH_CARD_TYPE_OSN)
+               return -EPERM;
+
        switch (cmd){
        case SIOC_QETH_ARP_SET_NO_ENTRIES:
                if ( !capable(CAP_NET_ADMIN) ||
@@ -5329,6 +5439,9 @@ qeth_set_multicast_list(struct net_device *dev)
 {
        struct qeth_card *card = (struct qeth_card *) dev->priv;
 
+       if (card->info.type == QETH_CARD_TYPE_OSN)
+               return;
+
        QETH_DBF_TEXT(trace,3,"setmulti");
        qeth_delete_mc_addresses(card);
        qeth_add_multicast_ipv4(card);
@@ -5370,6 +5483,94 @@ qeth_get_addr_buffer(enum qeth_prot_versions prot)
        return addr;
 }
 
+int
+qeth_osn_assist(struct net_device *dev,
+               void *data,
+               int data_len)
+{
+       struct qeth_cmd_buffer *iob;
+       struct qeth_card *card;
+       int rc;
+       
+       QETH_DBF_TEXT(trace, 2, "osnsdmc");
+       if (!dev)
+               return -ENODEV;
+       card = (struct qeth_card *)dev->priv;
+       if (!card)
+               return -ENODEV;
+       if ((card->state != CARD_STATE_UP) &&
+           (card->state != CARD_STATE_SOFTSETUP))
+               return -ENODEV;
+       iob = qeth_wait_for_buffer(&card->write);
+       memcpy(iob->data+IPA_PDU_HEADER_SIZE, data, data_len);
+       rc = qeth_osn_send_ipa_cmd(card, iob, data_len);
+       return rc;
+}
+
+static struct net_device *
+qeth_netdev_by_devno(unsigned char *read_dev_no)
+{
+       struct qeth_card *card;
+       struct net_device *ndev;
+       unsigned char *readno;
+       __u16 temp_dev_no, card_dev_no;
+       char *endp;
+       unsigned long flags;
+
+       ndev = NULL;
+       memcpy(&temp_dev_no, read_dev_no, 2);
+       read_lock_irqsave(&qeth_card_list.rwlock, flags);
+       list_for_each_entry(card, &qeth_card_list.list, list) {
+               readno = CARD_RDEV_ID(card);
+               readno += (strlen(readno) - 4);
+               card_dev_no = simple_strtoul(readno, &endp, 16);
+               if (card_dev_no == temp_dev_no) {
+                       ndev = card->dev;
+                       break;
+               }
+       }
+       read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
+       return ndev;
+}
+
+int
+qeth_osn_register(unsigned char *read_dev_no,
+                 struct net_device **dev,
+                 int (*assist_cb)(struct net_device *, void *),
+                 int (*data_cb)(struct sk_buff *))
+{
+       struct qeth_card * card;
+
+       QETH_DBF_TEXT(trace, 2, "osnreg");
+       *dev = qeth_netdev_by_devno(read_dev_no);
+       if (*dev == NULL)
+               return -ENODEV;
+       card = (struct qeth_card *)(*dev)->priv;
+       if (!card)
+               return -ENODEV;
+       if ((assist_cb == NULL) || (data_cb == NULL))
+               return -EINVAL;
+       card->osn_info.assist_cb = assist_cb;
+       card->osn_info.data_cb = data_cb;
+       return 0;
+}
+
+void
+qeth_osn_deregister(struct net_device * dev)
+{
+       struct qeth_card *card;
+
+       QETH_DBF_TEXT(trace, 2, "osndereg");
+       if (!dev)
+               return;
+       card = (struct qeth_card *)dev->priv;
+       if (!card)
+               return;
+       card->osn_info.assist_cb = NULL;
+       card->osn_info.data_cb = NULL;
+       return;
+}
+
 static void
 qeth_delete_mc_addresses(struct qeth_card *card)
 {
@@ -5700,6 +5901,12 @@ qeth_layer2_set_mac_address(struct net_device *dev, void *p)
                QETH_DBF_TEXT(trace, 3, "setmcLY3");
                return -EOPNOTSUPP;
        }
+       if (card->info.type == QETH_CARD_TYPE_OSN) {
+               PRINT_WARN("Setting MAC address on %s is not supported.\n",
+                          dev->name);
+               QETH_DBF_TEXT(trace, 3, "setmcOSN");
+               return -EOPNOTSUPP;
+       }
        QETH_DBF_TEXT_(trace, 3, "%s", CARD_BUS_ID(card));
        QETH_DBF_HEX(trace, 3, addr->sa_data, OSA_ADDR_LEN);
        rc = qeth_layer2_send_delmac(card, &card->dev->dev_addr[0]);
@@ -6076,9 +6283,8 @@ qeth_netdev_init(struct net_device *dev)
                        qeth_get_hlen(card->info.link_type) + card->options.add_hhlen;
        dev->addr_len = OSA_ADDR_LEN;
        dev->mtu = card->info.initial_mtu;
-
-       SET_ETHTOOL_OPS(dev, &qeth_ethtool_ops);
-
+       if (card->info.type != QETH_CARD_TYPE_OSN)
+               SET_ETHTOOL_OPS(dev, &qeth_ethtool_ops);
        SET_MODULE_OWNER(dev);
        return 0;
 }
@@ -6095,6 +6301,7 @@ qeth_init_func_level(struct qeth_card *card)
                                        QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT;
        } else {
                if (card->info.type == QETH_CARD_TYPE_IQD)
+               /*FIXME:why do we have same values for  dis and ena for osae??? */
                        card->info.func_level =
                                QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT;
                else
@@ -6124,7 +6331,7 @@ retry:
                ccw_device_set_online(CARD_WDEV(card));
                ccw_device_set_online(CARD_DDEV(card));
        }
-       rc = qeth_qdio_clear_card(card,card->info.type==QETH_CARD_TYPE_OSAE);
+       rc = qeth_qdio_clear_card(card,card->info.type!=QETH_CARD_TYPE_IQD);
        if (rc == -ERESTARTSYS) {
                QETH_DBF_TEXT(setup, 2, "break1");
                return rc;
@@ -6176,8 +6383,8 @@ retry:
        card->dev = qeth_get_netdevice(card->info.type,
                                       card->info.link_type);
        if (!card->dev){
-               qeth_qdio_clear_card(card, card->info.type ==
-                                    QETH_CARD_TYPE_OSAE);
+               qeth_qdio_clear_card(card, card->info.type !=
+                                    QETH_CARD_TYPE_IQD);
                rc = -ENODEV;
                QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
                goto out;
@@ -7084,6 +7291,8 @@ qeth_softsetup_card(struct qeth_card *card)
                        return rc;
        } else
                card->lan_online = 1;
+       if (card->info.type==QETH_CARD_TYPE_OSN)
+               goto out;
        if (card->options.layer2) {
                card->dev->features |=
                        NETIF_F_HW_VLAN_FILTER |
@@ -7255,7 +7464,8 @@ qeth_stop_card(struct qeth_card *card, int recovery_mode)
        if (card->read.state == CH_STATE_UP &&
            card->write.state == CH_STATE_UP &&
            (card->state == CARD_STATE_UP)) {
-               if(recovery_mode) {
+               if (recovery_mode && 
+                   card->info.type != QETH_CARD_TYPE_OSN) {
                        qeth_stop(card->dev);
                } else {
                        rtnl_lock();
@@ -7437,7 +7647,8 @@ qeth_start_again(struct qeth_card *card, int recovery_mode)
 {
        QETH_DBF_TEXT(setup ,2, "startag");
 
-       if(recovery_mode) {
+       if (recovery_mode && 
+           card->info.type != QETH_CARD_TYPE_OSN) {
                qeth_open(card->dev);
        } else {
                rtnl_lock();
@@ -7469,33 +7680,36 @@ qeth_start_again(struct qeth_card *card, int recovery_mode)
 static void qeth_make_parameters_consistent(struct qeth_card *card)
 {
 
-        if (card->options.layer2) {
-                if (card->info.type == QETH_CARD_TYPE_IQD) {
-                        PRINT_ERR("Device %s does not support " \
-                                  "layer 2 functionality. "  \
-                                  "Ignoring layer2 option.\n",CARD_BUS_ID(card));
-                }
-                IGNORE_PARAM_NEQ(route4.type, NO_ROUTER, NO_ROUTER,
-                                 "Routing options are");
+       if (card->options.layer2 == 0)
+               return;
+       if (card->info.type == QETH_CARD_TYPE_OSN)
+               return;
+       if (card->info.type == QETH_CARD_TYPE_IQD) {
+               PRINT_ERR("Device %s does not support layer 2 functionality." \
+                         " Ignoring layer2 option.\n", CARD_BUS_ID(card));
+               card->options.layer2 = 0;
+               return;
+       }
+               IGNORE_PARAM_NEQ(route4.type, NO_ROUTER, NO_ROUTER,
+                                "Routing options are");
 #ifdef CONFIG_QETH_IPV6
-                IGNORE_PARAM_NEQ(route6.type, NO_ROUTER, NO_ROUTER,
-                                 "Routing options are");
+               IGNORE_PARAM_NEQ(route6.type, NO_ROUTER, NO_ROUTER,
+                                "Routing options are");
 #endif
-                IGNORE_PARAM_EQ(checksum_type, HW_CHECKSUMMING,
-                                QETH_CHECKSUM_DEFAULT,
-                                "Checksumming options are");
-                IGNORE_PARAM_NEQ(broadcast_mode, QETH_TR_BROADCAST_ALLRINGS,
-                                 QETH_TR_BROADCAST_ALLRINGS,
-                                 "Broadcast mode options are");
-                IGNORE_PARAM_NEQ(macaddr_mode, QETH_TR_MACADDR_NONCANONICAL,
-                                 QETH_TR_MACADDR_NONCANONICAL,
-                                 "Canonical MAC addr options are");
-                IGNORE_PARAM_NEQ(fake_broadcast, 0, 0,
-                                "Broadcast faking options are");
-                IGNORE_PARAM_NEQ(add_hhlen, DEFAULT_ADD_HHLEN,
-                                 DEFAULT_ADD_HHLEN,"Option add_hhlen is");
-                IGNORE_PARAM_NEQ(fake_ll, 0, 0,"Option fake_ll is");
-        }
+               IGNORE_PARAM_EQ(checksum_type, HW_CHECKSUMMING,
+                               QETH_CHECKSUM_DEFAULT,
+                               "Checksumming options are");
+               IGNORE_PARAM_NEQ(broadcast_mode, QETH_TR_BROADCAST_ALLRINGS,
+                                QETH_TR_BROADCAST_ALLRINGS,
+                                "Broadcast mode options are");
+               IGNORE_PARAM_NEQ(macaddr_mode, QETH_TR_MACADDR_NONCANONICAL,
+                                QETH_TR_MACADDR_NONCANONICAL,
+                                "Canonical MAC addr options are");
+               IGNORE_PARAM_NEQ(fake_broadcast, 0, 0,
+                                "Broadcast faking options are");
+               IGNORE_PARAM_NEQ(add_hhlen, DEFAULT_ADD_HHLEN,
+                                DEFAULT_ADD_HHLEN, "Option add_hhlen is");
+               IGNORE_PARAM_NEQ(fake_ll, 0, 0, "Option fake_ll is");
 }
 
 
@@ -7525,8 +7739,7 @@ __qeth_set_online(struct ccwgroup_device *gdev, int recovery_mode)
                return -EIO;
        }
 
-       if (card->options.layer2)
-               qeth_make_parameters_consistent(card);
+       qeth_make_parameters_consistent(card);
 
        if ((rc = qeth_hardsetup_card(card))){
                QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
@@ -7585,6 +7798,7 @@ qeth_set_online(struct ccwgroup_device *gdev)
 static struct ccw_device_id qeth_ids[] = {
        {CCW_DEVICE(0x1731, 0x01), driver_info:QETH_CARD_TYPE_OSAE},
        {CCW_DEVICE(0x1731, 0x05), driver_info:QETH_CARD_TYPE_IQD},
+       {CCW_DEVICE(0x1731, 0x06), driver_info:QETH_CARD_TYPE_OSN},
        {},
 };
 MODULE_DEVICE_TABLE(ccw, qeth_ids);
@@ -8329,6 +8543,9 @@ again:
        printk("qeth: removed\n");
 }
 
+EXPORT_SYMBOL(qeth_osn_register);
+EXPORT_SYMBOL(qeth_osn_deregister);
+EXPORT_SYMBOL(qeth_osn_assist);
 module_init(qeth_init);
 module_exit(qeth_exit);
 MODULE_AUTHOR("Frank Pavlic <pavlic@de.ibm.com>");
index f685ecc7da99dc8dbca4ac61a5ab63cfb6c7683a..30e053d3cac2932ef90c679c5d3db4024c8bd660 100644 (file)
@@ -11,7 +11,7 @@
 #include <asm/cio.h>
 #include "qeth_mpc.h"
 
-const char *VERSION_QETH_MPC_C = "$Revision: 1.11 $";
+const char *VERSION_QETH_MPC_C = "$Revision: 1.12 $";
 
 unsigned char IDX_ACTIVATE_READ[]={
        0x00,0x00,0x80,0x00, 0x00,0x00,0x00,0x00,
@@ -138,7 +138,9 @@ unsigned char IPA_PDU_HEADER[]={
                sizeof(struct qeth_ipa_cmd)%256,
        0x00,
                sizeof(struct qeth_ipa_cmd)/256,
-               sizeof(struct qeth_ipa_cmd),0x05, 0x77,0x77,0x77,0x77,
+               sizeof(struct qeth_ipa_cmd)%256,
+       0x05,
+       0x77,0x77,0x77,0x77,
        0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,
        0x01,0x00,
                sizeof(struct qeth_ipa_cmd)/256,
index 3d916b5c5d09c4a4f0b2b0d697938481b8cd9dc6..7edc5f1fc0d24da927ac81bc2ebe01fe1b57fc79 100644 (file)
@@ -46,13 +46,16 @@ extern unsigned char IPA_PDU_HEADER[];
 /* IP Assist related definitions                                             */
 /*****************************************************************************/
 #define IPA_CMD_INITIATOR_HOST  0x00
-#define IPA_CMD_INITIATOR_HYDRA 0x01
+#define IPA_CMD_INITIATOR_OSA   0x01
+#define IPA_CMD_INITIATOR_HOST_REPLY  0x80
+#define IPA_CMD_INITIATOR_OSA_REPLY   0x81
 #define IPA_CMD_PRIM_VERSION_NO 0x01
 
 enum qeth_card_types {
        QETH_CARD_TYPE_UNKNOWN = 0,
        QETH_CARD_TYPE_OSAE    = 10,
        QETH_CARD_TYPE_IQD     = 1234,
+       QETH_CARD_TYPE_OSN     = 11,
 };
 
 #define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18
@@ -61,6 +64,7 @@ enum qeth_link_types {
        QETH_LINK_TYPE_FAST_ETH     = 0x01,
        QETH_LINK_TYPE_HSTR         = 0x02,
        QETH_LINK_TYPE_GBIT_ETH     = 0x03,
+       QETH_LINK_TYPE_OSN          = 0x04,
        QETH_LINK_TYPE_10GBIT_ETH   = 0x10,
        QETH_LINK_TYPE_LANE_ETH100  = 0x81,
        QETH_LINK_TYPE_LANE_TR      = 0x82,
@@ -111,6 +115,9 @@ enum qeth_ipa_cmds {
        IPA_CMD_DELGMAC               = 0x24,
        IPA_CMD_SETVLAN               = 0x25,
        IPA_CMD_DELVLAN               = 0x26,
+       IPA_CMD_SETCCID               = 0x41,
+       IPA_CMD_DELCCID               = 0x42,
+       IPA_CMD_MODCCID               = 0x43,
        IPA_CMD_SETIP                 = 0xb1,
        IPA_CMD_DELIP                 = 0xb7,
        IPA_CMD_QIPASSIST             = 0xb2,
@@ -437,8 +444,9 @@ enum qeth_ipa_arp_return_codes {
 #define QETH_ARP_DATA_SIZE 3968
 #define QETH_ARP_CMD_LEN (QETH_ARP_DATA_SIZE + 8)
 /* Helper functions */
-#define IS_IPA_REPLY(cmd) (cmd->hdr.initiator == IPA_CMD_INITIATOR_HOST)
-
+#define IS_IPA_REPLY(cmd) ((cmd->hdr.initiator == IPA_CMD_INITIATOR_HOST) || \
+                          (cmd->hdr.initiator == IPA_CMD_INITIATOR_OSA_REPLY))
+       
 /*****************************************************************************/
 /* END OF   IP Assist related definitions                                    */
 /*****************************************************************************/
@@ -483,6 +491,7 @@ extern unsigned char ULP_ENABLE[];
 /* Layer 2 defintions */
 #define QETH_PROT_LAYER2 0x08
 #define QETH_PROT_TCPIP  0x03
+#define QETH_PROT_OSN2   0x0a     
 #define QETH_ULP_ENABLE_PROT_TYPE(buffer) (buffer+0x50)
 #define QETH_IPA_CMD_PROT_TYPE(buffer) (buffer+0x19)
 
index dda105b73063d09a7091b25c627c1c28e05985ed..f91a02db57437b57f0603294d8da9fc9e07eb222 100644 (file)
@@ -1,6 +1,6 @@
 /*
  *
- * linux/drivers/s390/net/qeth_sys.c ($Revision: 1.54 $)
+ * linux/drivers/s390/net/qeth_sys.c ($Revision: 1.55 $)
  *
  * Linux on zSeries OSA Express and HiperSockets support
  * This file contains code related to sysfs.
@@ -20,7 +20,7 @@
 #include "qeth_mpc.h"
 #include "qeth_fs.h"
 
-const char *VERSION_QETH_SYS_C = "$Revision: 1.54 $";
+const char *VERSION_QETH_SYS_C = "$Revision: 1.55 $";
 
 /*****************************************************************************/
 /*                                                                           */
@@ -937,6 +937,19 @@ static struct attribute_group qeth_device_attr_group = {
        .attrs = (struct attribute **)qeth_device_attrs,
 };
 
+static struct device_attribute * qeth_osn_device_attrs[] = {
+       &dev_attr_state,
+       &dev_attr_chpid,
+       &dev_attr_if_name,
+       &dev_attr_card_type,
+       &dev_attr_buffer_count,
+       &dev_attr_recover,
+       NULL,
+};
+
+static struct attribute_group qeth_osn_device_attr_group = {
+       .attrs = (struct attribute **)qeth_osn_device_attrs,
+};
 
 #define QETH_DEVICE_ATTR(_id,_name,_mode,_show,_store)                      \
 struct device_attribute dev_attr_##_id = {                                  \
@@ -1667,7 +1680,12 @@ int
 qeth_create_device_attributes(struct device *dev)
 {
        int ret;
+       struct qeth_card *card = dev->driver_data;
 
+       if (card->info.type == QETH_CARD_TYPE_OSN)
+               return sysfs_create_group(&dev->kobj,
+                                         &qeth_osn_device_attr_group);
+       
        if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_attr_group)))
                return ret;
        if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_ipato_group))){
@@ -1693,6 +1711,12 @@ qeth_create_device_attributes(struct device *dev)
 void
 qeth_remove_device_attributes(struct device *dev)
 {
+       struct qeth_card *card = dev->driver_data;
+
+       if (card->info.type == QETH_CARD_TYPE_OSN)
+               return sysfs_remove_group(&dev->kobj,
+                                         &qeth_osn_device_attr_group);
+                     
        sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
        sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
        sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group);
index 3ee9b8b33be0deeeea353b4e3b46860b6d9a990b..9c9f162bd6edc6ce13442d522390a93cc29b2401 100644 (file)
@@ -489,11 +489,11 @@ config SCSI_SATA_NV
 
          If unsure, say N.
 
-config SCSI_SATA_PROMISE
-       tristate "Promise SATA TX2/TX4 support"
+config SCSI_PDC_ADMA
+       tristate "Pacific Digital ADMA support"
        depends on SCSI_SATA && PCI
        help
-         This option enables support for Promise Serial ATA TX2/TX4.
+         This option enables support for Pacific Digital ADMA controllers
 
          If unsure, say N.
 
@@ -505,6 +505,14 @@ config SCSI_SATA_QSTOR
 
          If unsure, say N.
 
+config SCSI_SATA_PROMISE
+       tristate "Promise SATA TX2/TX4 support"
+       depends on SCSI_SATA && PCI
+       help
+         This option enables support for Promise Serial ATA TX2/TX4.
+
+         If unsure, say N.
+
 config SCSI_SATA_SX4
        tristate "Promise SATA SX4 support"
        depends on SCSI_SATA && PCI && EXPERIMENTAL
@@ -521,6 +529,14 @@ config SCSI_SATA_SIL
 
          If unsure, say N.
 
+config SCSI_SATA_SIL24
+       tristate "Silicon Image 3124/3132 SATA support"
+       depends on SCSI_SATA && PCI && EXPERIMENTAL
+       help
+         This option enables support for Silicon Image 3124/3132 Serial ATA.
+
+         If unsure, say N.
+
 config SCSI_SATA_SIS
        tristate "SiS 964/180 SATA support"
        depends on SCSI_SATA && PCI && EXPERIMENTAL
index 48529d180ca8869397ce903d154ffe1f85bbdc4e..2d4439826c08d96d006dc11fe70c5b5e8313443d 100644 (file)
@@ -130,6 +130,7 @@ obj-$(CONFIG_SCSI_ATA_PIIX) += libata.o ata_piix.o
 obj-$(CONFIG_SCSI_SATA_PROMISE)        += libata.o sata_promise.o
 obj-$(CONFIG_SCSI_SATA_QSTOR)  += libata.o sata_qstor.o
 obj-$(CONFIG_SCSI_SATA_SIL)    += libata.o sata_sil.o
+obj-$(CONFIG_SCSI_SATA_SIL24)  += libata.o sata_sil24.o
 obj-$(CONFIG_SCSI_SATA_VIA)    += libata.o sata_via.o
 obj-$(CONFIG_SCSI_SATA_VITESSE)        += libata.o sata_vsc.o
 obj-$(CONFIG_SCSI_SATA_SIS)    += libata.o sata_sis.o
@@ -137,6 +138,7 @@ obj-$(CONFIG_SCSI_SATA_SX4) += libata.o sata_sx4.o
 obj-$(CONFIG_SCSI_SATA_NV)     += libata.o sata_nv.o
 obj-$(CONFIG_SCSI_SATA_ULI)    += libata.o sata_uli.o
 obj-$(CONFIG_SCSI_SATA_MV)     += libata.o sata_mv.o
+obj-$(CONFIG_SCSI_PDC_ADMA)    += libata.o pdc_adma.o
 
 obj-$(CONFIG_ARM)              += arm/
 
index c2c8fa828e24e9d227aaf93a5efc4367dab40872..fe8187d6f58be248bdcf2f5ab053ff997ecb2dac 100644 (file)
@@ -216,7 +216,7 @@ static Scsi_Host_Template ahci_sht = {
        .ordered_flush          = 1,
 };
 
-static struct ata_port_operations ahci_ops = {
+static const struct ata_port_operations ahci_ops = {
        .port_disable           = ata_port_disable,
 
        .check_status           = ahci_check_status,
@@ -407,7 +407,7 @@ static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg_in)
                return 0xffffffffU;
        }
 
-       return readl((void *) ap->ioaddr.scr_addr + (sc_reg * 4));
+       return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
 }
 
 
@@ -425,7 +425,7 @@ static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg_in,
                return;
        }
 
-       writel(val, (void *) ap->ioaddr.scr_addr + (sc_reg * 4));
+       writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
 }
 
 static void ahci_phy_reset(struct ata_port *ap)
@@ -453,14 +453,14 @@ static void ahci_phy_reset(struct ata_port *ap)
 
 static u8 ahci_check_status(struct ata_port *ap)
 {
-       void *mmio = (void *) ap->ioaddr.cmd_addr;
+       void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;
 
        return readl(mmio + PORT_TFDATA) & 0xFF;
 }
 
 static u8 ahci_check_err(struct ata_port *ap)
 {
-       void *mmio = (void *) ap->ioaddr.cmd_addr;
+       void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;
 
        return (readl(mmio + PORT_TFDATA) >> 8) & 0xFF;
 }
@@ -672,17 +672,36 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *
 
         for (i = 0; i < host_set->n_ports; i++) {
                struct ata_port *ap;
-               u32 tmp;
 
-               VPRINTK("port %u\n", i);
+               if (!(irq_stat & (1 << i)))
+                       continue;
+
                ap = host_set->ports[i];
-               tmp = irq_stat & (1 << i);
-               if (tmp && ap) {
+               if (ap) {
                        struct ata_queued_cmd *qc;
                        qc = ata_qc_from_tag(ap, ap->active_tag);
-                       if (ahci_host_intr(ap, qc))
-                               irq_ack |= (1 << i);
+                       if (!ahci_host_intr(ap, qc))
+                               if (ata_ratelimit()) {
+                                       struct pci_dev *pdev =
+                                         to_pci_dev(ap->host_set->dev);
+                                       printk(KERN_WARNING
+                                         "ahci(%s): unhandled interrupt on port %u\n",
+                                         pci_name(pdev), i);
+                               }
+
+                       VPRINTK("port %u\n", i);
+               } else {
+                       VPRINTK("port %u (no irq)\n", i);
+                       if (ata_ratelimit()) {
+                               struct pci_dev *pdev =
+                                 to_pci_dev(ap->host_set->dev);
+                               printk(KERN_WARNING
+                                 "ahci(%s): interrupt on disabled port %u\n",
+                                 pci_name(pdev), i);
+                       }
                }
+
+               irq_ack |= (1 << i);
        }
 
        if (irq_ack) {
index d71cef767cec7849c09a6217234872f370a8926a..be021478f41628ddc53f95adf0150534ce937454 100644 (file)
@@ -147,7 +147,7 @@ static Scsi_Host_Template piix_sht = {
        .ordered_flush          = 1,
 };
 
-static struct ata_port_operations piix_pata_ops = {
+static const struct ata_port_operations piix_pata_ops = {
        .port_disable           = ata_port_disable,
        .set_piomode            = piix_set_piomode,
        .set_dmamode            = piix_set_dmamode,
@@ -177,7 +177,7 @@ static struct ata_port_operations piix_pata_ops = {
        .host_stop              = ata_host_stop,
 };
 
-static struct ata_port_operations piix_sata_ops = {
+static const struct ata_port_operations piix_sata_ops = {
        .port_disable           = ata_port_disable,
 
        .tf_load                = ata_tf_load,
index c10e45b94b6269bd8d70bfdecf6519ffe4fff9c1..3d13fdee4fc26e2f53f237e316853691770d3189 100644 (file)
@@ -1357,7 +1357,7 @@ static int port_detect(unsigned long port_base, unsigned int j,
 
        for (i = 0; i < shost->can_queue; i++) {
                size_t sz = shost->sg_tablesize *sizeof(struct sg_list);
-               unsigned int gfp_mask = (shost->unchecked_isa_dma ? GFP_DMA : 0) | GFP_ATOMIC;
+               gfp_t gfp_mask = (shost->unchecked_isa_dma ? GFP_DMA : 0) | GFP_ATOMIC;
                ha->cp[i].sglist = kmalloc(sz, gfp_mask);
                if (!ha->cp[i].sglist) {
                        printk
index 02fe371b0ab87142b718e2d707bf365427f08ea9..f24d84538fd56ab5c1d2cdd8df3c509fac8e5092 100644 (file)
@@ -287,7 +287,8 @@ static void scsi_host_dev_release(struct device *dev)
 struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
 {
        struct Scsi_Host *shost;
-       int gfp_mask = GFP_KERNEL, rval;
+       gfp_t gfp_mask = GFP_KERNEL;
+       int rval;
 
        if (sht->unchecked_isa_dma && privsize)
                gfp_mask |= __GFP_DMA;
index e5b01997117a9965249cb734b7c00f43b7d9bbd8..f53d7b8ac33f41fcd488cdfb34ec88b3b0658339 100644 (file)
@@ -48,6 +48,7 @@
 #include <linux/completion.h>
 #include <linux/suspend.h>
 #include <linux/workqueue.h>
+#include <linux/jiffies.h>
 #include <scsi/scsi.h>
 #include "scsi.h"
 #include "scsi_priv.h"
 static unsigned int ata_busy_sleep (struct ata_port *ap,
                                    unsigned long tmout_pat,
                                    unsigned long tmout);
+static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev);
+static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev);
 static void ata_set_mode(struct ata_port *ap);
 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
-static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift);
+static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift);
 static int fgb(u32 bitmap);
-static int ata_choose_xfer_mode(struct ata_port *ap,
+static int ata_choose_xfer_mode(const struct ata_port *ap,
                                u8 *xfer_mode_out,
                                unsigned int *xfer_shift_out);
-static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat);
 static void __ata_qc_complete(struct ata_queued_cmd *qc);
 
 static unsigned int ata_unique_id = 1;
@@ -85,7 +87,7 @@ MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
 /**
- *     ata_tf_load - send taskfile registers to host controller
+ *     ata_tf_load_pio - send taskfile registers to host controller
  *     @ap: Port to which output is sent
  *     @tf: ATA taskfile register set
  *
@@ -95,7 +97,7 @@ MODULE_VERSION(DRV_VERSION);
  *     Inherited from caller.
  */
 
-static void ata_tf_load_pio(struct ata_port *ap, struct ata_taskfile *tf)
+static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf)
 {
        struct ata_ioports *ioaddr = &ap->ioaddr;
        unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
@@ -153,7 +155,7 @@ static void ata_tf_load_pio(struct ata_port *ap, struct ata_taskfile *tf)
  *     Inherited from caller.
  */
 
-static void ata_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf)
+static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
 {
        struct ata_ioports *ioaddr = &ap->ioaddr;
        unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
@@ -222,7 +224,7 @@ static void ata_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf)
  *     LOCKING:
  *     Inherited from caller.
  */
-void ata_tf_load(struct ata_port *ap, struct ata_taskfile *tf)
+void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
 {
        if (ap->flags & ATA_FLAG_MMIO)
                ata_tf_load_mmio(ap, tf);
@@ -242,7 +244,7 @@ void ata_tf_load(struct ata_port *ap, struct ata_taskfile *tf)
  *     spin_lock_irqsave(host_set lock)
  */
 
-static void ata_exec_command_pio(struct ata_port *ap, struct ata_taskfile *tf)
+static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
 {
        DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
 
@@ -263,7 +265,7 @@ static void ata_exec_command_pio(struct ata_port *ap, struct ata_taskfile *tf)
  *     spin_lock_irqsave(host_set lock)
  */
 
-static void ata_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf)
+static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
 {
        DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
 
@@ -283,7 +285,7 @@ static void ata_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf)
  *     LOCKING:
  *     spin_lock_irqsave(host_set lock)
  */
-void ata_exec_command(struct ata_port *ap, struct ata_taskfile *tf)
+void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
 {
        if (ap->flags & ATA_FLAG_MMIO)
                ata_exec_command_mmio(ap, tf);
@@ -303,7 +305,7 @@ void ata_exec_command(struct ata_port *ap, struct ata_taskfile *tf)
  *     Obtains host_set lock.
  */
 
-static inline void ata_exec(struct ata_port *ap, struct ata_taskfile *tf)
+static inline void ata_exec(struct ata_port *ap, const struct ata_taskfile *tf)
 {
        unsigned long flags;
 
@@ -326,7 +328,7 @@ static inline void ata_exec(struct ata_port *ap, struct ata_taskfile *tf)
  *     Obtains host_set lock.
  */
 
-static void ata_tf_to_host(struct ata_port *ap, struct ata_taskfile *tf)
+static void ata_tf_to_host(struct ata_port *ap, const struct ata_taskfile *tf)
 {
        ap->ops->tf_load(ap, tf);
 
@@ -346,7 +348,7 @@ static void ata_tf_to_host(struct ata_port *ap, struct ata_taskfile *tf)
  *     spin_lock_irqsave(host_set lock)
  */
 
-void ata_tf_to_host_nolock(struct ata_port *ap, struct ata_taskfile *tf)
+void ata_tf_to_host_nolock(struct ata_port *ap, const struct ata_taskfile *tf)
 {
        ap->ops->tf_load(ap, tf);
        ap->ops->exec_command(ap, tf);
@@ -556,7 +558,7 @@ u8 ata_chk_err(struct ata_port *ap)
  *     Inherited from caller.
  */
 
-void ata_tf_to_fis(struct ata_taskfile *tf, u8 *fis, u8 pmp)
+void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
 {
        fis[0] = 0x27;  /* Register - Host to Device FIS */
        fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
@@ -597,7 +599,7 @@ void ata_tf_to_fis(struct ata_taskfile *tf, u8 *fis, u8 pmp)
  *     Inherited from caller.
  */
 
-void ata_tf_from_fis(u8 *fis, struct ata_taskfile *tf)
+void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
 {
        tf->command     = fis[2];       /* status */
        tf->feature     = fis[3];       /* error */
@@ -615,79 +617,53 @@ void ata_tf_from_fis(u8 *fis, struct ata_taskfile *tf)
        tf->hob_nsect   = fis[13];
 }
 
-/**
- *     ata_prot_to_cmd - determine which read/write opcodes to use
- *     @protocol: ATA_PROT_xxx taskfile protocol
- *     @lba48: true is lba48 is present
- *
- *     Given necessary input, determine which read/write commands
- *     to use to transfer data.
- *
- *     LOCKING:
- *     None.
- */
-static int ata_prot_to_cmd(int protocol, int lba48)
-{
-       int rcmd = 0, wcmd = 0;
-
-       switch (protocol) {
-       case ATA_PROT_PIO:
-               if (lba48) {
-                       rcmd = ATA_CMD_PIO_READ_EXT;
-                       wcmd = ATA_CMD_PIO_WRITE_EXT;
-               } else {
-                       rcmd = ATA_CMD_PIO_READ;
-                       wcmd = ATA_CMD_PIO_WRITE;
-               }
-               break;
-
-       case ATA_PROT_DMA:
-               if (lba48) {
-                       rcmd = ATA_CMD_READ_EXT;
-                       wcmd = ATA_CMD_WRITE_EXT;
-               } else {
-                       rcmd = ATA_CMD_READ;
-                       wcmd = ATA_CMD_WRITE;
-               }
-               break;
-
-       default:
-               return -1;
-       }
-
-       return rcmd | (wcmd << 8);
-}
+static const u8 ata_rw_cmds[] = {
+       /* pio multi */
+       ATA_CMD_READ_MULTI,
+       ATA_CMD_WRITE_MULTI,
+       ATA_CMD_READ_MULTI_EXT,
+       ATA_CMD_WRITE_MULTI_EXT,
+       /* pio */
+       ATA_CMD_PIO_READ,
+       ATA_CMD_PIO_WRITE,
+       ATA_CMD_PIO_READ_EXT,
+       ATA_CMD_PIO_WRITE_EXT,
+       /* dma */
+       ATA_CMD_READ,
+       ATA_CMD_WRITE,
+       ATA_CMD_READ_EXT,
+       ATA_CMD_WRITE_EXT
+};
 
 /**
- *     ata_dev_set_protocol - set taskfile protocol and r/w commands
- *     @dev: device to examine and configure
+ *     ata_rwcmd_protocol - set taskfile r/w commands and protocol
+ *     @qc: command to examine and configure
  *
- *     Examine the device configuration, after we have
- *     read the identify-device page and configured the
- *     data transfer mode.  Set internal state related to
- *     the ATA taskfile protocol (pio, pio mult, dma, etc.)
- *     and calculate the proper read/write commands to use.
+ *     Examine the device configuration and tf->flags to calculate 
+ *     the proper read/write commands and protocol to use.
  *
  *     LOCKING:
  *     caller.
  */
-static void ata_dev_set_protocol(struct ata_device *dev)
+void ata_rwcmd_protocol(struct ata_queued_cmd *qc)
 {
-       int pio = (dev->flags & ATA_DFLAG_PIO);
-       int lba48 = (dev->flags & ATA_DFLAG_LBA48);
-       int proto, cmd;
+       struct ata_taskfile *tf = &qc->tf;
+       struct ata_device *dev = qc->dev;
 
-       if (pio)
-               proto = dev->xfer_protocol = ATA_PROT_PIO;
-       else
-               proto = dev->xfer_protocol = ATA_PROT_DMA;
+       int index, lba48, write;
+       lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
+       write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
 
-       cmd = ata_prot_to_cmd(proto, lba48);
-       if (cmd < 0)
-               BUG();
+       if (dev->flags & ATA_DFLAG_PIO) {
+               tf->protocol = ATA_PROT_PIO;
+               index = dev->multi_count ? 0 : 4;
+       } else {
+               tf->protocol = ATA_PROT_DMA;
+               index = 8;
+       }
 
-       dev->read_cmd = cmd & 0xff;
-       dev->write_cmd = (cmd >> 8) & 0xff;
+       tf->command = ata_rw_cmds[index + lba48 + write];
 }
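
The table above is indexed as a base (0 for PIO multi, 4 for plain PIO, 8 for DMA) plus 2 for LBA48 plus 1 for a write. A small userspace illustration follows; the strings mirror, rather than reuse, the ATA_CMD_* constants.

#include <stdio.h>

/* Mirrors the ordering of ata_rw_cmds[] above, using descriptive strings. */
static const char *rw_cmds[] = {
        "READ_MULTI", "WRITE_MULTI", "READ_MULTI_EXT", "WRITE_MULTI_EXT",   /* pio multi */
        "PIO_READ",   "PIO_WRITE",   "PIO_READ_EXT",   "PIO_WRITE_EXT",     /* pio */
        "READ",       "WRITE",       "READ_EXT",       "WRITE_EXT",         /* dma */
};

int main(void)
{
        int pio = 0, multi_count = 0, lba48 = 1, write = 1;
        int index = (pio ? (multi_count ? 0 : 4) : 8) + (lba48 ? 2 : 0) + write;

        printf("LBA48 DMA write -> %s\n", rw_cmds[index]);   /* prints WRITE_EXT */
        return 0;
}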
 
 static const char * xfer_mode_str[] = {
@@ -869,7 +845,7 @@ static unsigned int ata_devchk(struct ata_port *ap,
  *     the event of failure.
  */
 
-unsigned int ata_dev_classify(struct ata_taskfile *tf)
+unsigned int ata_dev_classify(const struct ata_taskfile *tf)
 {
        /* Apple's open source Darwin code hints that some devices only
         * put a proper signature into the LBA mid/high registers,
@@ -961,7 +937,7 @@ static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
  *     caller.
  */
 
-void ata_dev_id_string(u16 *id, unsigned char *s,
+void ata_dev_id_string(const u16 *id, unsigned char *s,
                       unsigned int ofs, unsigned int len)
 {
        unsigned int c;
@@ -1078,7 +1054,7 @@ void ata_dev_select(struct ata_port *ap, unsigned int device,
  *     caller.
  */
 
-static inline void ata_dump_id(struct ata_device *dev)
+static inline void ata_dump_id(const struct ata_device *dev)
 {
        DPRINTK("49==0x%04x  "
                "53==0x%04x  "
@@ -1106,6 +1082,31 @@ static inline void ata_dump_id(struct ata_device *dev)
                dev->id[93]);
 }
 
+/*
+ *     Compute the PIO modes available for this device. This is not as
+ *     trivial as it seems if we must consider early devices correctly.
+ *
+ *     FIXME: pre IDE drive timing (do we care ?). 
+ */
+
+static unsigned int ata_pio_modes(const struct ata_device *adev)
+{
+       u16 modes;
+
+       /* Usual case. Word 53 indicates word 88 is valid */
+       if (adev->id[ATA_ID_FIELD_VALID] & (1 << 2)) {
+               modes = adev->id[ATA_ID_PIO_MODES] & 0x03;
+               modes <<= 3;
+               modes |= 0x7;
+               return modes;
+       }
+
+       /* If word 88 isn't valid then Word 51 holds the PIO timing number
+          for the maximum. Turn it into a mask and return it */
+       modes = (2 << (adev->id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ;
+       return modes;
+}
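
A worked example of the two mask computations above; the identify-word values are hypothetical.

#include <assert.h>

int main(void)
{
        unsigned short modes;

        /* Word 53 bit 2 set: word 64 advertises PIO3 and PIO4 in its low two bits. */
        modes = ((0x0003 & 0x03) << 3) | 0x7;
        assert(modes == 0x1f);                  /* PIO0..PIO4 usable */

        /* Old-style drive: word 51 gives a highest timing number, here 2. */
        modes = (2 << 2) - 1;
        assert(modes == 0x07);                  /* PIO0..PIO2 usable */

        return 0;
}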
+
 /**
  *     ata_dev_identify - obtain IDENTIFY x DEVICE page
  *     @ap: port on which device we wish to probe resides
@@ -1131,7 +1132,7 @@ static inline void ata_dump_id(struct ata_device *dev)
 static void ata_dev_identify(struct ata_port *ap, unsigned int device)
 {
        struct ata_device *dev = &ap->device[device];
-       unsigned int i;
+       unsigned int major_version;
        u16 tmp;
        unsigned long xfer_modes;
        u8 status;
@@ -1229,9 +1230,9 @@ retry:
         * common ATA, ATAPI feature tests
         */
 
-       /* we require LBA and DMA support (bits 8 & 9 of word 49) */
-       if (!ata_id_has_dma(dev->id) || !ata_id_has_lba(dev->id)) {
-               printk(KERN_DEBUG "ata%u: no dma/lba\n", ap->id);
+       /* we require DMA support (bits 8 of word 49) */
+       if (!ata_id_has_dma(dev->id)) {
+               printk(KERN_DEBUG "ata%u: no dma\n", ap->id);
                goto err_out_nosup;
        }
 
@@ -1239,10 +1240,8 @@ retry:
        xfer_modes = dev->id[ATA_ID_UDMA_MODES];
        if (!xfer_modes)
                xfer_modes = (dev->id[ATA_ID_MWDMA_MODES]) << ATA_SHIFT_MWDMA;
-       if (!xfer_modes) {
-               xfer_modes = (dev->id[ATA_ID_PIO_MODES]) << (ATA_SHIFT_PIO + 3);
-               xfer_modes |= (0x7 << ATA_SHIFT_PIO);
-       }
+       if (!xfer_modes)
+               xfer_modes = ata_pio_modes(dev);
 
        ata_dump_id(dev);
 
@@ -1251,32 +1250,75 @@ retry:
                if (!ata_id_is_ata(dev->id))    /* sanity check */
                        goto err_out_nosup;
 
+               /* get major version */
                tmp = dev->id[ATA_ID_MAJOR_VER];
-               for (i = 14; i >= 1; i--)
-                       if (tmp & (1 << i))
+               for (major_version = 14; major_version >= 1; major_version--)
+                       if (tmp & (1 << major_version))
                                break;
 
-               /* we require at least ATA-3 */
-               if (i < 3) {
-                       printk(KERN_DEBUG "ata%u: no ATA-3\n", ap->id);
-                       goto err_out_nosup;
+               /*
+                * The exact sequence expected by certain pre-ATA4 drives is:
+                * SRST RESET
+                * IDENTIFY
+                * INITIALIZE DEVICE PARAMETERS
+                * anything else..
+                * Some drives were very specific about that exact sequence.
+                */
+               if (major_version < 4 || (!ata_id_has_lba(dev->id))) {
+                       ata_dev_init_params(ap, dev);
+
+                       /* current CHS translation info (id[53-58]) might be
+                        * changed. reread the identify device info.
+                        */
+                       ata_dev_reread_id(ap, dev);
                }
 
-               if (ata_id_has_lba48(dev->id)) {
-                       dev->flags |= ATA_DFLAG_LBA48;
-                       dev->n_sectors = ata_id_u64(dev->id, 100);
-               } else {
-                       dev->n_sectors = ata_id_u32(dev->id, 60);
+               if (ata_id_has_lba(dev->id)) {
+                       dev->flags |= ATA_DFLAG_LBA;
+
+                       if (ata_id_has_lba48(dev->id)) {
+                               dev->flags |= ATA_DFLAG_LBA48;
+                               dev->n_sectors = ata_id_u64(dev->id, 100);
+                       } else {
+                               dev->n_sectors = ata_id_u32(dev->id, 60);
+                       }
+
+                       /* print device info to dmesg */
+                       printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors:%s\n",
+                              ap->id, device,
+                              major_version,
+                              ata_mode_string(xfer_modes),
+                              (unsigned long long)dev->n_sectors,
+                              dev->flags & ATA_DFLAG_LBA48 ? " LBA48" : " LBA");
+               } else { 
+                       /* CHS */
+
+                       /* Default translation */
+                       dev->cylinders  = dev->id[1];
+                       dev->heads      = dev->id[3];
+                       dev->sectors    = dev->id[6];
+                       dev->n_sectors  = dev->cylinders * dev->heads * dev->sectors;
+
+                       if (ata_id_current_chs_valid(dev->id)) {
+                               /* Current CHS translation is valid. */
+                               dev->cylinders = dev->id[54];
+                               dev->heads     = dev->id[55];
+                               dev->sectors   = dev->id[56];
+                               
+                               dev->n_sectors = ata_id_u32(dev->id, 57);
+                       }
+
+                       /* print device info to dmesg */
+                       printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors: CHS %d/%d/%d\n",
+                              ap->id, device,
+                              major_version,
+                              ata_mode_string(xfer_modes),
+                              (unsigned long long)dev->n_sectors,
+                              (int)dev->cylinders, (int)dev->heads, (int)dev->sectors);
+
                }
 
                ap->host->max_cmd_len = 16;
-
-               /* print device info to dmesg */
-               printk(KERN_INFO "ata%u: dev %u ATA, max %s, %Lu sectors:%s\n",
-                      ap->id, device,
-                      ata_mode_string(xfer_modes),
-                      (unsigned long long)dev->n_sectors,
-                      dev->flags & ATA_DFLAG_LBA48 ? " lba48" : "");
        }
 
        /* ATAPI-specific feature tests */
@@ -1310,7 +1352,7 @@ err_out:
 }
 
 
-static inline u8 ata_dev_knobble(struct ata_port *ap)
+static inline u8 ata_dev_knobble(const struct ata_port *ap)
 {
        return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(ap->device->id)));
 }
@@ -1496,7 +1538,153 @@ void ata_port_disable(struct ata_port *ap)
        ap->flags |= ATA_FLAG_PORT_DISABLED;
 }
 
-static struct {
+/*
+ * This mode timing computation functionality is ported over from
+ * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
+ */
+/*
+ * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
+ * These were taken from ATA/ATAPI-6 standard, rev 0a, except
+ * for PIO 5, which is a nonstandard extension and UDMA6, which
+ * is currently supported only by Maxtor drives. 
+ */
+
+static const struct ata_timing ata_timing[] = {
+
+       { XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
+       { XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
+       { XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
+       { XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },
+
+       { XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
+       { XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
+       { XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },
+
+/*     { XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */
+                                          
+       { XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
+       { XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
+       { XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },
+                                          
+       { XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
+       { XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
+       { XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },
+
+/*     { XFER_PIO_5,     20,  50,  30, 100,  50,  30, 100,   0 }, */
+       { XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
+       { XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },
+
+       { XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
+       { XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
+       { XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },
+
+/*     { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
+
+       { 0xFF }
+};
+
+#define ENOUGH(v,unit)         (((v)-1)/(unit)+1)
+#define EZ(v,unit)             ((v)?ENOUGH(v,unit):0)
+
+static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
+{
+       q->setup   = EZ(t->setup   * 1000,  T);
+       q->act8b   = EZ(t->act8b   * 1000,  T);
+       q->rec8b   = EZ(t->rec8b   * 1000,  T);
+       q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
+       q->active  = EZ(t->active  * 1000,  T);
+       q->recover = EZ(t->recover * 1000,  T);
+       q->cycle   = EZ(t->cycle   * 1000,  T);
+       q->udma    = EZ(t->udma    * 1000, UT);
+}
+
+void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
+                     struct ata_timing *m, unsigned int what)
+{
+       if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
+       if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
+       if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
+       if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
+       if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
+       if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
+       if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
+       if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
+}
+
+static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
+{
+       const struct ata_timing *t;
+
+       for (t = ata_timing; t->mode != speed; t++)
+               if (t->mode == 0xFF)
+                       return NULL;
+       return t; 
+}
+
+int ata_timing_compute(struct ata_device *adev, unsigned short speed,
+                      struct ata_timing *t, int T, int UT)
+{
+       const struct ata_timing *s;
+       struct ata_timing p;
+
+       /*
+        * Find the mode. 
+       */
+
+       if (!(s = ata_timing_find_mode(speed)))
+               return -EINVAL;
+
+       /*
+        * If the drive is an EIDE drive, it can tell us it needs extended
+        * PIO/MW_DMA cycle timing.
+        */
+
+       if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
+               memset(&p, 0, sizeof(p));
+               if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
+                       if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
+                                           else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
+               } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
+                       p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
+               }
+               ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
+       }
+
+       /*
+        * Convert the timing to bus clock counts.
+        */
+
+       ata_timing_quantize(s, t, T, UT);
+
+       /*
+        * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, S.M.A.R.T
+        * and some other commands. We have to ensure that the DMA cycle timing is
+        * no faster than the fastest PIO timing.
+        */
+
+       if (speed > XFER_PIO_4) {
+               ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
+               ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
+       }
+
+       /*
+        * Lengthen active & recovery time so that cycle time is correct.
+        */
+
+       if (t->act8b + t->rec8b < t->cyc8b) {
+               t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
+               t->rec8b = t->cyc8b - t->act8b;
+       }
+
+       if (t->active + t->recover < t->cycle) {
+               t->active += (t->cycle - (t->active + t->recover)) / 2;
+               t->recover = t->cycle - t->active;
+       }
+
+       return 0;
+}
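
A sketch of how a controller driver might consume the new helper; only ata_timing_compute() and struct ata_timing come from the code above, while the 33 MHz clock and example_write_timing_regs() are hypothetical stand-ins for controller-specific details:

/* Hypothetical ->set_piomode() body for an imaginary 33 MHz controller. */
static void example_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
        struct ata_timing t;
        int T = 1000000000 / 33333;     /* bus clock period in picoseconds */

        if (ata_timing_compute(adev, adev->pio_mode, &t, T, T))
                return;                 /* unknown or unsupported mode */

        /* program the quantized active/recovery clock counts; this register
         * write is purely illustrative */
        example_write_timing_regs(ap, adev->devno, t.active, t.recover);
}
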
+
+static const struct {
        unsigned int shift;
        u8 base;
 } xfer_mode_classes[] = {
@@ -1603,7 +1791,7 @@ static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode,
  */
 static void ata_set_mode(struct ata_port *ap)
 {
-       unsigned int i, xfer_shift;
+       unsigned int xfer_shift;
        u8 xfer_mode;
        int rc;
 
@@ -1632,11 +1820,6 @@ static void ata_set_mode(struct ata_port *ap)
        if (ap->ops->post_set_mode)
                ap->ops->post_set_mode(ap);
 
-       for (i = 0; i < 2; i++) {
-               struct ata_device *dev = &ap->device[i];
-               ata_dev_set_protocol(dev);
-       }
-
        return;
 
 err_out:
@@ -1910,7 +2093,8 @@ err_out:
        DPRINTK("EXIT\n");
 }
 
-static void ata_pr_blacklisted(struct ata_port *ap, struct ata_device *dev)
+static void ata_pr_blacklisted(const struct ata_port *ap,
+                              const struct ata_device *dev)
 {
        printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, disabling DMA\n",
                ap->id, dev->devno);
@@ -1948,7 +2132,7 @@ static const char * ata_dma_blacklist [] = {
        "_NEC DV5800A",
 };
 
-static int ata_dma_blacklisted(struct ata_port *ap, struct ata_device *dev)
+static int ata_dma_blacklisted(const struct ata_device *dev)
 {
        unsigned char model_num[40];
        char *s;
@@ -1973,9 +2157,9 @@ static int ata_dma_blacklisted(struct ata_port *ap, struct ata_device *dev)
        return 0;
 }
 
-static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift)
+static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift)
 {
-       struct ata_device *master, *slave;
+       const struct ata_device *master, *slave;
        unsigned int mask;
 
        master = &ap->device[0];
@@ -1987,14 +2171,14 @@ static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift)
                mask = ap->udma_mask;
                if (ata_dev_present(master)) {
                        mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff);
-                       if (ata_dma_blacklisted(ap, master)) {
+                       if (ata_dma_blacklisted(master)) {
                                mask = 0;
                                ata_pr_blacklisted(ap, master);
                        }
                }
                if (ata_dev_present(slave)) {
                        mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff);
-                       if (ata_dma_blacklisted(ap, slave)) {
+                       if (ata_dma_blacklisted(slave)) {
                                mask = 0;
                                ata_pr_blacklisted(ap, slave);
                        }
@@ -2004,14 +2188,14 @@ static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift)
                mask = ap->mwdma_mask;
                if (ata_dev_present(master)) {
                        mask &= (master->id[ATA_ID_MWDMA_MODES] & 0x07);
-                       if (ata_dma_blacklisted(ap, master)) {
+                       if (ata_dma_blacklisted(master)) {
                                mask = 0;
                                ata_pr_blacklisted(ap, master);
                        }
                }
                if (ata_dev_present(slave)) {
                        mask &= (slave->id[ATA_ID_MWDMA_MODES] & 0x07);
-                       if (ata_dma_blacklisted(ap, slave)) {
+                       if (ata_dma_blacklisted(slave)) {
                                mask = 0;
                                ata_pr_blacklisted(ap, slave);
                        }
@@ -2075,7 +2259,7 @@ static int fgb(u32 bitmap)
  *     Zero on success, negative on error.
  */
 
-static int ata_choose_xfer_mode(struct ata_port *ap,
+static int ata_choose_xfer_mode(const struct ata_port *ap,
                                u8 *xfer_mode_out,
                                unsigned int *xfer_shift_out)
 {
@@ -2143,6 +2327,110 @@ static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
        DPRINTK("EXIT\n");
 }
 
+/**
+ *     ata_dev_reread_id - Reread the device identify device info
+ *     @ap: port where the device is
+ *     @dev: device to reread the identify device info
+ *
+ *     LOCKING:
+ */
+
+static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev)
+{
+       DECLARE_COMPLETION(wait);
+       struct ata_queued_cmd *qc;
+       unsigned long flags;
+       int rc;
+
+       qc = ata_qc_new_init(ap, dev);
+       BUG_ON(qc == NULL);
+
+       ata_sg_init_one(qc, dev->id, sizeof(dev->id));
+       qc->dma_dir = DMA_FROM_DEVICE;
+
+       if (dev->class == ATA_DEV_ATA) {
+               qc->tf.command = ATA_CMD_ID_ATA;
+               DPRINTK("do ATA identify\n");
+       } else {
+               qc->tf.command = ATA_CMD_ID_ATAPI;
+               DPRINTK("do ATAPI identify\n");
+       }
+
+       qc->tf.flags |= ATA_TFLAG_DEVICE;
+       qc->tf.protocol = ATA_PROT_PIO;
+       qc->nsect = 1;
+
+       qc->waiting = &wait;
+       qc->complete_fn = ata_qc_complete_noop;
+
+       spin_lock_irqsave(&ap->host_set->lock, flags);
+       rc = ata_qc_issue(qc);
+       spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+       if (rc)
+               goto err_out;
+
+       wait_for_completion(&wait);
+
+       swap_buf_le16(dev->id, ATA_ID_WORDS);
+
+       ata_dump_id(dev);
+
+       DPRINTK("EXIT\n");
+
+       return;
+err_out:
+       ata_port_disable(ap);
+}
+
+/**
+ *     ata_dev_init_params - Issue INIT DEV PARAMS command
+ *     @ap: Port associated with device @dev
+ *     @dev: Device to which command will be sent
+ *
+ *     LOCKING:
+ */
+
+static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev)
+{
+       DECLARE_COMPLETION(wait);
+       struct ata_queued_cmd *qc;
+       int rc;
+       unsigned long flags;
+       u16 sectors = dev->id[6];
+       u16 heads   = dev->id[3];
+
+       /* Number of sectors per track 1-255. Number of heads 1-16 */
+       if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
+               return;
+
+       /* set up init dev params taskfile */
+       DPRINTK("init dev params \n");
+
+       qc = ata_qc_new_init(ap, dev);
+       BUG_ON(qc == NULL);
+
+       qc->tf.command = ATA_CMD_INIT_DEV_PARAMS;
+       qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+       qc->tf.protocol = ATA_PROT_NODATA;
+       qc->tf.nsect = sectors;
+       qc->tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
+
+       qc->waiting = &wait;
+       qc->complete_fn = ata_qc_complete_noop;
+
+       spin_lock_irqsave(&ap->host_set->lock, flags);
+       rc = ata_qc_issue(qc);
+       spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+       if (rc)
+               ata_port_disable(ap);
+       else
+               wait_for_completion(&wait);
+
+       DPRINTK("EXIT\n");
+}
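
As a worked example of the geometry encoding above (values are hypothetical, not from any particular drive): a device reporting the common translated geometry of 16 heads and 63 sectors per track ends up with nsect = 63 and a device-register head field of 0x0f:

#include <stdio.h>

int main(void)
{
        unsigned int heads = 16, sectors = 63;   /* hypothetical translated geometry */

        /* same encoding the taskfile above uses for INIT DEV PARAMS */
        printf("tf.nsect   = %u\n", sectors);                 /* 63 sectors/track */
        printf("tf.device |= 0x%02x\n", (heads - 1) & 0x0f);  /* max head number  */
        return 0;
}
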
+
 /**
  *     ata_sg_clean - Unmap DMA memory associated with command
  *     @qc: Command containing DMA memory to be released
@@ -2413,32 +2701,32 @@ void ata_poll_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
 
 /**
  *     ata_pio_poll -
- *     @ap:
+ *     @ap: the target ata_port
  *
  *     LOCKING:
  *     None.  (executing in kernel thread context)
  *
  *     RETURNS:
- *
+ *     timeout value to use
  */
 
 static unsigned long ata_pio_poll(struct ata_port *ap)
 {
        u8 status;
-       unsigned int poll_state = PIO_ST_UNKNOWN;
-       unsigned int reg_state = PIO_ST_UNKNOWN;
-       const unsigned int tmout_state = PIO_ST_TMOUT;
-
-       switch (ap->pio_task_state) {
-       case PIO_ST:
-       case PIO_ST_POLL:
-               poll_state = PIO_ST_POLL;
-               reg_state = PIO_ST;
+       unsigned int poll_state = HSM_ST_UNKNOWN;
+       unsigned int reg_state = HSM_ST_UNKNOWN;
+       const unsigned int tmout_state = HSM_ST_TMOUT;
+
+       switch (ap->hsm_task_state) {
+       case HSM_ST:
+       case HSM_ST_POLL:
+               poll_state = HSM_ST_POLL;
+               reg_state = HSM_ST;
                break;
-       case PIO_ST_LAST:
-       case PIO_ST_LAST_POLL:
-               poll_state = PIO_ST_LAST_POLL;
-               reg_state = PIO_ST_LAST;
+       case HSM_ST_LAST:
+       case HSM_ST_LAST_POLL:
+               poll_state = HSM_ST_LAST_POLL;
+               reg_state = HSM_ST_LAST;
                break;
        default:
                BUG();
@@ -2448,20 +2736,20 @@ static unsigned long ata_pio_poll(struct ata_port *ap)
        status = ata_chk_status(ap);
        if (status & ATA_BUSY) {
                if (time_after(jiffies, ap->pio_task_timeout)) {
-                       ap->pio_task_state = tmout_state;
+                       ap->hsm_task_state = tmout_state;
                        return 0;
                }
-               ap->pio_task_state = poll_state;
+               ap->hsm_task_state = poll_state;
                return ATA_SHORT_PAUSE;
        }
 
-       ap->pio_task_state = reg_state;
+       ap->hsm_task_state = reg_state;
        return 0;
 }
 
 /**
- *     ata_pio_complete -
- *     @ap:
+ *     ata_pio_complete - check if drive is busy or idle
+ *     @ap: the target ata_port
  *
  *     LOCKING:
  *     None.  (executing in kernel thread context)
@@ -2480,14 +2768,14 @@ static int ata_pio_complete (struct ata_port *ap)
         * we enter, BSY will be cleared in a chk-status or two.  If not,
         * the drive is probably seeking or something.  Snooze for a couple
         * msecs, then chk-status again.  If still busy, fall back to
-        * PIO_ST_POLL state.
+        * HSM_ST_POLL state.
         */
        drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
        if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
                msleep(2);
                drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
                if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
-                       ap->pio_task_state = PIO_ST_LAST_POLL;
+                       ap->hsm_task_state = HSM_ST_LAST_POLL;
                        ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
                        return 0;
                }
@@ -2495,14 +2783,14 @@ static int ata_pio_complete (struct ata_port *ap)
 
        drv_stat = ata_wait_idle(ap);
        if (!ata_ok(drv_stat)) {
-               ap->pio_task_state = PIO_ST_ERR;
+               ap->hsm_task_state = HSM_ST_ERR;
                return 0;
        }
 
        qc = ata_qc_from_tag(ap, ap->active_tag);
        assert(qc != NULL);
 
-       ap->pio_task_state = PIO_ST_IDLE;
+       ap->hsm_task_state = HSM_ST_IDLE;
 
        ata_poll_qc_complete(qc, drv_stat);
 
@@ -2513,7 +2801,7 @@ static int ata_pio_complete (struct ata_port *ap)
 
 
 /**
- *     swap_buf_le16 -
+ *     swap_buf_le16 - swap halves of 16-bit words in place
  *     @buf:  Buffer to swap
  *     @buf_words:  Number of 16-bit words in buffer.
  *
@@ -2522,6 +2810,7 @@ static int ata_pio_complete (struct ata_port *ap)
  *     vice-versa.
  *
  *     LOCKING:
+ *     Inherited from caller.
  */
 void swap_buf_le16(u16 *buf, unsigned int buf_words)
 {
@@ -2544,7 +2833,6 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
  *
  *     LOCKING:
  *     Inherited from caller.
- *
  */
 
 static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
@@ -2590,7 +2878,6 @@ static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
  *
  *     LOCKING:
  *     Inherited from caller.
- *
  */
 
 static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
@@ -2630,7 +2917,6 @@ static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
  *
  *     LOCKING:
  *     Inherited from caller.
- *
  */
 
 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
@@ -2662,7 +2948,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
        unsigned char *buf;
 
        if (qc->cursect == (qc->nsect - 1))
-               ap->pio_task_state = PIO_ST_LAST;
+               ap->hsm_task_state = HSM_ST_LAST;
 
        page = sg[qc->cursg].page;
        offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
@@ -2712,7 +2998,7 @@ static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
        unsigned int offset, count;
 
        if (qc->curbytes + bytes >= qc->nbytes)
-               ap->pio_task_state = PIO_ST_LAST;
+               ap->hsm_task_state = HSM_ST_LAST;
 
 next_sg:
        if (unlikely(qc->cursg >= qc->n_elem)) {
@@ -2734,7 +3020,7 @@ next_sg:
                for (i = 0; i < words; i++)
                        ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
 
-               ap->pio_task_state = PIO_ST_LAST;
+               ap->hsm_task_state = HSM_ST_LAST;
                return;
        }
 
@@ -2783,7 +3069,6 @@ next_sg:
  *
  *     LOCKING:
  *     Inherited from caller.
- *
  */
 
 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
@@ -2815,12 +3100,12 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
 err_out:
        printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
              ap->id, dev->devno);
-       ap->pio_task_state = PIO_ST_ERR;
+       ap->hsm_task_state = HSM_ST_ERR;
 }
 
 /**
- *     ata_pio_sector -
- *     @ap:
+ *     ata_pio_block - start PIO on a block
+ *     @ap: the target ata_port
  *
  *     LOCKING:
  *     None.  (executing in kernel thread context)
@@ -2832,19 +3117,19 @@ static void ata_pio_block(struct ata_port *ap)
        u8 status;
 
        /*
-        * This is purely hueristic.  This is a fast path.
+        * This is purely heuristic.  This is a fast path.
         * Sometimes when we enter, BSY will be cleared in
         * a chk-status or two.  If not, the drive is probably seeking
         * or something.  Snooze for a couple msecs, then
         * chk-status again.  If still busy, fall back to
-        * PIO_ST_POLL state.
+        * HSM_ST_POLL state.
         */
        status = ata_busy_wait(ap, ATA_BUSY, 5);
        if (status & ATA_BUSY) {
                msleep(2);
                status = ata_busy_wait(ap, ATA_BUSY, 10);
                if (status & ATA_BUSY) {
-                       ap->pio_task_state = PIO_ST_POLL;
+                       ap->hsm_task_state = HSM_ST_POLL;
                        ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
                        return;
                }
@@ -2856,7 +3141,7 @@ static void ata_pio_block(struct ata_port *ap)
        if (is_atapi_taskfile(&qc->tf)) {
                /* no more data to transfer or unsupported ATAPI command */
                if ((status & ATA_DRQ) == 0) {
-                       ap->pio_task_state = PIO_ST_LAST;
+                       ap->hsm_task_state = HSM_ST_LAST;
                        return;
                }
 
@@ -2864,7 +3149,7 @@ static void ata_pio_block(struct ata_port *ap)
        } else {
                /* handle BSY=0, DRQ=0 as error */
                if ((status & ATA_DRQ) == 0) {
-                       ap->pio_task_state = PIO_ST_ERR;
+                       ap->hsm_task_state = HSM_ST_ERR;
                        return;
                }
 
@@ -2884,7 +3169,7 @@ static void ata_pio_error(struct ata_port *ap)
        printk(KERN_WARNING "ata%u: PIO error, drv_stat 0x%x\n",
               ap->id, drv_stat);
 
-       ap->pio_task_state = PIO_ST_IDLE;
+       ap->hsm_task_state = HSM_ST_IDLE;
 
        ata_poll_qc_complete(qc, drv_stat | ATA_ERR);
 }
@@ -2899,25 +3184,25 @@ fsm_start:
        timeout = 0;
        qc_completed = 0;
 
-       switch (ap->pio_task_state) {
-       case PIO_ST_IDLE:
+       switch (ap->hsm_task_state) {
+       case HSM_ST_IDLE:
                return;
 
-       case PIO_ST:
+       case HSM_ST:
                ata_pio_block(ap);
                break;
 
-       case PIO_ST_LAST:
+       case HSM_ST_LAST:
                qc_completed = ata_pio_complete(ap);
                break;
 
-       case PIO_ST_POLL:
-       case PIO_ST_LAST_POLL:
+       case HSM_ST_POLL:
+       case HSM_ST_LAST_POLL:
                timeout = ata_pio_poll(ap);
                break;
 
-       case PIO_ST_TMOUT:
-       case PIO_ST_ERR:
+       case HSM_ST_TMOUT:
+       case HSM_ST_ERR:
                ata_pio_error(ap);
                return;
        }
@@ -2928,52 +3213,6 @@ fsm_start:
                goto fsm_start;
 }
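
For orientation, the renamed HSM_ST_* states drive the polled PIO path roughly as follows (a summary of the code above, not part of the patch):

/*
 *   HSM_ST_IDLE         nothing queued; the pio_task returns immediately
 *   HSM_ST              transfer one data block; the final block moves to HSM_ST_LAST
 *   HSM_ST_POLL /
 *   HSM_ST_LAST_POLL    device still BSY; pause and re-poll until ATA_TMOUT_PIO expires
 *   HSM_ST_LAST         wait for BSY/DRQ to clear, then complete the command
 *   HSM_ST_TMOUT /
 *   HSM_ST_ERR          report the failure and return to HSM_ST_IDLE
 */
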
 
-static void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
-                               struct scsi_cmnd *cmd)
-{
-       DECLARE_COMPLETION(wait);
-       struct ata_queued_cmd *qc;
-       unsigned long flags;
-       int rc;
-
-       DPRINTK("ATAPI request sense\n");
-
-       qc = ata_qc_new_init(ap, dev);
-       BUG_ON(qc == NULL);
-
-       /* FIXME: is this needed? */
-       memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
-
-       ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
-       qc->dma_dir = DMA_FROM_DEVICE;
-
-       memset(&qc->cdb, 0, ap->cdb_len);
-       qc->cdb[0] = REQUEST_SENSE;
-       qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
-
-       qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
-       qc->tf.command = ATA_CMD_PACKET;
-
-       qc->tf.protocol = ATA_PROT_ATAPI;
-       qc->tf.lbam = (8 * 1024) & 0xff;
-       qc->tf.lbah = (8 * 1024) >> 8;
-       qc->nbytes = SCSI_SENSE_BUFFERSIZE;
-
-       qc->waiting = &wait;
-       qc->complete_fn = ata_qc_complete_noop;
-
-       spin_lock_irqsave(&ap->host_set->lock, flags);
-       rc = ata_qc_issue(qc);
-       spin_unlock_irqrestore(&ap->host_set->lock, flags);
-
-       if (rc)
-               ata_port_disable(ap);
-       else
-               wait_for_completion(&wait);
-
-       DPRINTK("EXIT\n");
-}
-
 /**
  *     ata_qc_timeout - Handle timeout of queued command
  *     @qc: Command that timed out
@@ -3091,14 +3330,14 @@ void ata_eng_timeout(struct ata_port *ap)
        DPRINTK("ENTER\n");
 
        qc = ata_qc_from_tag(ap, ap->active_tag);
-       if (!qc) {
+       if (qc)
+               ata_qc_timeout(qc);
+       else {
                printk(KERN_ERR "ata%u: BUG: timeout without command\n",
                       ap->id);
                goto out;
        }
 
-       ata_qc_timeout(qc);
-
 out:
        DPRINTK("EXIT\n");
 }
@@ -3155,15 +3394,12 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
                qc->nbytes = qc->curbytes = 0;
 
                ata_tf_init(ap, &qc->tf, dev->devno);
-
-               if (dev->flags & ATA_DFLAG_LBA48)
-                       qc->tf.flags |= ATA_TFLAG_LBA48;
        }
 
        return qc;
 }
 
-static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat)
+int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat)
 {
        return 0;
 }
@@ -3201,7 +3437,6 @@ static void __ata_qc_complete(struct ata_queued_cmd *qc)
  *
  *     LOCKING:
  *     spin_lock_irqsave(host_set lock)
- *
  */
 void ata_qc_free(struct ata_queued_cmd *qc)
 {
@@ -3221,7 +3456,6 @@ void ata_qc_free(struct ata_queued_cmd *qc)
  *
  *     LOCKING:
  *     spin_lock_irqsave(host_set lock)
- *
  */
 
 void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
@@ -3360,7 +3594,7 @@ int ata_qc_issue_prot(struct ata_queued_cmd *qc)
        case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
                ata_qc_set_polling(qc);
                ata_tf_to_host_nolock(ap, &qc->tf);
-               ap->pio_task_state = PIO_ST;
+               ap->hsm_task_state = HSM_ST;
                queue_work(ata_wq, &ap->pio_task);
                break;
 
@@ -3586,7 +3820,7 @@ u8 ata_bmdma_status(struct ata_port *ap)
                void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
                host_stat = readb(mmio + ATA_DMA_STATUS);
        } else
-       host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+               host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
        return host_stat;
 }
 
@@ -3715,7 +3949,6 @@ idle_irq:
  *
  *     RETURNS:
  *     IRQ_NONE or IRQ_HANDLED.
- *
  */
 
 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
@@ -3806,7 +4039,7 @@ static void atapi_packet_task(void *_data)
                ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
 
                /* PIO commands are handled by polling */
-               ap->pio_task_state = PIO_ST;
+               ap->hsm_task_state = HSM_ST;
                queue_work(ata_wq, &ap->pio_task);
        }
 
@@ -3827,6 +4060,7 @@ err_out:
  *     May be used as the port_start() entry in ata_port_operations.
  *
  *     LOCKING:
+ *     Inherited from caller.
  */
 
 int ata_port_start (struct ata_port *ap)
@@ -3852,6 +4086,7 @@ int ata_port_start (struct ata_port *ap)
  *     May be used as the port_stop() entry in ata_port_operations.
  *
  *     LOCKING:
+ *     Inherited from caller.
  */
 
 void ata_port_stop (struct ata_port *ap)
@@ -3874,6 +4109,7 @@ void ata_host_stop (struct ata_host_set *host_set)
  *     @do_unregister: 1 if we fully unregister, 0 to just stop the port
  *
  *     LOCKING:
+ *     Inherited from caller.
  */
 
 static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
@@ -3901,12 +4137,11 @@ static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
  *
  *     LOCKING:
  *     Inherited from caller.
- *
  */
 
 static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
                          struct ata_host_set *host_set,
-                         struct ata_probe_ent *ent, unsigned int port_no)
+                         const struct ata_probe_ent *ent, unsigned int port_no)
 {
        unsigned int i;
 
@@ -3962,10 +4197,9 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
  *
  *     RETURNS:
 *     New ata_port on success, NULL on error.
- *
  */
 
-static struct ata_port * ata_host_add(struct ata_probe_ent *ent,
+static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
                                      struct ata_host_set *host_set,
                                      unsigned int port_no)
 {
@@ -4010,10 +4244,9 @@ err_out:
  *
  *     RETURNS:
  *     Number of ports registered.  Zero on error (no ports registered).
- *
  */
 
-int ata_device_add(struct ata_probe_ent *ent)
+int ata_device_add(const struct ata_probe_ent *ent)
 {
        unsigned int count = 0, i;
        struct device *dev = ent->dev;
@@ -4113,7 +4346,7 @@ int ata_device_add(struct ata_probe_ent *ent)
        for (i = 0; i < count; i++) {
                struct ata_port *ap = host_set->ports[i];
 
-               scsi_scan_host(ap->host);
+               ata_scsi_scan_host(ap);
        }
 
        dev_set_drvdata(dev, host_set);
@@ -4142,7 +4375,6 @@ err_out:
  *     Inherited from calling layer (may sleep).
  */
 
-
 void ata_host_set_remove(struct ata_host_set *host_set)
 {
        struct ata_port *ap;
@@ -4232,7 +4464,7 @@ void ata_std_ports(struct ata_ioports *ioaddr)
 }
 
 static struct ata_probe_ent *
-ata_probe_ent_alloc(struct device *dev, struct ata_port_info *port)
+ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
 {
        struct ata_probe_ent *probe_ent;
 
@@ -4273,85 +4505,86 @@ void ata_pci_host_stop (struct ata_host_set *host_set)
  *     ata_pci_init_native_mode - Initialize native-mode driver
  *     @pdev:  pci device to be initialized
  *     @port:  array[2] of pointers to port info structures.
+ *     @ports: bitmap of ports present
  *
  *     Utility function which allocates and initializes an
  *     ata_probe_ent structure for a standard dual-port
  *     PIO-based IDE controller.  The returned ata_probe_ent
  *     structure can be passed to ata_device_add().  The returned
  *     ata_probe_ent structure should then be freed with kfree().
+ *
+ *     The caller need only pass the address of the primary port, the
+ *     secondary will be deduced automatically. If the device has non
+ *     standard secondary port mappings this function can be called twice,
+ *     once for each interface.
  */
 
 struct ata_probe_ent *
-ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port)
+ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
 {
        struct ata_probe_ent *probe_ent =
                ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
+       int p = 0;
+
        if (!probe_ent)
                return NULL;
 
-       probe_ent->n_ports = 2;
        probe_ent->irq = pdev->irq;
        probe_ent->irq_flags = SA_SHIRQ;
 
-       probe_ent->port[0].cmd_addr = pci_resource_start(pdev, 0);
-       probe_ent->port[0].altstatus_addr =
-       probe_ent->port[0].ctl_addr =
-               pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
-       probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4);
-
-       probe_ent->port[1].cmd_addr = pci_resource_start(pdev, 2);
-       probe_ent->port[1].altstatus_addr =
-       probe_ent->port[1].ctl_addr =
-               pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
-       probe_ent->port[1].bmdma_addr = pci_resource_start(pdev, 4) + 8;
+       if (ports & ATA_PORT_PRIMARY) {
+               probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
+               probe_ent->port[p].altstatus_addr =
+               probe_ent->port[p].ctl_addr =
+                       pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
+               probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4);
+               ata_std_ports(&probe_ent->port[p]);
+               p++;
+       }
 
-       ata_std_ports(&probe_ent->port[0]);
-       ata_std_ports(&probe_ent->port[1]);
+       if (ports & ATA_PORT_SECONDARY) {
+               probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
+               probe_ent->port[p].altstatus_addr =
+               probe_ent->port[p].ctl_addr =
+                       pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
+               probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4) + 8;
+               ata_std_ports(&probe_ent->port[p]);
+               p++;
+       }
 
+       probe_ent->n_ports = p;
        return probe_ent;
 }
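
A hedged example of the new calling convention; the surrounding driver body is imaginary, only ata_pci_init_native_mode(), ata_device_add() and the ATA_PORT_* flags come from the patch:

static int example_init_one(struct pci_dev *pdev, struct ata_port_info **port)
{
        struct ata_probe_ent *probe_ent;

        /* both channels in native mode; single-channel hardware would pass
         * only ATA_PORT_PRIMARY (or ATA_PORT_SECONDARY) and could call the
         * helper a second time for a non-standard secondary mapping */
        probe_ent = ata_pci_init_native_mode(pdev, port,
                                             ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
        if (!probe_ent)
                return -ENOMEM;

        if (!ata_device_add(probe_ent)) {
                kfree(probe_ent);
                return -ENODEV;
        }

        kfree(probe_ent);       /* per the kernel-doc, the probe_ent is ours to free */
        return 0;
}
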
 
-static struct ata_probe_ent *
-ata_pci_init_legacy_mode(struct pci_dev *pdev, struct ata_port_info **port,
-    struct ata_probe_ent **ppe2)
+static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, struct ata_port_info **port, int port_num)
 {
-       struct ata_probe_ent *probe_ent, *probe_ent2;
+       struct ata_probe_ent *probe_ent;
 
        probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
        if (!probe_ent)
                return NULL;
-       probe_ent2 = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[1]);
-       if (!probe_ent2) {
-               kfree(probe_ent);
-               return NULL;
-       }
-
-       probe_ent->n_ports = 1;
-       probe_ent->irq = 14;
 
-       probe_ent->hard_port_no = 0;
        probe_ent->legacy_mode = 1;
-
-       probe_ent2->n_ports = 1;
-       probe_ent2->irq = 15;
-
-       probe_ent2->hard_port_no = 1;
-       probe_ent2->legacy_mode = 1;
-
-       probe_ent->port[0].cmd_addr = 0x1f0;
-       probe_ent->port[0].altstatus_addr =
-       probe_ent->port[0].ctl_addr = 0x3f6;
-       probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4);
-
-       probe_ent2->port[0].cmd_addr = 0x170;
-       probe_ent2->port[0].altstatus_addr =
-       probe_ent2->port[0].ctl_addr = 0x376;
-       probe_ent2->port[0].bmdma_addr = pci_resource_start(pdev, 4)+8;
-
+       probe_ent->n_ports = 1;
+       probe_ent->hard_port_no = port_num;
+
+       switch(port_num)
+       {
+               case 0:
+                       probe_ent->irq = 14;
+                       probe_ent->port[0].cmd_addr = 0x1f0;
+                       probe_ent->port[0].altstatus_addr =
+                       probe_ent->port[0].ctl_addr = 0x3f6;
+                       break;
+               case 1:
+                       probe_ent->irq = 15;
+                       probe_ent->port[0].cmd_addr = 0x170;
+                       probe_ent->port[0].altstatus_addr =
+                       probe_ent->port[0].ctl_addr = 0x376;
+                       break;
+       }
+       probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4) + 8 * port_num;
        ata_std_ports(&probe_ent->port[0]);
-       ata_std_ports(&probe_ent2->port[0]);
-
-       *ppe2 = probe_ent2;
        return probe_ent;
 }
 
@@ -4374,13 +4607,12 @@ ata_pci_init_legacy_mode(struct pci_dev *pdev, struct ata_port_info **port,
  *
  *     RETURNS:
 *     Zero on success, negative errno-based value on error.
- *
  */
 
 int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
                      unsigned int n_ports)
 {
-       struct ata_probe_ent *probe_ent, *probe_ent2 = NULL;
+       struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL;
        struct ata_port_info *port[2];
        u8 tmp8, mask;
        unsigned int legacy_mode = 0;
@@ -4397,7 +4629,7 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
 
        if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
            && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
-               /* TODO: support transitioning to native mode? */
+               /* TODO: What if one channel is in native mode ... */
                pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
                mask = (1 << 2) | (1 << 0);
                if ((tmp8 & mask) != mask)
@@ -4405,11 +4637,20 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
        }
 
        /* FIXME... */
-       if ((!legacy_mode) && (n_ports > 1)) {
-               printk(KERN_ERR "ata: BUG: native mode, n_ports > 1\n");
-               return -EINVAL;
+       if ((!legacy_mode) && (n_ports > 2)) {
+               printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
+               n_ports = 2;
+               /* For now */
        }
 
+       /* FIXME: Really, for ATA this isn't safe because the device may be
+          multi-purpose and we want to leave it alone if it was already
+          enabled. Secondly, for shared use, as Arjan says, we want
+          refcounting.
+
+          Checking dev->is_enabled is insufficient as it is not set at
+          boot for the primary video device, which is BIOS enabled. */
+
        rc = pci_enable_device(pdev);
        if (rc)
                return rc;
@@ -4420,6 +4661,7 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
                goto err_out;
        }
 
+       /* FIXME: Should use platform specific mappers for legacy port ranges */
        if (legacy_mode) {
                if (!request_region(0x1f0, 8, "libata")) {
                        struct resource *conflict, res;
@@ -4464,10 +4706,17 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
                goto err_out_regions;
 
        if (legacy_mode) {
-               probe_ent = ata_pci_init_legacy_mode(pdev, port, &probe_ent2);
-       } else
-               probe_ent = ata_pci_init_native_mode(pdev, port);
-       if (!probe_ent) {
+               if (legacy_mode & (1 << 0))
+                       probe_ent = ata_pci_init_legacy_port(pdev, port, 0);
+               if (legacy_mode & (1 << 1))
+                       probe_ent2 = ata_pci_init_legacy_port(pdev, port, 1);
+       } else {
+               if (n_ports == 2)
+                       probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
+               else
+                       probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
+       }
+       if (!probe_ent && !probe_ent2) {
                rc = -ENOMEM;
                goto err_out_regions;
        }
@@ -4505,7 +4754,7 @@ err_out:
  *     @pdev: PCI device that was removed
  *
  *     PCI layer indicates to libata via this hook that
- *     hot-unplug or module unload event has occured.
+ *     hot-unplug or module unload event has occurred.
  *     Handle this by unregistering all objects associated
  *     with this PCI device.  Free those objects.  Then finally
  *     release PCI resources and disable device.
@@ -4526,7 +4775,7 @@ void ata_pci_remove_one (struct pci_dev *pdev)
 }
 
 /* move to PCI subsystem */
-int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits)
+int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
 {
        unsigned long tmp = 0;
 
@@ -4579,6 +4828,27 @@ static void __exit ata_exit(void)
 module_init(ata_init);
 module_exit(ata_exit);
 
+static unsigned long ratelimit_time;
+static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;
+
+int ata_ratelimit(void)
+{
+       int rc;
+       unsigned long flags;
+
+       spin_lock_irqsave(&ata_ratelimit_lock, flags);
+
+       if (time_after(jiffies, ratelimit_time)) {
+               rc = 1;
+               ratelimit_time = jiffies + (HZ/5);
+       } else
+               rc = 0;
+
+       spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
+
+       return rc;
+}
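
Illustrative use only: a caller might wrap a noisy warning in the new helper so that at most one message per HZ/5 interval (roughly five per second) gets through:

static void example_warn_spurious(struct ata_port *ap)
{
        if (ata_ratelimit())
                printk(KERN_WARNING "ata%u: spurious interrupt\n", ap->id);
}
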
+
 /*
  * libata is essentially a library of internal helper functions for
  * low-level ATA host controller drivers.  As such, the API/ABI is
@@ -4620,6 +4890,7 @@ EXPORT_SYMBOL_GPL(sata_phy_reset);
 EXPORT_SYMBOL_GPL(__sata_phy_reset);
 EXPORT_SYMBOL_GPL(ata_bus_reset);
 EXPORT_SYMBOL_GPL(ata_port_disable);
+EXPORT_SYMBOL_GPL(ata_ratelimit);
 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
 EXPORT_SYMBOL_GPL(ata_scsi_error);
@@ -4631,6 +4902,9 @@ EXPORT_SYMBOL_GPL(ata_dev_id_string);
 EXPORT_SYMBOL_GPL(ata_dev_config);
 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
 
+EXPORT_SYMBOL_GPL(ata_timing_compute);
+EXPORT_SYMBOL_GPL(ata_timing_merge);
+
 #ifdef CONFIG_PCI
 EXPORT_SYMBOL_GPL(pci_test_config_bits);
 EXPORT_SYMBOL_GPL(ata_pci_host_stop);
index 104fd9a63e734ddc752f68de92215a208242a0d1..58858886d751c8886fb4c5af8d354c57ff3069fc 100644 (file)
 
 #include "libata.h"
 
-typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc, u8 *scsicmd);
+typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc, const u8 *scsicmd);
 static struct ata_device *
-ata_scsi_find_dev(struct ata_port *ap, struct scsi_device *scsidev);
+ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev);
 
 
+static void ata_scsi_invalid_field(struct scsi_cmnd *cmd,
+                                  void (*done)(struct scsi_cmnd *))
+{
+       ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0);
+       /* "Invalid field in cbd" */
+       done(cmd);
+}
+
 /**
  *     ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd.
  *     @sdev: SCSI device for which BIOS geometry is to be determined
@@ -182,7 +190,6 @@ void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat)
 {
        struct scsi_cmnd *cmd = qc->scsicmd;
        u8 err = 0;
-       unsigned char *sb = cmd->sense_buffer;
        /* Based on the 3ware driver translation table */
        static unsigned char sense_table[][4] = {
                /* BBD|ECC|ID|MAR */
@@ -225,8 +232,6 @@ void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat)
        };
        int i = 0;
 
-       cmd->result = SAM_STAT_CHECK_CONDITION;
-
        /*
         *      Is this an error we can process/parse
         */
@@ -281,11 +286,9 @@ void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat)
                /* Look for best matches first */
                if((sense_table[i][0] & err) == sense_table[i][0])
                {
-                       sb[0] = 0x70;
-                       sb[2] = sense_table[i][1];
-                       sb[7] = 0x0a;
-                       sb[12] = sense_table[i][2];
-                       sb[13] = sense_table[i][3];
+                       ata_scsi_set_sense(cmd, sense_table[i][1] /* sk */,
+                                          sense_table[i][2] /* asc */,
+                                          sense_table[i][3] /* ascq */ );
                        return;
                }
                i++;
@@ -300,11 +303,9 @@ void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat)
        {
                if(stat_table[i][0] & drv_stat)
                {
-                       sb[0] = 0x70;
-                       sb[2] = stat_table[i][1];
-                       sb[7] = 0x0a;
-                       sb[12] = stat_table[i][2];
-                       sb[13] = stat_table[i][3];
+                       ata_scsi_set_sense(cmd, sense_table[i][1] /* sk */,
+                                          sense_table[i][2] /* asc */,
+                                          sense_table[i][3] /* ascq */ );
                        return;
                }
                i++;
@@ -313,15 +314,12 @@ void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat)
        printk(KERN_ERR "ata%u: called with no error (%02X)!\n", qc->ap->id, drv_stat);
        /* additional-sense-code[-qualifier] */
 
-       sb[0] = 0x70;
-       sb[2] = MEDIUM_ERROR;
-       sb[7] = 0x0A;
        if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
-               sb[12] = 0x11; /* "unrecovered read error" */
-               sb[13] = 0x04;
+               ata_scsi_set_sense(cmd, MEDIUM_ERROR, 0x11, 0x4);
+               /* "unrecovered read error" */
        } else {
-               sb[12] = 0x0C; /* "write error -             */
-               sb[13] = 0x02; /*  auto-reallocation failed" */
+               ata_scsi_set_sense(cmd, MEDIUM_ERROR, 0xc, 0x2);
+               /* "write error - auto-reallocation failed" */
        }
 }
 
@@ -420,7 +418,7 @@ int ata_scsi_error(struct Scsi_Host *host)
  */
 
 static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc,
-                                            u8 *scsicmd)
+                                            const u8 *scsicmd)
 {
        struct ata_taskfile *tf = &qc->tf;
 
@@ -430,15 +428,26 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc,
                ;       /* ignore IMMED bit, violates sat-r05 */
        }
        if (scsicmd[4] & 0x2)
-               return 1;       /* LOEJ bit set not supported */
+               goto invalid_fld;       /* LOEJ bit set not supported */
        if (((scsicmd[4] >> 4) & 0xf) != 0)
-               return 1;       /* power conditions not supported */
+               goto invalid_fld;       /* power conditions not supported */
        if (scsicmd[4] & 0x1) {
                tf->nsect = 1;  /* 1 sector, lba=0 */
-               tf->lbah = 0x0;
-               tf->lbam = 0x0;
-               tf->lbal = 0x0;
-               tf->device |= ATA_LBA;
+
+               if (qc->dev->flags & ATA_DFLAG_LBA) {
+                       qc->tf.flags |= ATA_TFLAG_LBA;
+
+                       tf->lbah = 0x0;
+                       tf->lbam = 0x0;
+                       tf->lbal = 0x0;
+                       tf->device |= ATA_LBA;
+               } else {
+                       /* CHS */
+                       tf->lbal = 0x1; /* sect */
+                       tf->lbam = 0x0; /* cyl low */
+                       tf->lbah = 0x0; /* cyl high */
+               }
+
                tf->command = ATA_CMD_VERIFY;   /* READ VERIFY */
        } else {
                tf->nsect = 0;  /* time period value (0 implies now) */
@@ -453,6 +462,11 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc,
         */
 
        return 0;
+
+invalid_fld:
+       ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
+       /* "Invalid field in cbd" */
+       return 1;
 }
 
 
@@ -471,14 +485,14 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc,
  *     Zero on success, non-zero on error.
  */
 
-static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
+static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
 {
        struct ata_taskfile *tf = &qc->tf;
 
        tf->flags |= ATA_TFLAG_DEVICE;
        tf->protocol = ATA_PROT_NODATA;
 
-       if ((tf->flags & ATA_TFLAG_LBA48) &&
+       if ((qc->dev->flags & ATA_DFLAG_LBA48) &&
            (ata_id_has_flush_ext(qc->dev->id)))
                tf->command = ATA_CMD_FLUSH_EXT;
        else
@@ -487,6 +501,99 @@ static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
        return 0;
 }
 
+/**
+ *     scsi_6_lba_len - Get LBA and transfer length
+ *     @scsicmd: SCSI command to translate
+ *
+ *     Calculate LBA and transfer length for 6-byte commands.
+ *
+ *     RETURNS:
+ *     @plba: the LBA
+ *     @plen: the transfer length
+ */
+
+static void scsi_6_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen)
+{
+       u64 lba = 0;
+       u32 len = 0;
+
+       VPRINTK("six-byte command\n");
+
+       lba |= ((u64)scsicmd[2]) << 8;
+       lba |= ((u64)scsicmd[3]);
+
+       len |= ((u32)scsicmd[4]);
+
+       *plba = lba;
+       *plen = len;
+}
+
+/**
+ *     scsi_10_lba_len - Get LBA and transfer length
+ *     @scsicmd: SCSI command to translate
+ *
+ *     Calculate LBA and transfer length for 10-byte commands.
+ *
+ *     RETURNS:
+ *     @plba: the LBA
+ *     @plen: the transfer length
+ */
+
+static void scsi_10_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen)
+{
+       u64 lba = 0;
+       u32 len = 0;
+
+       VPRINTK("ten-byte command\n");
+
+       lba |= ((u64)scsicmd[2]) << 24;
+       lba |= ((u64)scsicmd[3]) << 16;
+       lba |= ((u64)scsicmd[4]) << 8;
+       lba |= ((u64)scsicmd[5]);
+
+       len |= ((u32)scsicmd[7]) << 8;
+       len |= ((u32)scsicmd[8]);
+
+       *plba = lba;
+       *plen = len;
+}
+
+/**
+ *     scsi_16_lba_len - Get LBA and transfer length
+ *     @scsicmd: SCSI command to translate
+ *
+ *     Calculate LBA and transfer length for 16-byte commands.
+ *
+ *     RETURNS:
+ *     @plba: the LBA
+ *     @plen: the transfer length
+ */
+
+static void scsi_16_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen)
+{
+       u64 lba = 0;
+       u32 len = 0;
+
+       VPRINTK("sixteen-byte command\n");
+
+       lba |= ((u64)scsicmd[2]) << 56;
+       lba |= ((u64)scsicmd[3]) << 48;
+       lba |= ((u64)scsicmd[4]) << 40;
+       lba |= ((u64)scsicmd[5]) << 32;
+       lba |= ((u64)scsicmd[6]) << 24;
+       lba |= ((u64)scsicmd[7]) << 16;
+       lba |= ((u64)scsicmd[8]) << 8;
+       lba |= ((u64)scsicmd[9]);
+
+       len |= ((u32)scsicmd[10]) << 24;
+       len |= ((u32)scsicmd[11]) << 16;
+       len |= ((u32)scsicmd[12]) << 8;
+       len |= ((u32)scsicmd[13]);
+
+       *plba = lba;
+       *plen = len;
+}
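
The three decoders above differ only in which CDB bytes they assemble. A self-contained check of the 10-byte case, using a sample READ(10) CDB (opcode 0x28, LBA 0x12345678, 16 blocks); the sample values are arbitrary:

#include <stdio.h>

typedef unsigned char u8;
typedef unsigned int u32;
typedef unsigned long long u64;

int main(void)
{
        const u8 cdb[10] = { 0x28, 0, 0x12, 0x34, 0x56, 0x78, 0, 0x00, 0x10, 0 };

        /* same bit assembly as scsi_10_lba_len() above */
        u64 lba = ((u64)cdb[2] << 24) | ((u64)cdb[3] << 16) |
                  ((u64)cdb[4] << 8)  |  (u64)cdb[5];
        u32 len = ((u32)cdb[7] << 8) | (u32)cdb[8];

        printf("lba=0x%llx len=%u\n", lba, len);   /* prints lba=0x12345678 len=16 */
        return 0;
}
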
+
 /**
  *     ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one
  *     @qc: Storage for translated ATA taskfile
@@ -501,82 +608,110 @@ static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
  *     Zero on success, non-zero on error.
  */
 
-static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
+static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
 {
        struct ata_taskfile *tf = &qc->tf;
-       unsigned int lba48 = tf->flags & ATA_TFLAG_LBA48;
+       struct ata_device *dev = qc->dev;
        u64 dev_sectors = qc->dev->n_sectors;
-       u64 sect = 0;
-       u32 n_sect = 0;
+       u64 block;
+       u32 n_block;
 
        tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
        tf->protocol = ATA_PROT_NODATA;
-       tf->device |= ATA_LBA;
 
-       if (scsicmd[0] == VERIFY) {
-               sect |= ((u64)scsicmd[2]) << 24;
-               sect |= ((u64)scsicmd[3]) << 16;
-               sect |= ((u64)scsicmd[4]) << 8;
-               sect |= ((u64)scsicmd[5]);
+       if (scsicmd[0] == VERIFY)
+               scsi_10_lba_len(scsicmd, &block, &n_block);
+       else if (scsicmd[0] == VERIFY_16)
+               scsi_16_lba_len(scsicmd, &block, &n_block);
+       else
+               goto invalid_fld;
 
-               n_sect |= ((u32)scsicmd[7]) << 8;
-               n_sect |= ((u32)scsicmd[8]);
-       }
+       if (!n_block)
+               goto nothing_to_do;
+       if (block >= dev_sectors)
+               goto out_of_range;
+       if ((block + n_block) > dev_sectors)
+               goto out_of_range;
 
-       else if (scsicmd[0] == VERIFY_16) {
-               sect |= ((u64)scsicmd[2]) << 56;
-               sect |= ((u64)scsicmd[3]) << 48;
-               sect |= ((u64)scsicmd[4]) << 40;
-               sect |= ((u64)scsicmd[5]) << 32;
-               sect |= ((u64)scsicmd[6]) << 24;
-               sect |= ((u64)scsicmd[7]) << 16;
-               sect |= ((u64)scsicmd[8]) << 8;
-               sect |= ((u64)scsicmd[9]);
-
-               n_sect |= ((u32)scsicmd[10]) << 24;
-               n_sect |= ((u32)scsicmd[11]) << 16;
-               n_sect |= ((u32)scsicmd[12]) << 8;
-               n_sect |= ((u32)scsicmd[13]);
-       }
+       if (dev->flags & ATA_DFLAG_LBA) {
+               tf->flags |= ATA_TFLAG_LBA;
 
-       else
-               return 1;
+               if (dev->flags & ATA_DFLAG_LBA48) {
+                       if (n_block > (64 * 1024))
+                               goto invalid_fld;
 
-       if (!n_sect)
-               return 1;
-       if (sect >= dev_sectors)
-               return 1;
-       if ((sect + n_sect) > dev_sectors)
-               return 1;
-       if (lba48) {
-               if (n_sect > (64 * 1024))
-                       return 1;
-       } else {
-               if (n_sect > 256)
-                       return 1;
-       }
+                       /* use LBA48 */
+                       tf->flags |= ATA_TFLAG_LBA48;
+                       tf->command = ATA_CMD_VERIFY_EXT;
 
-       if (lba48) {
-               tf->command = ATA_CMD_VERIFY_EXT;
+                       tf->hob_nsect = (n_block >> 8) & 0xff;
 
-               tf->hob_nsect = (n_sect >> 8) & 0xff;
+                       tf->hob_lbah = (block >> 40) & 0xff;
+                       tf->hob_lbam = (block >> 32) & 0xff;
+                       tf->hob_lbal = (block >> 24) & 0xff;
+               } else {
+                       if (n_block > 256)
+                               goto invalid_fld;
 
-               tf->hob_lbah = (sect >> 40) & 0xff;
-               tf->hob_lbam = (sect >> 32) & 0xff;
-               tf->hob_lbal = (sect >> 24) & 0xff;
+                       /* use LBA28 */
+                       tf->command = ATA_CMD_VERIFY;
+
+                       tf->device |= (block >> 24) & 0xf;
+               }
+
+               tf->nsect = n_block & 0xff;
+
+               tf->lbah = (block >> 16) & 0xff;
+               tf->lbam = (block >> 8) & 0xff;
+               tf->lbal = block & 0xff;
+
+               tf->device |= ATA_LBA;
        } else {
+               /* CHS */
+               u32 sect, head, cyl, track;
+
+               if (n_block > 256)
+                       goto invalid_fld;
+
+               /* Convert LBA to CHS */
+               track = (u32)block / dev->sectors;
+               cyl   = track / dev->heads;
+               head  = track % dev->heads;
+               sect  = (u32)block % dev->sectors + 1;
+
+               DPRINTK("block %u track %u cyl %u head %u sect %u\n",
+                       (u32)block, track, cyl, head, sect);
+               
+               /* Check whether the converted CHS can fit:
+                  Cylinder: 0-65535
+                  Head:     0-15
+                  Sector:   1-255 */
+               if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect)) 
+                       goto out_of_range;
+               
                tf->command = ATA_CMD_VERIFY;
-
-               tf->device |= (sect >> 24) & 0xf;
+               tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
+               tf->lbal = sect;
+               tf->lbam = cyl;
+               tf->lbah = cyl >> 8;
+               tf->device |= head;
        }
 
-       tf->nsect = n_sect & 0xff;
+       return 0;
+
+invalid_fld:
+       ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
+       /* "Invalid field in cbd" */
+       return 1;
 
-       tf->lbah = (sect >> 16) & 0xff;
-       tf->lbam = (sect >> 8) & 0xff;
-       tf->lbal = sect & 0xff;
+out_of_range:
+       ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x21, 0x0);
+       /* "Logical Block Address out of range" */
+       return 1;
 
-       return 0;
+nothing_to_do:
+       qc->scsicmd->result = SAM_STAT_GOOD;
+       return 1;
 }
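
The LBA-to-CHS conversion used in the CHS branch above is plain integer arithmetic. A worked example with a hypothetical 16-head, 63-sectors-per-track geometry and block 5000:

#include <stdio.h>

int main(void)
{
        unsigned int block = 5000, heads = 16, sectors = 63;  /* hypothetical geometry */

        unsigned int track = block / sectors;        /* 79 */
        unsigned int cyl   = track / heads;          /*  4 */
        unsigned int head  = track % heads;          /* 15 */
        unsigned int sect  = block % sectors + 1;    /* 24 (sectors are 1-based) */

        printf("cyl=%u head=%u sect=%u\n", cyl, head, sect);
        return 0;
}
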
 
 /**
@@ -599,106 +734,137 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
  *     Zero on success, non-zero on error.
  */
 
-static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
+static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
 {
        struct ata_taskfile *tf = &qc->tf;
-       unsigned int lba48 = tf->flags & ATA_TFLAG_LBA48;
+       struct ata_device *dev = qc->dev;
+       u64 block;
+       u32 n_block;
 
        tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
-       tf->protocol = qc->dev->xfer_protocol;
-       tf->device |= ATA_LBA;
 
-       if (scsicmd[0] == READ_10 || scsicmd[0] == READ_6 ||
-           scsicmd[0] == READ_16) {
-               tf->command = qc->dev->read_cmd;
-       } else {
-               tf->command = qc->dev->write_cmd;
+       if (scsicmd[0] == WRITE_10 || scsicmd[0] == WRITE_6 ||
+           scsicmd[0] == WRITE_16)
                tf->flags |= ATA_TFLAG_WRITE;
-       }
 
-       if (scsicmd[0] == READ_10 || scsicmd[0] == WRITE_10) {
-               if (lba48) {
-                       tf->hob_nsect = scsicmd[7];
-                       tf->hob_lbal = scsicmd[2];
+       /* Calculate the SCSI LBA and transfer length. */
+       switch (scsicmd[0]) {
+       case READ_10:
+       case WRITE_10:
+               scsi_10_lba_len(scsicmd, &block, &n_block);
+               break;
+       case READ_6:
+       case WRITE_6:
+               scsi_6_lba_len(scsicmd, &block, &n_block);
 
-                       qc->nsect = ((unsigned int)scsicmd[7] << 8) |
-                                       scsicmd[8];
-               } else {
-                       /* if we don't support LBA48 addressing, the request
-                        * -may- be too large. */
-                       if ((scsicmd[2] & 0xf0) || scsicmd[7])
-                               return 1;
+               /* for 6-byte r/w commands, transfer length 0
+                * means 256 blocks of data, not 0 block.
+                */
+               if (!n_block)
+                       n_block = 256;
+               break;
+       case READ_16:
+       case WRITE_16:
+               scsi_16_lba_len(scsicmd, &block, &n_block);
+               break;
+       default:
+               DPRINTK("no-byte command\n");
+               goto invalid_fld;
+       }
 
-                       /* stores LBA27:24 in lower 4 bits of device reg */
-                       tf->device |= scsicmd[2];
+       /* Check and compose ATA command */
+       if (!n_block)
+               /* For 10-byte and 16-byte SCSI R/W commands, transfer
+                * length 0 means transfer 0 block of data.
+                * However, for ATA R/W commands, sector count 0 means
+                * 256 or 65536 sectors, not 0 sectors as in SCSI.
+                */
+               goto nothing_to_do;
 
-                       qc->nsect = scsicmd[8];
-               }
+       if (dev->flags & ATA_DFLAG_LBA) {
+               tf->flags |= ATA_TFLAG_LBA;
 
-               tf->nsect = scsicmd[8];
-               tf->lbal = scsicmd[5];
-               tf->lbam = scsicmd[4];
-               tf->lbah = scsicmd[3];
+               if (dev->flags & ATA_DFLAG_LBA48) {
+                       /* The request -may- be too large for LBA48. */
+                       if ((block >> 48) || (n_block > 65536))
+                               goto out_of_range;
 
-               VPRINTK("ten-byte command\n");
-               if (qc->nsect == 0) /* we don't support length==0 cmds */
-                       return 1;
-               return 0;
-       }
+                       /* use LBA48 */
+                       tf->flags |= ATA_TFLAG_LBA48;
 
-       if (scsicmd[0] == READ_6 || scsicmd[0] == WRITE_6) {
-               qc->nsect = tf->nsect = scsicmd[4];
-               if (!qc->nsect) {
-                       qc->nsect = 256;
-                       if (lba48)
-                               tf->hob_nsect = 1;
-               }
+                       tf->hob_nsect = (n_block >> 8) & 0xff;
 
-               tf->lbal = scsicmd[3];
-               tf->lbam = scsicmd[2];
-               tf->lbah = scsicmd[1] & 0x1f; /* mask out reserved bits */
+                       tf->hob_lbah = (block >> 40) & 0xff;
+                       tf->hob_lbam = (block >> 32) & 0xff;
+                       tf->hob_lbal = (block >> 24) & 0xff;
+               } else { 
+                       /* use LBA28 */
 
-               VPRINTK("six-byte command\n");
-               return 0;
-       }
+                       /* The request -may- be too large for LBA28. */
+                       if ((block >> 28) || (n_block > 256))
+                               goto out_of_range;
 
-       if (scsicmd[0] == READ_16 || scsicmd[0] == WRITE_16) {
-               /* rule out impossible LBAs and sector counts */
-               if (scsicmd[2] || scsicmd[3] || scsicmd[10] || scsicmd[11])
-                       return 1;
+                       tf->device |= (block >> 24) & 0xf;
+               }
 
-               if (lba48) {
-                       tf->hob_nsect = scsicmd[12];
-                       tf->hob_lbal = scsicmd[6];
-                       tf->hob_lbam = scsicmd[5];
-                       tf->hob_lbah = scsicmd[4];
+               ata_rwcmd_protocol(qc);
 
-                       qc->nsect = ((unsigned int)scsicmd[12] << 8) |
-                                       scsicmd[13];
-               } else {
-                       /* once again, filter out impossible non-zero values */
-                       if (scsicmd[4] || scsicmd[5] || scsicmd[12] ||
-                           (scsicmd[6] & 0xf0))
-                               return 1;
+               qc->nsect = n_block;
+               tf->nsect = n_block & 0xff;
 
-                       /* stores LBA27:24 in lower 4 bits of device reg */
-                       tf->device |= scsicmd[6];
+               tf->lbah = (block >> 16) & 0xff;
+               tf->lbam = (block >> 8) & 0xff;
+               tf->lbal = block & 0xff;
 
-                       qc->nsect = scsicmd[13];
-               }
+               tf->device |= ATA_LBA;
+       } else { 
+               /* CHS */
+               u32 sect, head, cyl, track;
+
+               /* The request -may- be too large for CHS addressing. */
+               if ((block >> 28) || (n_block > 256))
+                       goto out_of_range;
+
+               ata_rwcmd_protocol(qc);
+
+               /* Convert LBA to CHS */
+               track = (u32)block / dev->sectors;
+               cyl   = track / dev->heads;
+               head  = track % dev->heads;
+               sect  = (u32)block % dev->sectors + 1;
+
+               DPRINTK("block %u track %u cyl %u head %u sect %u\n",
+                       (u32)block, track, cyl, head, sect);
+
+               /* Check whether the converted CHS can fit:
+                  Cylinder: 0-65535
+                  Head:     0-15
+                  Sector:   1-255 */
+               if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
+                       goto out_of_range;
+
+               qc->nsect = n_block;
+               tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
+               tf->lbal = sect;
+               tf->lbam = cyl;
+               tf->lbah = cyl >> 8;
+               tf->device |= head;
+       }
 
-               tf->nsect = scsicmd[13];
-               tf->lbal = scsicmd[9];
-               tf->lbam = scsicmd[8];
-               tf->lbah = scsicmd[7];
+       return 0;
 
-               VPRINTK("sixteen-byte command\n");
-               if (qc->nsect == 0) /* we don't support length==0 cmds */
-                       return 1;
-               return 0;
-       }
+invalid_fld:
+       ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
+       /* "Invalid field in cbd" */
+       return 1;
+
+out_of_range:
+       ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x21, 0x0);
+       /* "Logical Block Address out of range" */
+       return 1;
 
-       DPRINTK("no-byte command\n");
+nothing_to_do:
+       qc->scsicmd->result = SAM_STAT_GOOD;
        return 1;
 }
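
The CHS branch above splits the logical block address into cylinder/head/sector
using the drive's reported geometry.  A small standalone sketch of the same
arithmetic, assuming a geometry of 16 heads and 63 sectors per track (values
chosen for illustration only, not taken from this patch):

#include <stdio.h>

/* Hypothetical geometry: 16 heads, 63 sectors per track. */
int main(void)
{
	unsigned int block = 10000, sectors = 63, heads = 16;
	unsigned int track = block / sectors;      /* 158 */
	unsigned int cyl   = track / heads;        /* 9 */
	unsigned int head  = track % heads;        /* 14 */
	unsigned int sect  = block % sectors + 1;  /* 47; CHS sectors are 1-based */

	/* The reverse mapping recovers the original LBA. */
	printf("LBA %u -> C/H/S %u/%u/%u (check: %u)\n", block, cyl, head, sect,
	       (cyl * heads + head) * sectors + (sect - 1));
	return 0;
}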
 
@@ -731,6 +897,12 @@ static int ata_scsi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
  *     This function sets up an ata_queued_cmd structure for the
  *     SCSI command, and sends that ata_queued_cmd to the hardware.
  *
+ *     The xlat_func argument (actor) returns 0 if it is ready to
+ *     execute the ATA command, or 1 to finish translation early.
+ *     If 1 is returned, cmd->result (and possibly cmd->sense_buffer)
+ *     is assumed to have been set to reflect an error condition or
+ *     clean (early) termination.
+ *
  *     LOCKING:
  *     spin_lock_irqsave(host_set lock)
  */
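
As a reading aid, a minimal, hypothetical actor that follows the contract
described above (it is not part of this patch; it reuses ata_scsi_set_sense(),
which this changeset introduces, and the same signature as atapi_xlat() below):

/* Sketch only: reject the command by setting sense data and returning 1
 * (early finish), or return 0 once qc->tf has been filled in and the
 * command is ready for ata_qc_issue().
 */
static unsigned int example_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
{
	if (scsicmd[1] & 0xe0) {	/* arbitrary illustrative check */
		ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
		return 1;		/* cmd->result and sense are already set */
	}

	/* ... build the taskfile in qc->tf here ... */
	return 0;			/* ready to execute the ATA command */
}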
@@ -747,7 +919,7 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
 
        qc = ata_scsi_qc_new(ap, dev, cmd, done);
        if (!qc)
-               return;
+               goto err_mem;
 
        /* data is present; dma-map it */
        if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
@@ -755,7 +927,7 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
                if (unlikely(cmd->request_bufflen < 1)) {
                        printk(KERN_WARNING "ata%u(%u): WARNING: zero len r/w req\n",
                               ap->id, dev->devno);
-                       goto err_out;
+                       goto err_did;
                }
 
                if (cmd->use_sg)
@@ -770,19 +942,28 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
        qc->complete_fn = ata_scsi_qc_complete;
 
        if (xlat_func(qc, scsicmd))
-               goto err_out;
+               goto early_finish;
 
        /* select device, send command to hardware */
        if (ata_qc_issue(qc))
-               goto err_out;
+               goto err_did;
 
        VPRINTK("EXIT\n");
        return;
 
-err_out:
+early_finish:
+       ata_qc_free(qc);
+       done(cmd);
+       DPRINTK("EXIT - early finish (good or error)\n");
+       return;
+
+err_did:
        ata_qc_free(qc);
-       ata_bad_cdb(cmd, done);
-       DPRINTK("EXIT - badcmd\n");
+err_mem:
+       cmd->result = (DID_ERROR << 16);
+       done(cmd);
+       DPRINTK("EXIT - internal\n");
+       return;
 }
 
 /**
@@ -849,7 +1030,8 @@ static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf)
  *     Mapping the response buffer, calling the command's handler,
  *     and handling the handler's return value.  This return value
  *     indicates whether the handler wishes the SCSI command to be
- *     completed successfully, or not.
+ *     completed successfully (0), or not (in which case cmd->result
+ *     and sense buffer are assumed to be set).
  *
  *     LOCKING:
  *     spin_lock_irqsave(host_set lock)
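
The simulated-command path below uses a different callback shape: an actor is
handed a mapped response buffer.  A hypothetical minimal actor matching the
convention documented above (return 0 and let ata_scsi_rbuf_fill() report
SAM_STAT_GOOD, or set sense data and return non-zero):

/* Illustrative sketch, not part of this patch. */
static unsigned int example_simulate(struct ata_scsi_args *args, u8 *rbuf,
				     unsigned int buflen)
{
	if (buflen < 4) {
		ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x24, 0x0);
		return 1;	/* cmd->result and sense buffer are set */
	}

	rbuf[0] = 0;		/* ... fill in the simulated response ... */
	return 0;		/* caller reports SAM_STAT_GOOD */
}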
@@ -868,12 +1050,9 @@ void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
        rc = actor(args, rbuf, buflen);
        ata_scsi_rbuf_put(cmd, rbuf);
 
-       if (rc)
-               ata_bad_cdb(cmd, args->done);
-       else {
+       if (rc == 0)
                cmd->result = SAM_STAT_GOOD;
-               args->done(cmd);
-       }
+       args->done(cmd);
 }
 
 /**
@@ -1179,8 +1358,16 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
         * in the same manner)
         */
        page_control = scsicmd[2] >> 6;
-       if ((page_control != 0) && (page_control != 3))
-               return 1;
+       switch (page_control) {
+       case 0: /* current */
+               break;  /* supported */
+       case 3: /* saved */
+               goto saving_not_supp;
+       case 1: /* changeable */
+       case 2: /* defaults */
+       default:
+               goto invalid_fld;
+       }
 
        if (six_byte)
                output_len = 4;
@@ -1211,7 +1398,7 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
                break;
 
        default:                /* invalid page code */
-               return 1;
+               goto invalid_fld;
        }
 
        if (six_byte) {
@@ -1224,6 +1411,16 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
        }
 
        return 0;
+
+invalid_fld:
+       ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x24, 0x0);
+       /* "Invalid field in cdb" */
+       return 1;
+
+saving_not_supp:
+       ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x39, 0x0);
+        /* "Saving parameters not supported" */
+       return 1;
 }
 
 /**
@@ -1246,10 +1443,20 @@ unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
 
        VPRINTK("ENTER\n");
 
-       if (ata_id_has_lba48(args->id))
-               n_sectors = ata_id_u64(args->id, 100);
-       else
-               n_sectors = ata_id_u32(args->id, 60);
+       if (ata_id_has_lba(args->id)) {
+               if (ata_id_has_lba48(args->id))
+                       n_sectors = ata_id_u64(args->id, 100);
+               else
+                       n_sectors = ata_id_u32(args->id, 60);
+       } else {
+               /* CHS default translation */
+               n_sectors = args->id[1] * args->id[3] * args->id[6];
+
+               if (ata_id_current_chs_valid(args->id))
+                       /* CHS current translation */
+                       n_sectors = ata_id_u32(args->id, 57);
+       }
+
        n_sectors--;            /* ATA TotalUserSectors - 1 */
 
        if (args->cmd->cmnd[0] == READ_CAPACITY) {
@@ -1312,6 +1519,34 @@ unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
        return 0;
 }
 
+/**
+ *     ata_scsi_set_sense - Set SCSI sense data and status
+ *     @cmd: SCSI request to be handled
+ *     @sk: SCSI-defined sense key
+ *     @asc: SCSI-defined additional sense code
+ *     @ascq: SCSI-defined additional sense code qualifier
+ *
+ *     Helper function that builds a valid, fixed-format sense block
+ *     (current response code) with the given sense key (sk), additional
+ *     sense code (asc) and additional sense code qualifier (ascq), and
+ *     sets the SCSI command status to %SAM_STAT_CHECK_CONDITION with
+ *     DRIVER_SENSE set in the upper bits of scsi_cmnd::result.
+ *
+ *     LOCKING:
+ *     Not required
+ */
+
+void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
+{
+       cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+
+       cmd->sense_buffer[0] = 0x70;    /* fixed format, current */
+       cmd->sense_buffer[2] = sk;
+       cmd->sense_buffer[7] = 18 - 8;  /* additional sense length */
+       cmd->sense_buffer[12] = asc;
+       cmd->sense_buffer[13] = ascq;
+}
+
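
For reference, a worked example (not part of the patch itself) of what this
helper produces for the "Invalid field in CDB" case used elsewhere in this
change:

	ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0);

	/* resulting fixed-format sense data:
	 *   sense_buffer[0]  = 0x70   current, fixed format
	 *   sense_buffer[2]  = 0x05   sense key: ILLEGAL REQUEST
	 *   sense_buffer[7]  = 0x0a   additional sense length (18 - 8)
	 *   sense_buffer[12] = 0x24   ASC: INVALID FIELD IN CDB
	 *   sense_buffer[13] = 0x00   ASCQ
	 * and cmd->result carries SAM_STAT_CHECK_CONDITION with DRIVER_SENSE
	 * in its upper bits.
	 */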
 /**
  *     ata_scsi_badcmd - End a SCSI request with an error
  *     @cmd: SCSI request to be handled
@@ -1330,30 +1565,84 @@ unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
 void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8 asc, u8 ascq)
 {
        DPRINTK("ENTER\n");
-       cmd->result = SAM_STAT_CHECK_CONDITION;
-
-       cmd->sense_buffer[0] = 0x70;
-       cmd->sense_buffer[2] = ILLEGAL_REQUEST;
-       cmd->sense_buffer[7] = 14 - 8;  /* addnl. sense len. FIXME: correct? */
-       cmd->sense_buffer[12] = asc;
-       cmd->sense_buffer[13] = ascq;
+       ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, asc, ascq);
 
        done(cmd);
 }
 
+void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
+                        struct scsi_cmnd *cmd)
+{
+       DECLARE_COMPLETION(wait);
+       struct ata_queued_cmd *qc;
+       unsigned long flags;
+       int rc;
+
+       DPRINTK("ATAPI request sense\n");
+
+       qc = ata_qc_new_init(ap, dev);
+       BUG_ON(qc == NULL);
+
+       /* FIXME: is this needed? */
+       memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
+
+       ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
+       qc->dma_dir = DMA_FROM_DEVICE;
+
+       memset(&qc->cdb, 0, ap->cdb_len);
+       qc->cdb[0] = REQUEST_SENSE;
+       qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
+
+       qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+       qc->tf.command = ATA_CMD_PACKET;
+
+       qc->tf.protocol = ATA_PROT_ATAPI;
+       qc->tf.lbam = (8 * 1024) & 0xff;
+       qc->tf.lbah = (8 * 1024) >> 8;
+       qc->nbytes = SCSI_SENSE_BUFFERSIZE;
+
+       qc->waiting = &wait;
+       qc->complete_fn = ata_qc_complete_noop;
+
+       spin_lock_irqsave(&ap->host_set->lock, flags);
+       rc = ata_qc_issue(qc);
+       spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+       if (rc)
+               ata_port_disable(ap);
+       else
+               wait_for_completion(&wait);
+
+       DPRINTK("EXIT\n");
+}
+
 static int atapi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
 {
        struct scsi_cmnd *cmd = qc->scsicmd;
 
-       if (unlikely(drv_stat & (ATA_ERR | ATA_BUSY | ATA_DRQ))) {
+       VPRINTK("ENTER, drv_stat == 0x%x\n", drv_stat);
+
+       if (unlikely(drv_stat & (ATA_BUSY | ATA_DRQ)))
+               ata_to_sense_error(qc, drv_stat);
+
+       else if (unlikely(drv_stat & ATA_ERR)) {
                DPRINTK("request check condition\n");
 
+               /* FIXME: command completion with check condition
+                * but no sense causes the error handler to run,
+                * which then issues REQUEST SENSE, fills in the sense 
+                * buffer, and completes the command (for the second
+                * time).  We need to issue REQUEST SENSE some other
+                * way, to avoid completing the command twice.
+                */
                cmd->result = SAM_STAT_CHECK_CONDITION;
 
                qc->scsidone(cmd);
 
                return 1;
-       } else {
+       }
+
+       else {
                u8 *scsicmd = cmd->cmnd;
 
                if (scsicmd[0] == INQUIRY) {
@@ -1361,15 +1650,30 @@ static int atapi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
                        unsigned int buflen;
 
                        buflen = ata_scsi_rbuf_get(cmd, &buf);
-                       buf[2] = 0x5;
-                       buf[3] = (buf[3] & 0xf0) | 2;
+
+       /* ATAPI devices typically report zero for their SCSI version,
+        * and sometimes deviate from the spec WRT response data
+        * format.  If SCSI version is reported as zero like normal,
+        * then we make the following fixups:  1) Fake MMC-5 version,
+        * to indicate to the Linux scsi midlayer this is a modern
+        * device.  2) Ensure response data format / ATAPI information
+        * are always correct.
+        */
+       /* FIXME: do we ever override EVPD pages and the like, with
+        * this code?
+        */
+                       if (buf[2] == 0) {
+                               buf[2] = 0x5;
+                               buf[3] = 0x32;
+                       }
+
                        ata_scsi_rbuf_put(cmd, buf);
                }
+
                cmd->result = SAM_STAT_GOOD;
        }
 
        qc->scsidone(cmd);
-
        return 0;
 }
 /**
@@ -1384,7 +1688,7 @@ static int atapi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
  *     Zero on success, non-zero on failure.
  */
 
-static unsigned int atapi_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
+static unsigned int atapi_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
 {
        struct scsi_cmnd *cmd = qc->scsicmd;
        struct ata_device *dev = qc->dev;
@@ -1453,7 +1757,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
  */
 
 static struct ata_device *
-ata_scsi_find_dev(struct ata_port *ap, struct scsi_device *scsidev)
+ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev)
 {
        struct ata_device *dev;
 
@@ -1610,7 +1914,7 @@ void ata_scsi_simulate(u16 *id,
                      void (*done)(struct scsi_cmnd *))
 {
        struct ata_scsi_args args;
-       u8 *scsicmd = cmd->cmnd;
+       const u8 *scsicmd = cmd->cmnd;
 
        args.id = id;
        args.cmd = cmd;
@@ -1630,7 +1934,7 @@ void ata_scsi_simulate(u16 *id,
 
                case INQUIRY:
                        if (scsicmd[1] & 2)                /* is CmdDt set?  */
-                               ata_bad_cdb(cmd, done);
+                               ata_scsi_invalid_field(cmd, done);
                        else if ((scsicmd[1] & 1) == 0)    /* is EVPD clear? */
                                ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
                        else if (scsicmd[2] == 0x00)
@@ -1640,7 +1944,7 @@ void ata_scsi_simulate(u16 *id,
                        else if (scsicmd[2] == 0x83)
                                ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83);
                        else
-                               ata_bad_cdb(cmd, done);
+                               ata_scsi_invalid_field(cmd, done);
                        break;
 
                case MODE_SENSE:
@@ -1650,7 +1954,7 @@ void ata_scsi_simulate(u16 *id,
 
                case MODE_SELECT:       /* unconditionally return */
                case MODE_SELECT_10:    /* bad-field-in-cdb */
-                       ata_bad_cdb(cmd, done);
+                       ata_scsi_invalid_field(cmd, done);
                        break;
 
                case READ_CAPACITY:
@@ -1661,7 +1965,7 @@ void ata_scsi_simulate(u16 *id,
                        if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
                                ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
                        else
-                               ata_bad_cdb(cmd, done);
+                               ata_scsi_invalid_field(cmd, done);
                        break;
 
                case REPORT_LUNS:
@@ -1673,8 +1977,26 @@ void ata_scsi_simulate(u16 *id,
 
                /* all other commands */
                default:
-                       ata_bad_scsiop(cmd, done);
+                       ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0);
+                       /* "Invalid command operation code" */
+                       done(cmd);
                        break;
        }
 }
 
+void ata_scsi_scan_host(struct ata_port *ap)
+{
+       struct ata_device *dev;
+       unsigned int i;
+
+       if (ap->flags & ATA_FLAG_PORT_DISABLED)
+               return;
+
+       for (i = 0; i < ATA_MAX_DEVICES; i++) {
+               dev = &ap->device[i];
+
+               if (ata_dev_present(dev))
+                       scsi_scan_target(&ap->host->shost_gendev, 0, i, 0, 0);
+       }
+}
+
index d608b3a0f6fe6897d108cd8f6557c8e4d53c1820..3d60190584ba00355b1de133f7d322d49c27c0af 100644 (file)
@@ -39,18 +39,23 @@ struct ata_scsi_args {
 
 /* libata-core.c */
 extern int atapi_enabled;
+extern int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat);
 extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
                                      struct ata_device *dev);
+extern void ata_rwcmd_protocol(struct ata_queued_cmd *qc);
 extern void ata_qc_free(struct ata_queued_cmd *qc);
 extern int ata_qc_issue(struct ata_queued_cmd *qc);
 extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
 extern void ata_dev_select(struct ata_port *ap, unsigned int device,
                            unsigned int wait, unsigned int can_sleep);
-extern void ata_tf_to_host_nolock(struct ata_port *ap, struct ata_taskfile *tf);
+extern void ata_tf_to_host_nolock(struct ata_port *ap, const struct ata_taskfile *tf);
 extern void swap_buf_le16(u16 *buf, unsigned int buf_words);
 
 
 /* libata-scsi.c */
+extern void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
+                        struct scsi_cmnd *cmd);
+extern void ata_scsi_scan_host(struct ata_port *ap);
 extern void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat);
 extern int ata_scsi_error(struct Scsi_Host *host);
 extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
@@ -76,18 +81,10 @@ extern unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
 extern void ata_scsi_badcmd(struct scsi_cmnd *cmd,
                            void (*done)(struct scsi_cmnd *),
                            u8 asc, u8 ascq);
+extern void ata_scsi_set_sense(struct scsi_cmnd *cmd,
+                              u8 sk, u8 asc, u8 ascq);
 extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
                         unsigned int (*actor) (struct ata_scsi_args *args,
                                            u8 *rbuf, unsigned int buflen));
 
-static inline void ata_bad_scsiop(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
-{
-       ata_scsi_badcmd(cmd, done, 0x20, 0x00);
-}
-
-static inline void ata_bad_cdb(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
-{
-       ata_scsi_badcmd(cmd, done, 0x24, 0x00);
-}
-
 #endif /* __LIBATA_H__ */
index 0aba13ceaacfe9c2d6d976c882981df19aeb59bb..352df47bcaca1026d7090e3cd5d1b4d305ca3eb5 100644 (file)
@@ -39,7 +39,7 @@
 #define LPFC_MEM_POOL_SIZE      64      /* max elem in non-DMA safety pool */
 
 static void *
-lpfc_pool_kmalloc(unsigned int gfp_flags, void *data)
+lpfc_pool_kmalloc(gfp_t gfp_flags, void *data)
 {
        return kmalloc((unsigned long)data, gfp_flags);
 }
index 3f2f2464fa6351ebbe8938c594e3f3cce3dec9c1..af1133104b3f95ef17986e41fde352761b6fcb9a 100644 (file)
@@ -5146,7 +5146,8 @@ static long osst_compat_ioctl(struct file * file, unsigned int cmd_in, unsigned
 /* Try to allocate a new tape buffer skeleton. Caller must not hold os_scsi_tapes_lock */
 static struct osst_buffer * new_tape_buffer( int from_initialization, int need_dma, int max_sg )
 {
-       int i, priority;
+       int i;
+       gfp_t priority;
        struct osst_buffer *tb;
 
        if (from_initialization)
@@ -5178,7 +5179,8 @@ static struct osst_buffer * new_tape_buffer( int from_initialization, int need_d
 /* Try to allocate a temporary (while a user has the device open) enlarged tape buffer */
 static int enlarge_buffer(struct osst_buffer *STbuffer, int need_dma)
 {
-       int segs, nbr, max_segs, b_size, priority, order, got;
+       int segs, nbr, max_segs, b_size, order, got;
+       gfp_t priority;
 
        if (STbuffer->buffer_size >= OS_FRAME_SIZE)
                return 1;
diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c
new file mode 100644 (file)
index 0000000..9820f27
--- /dev/null
@@ -0,0 +1,739 @@
+/*
+ *  pdc_adma.c - Pacific Digital Corporation ADMA
+ *
+ *  Maintained by:  Mark Lord <mlord@pobox.com>
+ *
+ *  Copyright 2005 Mark Lord
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2, or (at your option)
+ *  any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ *  libata documentation is available via 'make {ps|pdf}docs',
+ *  as Documentation/DocBook/libata.*
+ *
+ *
+ *  Supports ATA disks in single-packet ADMA mode.
+ *  Uses PIO for everything else.
+ *
+ *  TODO:  Use ADMA transfers for ATAPI devices, when possible.
+ *  This requires careful attention to a number of quirks of the chip.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include "scsi.h"
+#include <scsi/scsi_host.h>
+#include <asm/io.h>
+#include <linux/libata.h>
+
+#define DRV_NAME       "pdc_adma"
+#define DRV_VERSION    "0.01"
+
+/* macro to calculate base address for ATA regs */
+#define ADMA_ATA_REGS(base,port_no)    ((base) + ((port_no) * 0x40))
+
+/* macro to calculate base address for ADMA regs */
+#define ADMA_REGS(base,port_no)        ((base) + 0x80 + ((port_no) * 0x20))
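
A quick worked expansion of these two base-address macros, computed directly
from the definitions above (port 1 chosen as the example):

	/* ADMA_ATA_REGS(base, 1) == base + 0x40
	 * ADMA_REGS(base, 1)     == base + 0x80 + 0x20 == base + 0xa0
	 */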
+
+enum {
+       ADMA_PORTS              = 2,
+       ADMA_CPB_BYTES          = 40,
+       ADMA_PRD_BYTES          = LIBATA_MAX_PRD * 16,
+       ADMA_PKT_BYTES          = ADMA_CPB_BYTES + ADMA_PRD_BYTES,
+
+       ADMA_DMA_BOUNDARY       = 0xffffffff,
+
+       /* global register offsets */
+       ADMA_MODE_LOCK          = 0x00c7,
+
+       /* per-channel register offsets */
+       ADMA_CONTROL            = 0x0000, /* ADMA control */
+       ADMA_STATUS             = 0x0002, /* ADMA status */
+       ADMA_CPB_COUNT          = 0x0004, /* CPB count */
+       ADMA_CPB_CURRENT        = 0x000c, /* current CPB address */
+       ADMA_CPB_NEXT           = 0x000c, /* next CPB address */
+       ADMA_CPB_LOOKUP         = 0x0010, /* CPB lookup table */
+       ADMA_FIFO_IN            = 0x0014, /* input FIFO threshold */
+       ADMA_FIFO_OUT           = 0x0016, /* output FIFO threshold */
+
+       /* ADMA_CONTROL register bits */
+       aNIEN                   = (1 << 8), /* irq mask: 1==masked */
+       aGO                     = (1 << 7), /* packet trigger ("Go!") */
+       aRSTADM                 = (1 << 5), /* ADMA logic reset */
+       aRSTA                   = (1 << 2), /* ATA hard reset */
+       aPIOMD4                 = 0x0003,   /* PIO mode 4 */
+
+       /* ADMA_STATUS register bits */
+       aPSD                    = (1 << 6),
+       aUIRQ                   = (1 << 4),
+       aPERR                   = (1 << 0),
+
+       /* CPB bits */
+       cDONE                   = (1 << 0),
+       cVLD                    = (1 << 0),
+       cDAT                    = (1 << 2),
+       cIEN                    = (1 << 3),
+
+       /* PRD bits */
+       pORD                    = (1 << 4),
+       pDIRO                   = (1 << 5),
+       pEND                    = (1 << 7),
+
+       /* ATA register flags */
+       rIGN                    = (1 << 5),
+       rEND                    = (1 << 7),
+
+       /* ATA register addresses */
+       ADMA_REGS_CONTROL       = 0x0e,
+       ADMA_REGS_SECTOR_COUNT  = 0x12,
+       ADMA_REGS_LBA_LOW       = 0x13,
+       ADMA_REGS_LBA_MID       = 0x14,
+       ADMA_REGS_LBA_HIGH      = 0x15,
+       ADMA_REGS_DEVICE        = 0x16,
+       ADMA_REGS_COMMAND       = 0x17,
+
+       /* PCI device IDs */
+       board_1841_idx          = 0,    /* ADMA 2-port controller */
+};
+
+typedef enum { adma_state_idle, adma_state_pkt, adma_state_mmio } adma_state_t;
+
+struct adma_port_priv {
+       u8                      *pkt;
+       dma_addr_t              pkt_dma;
+       adma_state_t            state;
+};
+
+static int adma_ata_init_one (struct pci_dev *pdev,
+                               const struct pci_device_id *ent);
+static irqreturn_t adma_intr (int irq, void *dev_instance,
+                               struct pt_regs *regs);
+static int adma_port_start(struct ata_port *ap);
+static void adma_host_stop(struct ata_host_set *host_set);
+static void adma_port_stop(struct ata_port *ap);
+static void adma_phy_reset(struct ata_port *ap);
+static void adma_qc_prep(struct ata_queued_cmd *qc);
+static int adma_qc_issue(struct ata_queued_cmd *qc);
+static int adma_check_atapi_dma(struct ata_queued_cmd *qc);
+static void adma_bmdma_stop(struct ata_queued_cmd *qc);
+static u8 adma_bmdma_status(struct ata_port *ap);
+static void adma_irq_clear(struct ata_port *ap);
+static void adma_eng_timeout(struct ata_port *ap);
+
+static Scsi_Host_Template adma_ata_sht = {
+       .module                 = THIS_MODULE,
+       .name                   = DRV_NAME,
+       .ioctl                  = ata_scsi_ioctl,
+       .queuecommand           = ata_scsi_queuecmd,
+       .eh_strategy_handler    = ata_scsi_error,
+       .can_queue              = ATA_DEF_QUEUE,
+       .this_id                = ATA_SHT_THIS_ID,
+       .sg_tablesize           = LIBATA_MAX_PRD,
+       .max_sectors            = ATA_MAX_SECTORS,
+       .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
+       .emulated               = ATA_SHT_EMULATED,
+       .use_clustering         = ENABLE_CLUSTERING,
+       .proc_name              = DRV_NAME,
+       .dma_boundary           = ADMA_DMA_BOUNDARY,
+       .slave_configure        = ata_scsi_slave_config,
+       .bios_param             = ata_std_bios_param,
+};
+
+static const struct ata_port_operations adma_ata_ops = {
+       .port_disable           = ata_port_disable,
+       .tf_load                = ata_tf_load,
+       .tf_read                = ata_tf_read,
+       .check_status           = ata_check_status,
+       .check_atapi_dma        = adma_check_atapi_dma,
+       .exec_command           = ata_exec_command,
+       .dev_select             = ata_std_dev_select,
+       .phy_reset              = adma_phy_reset,
+       .qc_prep                = adma_qc_prep,
+       .qc_issue               = adma_qc_issue,
+       .eng_timeout            = adma_eng_timeout,
+       .irq_handler            = adma_intr,
+       .irq_clear              = adma_irq_clear,
+       .port_start             = adma_port_start,
+       .port_stop              = adma_port_stop,
+       .host_stop              = adma_host_stop,
+       .bmdma_stop             = adma_bmdma_stop,
+       .bmdma_status           = adma_bmdma_status,
+};
+
+static struct ata_port_info adma_port_info[] = {
+       /* board_1841_idx */
+       {
+               .sht            = &adma_ata_sht,
+               .host_flags     = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST |
+                                 ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO,
+               .pio_mask       = 0x10, /* pio4 */
+               .udma_mask      = 0x1f, /* udma0-4 */
+               .port_ops       = &adma_ata_ops,
+       },
+};
+
+static struct pci_device_id adma_ata_pci_tbl[] = {
+       { PCI_VENDOR_ID_PDC, 0x1841, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+         board_1841_idx },
+
+       { }     /* terminate list */
+};
+
+static struct pci_driver adma_ata_pci_driver = {
+       .name                   = DRV_NAME,
+       .id_table               = adma_ata_pci_tbl,
+       .probe                  = adma_ata_init_one,
+       .remove                 = ata_pci_remove_one,
+};
+
+static int adma_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+       return 1;       /* ATAPI DMA not yet supported */
+}
+
+static void adma_bmdma_stop(struct ata_queued_cmd *qc)
+{
+       /* nothing */
+}
+
+static u8 adma_bmdma_status(struct ata_port *ap)
+{
+       return 0;
+}
+
+static void adma_irq_clear(struct ata_port *ap)
+{
+       /* nothing */
+}
+
+static void adma_reset_engine(void __iomem *chan)
+{
+       /* reset ADMA to idle state */
+       writew(aPIOMD4 | aNIEN | aRSTADM, chan + ADMA_CONTROL);
+       udelay(2);
+       writew(aPIOMD4, chan + ADMA_CONTROL);
+       udelay(2);
+}
+
+static void adma_reinit_engine(struct ata_port *ap)
+{
+       struct adma_port_priv *pp = ap->private_data;
+       void __iomem *mmio_base = ap->host_set->mmio_base;
+       void __iomem *chan = ADMA_REGS(mmio_base, ap->port_no);
+
+       /* mask/clear ATA interrupts */
+       writeb(ATA_NIEN, (void __iomem *)ap->ioaddr.ctl_addr);
+       ata_check_status(ap);
+
+       /* reset the ADMA engine */
+       adma_reset_engine(chan);
+
+       /* set in-FIFO threshold to 0x100 */
+       writew(0x100, chan + ADMA_FIFO_IN);
+
+       /* set CPB pointer */
+       writel((u32)pp->pkt_dma, chan + ADMA_CPB_NEXT);
+
+       /* set out-FIFO threshold to 0x100 */
+       writew(0x100, chan + ADMA_FIFO_OUT);
+
+       /* set CPB count */
+       writew(1, chan + ADMA_CPB_COUNT);
+
+       /* read/discard ADMA status */
+       readb(chan + ADMA_STATUS);
+}
+
+static inline void adma_enter_reg_mode(struct ata_port *ap)
+{
+       void __iomem *chan = ADMA_REGS(ap->host_set->mmio_base, ap->port_no);
+
+       writew(aPIOMD4, chan + ADMA_CONTROL);
+       readb(chan + ADMA_STATUS);      /* flush */
+}
+
+static void adma_phy_reset(struct ata_port *ap)
+{
+       struct adma_port_priv *pp = ap->private_data;
+
+       pp->state = adma_state_idle;
+       adma_reinit_engine(ap);
+       ata_port_probe(ap);
+       ata_bus_reset(ap);
+}
+
+static void adma_eng_timeout(struct ata_port *ap)
+{
+       struct adma_port_priv *pp = ap->private_data;
+
+       if (pp->state != adma_state_idle) /* healthy paranoia */
+               pp->state = adma_state_mmio;
+       adma_reinit_engine(ap);
+       ata_eng_timeout(ap);
+}
+
+static int adma_fill_sg(struct ata_queued_cmd *qc)
+{
+       struct scatterlist *sg = qc->sg;
+       struct ata_port *ap = qc->ap;
+       struct adma_port_priv *pp = ap->private_data;
+       u8  *buf = pp->pkt;
+       int nelem, i = (2 + buf[3]) * 8;
+       u8 pFLAGS = pORD | ((qc->tf.flags & ATA_TFLAG_WRITE) ? pDIRO : 0);
+
+       for (nelem = 0; nelem < qc->n_elem; nelem++,sg++) {
+               u32 addr;
+               u32 len;
+
+               addr = (u32)sg_dma_address(sg);
+               *(__le32 *)(buf + i) = cpu_to_le32(addr);
+               i += 4;
+
+               len = sg_dma_len(sg) >> 3;
+               *(__le32 *)(buf + i) = cpu_to_le32(len);
+               i += 4;
+
+               if ((nelem + 1) == qc->n_elem)
+                       pFLAGS |= pEND;
+               buf[i++] = pFLAGS;
+               buf[i++] = qc->dev->dma_mode & 0xf;
+               buf[i++] = 0;   /* pPKLW */
+               buf[i++] = 0;   /* reserved */
+
+               *(__le32 *)(buf + i)
+                       = (pFLAGS & pEND) ? 0 : cpu_to_le32(pp->pkt_dma + i + 4);
+               i += 4;
+
+               VPRINTK("PRD[%u] = (0x%lX, 0x%X)\n", nelem,
+                                       (unsigned long)addr, len);
+       }
+       return i;
+}
+
+static void adma_qc_prep(struct ata_queued_cmd *qc)
+{
+       struct adma_port_priv *pp = qc->ap->private_data;
+       u8  *buf = pp->pkt;
+       u32 pkt_dma = (u32)pp->pkt_dma;
+       int i = 0;
+
+       VPRINTK("ENTER\n");
+
+       adma_enter_reg_mode(qc->ap);
+       if (qc->tf.protocol != ATA_PROT_DMA) {
+               ata_qc_prep(qc);
+               return;
+       }
+
+       buf[i++] = 0;   /* Response flags */
+       buf[i++] = 0;   /* reserved */
+       buf[i++] = cVLD | cDAT | cIEN;
+       i++;            /* cLEN, gets filled in below */
+
+       *(__le32 *)(buf+i) = cpu_to_le32(pkt_dma);      /* cNCPB */
+       i += 4;         /* cNCPB */
+       i += 4;         /* cPRD, gets filled in below */
+
+       buf[i++] = 0;   /* reserved */
+       buf[i++] = 0;   /* reserved */
+       buf[i++] = 0;   /* reserved */
+       buf[i++] = 0;   /* reserved */
+
+       /* ATA registers; must be a multiple of 4 */
+       buf[i++] = qc->tf.device;
+       buf[i++] = ADMA_REGS_DEVICE;
+       if ((qc->tf.flags & ATA_TFLAG_LBA48)) {
+               buf[i++] = qc->tf.hob_nsect;
+               buf[i++] = ADMA_REGS_SECTOR_COUNT;
+               buf[i++] = qc->tf.hob_lbal;
+               buf[i++] = ADMA_REGS_LBA_LOW;
+               buf[i++] = qc->tf.hob_lbam;
+               buf[i++] = ADMA_REGS_LBA_MID;
+               buf[i++] = qc->tf.hob_lbah;
+               buf[i++] = ADMA_REGS_LBA_HIGH;
+       }
+       buf[i++] = qc->tf.nsect;
+       buf[i++] = ADMA_REGS_SECTOR_COUNT;
+       buf[i++] = qc->tf.lbal;
+       buf[i++] = ADMA_REGS_LBA_LOW;
+       buf[i++] = qc->tf.lbam;
+       buf[i++] = ADMA_REGS_LBA_MID;
+       buf[i++] = qc->tf.lbah;
+       buf[i++] = ADMA_REGS_LBA_HIGH;
+       buf[i++] = 0;
+       buf[i++] = ADMA_REGS_CONTROL;
+       buf[i++] = rIGN;
+       buf[i++] = 0;
+       buf[i++] = qc->tf.command;
+       buf[i++] = ADMA_REGS_COMMAND | rEND;
+
+       buf[3] = (i >> 3) - 2;                          /* cLEN */
+       *(__le32 *)(buf+8) = cpu_to_le32(pkt_dma + i);  /* cPRD */
+
+       i = adma_fill_sg(qc);
+       wmb();  /* flush PRDs and pkt to memory */
+#if 0
+       /* dump out CPB + PRDs for debug */
+       {
+               int j, len = 0;
+               static char obuf[2048];
+               for (j = 0; j < i; ++j) {
+                       len += sprintf(obuf+len, "%02x ", buf[j]);
+                       if ((j & 7) == 7) {
+                               printk("%s\n", obuf);
+                               len = 0;
+                       }
+               }
+               if (len)
+                       printk("%s\n", obuf);
+       }
+#endif
+}
+
+static inline void adma_packet_start(struct ata_queued_cmd *qc)
+{
+       struct ata_port *ap = qc->ap;
+       void __iomem *chan = ADMA_REGS(ap->host_set->mmio_base, ap->port_no);
+
+       VPRINTK("ENTER, ap %p\n", ap);
+
+       /* fire up the ADMA engine */
+       writew(aPIOMD4 | aGO, chan + ADMA_CONTROL);
+}
+
+static int adma_qc_issue(struct ata_queued_cmd *qc)
+{
+       struct adma_port_priv *pp = qc->ap->private_data;
+
+       switch (qc->tf.protocol) {
+       case ATA_PROT_DMA:
+               pp->state = adma_state_pkt;
+               adma_packet_start(qc);
+               return 0;
+
+       case ATA_PROT_ATAPI_DMA:
+               BUG();
+               break;
+
+       default:
+               break;
+       }
+
+       pp->state = adma_state_mmio;
+       return ata_qc_issue_prot(qc);
+}
+
+static inline unsigned int adma_intr_pkt(struct ata_host_set *host_set)
+{
+       unsigned int handled = 0, port_no;
+       u8 __iomem *mmio_base = host_set->mmio_base;
+
+       for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
+               struct ata_port *ap = host_set->ports[port_no];
+               struct adma_port_priv *pp;
+               struct ata_queued_cmd *qc;
+               void __iomem *chan = ADMA_REGS(mmio_base, port_no);
+               u8 drv_stat, status = readb(chan + ADMA_STATUS);
+
+               if (status == 0)
+                       continue;
+               handled = 1;
+               adma_enter_reg_mode(ap);
+               if ((ap->flags & ATA_FLAG_PORT_DISABLED))
+                       continue;
+               pp = ap->private_data;
+               if (!pp || pp->state != adma_state_pkt)
+                       continue;
+               qc = ata_qc_from_tag(ap, ap->active_tag);
+               drv_stat = 0;
+               if ((status & (aPERR | aPSD | aUIRQ)))
+                       drv_stat = ATA_ERR;
+               else if (pp->pkt[0] != cDONE)
+                       drv_stat = ATA_ERR;
+               ata_qc_complete(qc, drv_stat);
+       }
+       return handled;
+}
+
+static inline unsigned int adma_intr_mmio(struct ata_host_set *host_set)
+{
+       unsigned int handled = 0, port_no;
+
+       for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
+               struct ata_port *ap;
+               ap = host_set->ports[port_no];
+               if (ap && (!(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR)))) {
+                       struct ata_queued_cmd *qc;
+                       struct adma_port_priv *pp = ap->private_data;
+                       if (!pp || pp->state != adma_state_mmio)
+                               continue;
+                       qc = ata_qc_from_tag(ap, ap->active_tag);
+                       if (qc && (!(qc->tf.ctl & ATA_NIEN))) {
+
+                               /* check main status, clearing INTRQ */
+                               u8 status = ata_chk_status(ap);
+                               if ((status & ATA_BUSY))
+                                       continue;
+                               DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
+                                       ap->id, qc->tf.protocol, status);
+
+                               /* complete taskfile transaction */
+                               pp->state = adma_state_idle;
+                               ata_qc_complete(qc, status);
+                               handled = 1;
+                       }
+               }
+       }
+       return handled;
+}
+
+static irqreturn_t adma_intr(int irq, void *dev_instance, struct pt_regs *regs)
+{
+       struct ata_host_set *host_set = dev_instance;
+       unsigned int handled = 0;
+
+       VPRINTK("ENTER\n");
+
+       spin_lock(&host_set->lock);
+       handled  = adma_intr_pkt(host_set) | adma_intr_mmio(host_set);
+       spin_unlock(&host_set->lock);
+
+       VPRINTK("EXIT\n");
+
+       return IRQ_RETVAL(handled);
+}
+
+static void adma_ata_setup_port(struct ata_ioports *port, unsigned long base)
+{
+       port->cmd_addr          =
+       port->data_addr         = base + 0x000;
+       port->error_addr        =
+       port->feature_addr      = base + 0x004;
+       port->nsect_addr        = base + 0x008;
+       port->lbal_addr         = base + 0x00c;
+       port->lbam_addr         = base + 0x010;
+       port->lbah_addr         = base + 0x014;
+       port->device_addr       = base + 0x018;
+       port->status_addr       =
+       port->command_addr      = base + 0x01c;
+       port->altstatus_addr    =
+       port->ctl_addr          = base + 0x038;
+}
+
+static int adma_port_start(struct ata_port *ap)
+{
+       struct device *dev = ap->host_set->dev;
+       struct adma_port_priv *pp;
+       int rc;
+
+       rc = ata_port_start(ap);
+       if (rc)
+               return rc;
+       adma_enter_reg_mode(ap);
+       rc = -ENOMEM;
+       pp = kcalloc(1, sizeof(*pp), GFP_KERNEL);
+       if (!pp)
+               goto err_out;
+       pp->pkt = dma_alloc_coherent(dev, ADMA_PKT_BYTES, &pp->pkt_dma,
+                                                               GFP_KERNEL);
+       if (!pp->pkt)
+               goto err_out_kfree;
+       /* paranoia? */
+       if ((pp->pkt_dma & 7) != 0) {
+               printk("bad alignment for pp->pkt_dma: %08x\n",
+                                               (u32)pp->pkt_dma);
+               goto err_out_kfree2;
+       }
+       memset(pp->pkt, 0, ADMA_PKT_BYTES);
+       ap->private_data = pp;
+       adma_reinit_engine(ap);
+       return 0;
+
+err_out_kfree2:
+       dma_free_coherent(dev, ADMA_PKT_BYTES, pp->pkt, pp->pkt_dma);
+err_out_kfree:
+       kfree(pp);
+err_out:
+       ata_port_stop(ap);
+       return rc;
+}
+
+static void adma_port_stop(struct ata_port *ap)
+{
+       struct device *dev = ap->host_set->dev;
+       struct adma_port_priv *pp = ap->private_data;
+
+       adma_reset_engine(ADMA_REGS(ap->host_set->mmio_base, ap->port_no));
+       if (pp != NULL) {
+               ap->private_data = NULL;
+               if (pp->pkt != NULL)
+                       dma_free_coherent(dev, ADMA_PKT_BYTES,
+                                       pp->pkt, pp->pkt_dma);
+               kfree(pp);
+       }
+       ata_port_stop(ap);
+}
+
+static void adma_host_stop(struct ata_host_set *host_set)
+{
+       unsigned int port_no;
+
+       for (port_no = 0; port_no < ADMA_PORTS; ++port_no)
+               adma_reset_engine(ADMA_REGS(host_set->mmio_base, port_no));
+
+       ata_pci_host_stop(host_set);
+}
+
+static void adma_host_init(unsigned int chip_id,
+                               struct ata_probe_ent *probe_ent)
+{
+       unsigned int port_no;
+       void __iomem *mmio_base = probe_ent->mmio_base;
+
+       /* enable/lock aGO operation */
+       writeb(7, mmio_base + ADMA_MODE_LOCK);
+
+       /* reset the ADMA logic */
+       for (port_no = 0; port_no < ADMA_PORTS; ++port_no)
+               adma_reset_engine(ADMA_REGS(mmio_base, port_no));
+}
+
+static int adma_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
+{
+       int rc;
+
+       rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+       if (rc) {
+               printk(KERN_ERR DRV_NAME
+                       "(%s): 32-bit DMA enable failed\n",
+                       pci_name(pdev));
+               return rc;
+       }
+       rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+       if (rc) {
+               printk(KERN_ERR DRV_NAME
+                       "(%s): 32-bit consistent DMA enable failed\n",
+                       pci_name(pdev));
+               return rc;
+       }
+       return 0;
+}
+
+static int adma_ata_init_one(struct pci_dev *pdev,
+                               const struct pci_device_id *ent)
+{
+       static int printed_version;
+       struct ata_probe_ent *probe_ent = NULL;
+       void __iomem *mmio_base;
+       unsigned int board_idx = (unsigned int) ent->driver_data;
+       int rc, port_no;
+
+       if (!printed_version++)
+               printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
+
+       rc = pci_enable_device(pdev);
+       if (rc)
+               return rc;
+
+       rc = pci_request_regions(pdev, DRV_NAME);
+       if (rc)
+               goto err_out;
+
+       if ((pci_resource_flags(pdev, 4) & IORESOURCE_MEM) == 0) {
+               rc = -ENODEV;
+               goto err_out_regions;
+       }
+
+       mmio_base = pci_iomap(pdev, 4, 0);
+       if (mmio_base == NULL) {
+               rc = -ENOMEM;
+               goto err_out_regions;
+       }
+
+       rc = adma_set_dma_masks(pdev, mmio_base);
+       if (rc)
+               goto err_out_iounmap;
+
+       probe_ent = kcalloc(1, sizeof(*probe_ent), GFP_KERNEL);
+       if (probe_ent == NULL) {
+               rc = -ENOMEM;
+               goto err_out_iounmap;
+       }
+
+       probe_ent->dev = pci_dev_to_dev(pdev);
+       INIT_LIST_HEAD(&probe_ent->node);
+
+       probe_ent->sht          = adma_port_info[board_idx].sht;
+       probe_ent->host_flags   = adma_port_info[board_idx].host_flags;
+       probe_ent->pio_mask     = adma_port_info[board_idx].pio_mask;
+       probe_ent->mwdma_mask   = adma_port_info[board_idx].mwdma_mask;
+       probe_ent->udma_mask    = adma_port_info[board_idx].udma_mask;
+       probe_ent->port_ops     = adma_port_info[board_idx].port_ops;
+
+       probe_ent->irq          = pdev->irq;
+       probe_ent->irq_flags    = SA_SHIRQ;
+       probe_ent->mmio_base    = mmio_base;
+       probe_ent->n_ports      = ADMA_PORTS;
+
+       for (port_no = 0; port_no < probe_ent->n_ports; ++port_no) {
+               adma_ata_setup_port(&probe_ent->port[port_no],
+                       ADMA_ATA_REGS((unsigned long)mmio_base, port_no));
+       }
+
+       pci_set_master(pdev);
+
+       /* initialize adapter */
+       adma_host_init(board_idx, probe_ent);
+
+       rc = ata_device_add(probe_ent);
+       kfree(probe_ent);
+       if (rc != ADMA_PORTS)
+               goto err_out_iounmap;
+       return 0;
+
+err_out_iounmap:
+       pci_iounmap(pdev, mmio_base);
+err_out_regions:
+       pci_release_regions(pdev);
+err_out:
+       pci_disable_device(pdev);
+       return rc;
+}
+
+static int __init adma_ata_init(void)
+{
+       return pci_module_init(&adma_ata_pci_driver);
+}
+
+static void __exit adma_ata_exit(void)
+{
+       pci_unregister_driver(&adma_ata_pci_driver);
+}
+
+MODULE_AUTHOR("Mark Lord");
+MODULE_DESCRIPTION("Pacific Digital Corporation ADMA low-level driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, adma_ata_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(adma_ata_init);
+module_exit(adma_ata_exit);
index 1ed32e7b5472091ef7324e154dfdcf32464f0ae8..e451941ad81d11ce3ce7a601cf07a916cf1bf22f 100644 (file)
@@ -52,7 +52,7 @@ extern int qla2x00_load_risc(struct scsi_qla_host *, uint32_t *);
 extern int qla24xx_load_risc_flash(scsi_qla_host_t *, uint32_t *);
 extern int qla24xx_load_risc_hotplug(scsi_qla_host_t *, uint32_t *);
 
-extern fc_port_t *qla2x00_alloc_fcport(scsi_qla_host_t *, int);
+extern fc_port_t *qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t);
 
 extern int qla2x00_loop_resync(scsi_qla_host_t *);
 
@@ -277,7 +277,7 @@ extern int qla2x00_fdmi_register(scsi_qla_host_t *);
 /*
  * Global Function Prototypes in qla_rscn.c source file.
  */
-extern fc_port_t *qla2x00_alloc_rscn_fcport(scsi_qla_host_t *, int);
+extern fc_port_t *qla2x00_alloc_rscn_fcport(scsi_qla_host_t *, gfp_t);
 extern int qla2x00_handle_port_rscn(scsi_qla_host_t *, uint32_t, fc_port_t *,
     int);
 extern void qla2x00_process_iodesc(scsi_qla_host_t *, struct mbx_entry *);
index 23d095d3817b8cb8b35d431564083f1a03da9b71..fbb6feee40cfeb1921ead1abd59a1a970b57a9a7 100644 (file)
@@ -1685,7 +1685,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
  * Returns a pointer to the allocated fcport, or NULL, if none available.
  */
 fc_port_t *
-qla2x00_alloc_fcport(scsi_qla_host_t *ha, int flags)
+qla2x00_alloc_fcport(scsi_qla_host_t *ha, gfp_t flags)
 {
        fc_port_t *fcport;
 
index 1eba988286360efd49a696eb80613e5310953663..7534efcc891860e7598cdffb28389b48bf16e312 100644 (file)
@@ -1066,7 +1066,7 @@ qla2x00_send_login_iocb_cb(scsi_qla_host_t *ha, struct io_descriptor *iodesc,
  * Returns a pointer to the allocated RSCN fcport, or NULL, if none available.
  */
 fc_port_t *
-qla2x00_alloc_rscn_fcport(scsi_qla_host_t *ha, int flags)
+qla2x00_alloc_rscn_fcport(scsi_qla_host_t *ha, gfp_t flags)
 {
        fc_port_t *fcport;
 
index ea76fe44585e8f6b96e6f8ffd6dc6ce6dd558db1..422e0b6f603ac5b8960073def04a2757f8ed79e8 100644 (file)
@@ -35,7 +35,7 @@
 #include <asm/io.h>
 
 #define DRV_NAME       "sata_mv"
-#define DRV_VERSION    "0.12"
+#define DRV_VERSION    "0.25"
 
 enum {
        /* BAR's are enumerated in terms of pci_resource_start() terms */
@@ -55,31 +55,61 @@ enum {
        MV_SATAHC_ARBTR_REG_SZ  = MV_MINOR_REG_AREA_SZ,         /* arbiter */
        MV_PORT_REG_SZ          = MV_MINOR_REG_AREA_SZ,
 
-       MV_Q_CT                 = 32,
-       MV_CRQB_SZ              = 32,
-       MV_CRPB_SZ              = 8,
+       MV_USE_Q_DEPTH          = ATA_DEF_QUEUE,
 
-       MV_DMA_BOUNDARY         = 0xffffffffU,
-       SATAHC_MASK             = (~(MV_SATAHC_REG_SZ - 1)),
+       MV_MAX_Q_DEPTH          = 32,
+       MV_MAX_Q_DEPTH_MASK     = MV_MAX_Q_DEPTH - 1,
+
+       /* CRQB needs alignment on a 1KB boundary. Size == 1KB
+        * CRPB needs alignment on a 256B boundary. Size == 256B
+        * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB (1KB + 256B + 2816B)
+        * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
+        */
+       MV_CRQB_Q_SZ            = (32 * MV_MAX_Q_DEPTH),
+       MV_CRPB_Q_SZ            = (8 * MV_MAX_Q_DEPTH),
+       MV_MAX_SG_CT            = 176,
+       MV_SG_TBL_SZ            = (16 * MV_MAX_SG_CT),
+       MV_PORT_PRIV_DMA_SZ     = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
+
+       /* Our DMA boundary is determined by an ePRD being unable to handle
+        * anything larger than 64KB
+        */
+       MV_DMA_BOUNDARY         = 0xffffU,
 
        MV_PORTS_PER_HC         = 4,
        /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
        MV_PORT_HC_SHIFT        = 2,
-       /* == (port % MV_PORTS_PER_HC) to determine port from 0-7 port */
+       /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
        MV_PORT_MASK            = 3,
 
        /* Host Flags */
        MV_FLAG_DUAL_HC         = (1 << 30),  /* two SATA Host Controllers */
        MV_FLAG_IRQ_COALESCE    = (1 << 29),  /* IRQ coalescing capability */
-       MV_FLAG_BDMA            = (1 << 28),  /* Basic DMA */
+       MV_FLAG_GLBL_SFT_RST    = (1 << 28),  /* Global Soft Reset support */
+       MV_COMMON_FLAGS         = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+                                  ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO),
+       MV_6XXX_FLAGS           = (MV_FLAG_IRQ_COALESCE | 
+                                  MV_FLAG_GLBL_SFT_RST),
 
        chip_504x               = 0,
        chip_508x               = 1,
        chip_604x               = 2,
        chip_608x               = 3,
 
+       CRQB_FLAG_READ          = (1 << 0),
+       CRQB_TAG_SHIFT          = 1,
+       CRQB_CMD_ADDR_SHIFT     = 8,
+       CRQB_CMD_CS             = (0x2 << 11),
+       CRQB_CMD_LAST           = (1 << 15),
+
+       CRPB_FLAG_STATUS_SHIFT  = 8,
+
+       EPRD_FLAG_END_OF_TBL    = (1 << 31),
+
        /* PCI interface registers */
 
+       PCI_COMMAND_OFS         = 0xc00,
+
        PCI_MAIN_CMD_STS_OFS    = 0xd30,
        STOP_PCI_MASTER         = (1 << 2),
        PCI_MASTER_EMPTY        = (1 << 3),
@@ -111,20 +141,13 @@ enum {
        HC_CFG_OFS              = 0,
 
        HC_IRQ_CAUSE_OFS        = 0x14,
-       CRBP_DMA_DONE           = (1 << 0),     /* shift by port # */
+       CRPB_DMA_DONE           = (1 << 0),     /* shift by port # */
        HC_IRQ_COAL             = (1 << 4),     /* IRQ coalescing */
        DEV_IRQ                 = (1 << 8),     /* shift by port # */
 
        /* Shadow block registers */
-       SHD_PIO_DATA_OFS        = 0x100,
-       SHD_FEA_ERR_OFS         = 0x104,
-       SHD_SECT_CNT_OFS        = 0x108,
-       SHD_LBA_L_OFS           = 0x10C,
-       SHD_LBA_M_OFS           = 0x110,
-       SHD_LBA_H_OFS           = 0x114,
-       SHD_DEV_HD_OFS          = 0x118,
-       SHD_CMD_STA_OFS         = 0x11C,
-       SHD_CTL_AST_OFS         = 0x120,
+       SHD_BLK_OFS             = 0x100,
+       SHD_CTL_AST_OFS         = 0x20,         /* ofs from SHD_BLK_OFS */
 
        /* SATA registers */
        SATA_STATUS_OFS         = 0x300,  /* ctrl, err regs follow status */
@@ -132,6 +155,11 @@ enum {
 
        /* Port registers */
        EDMA_CFG_OFS            = 0,
+       EDMA_CFG_Q_DEPTH        = 0,                    /* queueing disabled */
+       EDMA_CFG_NCQ            = (1 << 5),
+       EDMA_CFG_NCQ_GO_ON_ERR  = (1 << 14),            /* continue on error */
+       EDMA_CFG_RD_BRST_EXT    = (1 << 11),            /* read burst 512B */
+       EDMA_CFG_WR_BUFF_LEN    = (1 << 13),            /* write buffer 512B */
 
        EDMA_ERR_IRQ_CAUSE_OFS  = 0x8,
        EDMA_ERR_IRQ_MASK_OFS   = 0xc,
@@ -161,33 +189,85 @@ enum {
                                   EDMA_ERR_LNK_DATA_TX | 
                                   EDMA_ERR_TRANS_PROTO),
 
+       EDMA_REQ_Q_BASE_HI_OFS  = 0x10,
+       EDMA_REQ_Q_IN_PTR_OFS   = 0x14,         /* also contains BASE_LO */
+       EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
+
+       EDMA_REQ_Q_OUT_PTR_OFS  = 0x18,
+       EDMA_REQ_Q_PTR_SHIFT    = 5,
+
+       EDMA_RSP_Q_BASE_HI_OFS  = 0x1c,
+       EDMA_RSP_Q_IN_PTR_OFS   = 0x20,
+       EDMA_RSP_Q_OUT_PTR_OFS  = 0x24,         /* also contains BASE_LO */
+       EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
+       EDMA_RSP_Q_PTR_SHIFT    = 3,
+
        EDMA_CMD_OFS            = 0x28,
        EDMA_EN                 = (1 << 0),
        EDMA_DS                 = (1 << 1),
        ATA_RST                 = (1 << 2),
 
-       /* BDMA is 6xxx part only */
-       BDMA_CMD_OFS            = 0x224,
-       BDMA_START              = (1 << 0),
+       /* Host private flags (hp_flags) */
+       MV_HP_FLAG_MSI          = (1 << 0),
 
-       MV_UNDEF                = 0,
+       /* Port private flags (pp_flags) */
+       MV_PP_FLAG_EDMA_EN      = (1 << 0),
+       MV_PP_FLAG_EDMA_DS_ACT  = (1 << 1),
 };
 
-struct mv_port_priv {
+/* Command ReQuest Block: 32B */
+struct mv_crqb {
+       u32                     sg_addr;
+       u32                     sg_addr_hi;
+       u16                     ctrl_flags;
+       u16                     ata_cmd[11];
+};
 
+/* Command ResPonse Block: 8B */
+struct mv_crpb {
+       u16                     id;
+       u16                     flags;
+       u32                     tmstmp;
 };
 
-struct mv_host_priv {
+/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
+struct mv_sg {
+       u32                     addr;
+       u32                     flags_size;
+       u32                     addr_hi;
+       u32                     reserved;
+};
 
+struct mv_port_priv {
+       struct mv_crqb          *crqb;
+       dma_addr_t              crqb_dma;
+       struct mv_crpb          *crpb;
+       dma_addr_t              crpb_dma;
+       struct mv_sg            *sg_tbl;
+       dma_addr_t              sg_tbl_dma;
+
+       unsigned                req_producer;           /* cp of req_in_ptr */
+       unsigned                rsp_consumer;           /* cp of rsp_out_ptr */
+       u32                     pp_flags;
+};
+
+struct mv_host_priv {
+       u32                     hp_flags;
 };
 
 static void mv_irq_clear(struct ata_port *ap);
 static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
 static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
+static u8 mv_check_err(struct ata_port *ap);
 static void mv_phy_reset(struct ata_port *ap);
-static int mv_master_reset(void __iomem *mmio_base);
+static void mv_host_stop(struct ata_host_set *host_set);
+static int mv_port_start(struct ata_port *ap);
+static void mv_port_stop(struct ata_port *ap);
+static void mv_qc_prep(struct ata_queued_cmd *qc);
+static int mv_qc_issue(struct ata_queued_cmd *qc);
 static irqreturn_t mv_interrupt(int irq, void *dev_instance,
                                struct pt_regs *regs);
+static void mv_eng_timeout(struct ata_port *ap);
 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
 
 static Scsi_Host_Template mv_sht = {
@@ -196,13 +276,13 @@ static Scsi_Host_Template mv_sht = {
        .ioctl                  = ata_scsi_ioctl,
        .queuecommand           = ata_scsi_queuecmd,
        .eh_strategy_handler    = ata_scsi_error,
-       .can_queue              = ATA_DEF_QUEUE,
+       .can_queue              = MV_USE_Q_DEPTH,
        .this_id                = ATA_SHT_THIS_ID,
-       .sg_tablesize           = MV_UNDEF,
+       .sg_tablesize           = MV_MAX_SG_CT,
        .max_sectors            = ATA_MAX_SECTORS,
        .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
        .emulated               = ATA_SHT_EMULATED,
-       .use_clustering         = MV_UNDEF,
+       .use_clustering         = ATA_SHT_USE_CLUSTERING,
        .proc_name              = DRV_NAME,
        .dma_boundary           = MV_DMA_BOUNDARY,
        .slave_configure        = ata_scsi_slave_config,
@@ -210,21 +290,22 @@ static Scsi_Host_Template mv_sht = {
        .ordered_flush          = 1,
 };
 
-static struct ata_port_operations mv_ops = {
+static const struct ata_port_operations mv_ops = {
        .port_disable           = ata_port_disable,
 
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .check_status           = ata_check_status,
+       .check_err              = mv_check_err,
        .exec_command           = ata_exec_command,
        .dev_select             = ata_std_dev_select,
 
        .phy_reset              = mv_phy_reset,
 
-       .qc_prep                = ata_qc_prep,
-       .qc_issue               = ata_qc_issue_prot,
+       .qc_prep                = mv_qc_prep,
+       .qc_issue               = mv_qc_issue,
 
-       .eng_timeout            = ata_eng_timeout,
+       .eng_timeout            = mv_eng_timeout,
 
        .irq_handler            = mv_interrupt,
        .irq_clear              = mv_irq_clear,
@@ -232,46 +313,39 @@ static struct ata_port_operations mv_ops = {
        .scr_read               = mv_scr_read,
        .scr_write              = mv_scr_write,
 
-       .port_start             = ata_port_start,
-       .port_stop              = ata_port_stop,
-       .host_stop              = ata_host_stop,
+       .port_start             = mv_port_start,
+       .port_stop              = mv_port_stop,
+       .host_stop              = mv_host_stop,
 };
 
 static struct ata_port_info mv_port_info[] = {
        {  /* chip_504x */
                .sht            = &mv_sht,
-               .host_flags     = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-                                  ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO),
-               .pio_mask       = 0x1f, /* pio4-0 */
-               .udma_mask      = 0,    /* 0x7f (udma6-0 disabled for now) */
+               .host_flags     = MV_COMMON_FLAGS,
+               .pio_mask       = 0x1f, /* pio0-4 */
+               .udma_mask      = 0,    /* 0x7f (udma0-6 disabled for now) */
                .port_ops       = &mv_ops,
        },
        {  /* chip_508x */
                .sht            = &mv_sht,
-               .host_flags     = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-                                  ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | 
-                                  MV_FLAG_DUAL_HC),
-               .pio_mask       = 0x1f, /* pio4-0 */
-               .udma_mask      = 0,    /* 0x7f (udma6-0 disabled for now) */
+               .host_flags     = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
+               .pio_mask       = 0x1f, /* pio0-4 */
+               .udma_mask      = 0,    /* 0x7f (udma0-6 disabled for now) */
                .port_ops       = &mv_ops,
        },
        {  /* chip_604x */
                .sht            = &mv_sht,
-               .host_flags     = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-                                  ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | 
-                                  MV_FLAG_IRQ_COALESCE | MV_FLAG_BDMA),
-               .pio_mask       = 0x1f, /* pio4-0 */
-               .udma_mask      = 0,    /* 0x7f (udma6-0 disabled for now) */
+               .host_flags     = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
+               .pio_mask       = 0x1f, /* pio0-4 */
+               .udma_mask      = 0x7f, /* udma0-6 */
                .port_ops       = &mv_ops,
        },
        {  /* chip_608x */
                .sht            = &mv_sht,
-               .host_flags     = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-                                  ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
-                                  MV_FLAG_IRQ_COALESCE | MV_FLAG_DUAL_HC |
-                                  MV_FLAG_BDMA),
-               .pio_mask       = 0x1f, /* pio4-0 */
-               .udma_mask      = 0,    /* 0x7f (udma6-0 disabled for now) */
+               .host_flags     = (MV_COMMON_FLAGS | MV_6XXX_FLAGS | 
+                                  MV_FLAG_DUAL_HC),
+               .pio_mask       = 0x1f, /* pio0-4 */
+               .udma_mask      = 0x7f, /* udma0-6 */
                .port_ops       = &mv_ops,
        },
 };
@@ -306,12 +380,6 @@ static inline void writelfl(unsigned long data, void __iomem *addr)
        (void) readl(addr);     /* flush to avoid PCI posted write */
 }
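The read-back in writelfl() above is the usual MMIO idiom for flushing PCI posted writes: a read from the same device cannot complete until every write posted before it has reached the hardware. A minimal sketch of the pattern, with a plain volatile pointer standing in for the kernel's readl()/writel() accessors:

/* Sketch only: write a register, then read it back to flush the posted write. */
static inline void write_and_flush(volatile unsigned int *reg, unsigned int val)
{
        *reg = val;     /* the write may sit in a posting buffer */
        (void)*reg;     /* a read from the same device forces it out */
}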
 
-static inline void __iomem *mv_port_addr_to_hc_base(void __iomem *port_mmio)
-{
-       return ((void __iomem *)((unsigned long)port_mmio & 
-                                (unsigned long)SATAHC_MASK));
-}
-
 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
 {
        return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
@@ -329,24 +397,150 @@ static inline void __iomem *mv_ap_base(struct ata_port *ap)
        return mv_port_base(ap->host_set->mmio_base, ap->port_no);
 }
 
-static inline int mv_get_hc_count(unsigned long flags)
+static inline int mv_get_hc_count(unsigned long hp_flags)
 {
-       return ((flags & MV_FLAG_DUAL_HC) ? 2 : 1);
+       return ((hp_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
 }
 
-static inline int mv_is_edma_active(struct ata_port *ap)
+static void mv_irq_clear(struct ata_port *ap)
+{
+}
+
+/**
+ *      mv_start_dma - Enable eDMA engine
+ *      @base: port base address
+ *      @pp: port private data
+ *
+ *      Verify the local cache of the eDMA state is accurate with an
+ *      assert.
+ *
+ *      LOCKING:
+ *      Inherited from caller.
+ */
+static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
+{
+       if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
+               writelfl(EDMA_EN, base + EDMA_CMD_OFS);
+               pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
+       }
+       assert(EDMA_EN & readl(base + EDMA_CMD_OFS));
+}
+
+/**
+ *      mv_stop_dma - Disable eDMA engine
+ *      @ap: ATA channel to manipulate
+ *
+ *      Verify the local cache of the eDMA state is accurate with an
+ *      assert.
+ *
+ *      LOCKING:
+ *      Inherited from caller.
+ */
+static void mv_stop_dma(struct ata_port *ap)
 {
        void __iomem *port_mmio = mv_ap_base(ap);
-       return (EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
+       struct mv_port_priv *pp = ap->private_data;
+       u32 reg;
+       int i;
+
+       if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
+               /* Disable EDMA if active.   The disable bit auto clears.
+                */
+               writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
+               pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
+       } else {
+               assert(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
+       }
+       
+       /* now properly wait for the eDMA to stop */
+       for (i = 1000; i > 0; i--) {
+               reg = readl(port_mmio + EDMA_CMD_OFS);
+               if (!(EDMA_EN & reg)) {
+                       break;
+               }
+               udelay(100);
+       }
+
+       if (EDMA_EN & reg) {
+               printk(KERN_ERR "ata%u: Unable to stop eDMA\n", ap->id);
+               /* FIXME: Consider doing a reset here to recover */
+       }
 }
 
-static inline int mv_port_bdma_capable(struct ata_port *ap)
+#ifdef ATA_DEBUG
+static void mv_dump_mem(void __iomem *start, unsigned bytes)
 {
-       return (ap->flags & MV_FLAG_BDMA);
+       int b, w;
+       for (b = 0; b < bytes; ) {
+               DPRINTK("%p: ", start + b);
+               for (w = 0; b < bytes && w < 4; w++) {
+                       printk("%08x ",readl(start + b));
+                       b += sizeof(u32);
+               }
+               printk("\n");
+       }
 }
+#endif
 
-static void mv_irq_clear(struct ata_port *ap)
+static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
+{
+#ifdef ATA_DEBUG
+       int b, w;
+       u32 dw;
+       for (b = 0; b < bytes; ) {
+               DPRINTK("%02x: ", b);
+               for (w = 0; b < bytes && w < 4; w++) {
+                       (void) pci_read_config_dword(pdev,b,&dw);
+                       printk("%08x ",dw);
+                       b += sizeof(u32);
+               }
+               printk("\n");
+       }
+#endif
+}
+static void mv_dump_all_regs(void __iomem *mmio_base, int port,
+                            struct pci_dev *pdev)
 {
+#ifdef ATA_DEBUG
+       void __iomem *hc_base = mv_hc_base(mmio_base, 
+                                          port >> MV_PORT_HC_SHIFT);
+       void __iomem *port_base;
+       int start_port, num_ports, p, start_hc, num_hcs, hc;
+
+       if (0 > port) {
+               start_hc = start_port = 0;
+               num_ports = 8;          /* should be benign for 4 port devs */
+               num_hcs = 2;
+       } else {
+               start_hc = port >> MV_PORT_HC_SHIFT;
+               start_port = port;
+               num_ports = num_hcs = 1;
+       }
+       DPRINTK("All registers for port(s) %u-%u:\n", start_port, 
+               num_ports > 1 ? num_ports - 1 : start_port);
+
+       if (NULL != pdev) {
+               DPRINTK("PCI config space regs:\n");
+               mv_dump_pci_cfg(pdev, 0x68);
+       }
+       DPRINTK("PCI regs:\n");
+       mv_dump_mem(mmio_base+0xc00, 0x3c);
+       mv_dump_mem(mmio_base+0xd00, 0x34);
+       mv_dump_mem(mmio_base+0xf00, 0x4);
+       mv_dump_mem(mmio_base+0x1d00, 0x6c);
+       for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
+               hc_base = mv_hc_base(mmio_base, hc);
+               DPRINTK("HC regs (HC %i):\n", hc);
+               mv_dump_mem(hc_base, 0x1c);
+       }
+       for (p = start_port; p < start_port + num_ports; p++) {
+               port_base = mv_port_base(mmio_base, p);
+               DPRINTK("EDMA regs (port %i):\n",p);
+               mv_dump_mem(port_base, 0x54);
+               DPRINTK("SATA regs (port %i):\n",p);
+               mv_dump_mem(port_base+0x300, 0x60);
+       }
+#endif
 }
 
 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
@@ -389,30 +583,37 @@ static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
        }
 }
 
-static int mv_master_reset(void __iomem *mmio_base)
+/**
+ *      mv_global_soft_reset - Perform the 6xxx global soft reset
+ *      @mmio_base: base address of the HBA
+ *
+ *      This routine only applies to 6xxx parts.
+ *
+ *      LOCKING:
+ *      Inherited from caller.
+ */
+static int mv_global_soft_reset(void __iomem *mmio_base)
 {
        void __iomem *reg = mmio_base + PCI_MAIN_CMD_STS_OFS;
        int i, rc = 0;
        u32 t;
 
-       VPRINTK("ENTER\n");
-
        /* Following procedure defined in PCI "main command and status
         * register" table.
         */
        t = readl(reg);
        writel(t | STOP_PCI_MASTER, reg);
 
-       for (i = 0; i < 100; i++) {
-               msleep(10);
+       for (i = 0; i < 1000; i++) {
+               udelay(1);
                t = readl(reg);
                if (PCI_MASTER_EMPTY & t) {
                        break;
                }
        }
        if (!(PCI_MASTER_EMPTY & t)) {
-               printk(KERN_ERR DRV_NAME "PCI master won't flush\n");
-               rc = 1;         /* broken HW? */
+               printk(KERN_ERR DRV_NAME "PCI master won't flush\n");
+               rc = 1;
                goto done;
        }
 
@@ -425,39 +626,399 @@ static int mv_master_reset(void __iomem *mmio_base)
        } while (!(GLOB_SFT_RST & t) && (i-- > 0));
 
        if (!(GLOB_SFT_RST & t)) {
-               printk(KERN_ERR DRV_NAME "can't set global reset\n");
-               rc = 1;         /* broken HW? */
+               printk(KERN_ERR DRV_NAME "can't set global reset\n");
+               rc = 1;
                goto done;
        }
 
-       /* clear reset */
+       /* clear reset and *reenable the PCI master* (not mentioned in spec) */
        i = 5;
        do {
-               writel(t & ~GLOB_SFT_RST, reg);
+               writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
                t = readl(reg);
                udelay(1);
        } while ((GLOB_SFT_RST & t) && (i-- > 0));
 
        if (GLOB_SFT_RST & t) {
-               printk(KERN_ERR DRV_NAME "can't clear global reset\n");
-               rc = 1;         /* broken HW? */
+               printk(KERN_ERR DRV_NAME "can't clear global reset\n");
+               rc = 1;
        }
-
- done:
-       VPRINTK("EXIT, rc = %i\n", rc);
+done:
        return rc;
 }
 
-static void mv_err_intr(struct ata_port *ap)
+/**
+ *      mv_host_stop - Host specific cleanup/stop routine.
+ *      @host_set: host data structure
+ *
+ *      Disable ints, cleanup host memory, call general purpose
+ *      host_stop.
+ *
+ *      LOCKING:
+ *      Inherited from caller.
+ */
+static void mv_host_stop(struct ata_host_set *host_set)
 {
-       void __iomem *port_mmio;
-       u32 edma_err_cause, serr = 0;
+       struct mv_host_priv *hpriv = host_set->private_data;
+       struct pci_dev *pdev = to_pci_dev(host_set->dev);
+
+       if (hpriv->hp_flags & MV_HP_FLAG_MSI) {
+               pci_disable_msi(pdev);
+       } else {
+               pci_intx(pdev, 0);
+       }
+       kfree(hpriv);
+       ata_host_stop(host_set);
+}
+
+/**
+ *      mv_port_start - Port specific init/start routine.
+ *      @ap: ATA channel to manipulate
+ *
+ *      Allocate and point to DMA memory, init port private memory,
+ *      zero indices.
+ *
+ *      LOCKING:
+ *      Inherited from caller.
+ */
+static int mv_port_start(struct ata_port *ap)
+{
+       struct device *dev = ap->host_set->dev;
+       struct mv_port_priv *pp;
+       void __iomem *port_mmio = mv_ap_base(ap);
+       void *mem;
+       dma_addr_t mem_dma;
+
+       pp = kmalloc(sizeof(*pp), GFP_KERNEL);
+       if (!pp) {
+               return -ENOMEM;
+       }
+       memset(pp, 0, sizeof(*pp));
+
+       mem = dma_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma, 
+                                GFP_KERNEL);
+       if (!mem) {
+               kfree(pp);
+               return -ENOMEM;
+       }
+       memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
+
+       /* First item in chunk of DMA memory: 
+        * 32-slot command request table (CRQB), 32 bytes each in size
+        */
+       pp->crqb = mem;
+       pp->crqb_dma = mem_dma;
+       mem += MV_CRQB_Q_SZ;
+       mem_dma += MV_CRQB_Q_SZ;
+
+       /* Second item: 
+        * 32-slot command response table (CRPB), 8 bytes each in size
+        */
+       pp->crpb = mem;
+       pp->crpb_dma = mem_dma;
+       mem += MV_CRPB_Q_SZ;
+       mem_dma += MV_CRPB_Q_SZ;
+
+       /* Third item:
+        * Table of scatter-gather descriptors (ePRD), 16 bytes each
+        */
+       pp->sg_tbl = mem;
+       pp->sg_tbl_dma = mem_dma;
+
+       writelfl(EDMA_CFG_Q_DEPTH | EDMA_CFG_RD_BRST_EXT | 
+                EDMA_CFG_WR_BUFF_LEN, port_mmio + EDMA_CFG_OFS);
+
+       writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
+       writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK, 
+                port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
+
+       writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
+       writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
+
+       writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
+       writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK, 
+                port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
+
+       pp->req_producer = pp->rsp_consumer = 0;
+
+       /* Don't turn on EDMA here...do it before DMA commands only.  Else
+        * we'll be unable to send non-data, PIO, etc due to restricted access
+        * to shadow regs.
+        */
+       ap->private_data = pp;
+       return 0;
+}
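Using the per-slot sizes quoted in the comments above (32 request blocks of 32 bytes, 32 response blocks of 8 bytes, then 16-byte ePRD entries), the single coherent allocation is carved up as sketched below; the EX_* names are illustrative stand-ins, not the MV_* constants this patch defines elsewhere.

/* Sketch of the per-port DMA block layout, assuming a 32-entry queue depth. */
enum {
        EX_CRQB_Q_BYTES = 32 * 32,      /* command request queue               */
        EX_CRPB_Q_BYTES = 32 * 8,       /* command response queue              */
        EX_SG_TBL_OFFS  = EX_CRQB_Q_BYTES + EX_CRPB_Q_BYTES, /* ePRD table     */
};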
+
+/**
+ *      mv_port_stop - Port specific cleanup/stop routine.
+ *      @ap: ATA channel to manipulate
+ *
+ *      Stop DMA, cleanup port memory.
+ *
+ *      LOCKING:
+ *      This routine uses the host_set lock to protect the DMA stop.
+ */
+static void mv_port_stop(struct ata_port *ap)
+{
+       struct device *dev = ap->host_set->dev;
+       struct mv_port_priv *pp = ap->private_data;
+       unsigned long flags;
+
+       spin_lock_irqsave(&ap->host_set->lock, flags);
+       mv_stop_dma(ap);
+       spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+       ap->private_data = NULL;
+       dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma);
+       kfree(pp);
+}
+
+/**
+ *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
+ *      @qc: queued command whose SG list to source from
+ *
+ *      Populate the SG list and mark the last entry.
+ *
+ *      LOCKING:
+ *      Inherited from caller.
+ */
+static void mv_fill_sg(struct ata_queued_cmd *qc)
+{
+       struct mv_port_priv *pp = qc->ap->private_data;
+       unsigned int i;
+
+       for (i = 0; i < qc->n_elem; i++) {
+               u32 sg_len;
+               dma_addr_t addr;
+
+               addr = sg_dma_address(&qc->sg[i]);
+               sg_len = sg_dma_len(&qc->sg[i]);
+
+               pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff);
+               pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
+               assert(0 == (sg_len & ~MV_DMA_BOUNDARY));
+               pp->sg_tbl[i].flags_size = cpu_to_le32(sg_len);
+       }
+       if (0 < qc->n_elem) {
+               pp->sg_tbl[qc->n_elem - 1].flags_size |= 
+                       cpu_to_le32(EPRD_FLAG_END_OF_TBL);
+       }
+}
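The (addr >> 16) >> 16 in mv_fill_sg() takes the upper half of a dma_addr_t without ever shifting by 32: if dma_addr_t happens to be a 32-bit type, a direct addr >> 32 would be undefined behaviour in C, while two 16-bit shifts are well defined and simply yield zero. A standalone illustration:

#include <stdint.h>

/* Sketch: split a DMA address into 32-bit halves portably. */
static void split_dma_addr(uint64_t addr, uint32_t *lo, uint32_t *hi)
{
        *lo = (uint32_t)(addr & 0xffffffff);
        *hi = (uint32_t)((addr >> 16) >> 16);   /* safe even for a 32-bit addr type */
}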
+
+static inline unsigned mv_inc_q_index(unsigned *index)
+{
+       *index = (*index + 1) & MV_MAX_Q_DEPTH_MASK;
+       return *index;
+}
+
+static inline void mv_crqb_pack_cmd(u16 *cmdw, u8 data, u8 addr, unsigned last)
+{
+       *cmdw = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
+               (last ? CRQB_CMD_LAST : 0);
+}
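mv_crqb_pack_cmd() packs one shadow-register write into a 16-bit CRQB command word: the data byte in the low bits, the register address shifted up by CRQB_CMD_ADDR_SHIFT, plus the control/status and last-entry flag bits. A worked example of the layout follows; the EX_* shift and flag values are placeholders for illustration, not taken from this patch.

/* Hypothetical field values, purely to show how one command word is built. */
#define EX_CMD_ADDR_SHIFT       8
#define EX_CMD_CS               (0x2 << 11)
#define EX_CMD_LAST             (1 << 15)

static const unsigned short example_cw =
        0x25                            /* data byte to write           */
        | (0x07 << EX_CMD_ADDR_SHIFT)   /* shadow register address      */
        | EX_CMD_CS
        | EX_CMD_LAST;                  /* marks the final word         */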
 
-       /* bug here b/c we got an err int on a port we don't know about,
-        * so there's no way to clear it
+/**
+ *      mv_qc_prep - Host specific command preparation.
+ *      @qc: queued command to prepare
+ *
+ *      This routine simply redirects to the general purpose routine
+ *      if command is not DMA.  Else, it handles prep of the CRQB
+ *      (command request block), does some sanity checking, and calls
+ *      the SG load routine.
+ *
+ *      LOCKING:
+ *      Inherited from caller.
+ */
+static void mv_qc_prep(struct ata_queued_cmd *qc)
+{
+       struct ata_port *ap = qc->ap;
+       struct mv_port_priv *pp = ap->private_data;
+       u16 *cw;
+       struct ata_taskfile *tf;
+       u16 flags = 0;
+
+       if (ATA_PROT_DMA != qc->tf.protocol) {
+               return;
+       }
+
+       /* the req producer index should be the same as we remember it */
+       assert(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >> 
+                EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
+              pp->req_producer);
+
+       /* Fill in command request block
         */
-       BUG_ON(NULL == ap);
-       port_mmio = mv_ap_base(ap);
+       if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
+               flags |= CRQB_FLAG_READ;
+       }
+       assert(MV_MAX_Q_DEPTH > qc->tag);
+       flags |= qc->tag << CRQB_TAG_SHIFT;
+
+       pp->crqb[pp->req_producer].sg_addr = 
+               cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
+       pp->crqb[pp->req_producer].sg_addr_hi = 
+               cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
+       pp->crqb[pp->req_producer].ctrl_flags = cpu_to_le16(flags);
+
+       cw = &pp->crqb[pp->req_producer].ata_cmd[0];
+       tf = &qc->tf;
+
+       /* Sadly, the CRQB cannot accommodate all registers--there are
+        * only 11 bytes...so we must pick and choose required
+        * registers based on the command.  So, we drop feature and
+        * hob_feature for [RW] DMA commands, but they are needed for
+        * NCQ.  NCQ will drop hob_nsect.
+        */
+       switch (tf->command) {
+       case ATA_CMD_READ:
+       case ATA_CMD_READ_EXT:
+       case ATA_CMD_WRITE:
+       case ATA_CMD_WRITE_EXT:
+               mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
+               break;
+#ifdef LIBATA_NCQ              /* FIXME: remove this line when NCQ added */
+       case ATA_CMD_FPDMA_READ:
+       case ATA_CMD_FPDMA_WRITE:
+               mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0); 
+               mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
+               break;
+#endif                         /* FIXME: remove this line when NCQ added */
+       default:
+               /* The only other commands EDMA supports in non-queued and
+                * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
+                * of which are defined/used by Linux.  If we get here, this
+                * driver needs work.
+                *
+                * FIXME: modify libata to give qc_prep a return value and
+                * return error here.
+                */
+               BUG_ON(tf->command);
+               break;
+       }
+       mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
+       mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
+       mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
+       mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
+       mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
+       mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
+       mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
+       mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
+       mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);    /* last */
+
+       if (!(qc->flags & ATA_QCFLAG_DMAMAP)) {
+               return;
+       }
+       mv_fill_sg(qc);
+}
+
+/**
+ *      mv_qc_issue - Initiate a command to the host
+ *      @qc: queued command to start
+ *
+ *      This routine simply redirects to the general purpose routine
+ *      if command is not DMA.  Else, it sanity checks our local
+ *      caches of the request producer/consumer indices then enables
+ *      DMA and bumps the request producer index.
+ *
+ *      LOCKING:
+ *      Inherited from caller.
+ */
+static int mv_qc_issue(struct ata_queued_cmd *qc)
+{
+       void __iomem *port_mmio = mv_ap_base(qc->ap);
+       struct mv_port_priv *pp = qc->ap->private_data;
+       u32 in_ptr;
+
+       if (ATA_PROT_DMA != qc->tf.protocol) {
+               /* We're about to send a non-EDMA capable command to the
+                * port.  Turn off EDMA so there won't be problems accessing
+                * shadow block, etc registers.
+                */
+               mv_stop_dma(qc->ap);
+               return ata_qc_issue_prot(qc);
+       }
+
+       in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
+
+       /* the req producer index should be the same as we remember it */
+       assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
+              pp->req_producer);
+       /* until we do queuing, the queue should be empty at this point */
+       assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
+              ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >> 
+                EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
+
+       mv_inc_q_index(&pp->req_producer);      /* now incr producer index */
+
+       mv_start_dma(port_mmio, pp);
+
+       /* and write the request in pointer to kick the EDMA to life */
+       in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
+       in_ptr |= pp->req_producer << EDMA_REQ_Q_PTR_SHIFT;
+       writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
+
+       return 0;
+}
+
+/**
+ *      mv_get_crpb_status - get status from most recently completed cmd
+ *      @ap: ATA channel to manipulate
+ *
+ *      This routine is for use when the port is in DMA mode, when it
+ *      will be using the CRPB (command response block) method of
+ *      returning command completion information.  We assert indices
+ *      are good, grab status, and bump the response consumer index to
+ *      prove that we're up to date.
+ *
+ *      LOCKING:
+ *      Inherited from caller.
+ */
+static u8 mv_get_crpb_status(struct ata_port *ap)
+{
+       void __iomem *port_mmio = mv_ap_base(ap);
+       struct mv_port_priv *pp = ap->private_data;
+       u32 out_ptr;
+
+       out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
+
+       /* the response consumer index should be the same as we remember it */
+       assert(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == 
+              pp->rsp_consumer);
+
+       /* increment our consumer index... */
+       pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer);
+       
+       /* and, until we do NCQ, there should only be 1 CRPB waiting */
+       assert(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >> 
+                EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == 
+              pp->rsp_consumer);
+
+       /* write out our inc'd consumer index so EDMA knows we're caught up */
+       out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
+       out_ptr |= pp->rsp_consumer << EDMA_RSP_Q_PTR_SHIFT;
+       writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
+
+       /* Return ATA status register for completed CRPB */
+       return (pp->crpb[pp->rsp_consumer].flags >> CRPB_FLAG_STATUS_SHIFT);
+}
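Both the request and response queues are simple power-of-two rings: the driver keeps its own producer/consumer indices, mirrors them into the EDMA in/out pointer registers, and wraps them with MV_MAX_Q_DEPTH_MASK in mv_inc_q_index(). A small sketch of the wrap arithmetic, assuming the 32-entry depth this driver uses:

/* Sketch: ring index advance with a power-of-two queue depth. */
enum { EX_Q_DEPTH = 32, EX_Q_MASK = EX_Q_DEPTH - 1 };

static unsigned ring_advance(unsigned *index)
{
        *index = (*index + 1) & EX_Q_MASK;      /* 31 wraps back to 0 */
        return *index;
}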
+
+/**
+ *      mv_err_intr - Handle error interrupts on the port
+ *      @ap: ATA channel to manipulate
+ *
+ *      In most cases, just clear the interrupt and move on.  However,
+ *      some cases require an eDMA reset, which is done right before
+ *      the COMRESET in mv_phy_reset().  The SERR case requires a
+ *      clear of pending errors in the SATA SERROR register.  Finally,
+ *      if the port disabled DMA, update our cached copy to match.
+ *
+ *      LOCKING:
+ *      Inherited from caller.
+ */
+static void mv_err_intr(struct ata_port *ap)
+{
+       void __iomem *port_mmio = mv_ap_base(ap);
+       u32 edma_err_cause, serr = 0;
 
        edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
 
@@ -465,8 +1026,12 @@ static void mv_err_intr(struct ata_port *ap)
                serr = scr_read(ap, SCR_ERROR);
                scr_write_flush(ap, SCR_ERROR, serr);
        }
-       DPRINTK("port %u error; EDMA err cause: 0x%08x SERR: 0x%08x\n", 
-               ap->port_no, edma_err_cause, serr);
+       if (EDMA_ERR_SELF_DIS & edma_err_cause) {
+               struct mv_port_priv *pp = ap->private_data;
+               pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
+       }
+       DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
+               "SERR: 0x%08x\n", ap->id, edma_err_cause, serr);
 
        /* Clear EDMA now that SERR cleanup done */
        writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
@@ -477,7 +1042,21 @@ static void mv_err_intr(struct ata_port *ap)
        }
 }
 
-/* Handle any outstanding interrupts in a single SATAHC 
+/**
+ *      mv_host_intr - Handle all interrupts on the given host controller
+ *      @host_set: host specific structure
+ *      @relevant: port error bits relevant to this host controller
+ *      @hc: which host controller we're to look at
+ *
+ *      Read then write clear the HC interrupt status then walk each
+ *      port connected to the HC and see if it needs servicing.  Port
+ *      success ints are reported in the HC interrupt status reg, the
+ *      port error ints are reported in the higher level main
+ *      interrupt status register and thus are passed in via the
+ *      'relevant' argument.
+ *
+ *      LOCKING:
+ *      Inherited from caller.
  */
 static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
                         unsigned int hc)
@@ -487,8 +1066,8 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
        struct ata_port *ap;
        struct ata_queued_cmd *qc;
        u32 hc_irq_cause;
-       int shift, port, port0, hard_port;
-       u8 ata_status;
+       int shift, port, port0, hard_port, handled;
+       u8 ata_status = 0;
 
        if (hc == 0) {
                port0 = 0;
@@ -499,7 +1078,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
        /* we'll need the HC success int register in most cases */
        hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
        if (hc_irq_cause) {
-               writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
+               writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
        }
 
        VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
@@ -508,35 +1087,38 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
        for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
                ap = host_set->ports[port];
                hard_port = port & MV_PORT_MASK;        /* range 0-3 */
-               ata_status = 0xffU;
+               handled = 0;    /* ensure ata_status is set if handled++ */
 
-               if (((CRBP_DMA_DONE | DEV_IRQ) << hard_port) & hc_irq_cause) {
-                       BUG_ON(NULL == ap);
-                       /* rcv'd new resp, basic DMA complete, or ATA IRQ */
-                       /* This is needed to clear the ATA INTRQ.
-                        * FIXME: don't read the status reg in EDMA mode!
+               if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
+                       /* new CRPB on the queue; just one at a time until NCQ
+                        */
+                       ata_status = mv_get_crpb_status(ap);
+                       handled++;
+               } else if ((DEV_IRQ << hard_port) & hc_irq_cause) {
+                       /* received ATA IRQ; read the status reg to clear INTRQ
                         */
                        ata_status = readb((void __iomem *)
                                           ap->ioaddr.status_addr);
+                       handled++;
                }
 
-               shift = port * 2;
+               shift = port << 1;              /* (port * 2) */
                if (port >= MV_PORTS_PER_HC) {
                        shift++;        /* skip bit 8 in the HC Main IRQ reg */
                }
                if ((PORT0_ERR << shift) & relevant) {
                        mv_err_intr(ap);
-                       /* FIXME: smart to OR in ATA_ERR? */
+                       /* OR in ATA_ERR to ensure libata knows we took one */
                        ata_status = readb((void __iomem *)
                                           ap->ioaddr.status_addr) | ATA_ERR;
+                       handled++;
                }
                
-               if (ap) {
+               if (handled && ap) {
                        qc = ata_qc_from_tag(ap, ap->active_tag);
                        if (NULL != qc) {
                                VPRINTK("port %u IRQ found for qc, "
                                        "ata_status 0x%x\n", port,ata_status);
-                               BUG_ON(0xffU == ata_status);
                                /* mark qc status appropriately */
                                ata_qc_complete(qc, ata_status);
                        }
@@ -545,17 +1127,30 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
        VPRINTK("EXIT\n");
 }
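The shift = port << 1 computation above maps each port to a pair of bits in the chip's main interrupt cause register, with ports on the second host controller additionally skipping bit 8; PORT0_ERR << shift then selects the error bit for that port. A sketch of the mapping, assuming four ports per host controller:

/* Sketch: bit position of a port's error flag in the main IRQ cause register. */
static int port_err_shift(int port)
{
        int shift = port << 1;          /* two status bits per port         */
        if (port >= 4)
                shift++;                /* ports on HC1 skip bit 8          */
        return shift;                   /* ports 0..7 -> 0,2,4,6,9,11,13,15 */
}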
 
+/**
+ *      mv_interrupt - Main interrupt event handler
+ *      @irq: unused
+ *      @dev_instance: private data; in this case the host structure
+ *      @regs: unused
+ *
+ *      Read the read only register to determine if any host
+ *      controllers have pending interrupts.  If so, call lower level
+ *      routine to handle.  Also check for PCI errors which are only
+ *      reported here.
+ *
+ *      LOCKING: 
+ *      This routine holds the host_set lock while processing pending
+ *      interrupts.
+ */
 static irqreturn_t mv_interrupt(int irq, void *dev_instance,
                                struct pt_regs *regs)
 {
        struct ata_host_set *host_set = dev_instance;
        unsigned int hc, handled = 0, n_hcs;
-       void __iomem *mmio;
+       void __iomem *mmio = host_set->mmio_base;
        u32 irq_stat;
 
-       mmio = host_set->mmio_base;
        irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
-       n_hcs = mv_get_hc_count(host_set->ports[0]->flags);
 
        /* check the cases where we either have nothing pending or have read
         * a bogus register value which can indicate HW removal or PCI fault
@@ -564,64 +1159,105 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance,
                return IRQ_NONE;
        }
 
+       n_hcs = mv_get_hc_count(host_set->ports[0]->flags);
        spin_lock(&host_set->lock);
 
        for (hc = 0; hc < n_hcs; hc++) {
                u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
                if (relevant) {
                        mv_host_intr(host_set, relevant, hc);
-                       handled = 1;
+                       handled++;
                }
        }
        if (PCI_ERR & irq_stat) {
-               /* FIXME: these are all masked by default, but still need
-                * to recover from them properly.
-                */
-       }
+               printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
+                      readl(mmio + PCI_IRQ_CAUSE_OFS));
 
+               DPRINTK("All regs @ PCI error\n");
+               mv_dump_all_regs(mmio, -1, to_pci_dev(host_set->dev));
+
+               writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
+               handled++;
+       }
        spin_unlock(&host_set->lock);
 
        return IRQ_RETVAL(handled);
 }
 
+/**
+ *      mv_check_err - Return the error shadow register to caller.
+ *      @ap: ATA channel to manipulate
+ *
+ *      Marvell requires DMA to be stopped before accessing shadow
+ *      registers.  So we do that, then return the needed register.
+ *
+ *      LOCKING:
+ *      Inherited from caller.  FIXME: protect mv_stop_dma with lock?
+ */
+static u8 mv_check_err(struct ata_port *ap)
+{
+       mv_stop_dma(ap);                /* can't read shadow regs if DMA on */
+       return readb((void __iomem *) ap->ioaddr.error_addr);
+}
+
+/**
+ *      mv_phy_reset - Perform eDMA reset followed by COMRESET
+ *      @ap: ATA channel to manipulate
+ *
+ *      Part of this is taken from __sata_phy_reset and modified to
+ *      not sleep since this routine gets called from interrupt level.
+ *
+ *      LOCKING:
+ *      Inherited from caller.  This is coded to be safe to call at
+ *      interrupt level, i.e. it does not sleep.
+ */
 static void mv_phy_reset(struct ata_port *ap)
 {
        void __iomem *port_mmio = mv_ap_base(ap);
        struct ata_taskfile tf;
        struct ata_device *dev = &ap->device[0];
-       u32 edma = 0, bdma;
+       unsigned long timeout;
 
        VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
 
-       edma = readl(port_mmio + EDMA_CMD_OFS);
-       if (EDMA_EN & edma) {
-               /* disable EDMA if active */
-               edma &= ~EDMA_EN;
-               writelfl(edma | EDMA_DS, port_mmio + EDMA_CMD_OFS);
-               udelay(1);
-       } else if (mv_port_bdma_capable(ap) &&
-                  (bdma = readl(port_mmio + BDMA_CMD_OFS)) & BDMA_START) {
-               /* disable BDMA if active */
-               writelfl(bdma & ~BDMA_START, port_mmio + BDMA_CMD_OFS);
-       }
+       mv_stop_dma(ap);
 
-       writelfl(edma | ATA_RST, port_mmio + EDMA_CMD_OFS);
+       writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
        udelay(25);             /* allow reset propagation */
 
        /* Spec never mentions clearing the bit.  Marvell's driver does
         * clear the bit, however.
         */
-       writelfl(edma & ~ATA_RST, port_mmio + EDMA_CMD_OFS);
+       writelfl(0, port_mmio + EDMA_CMD_OFS);
 
-       VPRINTK("Done.  Now calling __sata_phy_reset()\n");
+       VPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
+               "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
+               mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
 
        /* proceed to init communications via the scr_control reg */
-       __sata_phy_reset(ap);
+       scr_write_flush(ap, SCR_CONTROL, 0x301);
+       mdelay(1);
+       scr_write_flush(ap, SCR_CONTROL, 0x300);
+       timeout = jiffies + (HZ * 1);
+       do {
+               mdelay(10);
+               if ((scr_read(ap, SCR_STATUS) & 0xf) != 1)
+                       break;
+       } while (time_before(jiffies, timeout));
 
-       if (ap->flags & ATA_FLAG_PORT_DISABLED) {
-               VPRINTK("Port disabled pre-sig.  Exiting.\n");
+       VPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
+               "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
+               mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
+
+       if (sata_dev_present(ap)) {
+               ata_port_probe(ap);
+       } else {
+               printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n",
+                      ap->id, scr_read(ap, SCR_STATUS));
+               ata_port_disable(ap);
                return;
        }
+       ap->cbl = ATA_CBL_SATA;
 
        tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr);
        tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr);
@@ -636,37 +1272,118 @@ static void mv_phy_reset(struct ata_port *ap)
        VPRINTK("EXIT\n");
 }
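The two scr_write_flush() calls above follow the standard SControl sequence for a COMRESET: DET=1 (the low nibble of 0x301) starts the interface initialization sequence, and DET=0 a moment later releases it, while the 0x300 part keeps the IPM field set so the PHY cannot drop into Partial or Slumber during the handshake. A sketch decoding the two values per the SATA SControl field layout:

/* Sketch: SControl fields are DET = bits 3:0, SPD = bits 7:4, IPM = bits 11:8. */
static void decode_scontrol(unsigned int v, unsigned int *det,
                            unsigned int *spd, unsigned int *ipm)
{
        *det = v & 0xf;         /* 0x301 -> 1: start COMRESET           */
        *spd = (v >> 4) & 0xf;  /* 0: no speed limit                    */
        *ipm = (v >> 8) & 0xf;  /* 3: no Partial/Slumber transitions    */
}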
 
-static void mv_port_init(struct ata_ioports *port, unsigned long base)
+/**
+ *      mv_eng_timeout - Routine called by libata when SCSI times out I/O
+ *      @ap: ATA channel to manipulate
+ *
+ *      Intent is to clear all pending error conditions, reset the
+ *      chip/bus, fail the command, and move on.
+ *
+ *      LOCKING:
+ *      This routine holds the host_set lock while failing the command.
+ */
+static void mv_eng_timeout(struct ata_port *ap)
+{
+       struct ata_queued_cmd *qc;
+       unsigned long flags;
+
+       printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id);
+       DPRINTK("All regs @ start of eng_timeout\n");
+       mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no, 
+                        to_pci_dev(ap->host_set->dev));
+
+       qc = ata_qc_from_tag(ap, ap->active_tag);
+       if (qc)
+               printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
+                      ap->host_set->mmio_base, ap, qc, qc->scsicmd,
+                      &qc->scsicmd->cmnd);
+
+       mv_err_intr(ap);
+       mv_phy_reset(ap);
+
+       if (!qc) {
+               printk(KERN_ERR "ata%u: BUG: timeout without command\n",
+                      ap->id);
+       } else {
+               /* hack alert!  We cannot use the supplied completion
+                * function from inside the ->eh_strategy_handler() thread.
+                * libata is the only user of ->eh_strategy_handler() in
+                * any kernel, so the default scsi_done() assumes it is
+                * not being called from the SCSI EH.
+                */
+               spin_lock_irqsave(&ap->host_set->lock, flags);
+               qc->scsidone = scsi_finish_command;
+               ata_qc_complete(qc, ATA_ERR);
+               spin_unlock_irqrestore(&ap->host_set->lock, flags);
+       }
+}
+
+/**
+ *      mv_port_init - Perform some early initialization on a single port.
+ *      @port: libata data structure storing shadow register addresses
+ *      @port_mmio: base address of the port
+ *
+ *      Initialize shadow register mmio addresses, clear outstanding
+ *      interrupts on the port, and unmask interrupts for the future
+ *      start of the port.
+ *
+ *      LOCKING:
+ *      Inherited from caller.
+ */
+static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
 {
-       /* PIO related setup */
-       port->data_addr = base + SHD_PIO_DATA_OFS;
-       port->error_addr = port->feature_addr = base + SHD_FEA_ERR_OFS;
-       port->nsect_addr = base + SHD_SECT_CNT_OFS;
-       port->lbal_addr = base + SHD_LBA_L_OFS;
-       port->lbam_addr = base + SHD_LBA_M_OFS;
-       port->lbah_addr = base + SHD_LBA_H_OFS;
-       port->device_addr = base + SHD_DEV_HD_OFS;
-       port->status_addr = port->command_addr = base + SHD_CMD_STA_OFS;
-       port->altstatus_addr = port->ctl_addr = base + SHD_CTL_AST_OFS;
-       /* unused */
+       unsigned long shd_base = (unsigned long) port_mmio + SHD_BLK_OFS;
+       unsigned serr_ofs;
+
+       /* PIO related setup 
+        */
+       port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
+       port->error_addr = 
+               port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
+       port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
+       port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
+       port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
+       port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
+       port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
+       port->status_addr = 
+               port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
+       /* special case: control/altstatus doesn't have ATA_REG_ address */
+       port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
+
+       /* unused: */
        port->cmd_addr = port->bmdma_addr = port->scr_addr = 0;
 
+       /* Clear any currently outstanding port interrupt conditions */
+       serr_ofs = mv_scr_offset(SCR_ERROR);
+       writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
+       writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
+
        /* unmask all EDMA error interrupts */
-       writel(~0, (void __iomem *)base + EDMA_ERR_IRQ_MASK_OFS);
+       writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
 
        VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n", 
-               readl((void __iomem *)base + EDMA_CFG_OFS),
-               readl((void __iomem *)base + EDMA_ERR_IRQ_CAUSE_OFS),
-               readl((void __iomem *)base + EDMA_ERR_IRQ_MASK_OFS));
+               readl(port_mmio + EDMA_CFG_OFS),
+               readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
+               readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
 }
 
+/**
+ *      mv_host_init - Perform some early initialization of the host.
+ *      @probe_ent: early data struct representing the host
+ *
+ *      If possible, do an early global reset of the host.  Then do
+ *      our port init and clear/unmask all/relevant host interrupts.
+ *
+ *      LOCKING:
+ *      Inherited from caller.
+ */
 static int mv_host_init(struct ata_probe_ent *probe_ent)
 {
        int rc = 0, n_hc, port, hc;
        void __iomem *mmio = probe_ent->mmio_base;
        void __iomem *port_mmio;
 
-       if (mv_master_reset(probe_ent->mmio_base)) {
+       if ((MV_FLAG_GLBL_SFT_RST & probe_ent->host_flags) && 
+           mv_global_soft_reset(probe_ent->mmio_base)) {
                rc = 1;
                goto done;
        }
@@ -676,17 +1393,27 @@ static int mv_host_init(struct ata_probe_ent *probe_ent)
 
        for (port = 0; port < probe_ent->n_ports; port++) {
                port_mmio = mv_port_base(mmio, port);
-               mv_port_init(&probe_ent->port[port], (unsigned long)port_mmio);
+               mv_port_init(&probe_ent->port[port], port_mmio);
        }
 
        for (hc = 0; hc < n_hc; hc++) {
-               VPRINTK("HC%i: HC config=0x%08x HC IRQ cause=0x%08x\n", hc,
-                       readl(mv_hc_base(mmio, hc) + HC_CFG_OFS),
-                       readl(mv_hc_base(mmio, hc) + HC_IRQ_CAUSE_OFS));
+               void __iomem *hc_mmio = mv_hc_base(mmio, hc);
+
+               VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
+                       "(before clear)=0x%08x\n", hc,
+                       readl(hc_mmio + HC_CFG_OFS),
+                       readl(hc_mmio + HC_IRQ_CAUSE_OFS));
+
+               /* Clear any currently outstanding hc interrupt conditions */
+               writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
        }
 
-       writel(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
-       writel(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
+       /* Clear any currently outstanding host interrupt conditions */
+       writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
+
+       /* and unmask interrupt generation for host regs */
+       writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
+       writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
 
        VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
                "PCI int cause/mask=0x%08x/0x%08x\n", 
@@ -694,11 +1421,53 @@ static int mv_host_init(struct ata_probe_ent *probe_ent)
                readl(mmio + HC_MAIN_IRQ_MASK_OFS),
                readl(mmio + PCI_IRQ_CAUSE_OFS),
                readl(mmio + PCI_IRQ_MASK_OFS));
-
- done:
+done:
        return rc;
 }
 
+/**
+ *      mv_print_info - Dump key info to kernel log for perusal.
+ *      @probe_ent: early data struct representing the host
+ *
+ *      FIXME: complete this.
+ *
+ *      LOCKING:
+ *      Inherited from caller.
+ */
+static void mv_print_info(struct ata_probe_ent *probe_ent)
+{
+       struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
+       struct mv_host_priv *hpriv = probe_ent->private_data;
+       u8 rev_id, scc;
+       const char *scc_s;
+
+       /* Use this to determine the HW stepping of the chip so we know
+        * what errata to work around
+        */
+       pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
+
+       pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
+       if (scc == 0)
+               scc_s = "SCSI";
+       else if (scc == 0x01)
+               scc_s = "RAID";
+       else
+               scc_s = "unknown";
+
+       printk(KERN_INFO DRV_NAME 
+              "(%s) %u slots %u ports %s mode IRQ via %s\n",
+              pci_name(pdev), (unsigned)MV_MAX_Q_DEPTH, probe_ent->n_ports, 
+              scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
+}
+
+/**
+ *      mv_init_one - handle a positive probe of a Marvell host
+ *      @pdev: PCI device found
+ *      @ent: PCI device ID entry for the matched host
+ *
+ *      LOCKING:
+ *      Inherited from caller.
+ */
 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        static int printed_version = 0;
@@ -706,16 +1475,12 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct mv_host_priv *hpriv;
        unsigned int board_idx = (unsigned int)ent->driver_data;
        void __iomem *mmio_base;
-       int pci_dev_busy = 0;
-       int rc;
+       int pci_dev_busy = 0, rc;
 
        if (!printed_version++) {
-               printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
+               printk(KERN_INFO DRV_NAME " version " DRV_VERSION "\n");
        }
 
-       VPRINTK("ENTER for PCI Bus:Slot.Func=%u:%u.%u\n", pdev->bus->number,
-               PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
-
        rc = pci_enable_device(pdev);
        if (rc) {
                return rc;
@@ -727,8 +1492,6 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_out;
        }
 
-       pci_intx(pdev, 1);
-
        probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
        if (probe_ent == NULL) {
                rc = -ENOMEM;
@@ -739,8 +1502,7 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        probe_ent->dev = pci_dev_to_dev(pdev);
        INIT_LIST_HEAD(&probe_ent->node);
 
-       mmio_base = ioremap_nocache(pci_resource_start(pdev, MV_PRIMARY_BAR),
-                                   pci_resource_len(pdev, MV_PRIMARY_BAR));
+       mmio_base = pci_iomap(pdev, MV_PRIMARY_BAR, 0);
        if (mmio_base == NULL) {
                rc = -ENOMEM;
                goto err_out_free_ent;
@@ -769,37 +1531,40 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (rc) {
                goto err_out_hpriv;
        }
-/*     mv_print_info(probe_ent); */
 
-       {
-               int b, w;
-               u32 dw[4];      /* hold a line of 16b */
-               VPRINTK("PCI config space:\n");
-               for (b = 0; b < 0x40; ) {
-                       for (w = 0; w < 4; w++) {
-                               (void) pci_read_config_dword(pdev,b,&dw[w]);
-                               b += sizeof(*dw);
-                       }
-                       VPRINTK("%08x %08x %08x %08x\n",
-                               dw[0],dw[1],dw[2],dw[3]);
-               }
+       /* Enable interrupts */
+       if (pci_enable_msi(pdev) == 0) {
+               hpriv->hp_flags |= MV_HP_FLAG_MSI;
+       } else {
+               pci_intx(pdev, 1);
        }
 
-       /* FIXME: check ata_device_add return value */
-       ata_device_add(probe_ent);
-       kfree(probe_ent);
+       mv_dump_pci_cfg(pdev, 0x68);
+       mv_print_info(probe_ent);
+
+       if (ata_device_add(probe_ent) == 0) {
+               rc = -ENODEV;           /* No devices discovered */
+               goto err_out_dev_add;
+       }
 
+       kfree(probe_ent);
        return 0;
 
- err_out_hpriv:
+err_out_dev_add:
+       if (MV_HP_FLAG_MSI & hpriv->hp_flags) {
+               pci_disable_msi(pdev);
+       } else {
+               pci_intx(pdev, 0);
+       }
+err_out_hpriv:
        kfree(hpriv);
- err_out_iounmap:
-       iounmap(mmio_base);
- err_out_free_ent:
+err_out_iounmap:
+       pci_iounmap(pdev, mmio_base);
+err_out_free_ent:
        kfree(probe_ent);
- err_out_regions:
+err_out_regions:
        pci_release_regions(pdev);
- err_out:
+err_out:
        if (!pci_dev_busy) {
                pci_disable_device(pdev);
        }
index cb832b03ec5e0cc0765523d571b8f8702eaa22c0..1a56d6c79dddfc8fac2e52d4320110977e1ce599 100644 (file)
@@ -238,7 +238,7 @@ static Scsi_Host_Template nv_sht = {
        .ordered_flush          = 1,
 };
 
-static struct ata_port_operations nv_ops = {
+static const struct ata_port_operations nv_ops = {
        .port_disable           = ata_port_disable,
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
@@ -331,7 +331,7 @@ static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
                return 0xffffffffU;
 
        if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO)
-               return readl((void*)ap->ioaddr.scr_addr + (sc_reg * 4));
+               return readl((void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
        else
                return inl(ap->ioaddr.scr_addr + (sc_reg * 4));
 }
@@ -345,7 +345,7 @@ static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
                return;
 
        if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO)
-               writel(val, (void*)ap->ioaddr.scr_addr + (sc_reg * 4));
+               writel(val, (void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
        else
                outl(val, ap->ioaddr.scr_addr + (sc_reg * 4));
 }
@@ -405,7 +405,7 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
        rc = -ENOMEM;
 
        ppi = &nv_port_info;
-       probe_ent = ata_pci_init_native_mode(pdev, &ppi);
+       probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
        if (!probe_ent)
                goto err_out_regions;
 
index 538ad727bd2eb897c32cad017dcad8e11701d6bf..eee93b0016df36f86056cd4f8040ba0182019d76 100644 (file)
@@ -87,8 +87,8 @@ static void pdc_port_stop(struct ata_port *ap);
 static void pdc_pata_phy_reset(struct ata_port *ap);
 static void pdc_sata_phy_reset(struct ata_port *ap);
 static void pdc_qc_prep(struct ata_queued_cmd *qc);
-static void pdc_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf);
-static void pdc_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf);
+static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
+static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
 static void pdc_irq_clear(struct ata_port *ap);
 static int pdc_qc_issue_prot(struct ata_queued_cmd *qc);
 
@@ -113,7 +113,7 @@ static Scsi_Host_Template pdc_ata_sht = {
        .ordered_flush          = 1,
 };
 
-static struct ata_port_operations pdc_sata_ops = {
+static const struct ata_port_operations pdc_sata_ops = {
        .port_disable           = ata_port_disable,
        .tf_load                = pdc_tf_load_mmio,
        .tf_read                = ata_tf_read,
@@ -136,7 +136,7 @@ static struct ata_port_operations pdc_sata_ops = {
        .host_stop              = ata_pci_host_stop,
 };
 
-static struct ata_port_operations pdc_pata_ops = {
+static const struct ata_port_operations pdc_pata_ops = {
        .port_disable           = ata_port_disable,
        .tf_load                = pdc_tf_load_mmio,
        .tf_read                = ata_tf_read,
@@ -324,7 +324,7 @@ static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
 {
        if (sc_reg > SCR_CONTROL)
                return 0xffffffffU;
-       return readl((void *) ap->ioaddr.scr_addr + (sc_reg * 4));
+       return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
 }
 
 
@@ -333,7 +333,7 @@ static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
 {
        if (sc_reg > SCR_CONTROL)
                return;
-       writel(val, (void *) ap->ioaddr.scr_addr + (sc_reg * 4));
+       writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
 }
 
 static void pdc_qc_prep(struct ata_queued_cmd *qc)
@@ -438,11 +438,11 @@ static inline unsigned int pdc_host_intr( struct ata_port *ap,
                break;
 
         default:
-                ap->stats.idle_irq++;
-                break;
+               ap->stats.idle_irq++;
+               break;
         }
 
-        return handled;
+       return handled;
 }
 
 static void pdc_irq_clear(struct ata_port *ap)
@@ -523,8 +523,8 @@ static inline void pdc_packet_start(struct ata_queued_cmd *qc)
 
        pp->pkt[2] = seq;
        wmb();                  /* flush PRD, pkt writes */
-       writel(pp->pkt_dma, (void *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
-       readl((void *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */
+       writel(pp->pkt_dma, (void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
+       readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */
 }
 
 static int pdc_qc_issue_prot(struct ata_queued_cmd *qc)
@@ -546,7 +546,7 @@ static int pdc_qc_issue_prot(struct ata_queued_cmd *qc)
        return ata_qc_issue_prot(qc);
 }
 
-static void pdc_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf)
+static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
 {
        WARN_ON (tf->protocol == ATA_PROT_DMA ||
                 tf->protocol == ATA_PROT_NODATA);
@@ -554,7 +554,7 @@ static void pdc_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf)
 }
 
 
-static void pdc_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf)
+static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
 {
        WARN_ON (tf->protocol == ATA_PROT_DMA ||
                 tf->protocol == ATA_PROT_NODATA);
index ffcdeb68641cf5a2493abf7be5b68b1da32f329c..250dafa6bc363721f189ceeaf030522356ad8242 100644 (file)
@@ -51,8 +51,6 @@ enum {
        QS_PRD_BYTES            = QS_MAX_PRD * 16,
        QS_PKT_BYTES            = QS_CPB_BYTES + QS_PRD_BYTES,
 
-       QS_DMA_BOUNDARY         = ~0UL,
-
        /* global register offsets */
        QS_HCF_CNFG3            = 0x0003, /* host configuration offset */
        QS_HID_HPHY             = 0x0004, /* host physical interface info */
@@ -101,6 +99,10 @@ enum {
        board_2068_idx          = 0,    /* QStor 4-port SATA/RAID */
 };
 
+enum {
+       QS_DMA_BOUNDARY         = ~0UL
+};
+
 typedef enum { qs_state_idle, qs_state_pkt, qs_state_mmio } qs_state_t;
 
 struct qs_port_priv {
@@ -145,7 +147,7 @@ static Scsi_Host_Template qs_ata_sht = {
        .bios_param             = ata_std_bios_param,
 };
 
-static struct ata_port_operations qs_ata_ops = {
+static const struct ata_port_operations qs_ata_ops = {
        .port_disable           = ata_port_disable,
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
index ba98a175ee3a8f9e227917689b9678285df586c3..3a056173fb95a2c9a62598906fcec57231e7db76 100644 (file)
@@ -150,7 +150,7 @@ static Scsi_Host_Template sil_sht = {
        .ordered_flush          = 1,
 };
 
-static struct ata_port_operations sil_ops = {
+static const struct ata_port_operations sil_ops = {
        .port_disable           = ata_port_disable,
        .dev_config             = sil_dev_config,
        .tf_load                = ata_tf_load,
@@ -289,7 +289,7 @@ static inline unsigned long sil_scr_addr(struct ata_port *ap, unsigned int sc_re
 
 static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg)
 {
-       void *mmio = (void *) sil_scr_addr(ap, sc_reg);
+       void __iomem *mmio = (void __iomem *) sil_scr_addr(ap, sc_reg);
        if (mmio)
                return readl(mmio);
        return 0xffffffffU;
@@ -297,7 +297,7 @@ static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg)
 
 static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
 {
-       void *mmio = (void *) sil_scr_addr(ap, sc_reg);
+       void __iomem *mmio = (void __iomem *) sil_scr_addr(ap, sc_reg);
        if (mmio)
                writel(val, mmio);
 }
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
new file mode 100644 (file)
index 0000000..32d730b
--- /dev/null
@@ -0,0 +1,875 @@
+/*
+ * sata_sil24.c - Driver for Silicon Image 3124/3132 SATA-2 controllers
+ *
+ * Copyright 2005  Tejun Heo
+ *
+ * Based on preview driver from Silicon Image.
+ *
+ * NOTE: No NCQ/ATAPI support yet.  The preview driver didn't support
+ * NCQ nor ATAPI, and, unfortunately, I couldn't find out how to make
+ * those work.  Enabling those shouldn't be difficult.  Basic
+ * structure is all there (in libata-dev tree).  If you have any
+ * information about this hardware, please contact me or linux-ide.
+ * Info is needed on...
+ *
+ * - How to issue tagged commands and turn on sactive on issue accordingly.
+ * - Where to put an ATAPI command and how to tell the device to send it.
+ * - How to enable/use 64bit.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <scsi/scsi_host.h>
+#include "scsi.h"
+#include <linux/libata.h>
+#include <asm/io.h>
+
+#define DRV_NAME       "sata_sil24"
+#define DRV_VERSION    "0.22"  /* Silicon Image's preview driver was 0.10 */
+
+/*
+ * Port request block (PRB) 32 bytes
+ */
+struct sil24_prb {
+       u16     ctrl;
+       u16     prot;
+       u32     rx_cnt;
+       u8      fis[6 * 4];
+};
+
+/*
+ * Scatter gather entry (SGE) 16 bytes
+ */
+struct sil24_sge {
+       u64     addr;
+       u32     cnt;
+       u32     flags;
+};
+
+/*
+ * Port multiplier
+ */
+struct sil24_port_multiplier {
+       u32     diag;
+       u32     sactive;
+};
+
+enum {
+       /*
+        * Global controller registers (128 bytes @ BAR0)
+        */
+               /* 32 bit regs */
+       HOST_SLOT_STAT          = 0x00, /* 32 bit slot stat * 4 */
+       HOST_CTRL               = 0x40,
+       HOST_IRQ_STAT           = 0x44,
+       HOST_PHY_CFG            = 0x48,
+       HOST_BIST_CTRL          = 0x50,
+       HOST_BIST_PTRN          = 0x54,
+       HOST_BIST_STAT          = 0x58,
+       HOST_MEM_BIST_STAT      = 0x5c,
+       HOST_FLASH_CMD          = 0x70,
+               /* 8 bit regs */
+       HOST_FLASH_DATA         = 0x74,
+       HOST_TRANSITION_DETECT  = 0x75,
+       HOST_GPIO_CTRL          = 0x76,
+       HOST_I2C_ADDR           = 0x78, /* 32 bit */
+       HOST_I2C_DATA           = 0x7c,
+       HOST_I2C_XFER_CNT       = 0x7e,
+       HOST_I2C_CTRL           = 0x7f,
+
+       /* HOST_SLOT_STAT bits */
+       HOST_SSTAT_ATTN         = (1 << 31),
+
+       /*
+        * Port registers
+        * (8192 bytes @ +0x0000, +0x2000, +0x4000 and +0x6000 @ BAR2)
+        */
+       PORT_REGS_SIZE          = 0x2000,
+       PORT_PRB                = 0x0000, /* (32 bytes PRB + 16 bytes SGEs * 6) * 31 (3968 bytes) */
+
+       PORT_PM                 = 0x0f80, /* 8 bytes PM * 16 (128 bytes) */
+               /* 32 bit regs */
+       PORT_CTRL_STAT          = 0x1000, /* write: ctrl-set, read: stat */
+       PORT_CTRL_CLR           = 0x1004, /* write: ctrl-clear */
+       PORT_IRQ_STAT           = 0x1008, /* high: status, low: interrupt */
+       PORT_IRQ_ENABLE_SET     = 0x1010, /* write: enable-set */
+       PORT_IRQ_ENABLE_CLR     = 0x1014, /* write: enable-clear */
+       PORT_ACTIVATE_UPPER_ADDR= 0x101c,
+       PORT_EXEC_FIFO          = 0x1020, /* command execution fifo */
+       PORT_CMD_ERR            = 0x1024, /* command error number */
+       PORT_FIS_CFG            = 0x1028,
+       PORT_FIFO_THRES         = 0x102c,
+               /* 16 bit regs */
+       PORT_DECODE_ERR_CNT     = 0x1040,
+       PORT_DECODE_ERR_THRESH  = 0x1042,
+       PORT_CRC_ERR_CNT        = 0x1044,
+       PORT_CRC_ERR_THRESH     = 0x1046,
+       PORT_HSHK_ERR_CNT       = 0x1048,
+       PORT_HSHK_ERR_THRESH    = 0x104a,
+               /* 32 bit regs */
+       PORT_PHY_CFG            = 0x1050,
+       PORT_SLOT_STAT          = 0x1800,
+       PORT_CMD_ACTIVATE       = 0x1c00, /* 64 bit cmd activate * 31 (248 bytes) */
+       PORT_EXEC_DIAG          = 0x1e00, /* 32bit exec diag * 16 (64 bytes, 0-10 used on 3124) */
+       PORT_PSD_DIAG           = 0x1e40, /* 32bit psd diag * 16 (64 bytes, 0-8 used on 3124) */
+       PORT_SCONTROL           = 0x1f00,
+       PORT_SSTATUS            = 0x1f04,
+       PORT_SERROR             = 0x1f08,
+       PORT_SACTIVE            = 0x1f0c,
+
+       /* PORT_CTRL_STAT bits */
+       PORT_CS_PORT_RST        = (1 << 0), /* port reset */
+       PORT_CS_DEV_RST         = (1 << 1), /* device reset */
+       PORT_CS_INIT            = (1 << 2), /* port initialize */
+       PORT_CS_IRQ_WOC         = (1 << 3), /* interrupt write one to clear */
+       PORT_CS_RESUME          = (1 << 6), /* port resume */
+       PORT_CS_32BIT_ACTV      = (1 << 10), /* 32-bit activation */
+       PORT_CS_PM_EN           = (1 << 13), /* port multiplier enable */
+       PORT_CS_RDY             = (1 << 31), /* port ready to accept commands */
+
+       /* PORT_IRQ_STAT/ENABLE_SET/CLR */
+       /* bits[11:0] are masked */
+       PORT_IRQ_COMPLETE       = (1 << 0), /* command(s) completed */
+       PORT_IRQ_ERROR          = (1 << 1), /* command execution error */
+       PORT_IRQ_PORTRDY_CHG    = (1 << 2), /* port ready change */
+       PORT_IRQ_PWR_CHG        = (1 << 3), /* power management change */
+       PORT_IRQ_PHYRDY_CHG     = (1 << 4), /* PHY ready change */
+       PORT_IRQ_COMWAKE        = (1 << 5), /* COMWAKE received */
+       PORT_IRQ_UNK_FIS        = (1 << 6), /* Unknown FIS received */
+       PORT_IRQ_SDB_FIS        = (1 << 11), /* SDB FIS received */
+
+       /* bits[27:16] are unmasked (raw) */
+       PORT_IRQ_RAW_SHIFT      = 16,
+       PORT_IRQ_MASKED_MASK    = 0x7ff,
+       PORT_IRQ_RAW_MASK       = (0x7ff << PORT_IRQ_RAW_SHIFT),
+
+       /* ENABLE_SET/CLR specific, intr steering - 2 bit field */
+       PORT_IRQ_STEER_SHIFT    = 30,
+       PORT_IRQ_STEER_MASK     = (3 << PORT_IRQ_STEER_SHIFT),
+
+       /* PORT_CMD_ERR constants */
+       PORT_CERR_DEV           = 1, /* Error bit in D2H Register FIS */
+       PORT_CERR_SDB           = 2, /* Error bit in SDB FIS */
+       PORT_CERR_DATA          = 3, /* Error in data FIS not detected by dev */
+       PORT_CERR_SEND          = 4, /* Initial cmd FIS transmission failure */
+       PORT_CERR_INCONSISTENT  = 5, /* Protocol mismatch */
+       PORT_CERR_DIRECTION     = 6, /* Data direction mismatch */
+       PORT_CERR_UNDERRUN      = 7, /* Ran out of SGEs while writing */
+       PORT_CERR_OVERRUN       = 8, /* Ran out of SGEs while reading */
+       PORT_CERR_PKT_PROT      = 11, /* DIR invalid in 1st PIO setup of ATAPI */
+       PORT_CERR_SGT_BOUNDARY  = 16, /* PLD ecode 00 - SGT not on qword boundary */
+       PORT_CERR_SGT_TGTABRT   = 17, /* PLD ecode 01 - target abort */
+       PORT_CERR_SGT_MSTABRT   = 18, /* PLD ecode 10 - master abort */
+       PORT_CERR_SGT_PCIPERR   = 19, /* PLD ecode 11 - PCI parity err while fetching SGT */
+       PORT_CERR_CMD_BOUNDARY  = 24, /* ctrl[15:13] 001 - PRB not on qword boundary */
+       PORT_CERR_CMD_TGTABRT   = 25, /* ctrl[15:13] 010 - target abort */
+       PORT_CERR_CMD_MSTABRT   = 26, /* ctrl[15:13] 100 - master abort */
+       PORT_CERR_CMD_PCIPERR   = 27, /* ctrl[15:13] 110 - PCI parity err while fetching PRB */
+       PORT_CERR_XFR_UNDEF     = 32, /* PSD ecode 00 - undefined */
+       PORT_CERR_XFR_TGTABRT   = 33, /* PSD ecode 01 - target abort */
+       PORT_CERR_XFR_MSGABRT   = 34, /* PSD ecode 10 - master abort */
+       PORT_CERR_XFR_PCIPERR   = 35, /* PSD ecode 11 - PCI parity err during transfer */
+       PORT_CERR_SENDSERVICE   = 36, /* FIS received while sending service */
+
+       /*
+        * Other constants
+        */
+       SGE_TRM                 = (1 << 31), /* Last SGE in chain */
+       PRB_SOFT_RST            = (1 << 7),  /* Soft reset request (ign BSY?) */
+
+       /* board id */
+       BID_SIL3124             = 0,
+       BID_SIL3132             = 1,
+       BID_SIL3131             = 2,
+
+       IRQ_STAT_4PORTS         = 0xf,
+};
+
+struct sil24_cmd_block {
+       struct sil24_prb prb;
+       struct sil24_sge sge[LIBATA_MAX_PRD];
+};
+
+/*
+ * ap->private_data
+ *
+ * The preview driver always returned 0 for status.  We emulate it
+ * here from the previous interrupt.
+ */
+struct sil24_port_priv {
+       struct sil24_cmd_block *cmd_block;      /* 32 cmd blocks */
+       dma_addr_t cmd_block_dma;               /* DMA base addr for them */
+       struct ata_taskfile tf;                 /* Cached taskfile registers */
+};
+
+/* ap->host_set->private_data */
+struct sil24_host_priv {
+       void *host_base;        /* global controller control (128 bytes @BAR0) */
+       void *port_base;        /* port registers (4 * 8192 bytes @BAR2) */
+};
+
+static u8 sil24_check_status(struct ata_port *ap);
+static u8 sil24_check_err(struct ata_port *ap);
+static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg);
+static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val);
+static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
+static void sil24_phy_reset(struct ata_port *ap);
+static void sil24_qc_prep(struct ata_queued_cmd *qc);
+static int sil24_qc_issue(struct ata_queued_cmd *qc);
+static void sil24_irq_clear(struct ata_port *ap);
+static void sil24_eng_timeout(struct ata_port *ap);
+static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+static int sil24_port_start(struct ata_port *ap);
+static void sil24_port_stop(struct ata_port *ap);
+static void sil24_host_stop(struct ata_host_set *host_set);
+static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+
+static struct pci_device_id sil24_pci_tbl[] = {
+       { 0x1095, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 },
+       { 0x1095, 0x3132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3132 },
+       { 0x1095, 0x3131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 },
+       { 0x1095, 0x3531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 },
+       { } /* terminate list */
+};
+
+static struct pci_driver sil24_pci_driver = {
+       .name                   = DRV_NAME,
+       .id_table               = sil24_pci_tbl,
+       .probe                  = sil24_init_one,
+       .remove                 = ata_pci_remove_one, /* safe? */
+};
+
+static Scsi_Host_Template sil24_sht = {
+       .module                 = THIS_MODULE,
+       .name                   = DRV_NAME,
+       .ioctl                  = ata_scsi_ioctl,
+       .queuecommand           = ata_scsi_queuecmd,
+       .eh_strategy_handler    = ata_scsi_error,
+       .can_queue              = ATA_DEF_QUEUE,
+       .this_id                = ATA_SHT_THIS_ID,
+       .sg_tablesize           = LIBATA_MAX_PRD,
+       .max_sectors            = ATA_MAX_SECTORS,
+       .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
+       .emulated               = ATA_SHT_EMULATED,
+       .use_clustering         = ATA_SHT_USE_CLUSTERING,
+       .proc_name              = DRV_NAME,
+       .dma_boundary           = ATA_DMA_BOUNDARY,
+       .slave_configure        = ata_scsi_slave_config,
+       .bios_param             = ata_std_bios_param,
+       .ordered_flush          = 1, /* NCQ not supported yet */
+};
+
+static const struct ata_port_operations sil24_ops = {
+       .port_disable           = ata_port_disable,
+
+       .check_status           = sil24_check_status,
+       .check_altstatus        = sil24_check_status,
+       .check_err              = sil24_check_err,
+       .dev_select             = ata_noop_dev_select,
+
+       .tf_read                = sil24_tf_read,
+
+       .phy_reset              = sil24_phy_reset,
+
+       .qc_prep                = sil24_qc_prep,
+       .qc_issue               = sil24_qc_issue,
+
+       .eng_timeout            = sil24_eng_timeout,
+
+       .irq_handler            = sil24_interrupt,
+       .irq_clear              = sil24_irq_clear,
+
+       .scr_read               = sil24_scr_read,
+       .scr_write              = sil24_scr_write,
+
+       .port_start             = sil24_port_start,
+       .port_stop              = sil24_port_stop,
+       .host_stop              = sil24_host_stop,
+};
+
+/*
+ * Use bits 30-31 of host_flags to encode available port numbers.
+ * Current maximum is 4.
+ */
+#define SIL24_NPORTS2FLAG(nports)      ((((unsigned)(nports) - 1) & 0x3) << 30)
+#define SIL24_FLAG2NPORTS(flag)                ((((flag) >> 30) & 0x3) + 1)
+
+static struct ata_port_info sil24_port_info[] = {
+       /* sil_3124 */
+       {
+               .sht            = &sil24_sht,
+               .host_flags     = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+                                 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
+                                 ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(4),
+               .pio_mask       = 0x1f,                 /* pio0-4 */
+               .mwdma_mask     = 0x07,                 /* mwdma0-2 */
+               .udma_mask      = 0x3f,                 /* udma0-5 */
+               .port_ops       = &sil24_ops,
+       },
+       /* sil_3132 */ 
+       {
+               .sht            = &sil24_sht,
+               .host_flags     = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+                                 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
+                                 ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(2),
+               .pio_mask       = 0x1f,                 /* pio0-4 */
+               .mwdma_mask     = 0x07,                 /* mwdma0-2 */
+               .udma_mask      = 0x3f,                 /* udma0-5 */
+               .port_ops       = &sil24_ops,
+       },
+       /* sil_3131/sil_3531 */
+       {
+               .sht            = &sil24_sht,
+               .host_flags     = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+                                 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
+                                 ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(1),
+               .pio_mask       = 0x1f,                 /* pio0-4 */
+               .mwdma_mask     = 0x07,                 /* mwdma0-2 */
+               .udma_mask      = 0x3f,                 /* udma0-5 */
+               .port_ops       = &sil24_ops,
+       },
+};
+
+static inline void sil24_update_tf(struct ata_port *ap)
+{
+       struct sil24_port_priv *pp = ap->private_data;
+       void *port = (void *)ap->ioaddr.cmd_addr;
+       struct sil24_prb *prb = port;
+
+       ata_tf_from_fis(prb->fis, &pp->tf);
+}
+
+static u8 sil24_check_status(struct ata_port *ap)
+{
+       struct sil24_port_priv *pp = ap->private_data;
+       return pp->tf.command;
+}
+
+static u8 sil24_check_err(struct ata_port *ap)
+{
+       struct sil24_port_priv *pp = ap->private_data;
+       return pp->tf.feature;
+}
+
+static int sil24_scr_map[] = {
+       [SCR_CONTROL]   = 0,
+       [SCR_STATUS]    = 1,
+       [SCR_ERROR]     = 2,
+       [SCR_ACTIVE]    = 3,
+};
+
+static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg)
+{
+       void *scr_addr = (void *)ap->ioaddr.scr_addr;
+       if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
+               void *addr = scr_addr + sil24_scr_map[sc_reg] * 4;
+               return readl(addr);
+       }
+       return 0xffffffffU;
+}
+
+static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
+{
+       void *scr_addr = (void *)ap->ioaddr.scr_addr;
+       if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
+               void *addr = scr_addr + sil24_scr_map[sc_reg] * 4;
+               writel(val, addr);
+       }
+}
+
+static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+       struct sil24_port_priv *pp = ap->private_data;
+       *tf = pp->tf;
+}
+
+static void sil24_phy_reset(struct ata_port *ap)
+{
+       __sata_phy_reset(ap);
+       /*
+        * No ATAPI yet.  Just unconditionally indicate ATA device.
+        * If ATAPI device is attached, it will fail ATA_CMD_ID_ATA
+        * and libata core will ignore the device.
+        */
+       if (!(ap->flags & ATA_FLAG_PORT_DISABLED))
+               ap->device[0].class = ATA_DEV_ATA;
+}
+
+static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
+                                struct sil24_cmd_block *cb)
+{
+       struct scatterlist *sg = qc->sg;
+       struct sil24_sge *sge = cb->sge;
+       unsigned i;
+
+       for (i = 0; i < qc->n_elem; i++, sg++, sge++) {
+               sge->addr = cpu_to_le64(sg_dma_address(sg));
+               sge->cnt = cpu_to_le32(sg_dma_len(sg));
+               sge->flags = i < qc->n_elem - 1 ? 0 : cpu_to_le32(SGE_TRM);
+       }
+}
+
+static void sil24_qc_prep(struct ata_queued_cmd *qc)
+{
+       struct ata_port *ap = qc->ap;
+       struct sil24_port_priv *pp = ap->private_data;
+       struct sil24_cmd_block *cb = pp->cmd_block + qc->tag;
+       struct sil24_prb *prb = &cb->prb;
+
+       switch (qc->tf.protocol) {
+       case ATA_PROT_PIO:
+       case ATA_PROT_DMA:
+       case ATA_PROT_NODATA:
+               break;
+       default:
+               /* ATAPI isn't supported yet */
+               BUG();
+       }
+
+       ata_tf_to_fis(&qc->tf, prb->fis, 0);
+
+       if (qc->flags & ATA_QCFLAG_DMAMAP)
+               sil24_fill_sg(qc, cb);
+}
+
+static int sil24_qc_issue(struct ata_queued_cmd *qc)
+{
+       struct ata_port *ap = qc->ap;
+       void *port = (void *)ap->ioaddr.cmd_addr;
+       struct sil24_port_priv *pp = ap->private_data;
+       dma_addr_t paddr = pp->cmd_block_dma + qc->tag * sizeof(*pp->cmd_block);
+
+       writel((u32)paddr, port + PORT_CMD_ACTIVATE);
+       return 0;
+}
+
+static void sil24_irq_clear(struct ata_port *ap)
+{
+       /* unused */
+}
+
+static int __sil24_reset_controller(void *port)
+{
+       int cnt;
+       u32 tmp;
+
+       /* Reset controller state.  Is this correct? */
+       writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT);
+       readl(port + PORT_CTRL_STAT);   /* sync */
+
+       /* Max ~100ms */
+       for (cnt = 0; cnt < 1000; cnt++) {
+               udelay(100);
+               tmp = readl(port + PORT_CTRL_STAT);
+               if (!(tmp & PORT_CS_DEV_RST))
+                       break;
+       }
+
+       if (tmp & PORT_CS_DEV_RST)
+               return -1;
+       return 0;
+}
+
+static void sil24_reset_controller(struct ata_port *ap)
+{
+       printk(KERN_NOTICE DRV_NAME
+              " ata%u: resetting controller...\n", ap->id);
+       if (__sil24_reset_controller((void *)ap->ioaddr.cmd_addr))
+                printk(KERN_ERR DRV_NAME
+                       " ata%u: failed to reset controller\n", ap->id);
+}
+
+static void sil24_eng_timeout(struct ata_port *ap)
+{
+       struct ata_queued_cmd *qc;
+
+       qc = ata_qc_from_tag(ap, ap->active_tag);
+       if (!qc) {
+               printk(KERN_ERR "ata%u: BUG: timeout without command\n",
+                      ap->id);
+               return;
+       }
+
+       /*
+        * hack alert!  We cannot use the supplied completion
+        * function from inside the ->eh_strategy_handler() thread.
+        * libata is the only user of ->eh_strategy_handler() in
+        * any kernel, so the default scsi_done() assumes it is
+        * not being called from the SCSI EH.
+        */
+       printk(KERN_ERR "ata%u: command timeout\n", ap->id);
+       qc->scsidone = scsi_finish_command;
+       ata_qc_complete(qc, ATA_ERR);
+
+       sil24_reset_controller(ap);
+}
+
+static void sil24_error_intr(struct ata_port *ap, u32 slot_stat)
+{
+       struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
+       struct sil24_port_priv *pp = ap->private_data;
+       void *port = (void *)ap->ioaddr.cmd_addr;
+       u32 irq_stat, cmd_err, sstatus, serror;
+
+       irq_stat = readl(port + PORT_IRQ_STAT);
+       writel(irq_stat, port + PORT_IRQ_STAT);         /* clear irq */
+
+       if (!(irq_stat & PORT_IRQ_ERROR)) {
+               /* ignore non-completion, non-error irqs for now */
+               printk(KERN_WARNING DRV_NAME
+                      " ata%u: non-error exception irq (irq_stat %x)\n",
+                      ap->id, irq_stat);
+               return;
+       }
+
+       cmd_err = readl(port + PORT_CMD_ERR);
+       sstatus = readl(port + PORT_SSTATUS);
+       serror = readl(port + PORT_SERROR);
+       if (serror)
+               writel(serror, port + PORT_SERROR);
+
+       printk(KERN_ERR DRV_NAME " ata%u: error interrupt on port%d\n"
+              "  stat=0x%x irq=0x%x cmd_err=%d sstatus=0x%x serror=0x%x\n",
+              ap->id, ap->port_no, slot_stat, irq_stat, cmd_err, sstatus, serror);
+
+       if (cmd_err == PORT_CERR_DEV || cmd_err == PORT_CERR_SDB) {
+               /*
+                * Device is reporting error, tf registers are valid.
+                */
+               sil24_update_tf(ap);
+       } else {
+               /*
+                * Other errors.  libata currently doesn't have any
+                * mechanism to report these errors.  Just turn on
+                * ATA_ERR.
+                */
+               pp->tf.command = ATA_ERR;
+       }
+
+       if (qc)
+               ata_qc_complete(qc, pp->tf.command);
+
+       sil24_reset_controller(ap);
+}
+
+static inline void sil24_host_intr(struct ata_port *ap)
+{
+       struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
+       void *port = (void *)ap->ioaddr.cmd_addr;
+       u32 slot_stat;
+
+       slot_stat = readl(port + PORT_SLOT_STAT);
+       if (!(slot_stat & HOST_SSTAT_ATTN)) {
+               struct sil24_port_priv *pp = ap->private_data;
+               /*
+                * !HOST_SSTAT_ATTN guarantees successful completion,
+                * so reading back tf registers is unnecessary for
+                * most commands.  TODO: read tf registers for
+                * commands which require these values on successful
+                * completion (EXECUTE DEVICE DIAGNOSTIC, CHECK POWER,
+                * DEVICE RESET and READ PORT MULTIPLIER; any more?).
+                */
+               sil24_update_tf(ap);
+
+               if (qc)
+                       ata_qc_complete(qc, pp->tf.command);
+       } else
+               sil24_error_intr(ap, slot_stat);
+}
+
+static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
+{
+       struct ata_host_set *host_set = dev_instance;
+       struct sil24_host_priv *hpriv = host_set->private_data;
+       unsigned handled = 0;
+       u32 status;
+       int i;
+
+       status = readl(hpriv->host_base + HOST_IRQ_STAT);
+
+       if (status == 0xffffffff) {
+               printk(KERN_ERR DRV_NAME ": IRQ status == 0xffffffff, "
+                      "PCI fault or device removal?\n");
+               goto out;
+       }
+
+       if (!(status & IRQ_STAT_4PORTS))
+               goto out;
+
+       spin_lock(&host_set->lock);
+
+       for (i = 0; i < host_set->n_ports; i++)
+               if (status & (1 << i)) {
+                       struct ata_port *ap = host_set->ports[i];
+                       if (ap && !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
+                               sil24_host_intr(host_set->ports[i]);
+                               handled++;
+                       } else
+                               printk(KERN_ERR DRV_NAME
+                                      ": interrupt from disabled port %d\n", i);
+               }
+
+       spin_unlock(&host_set->lock);
+ out:
+       return IRQ_RETVAL(handled);
+}
+
+static int sil24_port_start(struct ata_port *ap)
+{
+       struct device *dev = ap->host_set->dev;
+       struct sil24_port_priv *pp;
+       struct sil24_cmd_block *cb;
+       size_t cb_size = sizeof(*cb);
+       dma_addr_t cb_dma;
+
+       pp = kmalloc(sizeof(*pp), GFP_KERNEL);
+       if (!pp)
+               return -ENOMEM;
+       memset(pp, 0, sizeof(*pp));
+
+       pp->tf.command = ATA_DRDY;
+
+       cb = dma_alloc_coherent(dev, cb_size, &cb_dma, GFP_KERNEL);
+       if (!cb) {
+               kfree(pp);
+               return -ENOMEM;
+       }
+       memset(cb, 0, cb_size);
+
+       pp->cmd_block = cb;
+       pp->cmd_block_dma = cb_dma;
+
+       ap->private_data = pp;
+
+       return 0;
+}
+
+static void sil24_port_stop(struct ata_port *ap)
+{
+       struct device *dev = ap->host_set->dev;
+       struct sil24_port_priv *pp = ap->private_data;
+       size_t cb_size = sizeof(*pp->cmd_block);
+
+       dma_free_coherent(dev, cb_size, pp->cmd_block, pp->cmd_block_dma);
+       kfree(pp);
+}
+
+static void sil24_host_stop(struct ata_host_set *host_set)
+{
+       struct sil24_host_priv *hpriv = host_set->private_data;
+
+       iounmap(hpriv->host_base);
+       iounmap(hpriv->port_base);
+       kfree(hpriv);
+}
+
+static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       static int printed_version = 0;
+       unsigned int board_id = (unsigned int)ent->driver_data;
+       struct ata_port_info *pinfo = &sil24_port_info[board_id];
+       struct ata_probe_ent *probe_ent = NULL;
+       struct sil24_host_priv *hpriv = NULL;
+       void *host_base = NULL, *port_base = NULL;
+       int i, rc;
+
+       if (!printed_version++)
+               printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
+
+       rc = pci_enable_device(pdev);
+       if (rc)
+               return rc;
+
+       rc = pci_request_regions(pdev, DRV_NAME);
+       if (rc)
+               goto out_disable;
+
+       rc = -ENOMEM;
+       /* ioremap mmio registers */
+       host_base = ioremap(pci_resource_start(pdev, 0),
+                           pci_resource_len(pdev, 0));
+       if (!host_base)
+               goto out_free;
+       port_base = ioremap(pci_resource_start(pdev, 2),
+                           pci_resource_len(pdev, 2));
+       if (!port_base)
+               goto out_free;
+
+       /* allocate & init probe_ent and hpriv */
+       probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
+       if (!probe_ent)
+               goto out_free;
+
+       hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
+       if (!hpriv)
+               goto out_free;
+
+       memset(probe_ent, 0, sizeof(*probe_ent));
+       probe_ent->dev = pci_dev_to_dev(pdev);
+       INIT_LIST_HEAD(&probe_ent->node);
+
+       probe_ent->sht          = pinfo->sht;
+       probe_ent->host_flags   = pinfo->host_flags;
+       probe_ent->pio_mask     = pinfo->pio_mask;
+       probe_ent->mwdma_mask   = pinfo->mwdma_mask;
+       probe_ent->udma_mask    = pinfo->udma_mask;
+       probe_ent->port_ops     = pinfo->port_ops;
+       probe_ent->n_ports      = SIL24_FLAG2NPORTS(pinfo->host_flags);
+
+       probe_ent->irq = pdev->irq;
+       probe_ent->irq_flags = SA_SHIRQ;
+       probe_ent->mmio_base = port_base;
+       probe_ent->private_data = hpriv;
+
+       memset(hpriv, 0, sizeof(*hpriv));
+       hpriv->host_base = host_base;
+       hpriv->port_base = port_base;
+
+       /*
+        * Configure the device
+        */
+       /*
+        * FIXME: This device is certainly 64-bit capable.  We just
+        * don't know how to use it.  After fixing 32bit activation in
+        * this function, enable 64bit masks here.
+        */
+       rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+       if (rc) {
+               printk(KERN_ERR DRV_NAME "(%s): 32-bit DMA enable failed\n",
+                      pci_name(pdev));
+               goto out_free;
+       }
+       rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+       if (rc) {
+               printk(KERN_ERR DRV_NAME "(%s): 32-bit consistent DMA enable failed\n",
+                      pci_name(pdev));
+               goto out_free;
+       }
+
+       /* GPIO off */
+       writel(0, host_base + HOST_FLASH_CMD);
+
+       /* Mask interrupts during initialization */
+       writel(0, host_base + HOST_CTRL);
+
+       for (i = 0; i < probe_ent->n_ports; i++) {
+               void *port = port_base + i * PORT_REGS_SIZE;
+               unsigned long portu = (unsigned long)port;
+               u32 tmp;
+               int cnt;
+
+               probe_ent->port[i].cmd_addr = portu + PORT_PRB;
+               probe_ent->port[i].scr_addr = portu + PORT_SCONTROL;
+
+               ata_std_ports(&probe_ent->port[i]);
+
+               /* Initial PHY setting */
+               writel(0x20c, port + PORT_PHY_CFG);
+
+               /* Clear port RST */
+               tmp = readl(port + PORT_CTRL_STAT);
+               if (tmp & PORT_CS_PORT_RST) {
+                       writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
+                       readl(port + PORT_CTRL_STAT);   /* sync */
+                       for (cnt = 0; cnt < 10; cnt++) {
+                               msleep(10);
+                               tmp = readl(port + PORT_CTRL_STAT);
+                               if (!(tmp & PORT_CS_PORT_RST))
+                                       break;
+                       }
+                       if (tmp & PORT_CS_PORT_RST)
+                               printk(KERN_ERR DRV_NAME
+                                      "(%s): failed to clear port RST\n",
+                                      pci_name(pdev));
+               }
+
+               /* Zero error counters. */
+               writel(0x8000, port + PORT_DECODE_ERR_THRESH);
+               writel(0x8000, port + PORT_CRC_ERR_THRESH);
+               writel(0x8000, port + PORT_HSHK_ERR_THRESH);
+               writel(0x0000, port + PORT_DECODE_ERR_CNT);
+               writel(0x0000, port + PORT_CRC_ERR_CNT);
+               writel(0x0000, port + PORT_HSHK_ERR_CNT);
+
+               /* FIXME: 32bit activation? */
+               writel(0, port + PORT_ACTIVATE_UPPER_ADDR);
+               writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_STAT);
+
+               /* Configure interrupts */
+               writel(0xffff, port + PORT_IRQ_ENABLE_CLR);
+               writel(PORT_IRQ_COMPLETE | PORT_IRQ_ERROR | PORT_IRQ_SDB_FIS,
+                      port + PORT_IRQ_ENABLE_SET);
+
+               /* Clear interrupts */
+               writel(0x0fff0fff, port + PORT_IRQ_STAT);
+               writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
+
+               /* Clear port multiplier enable and resume bits */
+               writel(PORT_CS_PM_EN | PORT_CS_RESUME, port + PORT_CTRL_CLR);
+
+               /* Reset itself */
+               if (__sil24_reset_controller(port))
+                       printk(KERN_ERR DRV_NAME
+                              "(%s): failed to reset controller\n",
+                              pci_name(pdev));
+       }
+
+       /* Turn on interrupts */
+       writel(IRQ_STAT_4PORTS, host_base + HOST_CTRL);
+
+       pci_set_master(pdev);
+
+       /* FIXME: check ata_device_add return value */
+       ata_device_add(probe_ent);
+
+       kfree(probe_ent);
+       return 0;
+
+ out_free:
+       if (host_base)
+               iounmap(host_base);
+       if (port_base)
+               iounmap(port_base);
+       kfree(probe_ent);
+       kfree(hpriv);
+       pci_release_regions(pdev);
+ out_disable:
+       pci_disable_device(pdev);
+       return rc;
+}
+
+static int __init sil24_init(void)
+{
+       return pci_module_init(&sil24_pci_driver);
+}
+
+static void __exit sil24_exit(void)
+{
+       pci_unregister_driver(&sil24_pci_driver);
+}
+
+MODULE_AUTHOR("Tejun Heo");
+MODULE_DESCRIPTION("Silicon Image 3124/3132 SATA low-level driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, sil24_pci_tbl);
+
+module_init(sil24_init);
+module_exit(sil24_exit);
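
Two things in the new file are easy to miss on a first read. First, the controller exposes no real taskfile registers: sil24_update_tf() rebuilds a struct ata_taskfile from the FIS the hardware writes back into the PRB, and check_status/check_err simply return the cached values. Second, the per-board port count is packed into bits 30-31 of host_flags by SIL24_NPORTS2FLAG() and unpacked again in sil24_init_one(). A small round-trip sketch of that encoding, using the macros and flags defined above; foo_nports_roundtrip() itself is only an illustration, not code from the commit:

static unsigned int foo_nports_roundtrip(void)
{
	/* A 4-port 3124: (4 - 1) & 0x3 lands in bits 30-31 ... */
	unsigned long host_flags = ATA_FLAG_SATA | ATA_FLAG_MMIO |
				   SIL24_NPORTS2FLAG(4);

	/* ... and probe recovers it: ((flags >> 30) & 0x3) + 1 == 4. */
	return SIL24_FLAG2NPORTS(host_flags);
}
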
index b227e51d12f4c3b0f8a591e72967cbe0ebda1bc4..057f7b98b6c448dea78cc02a4c6688f545c5bb05 100644 (file)
@@ -102,7 +102,7 @@ static Scsi_Host_Template sis_sht = {
        .ordered_flush          = 1,
 };
 
-static struct ata_port_operations sis_ops = {
+static const struct ata_port_operations sis_ops = {
        .port_disable           = ata_port_disable,
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
@@ -263,7 +263,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_out_regions;
 
        ppi = &sis_port_info;
-       probe_ent = ata_pci_init_native_mode(pdev, &ppi);
+       probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
        if (!probe_ent) {
                rc = -ENOMEM;
                goto err_out_regions;
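
This hunk tracks a libata interface change: ata_pci_init_native_mode() now takes a mask selecting which channels to set up, and existing callers pass ATA_PORT_PRIMARY | ATA_PORT_SECONDARY to keep their old behaviour (the same edit appears in sata_uli and sata_via below). A hedged sketch of a caller that only wants the primary channel; foo_init_one() and foo_port_info are invented names, not from the patch:

#include <linux/pci.h>
#include <linux/libata.h>

static struct ata_port_info foo_port_info;	/* filled in elsewhere (hypothetical) */

static int foo_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct ata_port_info *ppi = &foo_port_info;
	struct ata_probe_ent *probe_ent;

	/* Only the primary channel exists on this imaginary board. */
	probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY);
	if (!probe_ent)
		return -ENOMEM;

	/* ... remainder of the probe path is unchanged ... */
	return 0;
}
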
index d89d968bedace68ad91771a168455b0164b12bb0..e0f9570bc6ddd391a96cfb6c53eeb40b63eaef5b 100644 (file)
@@ -102,7 +102,7 @@ static void k2_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
 }
 
 
-static void k2_sata_tf_load(struct ata_port *ap, struct ata_taskfile *tf)
+static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
 {
        struct ata_ioports *ioaddr = &ap->ioaddr;
        unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
@@ -297,7 +297,7 @@ static Scsi_Host_Template k2_sata_sht = {
 };
 
 
-static struct ata_port_operations k2_sata_ops = {
+static const struct ata_port_operations k2_sata_ops = {
        .port_disable           = ata_port_disable,
        .tf_load                = k2_sata_tf_load,
        .tf_read                = k2_sata_tf_read,
index 540a851911723b5329e67f6f63981acba6975339..af08f4f650c1ef6da8526b3524259a4b031fa159 100644 (file)
@@ -137,7 +137,7 @@ struct pdc_port_priv {
 };
 
 struct pdc_host_priv {
-       void                    *dimm_mmio;
+       void                    __iomem *dimm_mmio;
 
        unsigned int            doing_hdma;
        unsigned int            hdma_prod;
@@ -157,8 +157,8 @@ static void pdc_20621_phy_reset (struct ata_port *ap);
 static int pdc_port_start(struct ata_port *ap);
 static void pdc_port_stop(struct ata_port *ap);
 static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
-static void pdc_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf);
-static void pdc_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf);
+static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
+static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
 static void pdc20621_host_stop(struct ata_host_set *host_set);
 static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe);
 static int pdc20621_detect_dimm(struct ata_probe_ent *pe);
@@ -196,7 +196,7 @@ static Scsi_Host_Template pdc_sata_sht = {
        .ordered_flush          = 1,
 };
 
-static struct ata_port_operations pdc_20621_ops = {
+static const struct ata_port_operations pdc_20621_ops = {
        .port_disable           = ata_port_disable,
        .tf_load                = pdc_tf_load_mmio,
        .tf_read                = ata_tf_read,
@@ -247,7 +247,7 @@ static void pdc20621_host_stop(struct ata_host_set *host_set)
 {
        struct pci_dev *pdev = to_pci_dev(host_set->dev);
        struct pdc_host_priv *hpriv = host_set->private_data;
-       void *dimm_mmio = hpriv->dimm_mmio;
+       void __iomem *dimm_mmio = hpriv->dimm_mmio;
 
        pci_iounmap(pdev, dimm_mmio);
        kfree(hpriv);
@@ -669,8 +669,8 @@ static void pdc20621_packet_start(struct ata_queued_cmd *qc)
                readl(mmio + PDC_20621_SEQCTL + (seq * 4));     /* flush */
 
                writel(port_ofs + PDC_DIMM_ATA_PKT,
-                      (void *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
-               readl((void *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
+                      (void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
+               readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
                VPRINTK("submitted ofs 0x%x (%u), seq %u\n",
                        port_ofs + PDC_DIMM_ATA_PKT,
                        port_ofs + PDC_DIMM_ATA_PKT,
@@ -747,8 +747,8 @@ static inline unsigned int pdc20621_host_intr( struct ata_port *ap,
                        writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
                        readl(mmio + PDC_20621_SEQCTL + (seq * 4));
                        writel(port_ofs + PDC_DIMM_ATA_PKT,
-                              (void *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
-                       readl((void *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
+                              (void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
+                       readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
                }
 
                /* step two - execute ATA command */
@@ -899,7 +899,7 @@ out:
        DPRINTK("EXIT\n");
 }
 
-static void pdc_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf)
+static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
 {
        WARN_ON (tf->protocol == ATA_PROT_DMA ||
                 tf->protocol == ATA_PROT_NODATA);
@@ -907,7 +907,7 @@ static void pdc_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf)
 }
 
 
-static void pdc_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf)
+static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
 {
        WARN_ON (tf->protocol == ATA_PROT_DMA ||
                 tf->protocol == ATA_PROT_NODATA);
@@ -1014,7 +1014,7 @@ static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource,
        idx++;
        dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
                (long) (window_size - offset);
-       memcpy_toio((char *) (dimm_mmio + offset / 4), (char *) psource, dist);
+       memcpy_toio(dimm_mmio + offset / 4, psource, dist);
        writel(0x01, mmio + PDC_GENERAL_CTLR);
        readl(mmio + PDC_GENERAL_CTLR);
 
@@ -1023,8 +1023,7 @@ static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource,
        for (; (long) size >= (long) window_size ;) {
                writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
                readl(mmio + PDC_DIMM_WINDOW_CTLR);
-               memcpy_toio((char *) (dimm_mmio), (char *) psource,
-                           window_size / 4);
+               memcpy_toio(dimm_mmio, psource, window_size / 4);
                writel(0x01, mmio + PDC_GENERAL_CTLR);
                readl(mmio + PDC_GENERAL_CTLR);
                psource += window_size;
@@ -1035,7 +1034,7 @@ static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource,
        if (size) {
                writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
                readl(mmio + PDC_DIMM_WINDOW_CTLR);
-               memcpy_toio((char *) (dimm_mmio), (char *) psource, size / 4);
+               memcpy_toio(dimm_mmio, psource, size / 4);
                writel(0x01, mmio + PDC_GENERAL_CTLR);
                readl(mmio + PDC_GENERAL_CTLR);
        }
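
Besides dropping the now-redundant casts, these put_to_dimm hunks show the driver's sliding-window copy: the DIMM is only reachable through a window, so each chunk reprograms PDC_DIMM_WINDOW_CTLR before the next memcpy_toio(). A condensed sketch of that loop; foo_copy_to_dimm() is the editor's illustration, the names mirror the driver, and the bytes/4 count follows the driver's own calls rather than any documented rule:

static void foo_copy_to_dimm(void __iomem *mmio, void __iomem *dimm_mmio,
			     const char *src, size_t size,
			     size_t window_size, unsigned int page_mask)
{
	unsigned int idx = 0;

	while (size) {
		size_t chunk = size < window_size ? size : window_size;

		writel(idx << page_mask, mmio + PDC_DIMM_WINDOW_CTLR);
		readl(mmio + PDC_DIMM_WINDOW_CTLR);	/* post the window switch */
		memcpy_toio(dimm_mmio, src, chunk / 4);	/* driver passes bytes / 4 */
		src += chunk;
		size -= chunk;
		idx++;
	}
}
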
index 4c9fb8b71be1cb5cb64a047e71d64fd433776882..d68dc7d3422c4c54384789b24f605db45962e3e9 100644 (file)
@@ -90,7 +90,7 @@ static Scsi_Host_Template uli_sht = {
        .ordered_flush          = 1,
 };
 
-static struct ata_port_operations uli_ops = {
+static const struct ata_port_operations uli_ops = {
        .port_disable           = ata_port_disable,
 
        .tf_load                = ata_tf_load,
@@ -202,7 +202,7 @@ static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_out_regions;
 
        ppi = &uli_port_info;
-       probe_ent = ata_pci_init_native_mode(pdev, &ppi);
+       probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
        if (!probe_ent) {
                rc = -ENOMEM;
                goto err_out_regions;
index 128b996b07b70167e3c928a62c035249cbc2e34f..80e291a909a972be9dc34366df1ab407689a22fc 100644 (file)
@@ -109,7 +109,7 @@ static Scsi_Host_Template svia_sht = {
        .ordered_flush          = 1,
 };
 
-static struct ata_port_operations svia_sata_ops = {
+static const struct ata_port_operations svia_sata_ops = {
        .port_disable           = ata_port_disable,
 
        .tf_load                = ata_tf_load,
@@ -212,7 +212,7 @@ static struct ata_probe_ent *vt6420_init_probe_ent(struct pci_dev *pdev)
        struct ata_probe_ent *probe_ent;
        struct ata_port_info *ppi = &svia_port_info;
 
-       probe_ent = ata_pci_init_native_mode(pdev, &ppi);
+       probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
        if (!probe_ent)
                return NULL;
 
index cf94e0158a8df5820a28214c92bcf35842fdb33f..5af05fdf8544392cc8eb6a1d4d950c39fa808b81 100644 (file)
@@ -86,7 +86,7 @@ static u32 vsc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
 {
        if (sc_reg > SCR_CONTROL)
                return 0xffffffffU;
-       return readl((void *) ap->ioaddr.scr_addr + (sc_reg * 4));
+       return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
 }
 
 
@@ -95,16 +95,16 @@ static void vsc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
 {
        if (sc_reg > SCR_CONTROL)
                return;
-       writel(val, (void *) ap->ioaddr.scr_addr + (sc_reg * 4));
+       writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
 }
 
 
 static void vsc_intr_mask_update(struct ata_port *ap, u8 ctl)
 {
-       unsigned long mask_addr;
+       void __iomem *mask_addr;
        u8 mask;
 
-       mask_addr = (unsigned long) ap->host_set->mmio_base +
+       mask_addr = ap->host_set->mmio_base +
                VSC_SATA_INT_MASK_OFFSET + ap->port_no;
        mask = readb(mask_addr);
        if (ctl & ATA_NIEN)
@@ -115,7 +115,7 @@ static void vsc_intr_mask_update(struct ata_port *ap, u8 ctl)
 }
 
 
-static void vsc_sata_tf_load(struct ata_port *ap, struct ata_taskfile *tf)
+static void vsc_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
 {
        struct ata_ioports *ioaddr = &ap->ioaddr;
        unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
@@ -231,7 +231,7 @@ static Scsi_Host_Template vsc_sata_sht = {
 };
 
 
-static struct ata_port_operations vsc_sata_ops = {
+static const struct ata_port_operations vsc_sata_ops = {
        .port_disable           = ata_port_disable,
        .tf_load                = vsc_sata_tf_load,
        .tf_read                = vsc_sata_tf_read,
@@ -283,7 +283,7 @@ static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_d
        struct ata_probe_ent *probe_ent = NULL;
        unsigned long base;
        int pci_dev_busy = 0;
-       void *mmio_base;
+       void __iomem *mmio_base;
        int rc;
 
        if (!printed_version++)
index 1f0ebabf6d47663ffd0f9e6e62fc4b7eff21c422..a5711d545d713501a60d57bfa7296007cf3dda3e 100644 (file)
@@ -130,7 +130,7 @@ EXPORT_SYMBOL(scsi_device_types);
  * Returns:     Pointer to request block.
  */
 struct scsi_request *scsi_allocate_request(struct scsi_device *sdev,
-                                          int gfp_mask)
+                                          gfp_t gfp_mask)
 {
        const int offset = ALIGN(sizeof(struct scsi_request), 4);
        const int size = offset + sizeof(struct request);
@@ -196,7 +196,7 @@ struct scsi_host_cmd_pool {
        unsigned int    users;
        char            *name;
        unsigned int    slab_flags;
-       unsigned int    gfp_mask;
+       gfp_t           gfp_mask;
 };
 
 static struct scsi_host_cmd_pool scsi_cmd_pool = {
@@ -213,7 +213,7 @@ static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
 static DECLARE_MUTEX(host_cmd_pool_mutex);
 
 static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost,
-                                           int gfp_mask)
+                                           gfp_t gfp_mask)
 {
        struct scsi_cmnd *cmd;
 
@@ -245,7 +245,7 @@ static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost,
  *
  * Returns:    The allocated scsi command structure.
  */
-struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, int gfp_mask)
+struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
 {
        struct scsi_cmnd *cmd;
 
index de7f98cc38feb020863323b06e4fd6d2e4f426a9..6a3f6aae8a976c8ef1011dafc6bd5c8806952109 100644 (file)
@@ -205,7 +205,8 @@ int scsi_ioctl_send_command(struct scsi_device *sdev,
        unsigned int inlen, outlen, cmdlen;
        unsigned int needed, buf_needed;
        int timeout, retries, result;
-       int data_direction, gfp_mask = GFP_KERNEL;
+       int data_direction;
+       gfp_t gfp_mask = GFP_KERNEL;
 
        if (!sic)
                return -EINVAL;
index 0074f28c37b2719e49388a0da93f6a9a87505a82..3ff538809786fb5647c0e8f2f6e74339aca0106d 100644 (file)
@@ -677,7 +677,7 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
        return NULL;
 }
 
-static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, int gfp_mask)
+static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
 {
        struct scsi_host_sg_pool *sgp;
        struct scatterlist *sgl;
index ad94367df430a8d8df9d9b62109fc2bf545b7af9..fd56b7ec88b656e645a2af4551be76873d746531 100644 (file)
@@ -2644,7 +2644,7 @@ static char *
 sg_page_malloc(int rqSz, int lowDma, int *retSzp)
 {
        char *resp = NULL;
-       int page_mask;
+       gfp_t page_mask;
        int order, a_size;
        int resSz = rqSz;
 
index d001c046551bcab0663ffa151bd83a382b6406dc..927d700f00736af5a0249cc8f17ee7bff6b90179 100644 (file)
@@ -3577,7 +3577,8 @@ static long st_compat_ioctl(struct file *file, unsigned int cmd, unsigned long a
 static struct st_buffer *
  new_tape_buffer(int from_initialization, int need_dma, int max_sg)
 {
-       int i, priority, got = 0, segs = 0;
+       int i, got = 0, segs = 0;
+       gfp_t priority;
        struct st_buffer *tb;
 
        if (from_initialization)
@@ -3610,7 +3611,8 @@ static struct st_buffer *
 /* Try to allocate enough space in the tape buffer */
 static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dma)
 {
-       int segs, nbr, max_segs, b_size, priority, order, got;
+       int segs, nbr, max_segs, b_size, order, got;
+       gfp_t priority;
 
        if (new_size <= STbuffer->buffer_size)
                return 1;
index 679e678c7e6a158e0b6e25aaf5ef383cae346f08..ddd0307fece23217108a8907cf94f5567f0ec7b6 100644 (file)
@@ -50,6 +50,7 @@
 
 #include <asm/io.h>
 #include <asm/irq.h>
+#include <asm/hardware.h>
 #include <asm/hardware/amba.h>
 #include <asm/hardware/amba_serial.h>
 
index 1ff629c7475008e41efbc7d81abca1a670994f91..938d185841c9eeb138f04b6c3389d48a03d0a11c 100644 (file)
@@ -50,6 +50,7 @@
 
 #include <asm/io.h>
 #include <asm/irq.h>
+#include <asm/sizes.h>
 #include <asm/hardware/amba.h>
 #include <asm/hardware/clock.h>
 #include <asm/hardware/amba_serial.h>
index 87ef368384fb31d9d055f169e1c1986e43db8659..6a67e8f585b307c9bd66f231846734a6962c6101 100644 (file)
@@ -408,7 +408,11 @@ static struct uart_port clps711x_ports[UART_NR] = {
        {
                .iobase         = SYSCON1,
                .irq            = IRQ_UTXINT1, /* IRQ_URXINT1, IRQ_UMSINT */
+#ifdef CONFIG_MP1000_90MHZ
+               .uartclk        = 4515840,
+#else
                .uartclk        = 3686400,
+#endif
                .fifosize       = 16,
                .ops            = &clps711x_pops,
                .line           = 0,
@@ -417,7 +421,11 @@ static struct uart_port clps711x_ports[UART_NR] = {
        {
                .iobase         = SYSCON2,
                .irq            = IRQ_UTXINT2, /* IRQ_URXINT2 */
+#ifdef CONFIG_MP1000_90MHZ
+               .uartclk        = 4515840,
+#else
                .uartclk        = 3686400,
+#endif
                .fifosize       = 16,
                .ops            = &clps711x_pops,
                .line           = 1,
@@ -551,6 +559,7 @@ console_initcall(clps711xuart_console_init);
 static struct uart_driver clps711x_reg = {
        .driver_name            = "ttyCL",
        .dev_name               = "ttyCL",
+       .devfs_name             = "ttyCL",
        .major                  = SERIAL_CLPS711X_MAJOR,
        .minor                  = SERIAL_CLPS711X_MINOR,
        .nr                     = UART_NR,
index 90c2a86c421b0cb3f8cb22eed95a120671d982d9..005f027e081a2a7ce6988697c7ae55468bb9a9e9 100644 (file)
@@ -358,6 +358,9 @@ static int serial_pxa_startup(struct uart_port *port)
        unsigned long flags;
        int retval;
 
+       if (port->line == 3) /* HWUART */
+               up->mcr |= UART_MCR_AFE;
+       else
        up->mcr = 0;
 
        /*
@@ -481,8 +484,10 @@ serial_pxa_set_termios(struct uart_port *port, struct termios *termios,
 
        if ((up->port.uartclk / quot) < (2400 * 16))
                fcr = UART_FCR_ENABLE_FIFO | UART_FCR_PXAR1;
-       else
+       else if ((up->port.uartclk / quot) < (230400 * 16))
                fcr = UART_FCR_ENABLE_FIFO | UART_FCR_PXAR8;
+       else
+               fcr = UART_FCR_ENABLE_FIFO | UART_FCR_PXAR32;
 
        /*
         * Ok, we're now changing the port state.  Do it with
@@ -772,6 +777,20 @@ static struct uart_pxa_port serial_pxa_ports[] = {
                .ops            = &serial_pxa_pops,
                .line           = 2,
        },
+  }, {  /* HWUART */
+       .name   = "HWUART",
+       .cken   = CKEN4_HWUART,
+       .port = {
+               .type           = PORT_PXA,
+               .iotype         = UPIO_MEM,
+               .membase        = (void *)&HWUART,
+               .mapbase        = __PREG(HWUART),
+               .irq            = IRQ_HWUART,
+               .uartclk        = 921600 * 16,
+               .fifosize       = 64,
+               .ops            = &serial_pxa_pops,
+               .line           = 3,
+       },
   }
 };
 
index fc15b4acc8af33e8c70f8275de29af69e1b59e0b..57e800ac3cee80890eed1871dff6d7093e699bc9 100644 (file)
@@ -106,7 +106,7 @@ void hcd_buffer_destroy (struct usb_hcd *hcd)
 void *hcd_buffer_alloc (
        struct usb_bus          *bus,
        size_t                  size,
-       unsigned                mem_flags,
+       gfp_t                   mem_flags,
        dma_addr_t              *dma
 )
 {
index 1017a97a418b110e7e5d6a55b56727d2bf17e79f..ff19d64041b548baddc360f67fe6dcb921eed4ea 100644 (file)
@@ -1112,7 +1112,7 @@ static void urb_unlink (struct urb *urb)
  * expects usb_submit_urb() to have sanity checked and conditioned all
  * inputs in the urb
  */
-static int hcd_submit_urb (struct urb *urb, unsigned mem_flags)
+static int hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
 {
        int                     status;
        struct usb_hcd          *hcd = urb->dev->bus->hcpriv;
index ac451fa7e4d273787e814c08f5092c6fc1feab37..1f1ed6211af8a6aa17f13508b6d126130df844b4 100644 (file)
@@ -142,12 +142,12 @@ struct hcd_timeout {      /* timeouts we allocate */
 
 struct usb_operations {
        int (*get_frame_number) (struct usb_device *usb_dev);
-       int (*submit_urb) (struct urb *urb, unsigned mem_flags);
+       int (*submit_urb) (struct urb *urb, gfp_t mem_flags);
        int (*unlink_urb) (struct urb *urb, int status);
 
        /* allocate dma-consistent buffer for URB_DMA_NOMAPPING */
        void *(*buffer_alloc)(struct usb_bus *bus, size_t size,
-                       unsigned mem_flags,
+                       gfp_t mem_flags,
                        dma_addr_t *dma);
        void (*buffer_free)(struct usb_bus *bus, size_t size,
                        void *addr, dma_addr_t dma);
@@ -200,7 +200,7 @@ struct hc_driver {
        int     (*urb_enqueue) (struct usb_hcd *hcd,
                                        struct usb_host_endpoint *ep,
                                        struct urb *urb,
-                                       unsigned mem_flags);
+                                       gfp_t mem_flags);
        int     (*urb_dequeue) (struct usb_hcd *hcd, struct urb *urb);
 
        /* hw synch, freeing endpoint resources that urb_dequeue can't */
@@ -247,7 +247,7 @@ int hcd_buffer_create (struct usb_hcd *hcd);
 void hcd_buffer_destroy (struct usb_hcd *hcd);
 
 void *hcd_buffer_alloc (struct usb_bus *bus, size_t size,
-       unsigned mem_flags, dma_addr_t *dma);
+       gfp_t mem_flags, dma_addr_t *dma);
 void hcd_buffer_free (struct usb_bus *bus, size_t size,
        void *addr, dma_addr_t dma);
 
index f1fb67fe22a82806533a4875801fd14a7f180833..f9a81e84dbdf642d85d24f917192d393432d1765 100644 (file)
@@ -321,7 +321,7 @@ int usb_sg_init (
        struct scatterlist      *sg,
        int                     nents,
        size_t                  length,
-       unsigned                mem_flags
+       gfp_t                   mem_flags
 )
 {
        int                     i;
index c846fefb73862049fa7960dce85c7a4c96780b05..b32898e0a27d1da41c256f346347275d495b268e 100644 (file)
@@ -60,7 +60,7 @@ void usb_init_urb(struct urb *urb)
  *
  * The driver must call usb_free_urb() when it is finished with the urb.
  */
-struct urb *usb_alloc_urb(int iso_packets, unsigned mem_flags)
+struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags)
 {
        struct urb *urb;
 
@@ -224,7 +224,7 @@ struct urb * usb_get_urb(struct urb *urb)
  *      GFP_NOIO, unless b) or c) apply
  *
  */
-int usb_submit_urb(struct urb *urb, unsigned mem_flags)
+int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
 {
        int                     pipe, temp, max;
        struct usb_device       *dev;
index 7d131509e41909a0aa92bac2665cf1acbccb2d1b..4c57f3f649ede7894e1df8c7591440601f6ed867 100644 (file)
@@ -1147,7 +1147,7 @@ int __usb_get_extra_descriptor(char *buffer, unsigned size,
 void *usb_buffer_alloc (
        struct usb_device *dev,
        size_t size,
-       unsigned mem_flags,
+       gfp_t mem_flags,
        dma_addr_t *dma
 )
 {
index 583db7c38cf192e7751eeacea3f8994ad8677803..8d9d8ee8955428a4dcd6ded1a666ed05a382aca2 100644 (file)
@@ -470,7 +470,7 @@ static int dummy_disable (struct usb_ep *_ep)
 }
 
 static struct usb_request *
-dummy_alloc_request (struct usb_ep *_ep, unsigned mem_flags)
+dummy_alloc_request (struct usb_ep *_ep, gfp_t mem_flags)
 {
        struct dummy_ep         *ep;
        struct dummy_request    *req;
@@ -507,7 +507,7 @@ dummy_alloc_buffer (
        struct usb_ep *_ep,
        unsigned bytes,
        dma_addr_t *dma,
-       unsigned mem_flags
+       gfp_t mem_flags
 ) {
        char                    *retval;
        struct dummy_ep         *ep;
@@ -541,7 +541,7 @@ fifo_complete (struct usb_ep *ep, struct usb_request *req)
 
 static int
 dummy_queue (struct usb_ep *_ep, struct usb_request *_req,
-               unsigned mem_flags)
+               gfp_t mem_flags)
 {
        struct dummy_ep         *ep;
        struct dummy_request    *req;
@@ -999,7 +999,7 @@ static int dummy_urb_enqueue (
        struct usb_hcd                  *hcd,
        struct usb_host_endpoint        *ep,
        struct urb                      *urb,
-       unsigned                        mem_flags
+       gfp_t                           mem_flags
 ) {
        struct dummy    *dum;
        struct urbp     *urbp;
index 49459e33e952a654b4a58ffff03f3a52851f3346..f1024e804d5c4ae359c869c71122f0602b94105d 100644 (file)
@@ -945,11 +945,11 @@ config_buf (enum usb_device_speed speed,
 
 /*-------------------------------------------------------------------------*/
 
-static void eth_start (struct eth_dev *dev, unsigned gfp_flags);
-static int alloc_requests (struct eth_dev *dev, unsigned n, unsigned gfp_flags);
+static void eth_start (struct eth_dev *dev, gfp_t gfp_flags);
+static int alloc_requests (struct eth_dev *dev, unsigned n, gfp_t gfp_flags);
 
 static int
-set_ether_config (struct eth_dev *dev, unsigned gfp_flags)
+set_ether_config (struct eth_dev *dev, gfp_t gfp_flags)
 {
        int                                     result = 0;
        struct usb_gadget                       *gadget = dev->gadget;
@@ -1081,7 +1081,7 @@ static void eth_reset_config (struct eth_dev *dev)
  * that returns config descriptors, and altsetting code.
  */
 static int
-eth_set_config (struct eth_dev *dev, unsigned number, unsigned gfp_flags)
+eth_set_config (struct eth_dev *dev, unsigned number, gfp_t gfp_flags)
 {
        int                     result = 0;
        struct usb_gadget       *gadget = dev->gadget;
@@ -1598,7 +1598,7 @@ static void defer_kevent (struct eth_dev *dev, int flag)
 static void rx_complete (struct usb_ep *ep, struct usb_request *req);
 
 static int
-rx_submit (struct eth_dev *dev, struct usb_request *req, unsigned gfp_flags)
+rx_submit (struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
 {
        struct sk_buff          *skb;
        int                     retval = -ENOMEM;
@@ -1724,7 +1724,7 @@ clean:
 }
 
 static int prealloc (struct list_head *list, struct usb_ep *ep,
-                       unsigned n, unsigned gfp_flags)
+                       unsigned n, gfp_t gfp_flags)
 {
        unsigned                i;
        struct usb_request      *req;
@@ -1763,7 +1763,7 @@ extra:
        return 0;
 }
 
-static int alloc_requests (struct eth_dev *dev, unsigned n, unsigned gfp_flags)
+static int alloc_requests (struct eth_dev *dev, unsigned n, gfp_t gfp_flags)
 {
        int status;
 
@@ -1779,7 +1779,7 @@ fail:
        return status;
 }
 
-static void rx_fill (struct eth_dev *dev, unsigned gfp_flags)
+static void rx_fill (struct eth_dev *dev, gfp_t gfp_flags)
 {
        struct usb_request      *req;
        unsigned long           flags;
@@ -1962,7 +1962,7 @@ drop:
  * normally just one notification will be queued.
  */
 
-static struct usb_request *eth_req_alloc (struct usb_ep *, unsigned, unsigned);
+static struct usb_request *eth_req_alloc (struct usb_ep *, unsigned, gfp_t);
 static void eth_req_free (struct usb_ep *ep, struct usb_request *req);
 
 static void
@@ -2024,7 +2024,7 @@ static int rndis_control_ack (struct net_device *net)
 
 #endif /* RNDIS */
 
-static void eth_start (struct eth_dev *dev, unsigned gfp_flags)
+static void eth_start (struct eth_dev *dev, gfp_t gfp_flags)
 {
        DEBUG (dev, "%s\n", __FUNCTION__);
 
@@ -2092,7 +2092,7 @@ static int eth_stop (struct net_device *net)
 /*-------------------------------------------------------------------------*/
 
 static struct usb_request *
-eth_req_alloc (struct usb_ep *ep, unsigned size, unsigned gfp_flags)
+eth_req_alloc (struct usb_ep *ep, unsigned size, gfp_t gfp_flags)
 {
        struct usb_request      *req;
 
index eaab26f4ed3712688c28a67baea49f4368060355..b0f3cd63e3b9110ed8d3e73d10a2754b1917001f 100644 (file)
@@ -269,7 +269,7 @@ static int goku_ep_disable(struct usb_ep *_ep)
 /*-------------------------------------------------------------------------*/
 
 static struct usb_request *
-goku_alloc_request(struct usb_ep *_ep, unsigned gfp_flags)
+goku_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
 {
        struct goku_request     *req;
 
@@ -327,7 +327,7 @@ goku_free_request(struct usb_ep *_ep, struct usb_request *_req)
  */
 static void *
 goku_alloc_buffer(struct usb_ep *_ep, unsigned bytes,
-                       dma_addr_t *dma, unsigned gfp_flags)
+                       dma_addr_t *dma, gfp_t gfp_flags)
 {
        void            *retval;
        struct goku_ep  *ep;
@@ -789,7 +789,7 @@ finished:
 /*-------------------------------------------------------------------------*/
 
 static int
-goku_queue(struct usb_ep *_ep, struct usb_request *_req, unsigned gfp_flags)
+goku_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
 {
        struct goku_request     *req;
        struct goku_ep          *ep;
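The ether.c and goku_udc.c hunks above, and most of the gadget, HCD and usbnet hunks that follow, are the same mechanical change: allocation-flag parameters move from plain unsigned (or int) to gfp_t, the __bitwise-annotated type that lets sparse catch callers mixing GFP_* values with ordinary integers. A minimal sketch of the pattern, assuming nothing beyond kmalloc() and the standard GFP_* constants (the helper name is made up, not part of this commit):

    #include <linux/slab.h>     /* kmalloc */
    #include <linux/gfp.h>      /* gfp_t, GFP_KERNEL, GFP_ATOMIC */

    static void *example_buf_alloc(size_t len, gfp_t gfp_flags)
    {
            /* gfp_flags is typically GFP_KERNEL in process context and
             * GFP_ATOMIC where the caller may not sleep */
            return kmalloc(len, gfp_flags);
    }
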
index 4842577789c93f52bb95943a4f211ab5d69ef3af..012d1e5f1524385b32cdb0ad870744f264b50f1f 100644 (file)
@@ -71,13 +71,13 @@ static char *state_names[] = {
 static int lh7a40x_ep_enable(struct usb_ep *ep,
                             const struct usb_endpoint_descriptor *);
 static int lh7a40x_ep_disable(struct usb_ep *ep);
-static struct usb_request *lh7a40x_alloc_request(struct usb_ep *ep, int);
+static struct usb_request *lh7a40x_alloc_request(struct usb_ep *ep, gfp_t);
 static void lh7a40x_free_request(struct usb_ep *ep, struct usb_request *);
 static void *lh7a40x_alloc_buffer(struct usb_ep *ep, unsigned, dma_addr_t *,
-                                 int);
+                                 gfp_t);
 static void lh7a40x_free_buffer(struct usb_ep *ep, void *, dma_addr_t,
                                unsigned);
-static int lh7a40x_queue(struct usb_ep *ep, struct usb_request *, int);
+static int lh7a40x_queue(struct usb_ep *ep, struct usb_request *, gfp_t);
 static int lh7a40x_dequeue(struct usb_ep *ep, struct usb_request *);
 static int lh7a40x_set_halt(struct usb_ep *ep, int);
 static int lh7a40x_fifo_status(struct usb_ep *ep);
@@ -1106,7 +1106,7 @@ static int lh7a40x_ep_disable(struct usb_ep *_ep)
 }
 
 static struct usb_request *lh7a40x_alloc_request(struct usb_ep *ep,
-                                                unsigned gfp_flags)
+                                                gfp_t gfp_flags)
 {
        struct lh7a40x_request *req;
 
@@ -1134,7 +1134,7 @@ static void lh7a40x_free_request(struct usb_ep *ep, struct usb_request *_req)
 }
 
 static void *lh7a40x_alloc_buffer(struct usb_ep *ep, unsigned bytes,
-                                 dma_addr_t * dma, unsigned gfp_flags)
+                                 dma_addr_t * dma, gfp_t gfp_flags)
 {
        char *retval;
 
@@ -1158,7 +1158,7 @@ static void lh7a40x_free_buffer(struct usb_ep *ep, void *buf, dma_addr_t dma,
  *  NOTE: Sets INDEX register
  */
 static int lh7a40x_queue(struct usb_ep *_ep, struct usb_request *_req,
-                        unsigned gfp_flags)
+                        gfp_t gfp_flags)
 {
        struct lh7a40x_request *req;
        struct lh7a40x_ep *ep;
index 477fab2e74d118375a22475aa05e07d7b0983d46..c32e1f7476da46d67b4e2f8336ab8cd5929b1b75 100644 (file)
@@ -376,7 +376,7 @@ static int net2280_disable (struct usb_ep *_ep)
 /*-------------------------------------------------------------------------*/
 
 static struct usb_request *
-net2280_alloc_request (struct usb_ep *_ep, unsigned gfp_flags)
+net2280_alloc_request (struct usb_ep *_ep, gfp_t gfp_flags)
 {
        struct net2280_ep       *ep;
        struct net2280_request  *req;
@@ -463,7 +463,7 @@ net2280_alloc_buffer (
        struct usb_ep           *_ep,
        unsigned                bytes,
        dma_addr_t              *dma,
-       unsigned                gfp_flags
+       gfp_t                   gfp_flags
 )
 {
        void                    *retval;
@@ -897,7 +897,7 @@ done (struct net2280_ep *ep, struct net2280_request *req, int status)
 /*-------------------------------------------------------------------------*/
 
 static int
-net2280_queue (struct usb_ep *_ep, struct usb_request *_req, unsigned gfp_flags)
+net2280_queue (struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
 {
        struct net2280_request  *req;
        struct net2280_ep       *ep;
index ff5533e6956047fd25688d3fd37af3f23421b38f..287c5900fb13236def46a4a96764f10d6099352c 100644 (file)
@@ -269,7 +269,7 @@ static int omap_ep_disable(struct usb_ep *_ep)
 /*-------------------------------------------------------------------------*/
 
 static struct usb_request *
-omap_alloc_request(struct usb_ep *ep, unsigned gfp_flags)
+omap_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
 {
        struct omap_req *req;
 
@@ -298,7 +298,7 @@ omap_alloc_buffer(
        struct usb_ep   *_ep,
        unsigned        bytes,
        dma_addr_t      *dma,
-       unsigned        gfp_flags
+       gfp_t           gfp_flags
 )
 {
        void            *retval;
@@ -937,7 +937,7 @@ static void dma_channel_release(struct omap_ep *ep)
 /*-------------------------------------------------------------------------*/
 
 static int
-omap_ep_queue(struct usb_ep *_ep, struct usb_request *_req, unsigned gfp_flags)
+omap_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
 {
        struct omap_ep  *ep = container_of(_ep, struct omap_ep, ep);
        struct omap_req *req = container_of(_req, struct omap_req, req);
index 73f8c9404156be4f6d2afa9351d8bd20a14e1bf6..3d4d89c371e7b2f50a7d076f1471463ab2f2e745 100644 (file)
@@ -332,7 +332,7 @@ static int pxa2xx_ep_disable (struct usb_ep *_ep)
  *     pxa2xx_ep_alloc_request - allocate a request data structure
  */
 static struct usb_request *
-pxa2xx_ep_alloc_request (struct usb_ep *_ep, unsigned gfp_flags)
+pxa2xx_ep_alloc_request (struct usb_ep *_ep, gfp_t gfp_flags)
 {
        struct pxa2xx_request *req;
 
@@ -367,7 +367,7 @@ pxa2xx_ep_free_request (struct usb_ep *_ep, struct usb_request *_req)
  */
 static void *
 pxa2xx_ep_alloc_buffer(struct usb_ep *_ep, unsigned bytes,
-       dma_addr_t *dma, unsigned gfp_flags)
+       dma_addr_t *dma, gfp_t gfp_flags)
 {
        char                    *retval;
 
@@ -422,7 +422,7 @@ static inline void ep0_idle (struct pxa2xx_udc *dev)
 }
 
 static int
-write_packet(volatile unsigned long *uddr, struct pxa2xx_request *req, unsigned max)
+write_packet(volatile u32 *uddr, struct pxa2xx_request *req, unsigned max)
 {
        u8              *buf;
        unsigned        length, count;
@@ -874,7 +874,7 @@ done:
 /*-------------------------------------------------------------------------*/
 
 static int
-pxa2xx_ep_queue(struct usb_ep *_ep, struct usb_request *_req, unsigned gfp_flags)
+pxa2xx_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
 {
        struct pxa2xx_request   *req;
        struct pxa2xx_ep        *ep;
index a58f3e6e71f1ddbc5cb74c22db89740b77636278..19a883f7d1b8f610c8c9c3a0cba195aeacc88836 100644 (file)
@@ -69,11 +69,11 @@ struct pxa2xx_ep {
         * UDDR = UDC Endpoint Data Register (the fifo)
         * DRCM = DMA Request Channel Map
         */
-       volatile unsigned long                  *reg_udccs;
-       volatile unsigned long                  *reg_ubcr;
-       volatile unsigned long                  *reg_uddr;
+       volatile u32                            *reg_udccs;
+       volatile u32                            *reg_ubcr;
+       volatile u32                            *reg_uddr;
 #ifdef USE_DMA
-       volatile unsigned long                  *reg_drcmr;
+       volatile u32                    *reg_drcmr;
 #define        drcmr(n)  .reg_drcmr = & DRCMR ## n ,
 #else
 #define        drcmr(n)  
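The pxa2xx_udc register pointers switch from volatile unsigned long * to volatile u32 *, making the 32-bit register width explicit instead of tied to sizeof(long); the write_packet() hunk in pxa2xx_udc.c above makes the same change for the UDDR FIFO register. A hedged sketch of the access pattern (function and parameter are hypothetical, not from this commit):

    static u32 example_read_reg(volatile u32 *reg)
    {
            return *reg;        /* explicitly a 32-bit access */
    }
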
index c925d9222f53c4baaef730dd1046b8aa75620897..b35ac6d334f8e3aa5246c5d4d1e832349ce02994 100644 (file)
@@ -300,18 +300,18 @@ static int gs_build_config_buf(u8 *buf, enum usb_device_speed speed,
                u8 type, unsigned int index, int is_otg);
 
 static struct usb_request *gs_alloc_req(struct usb_ep *ep, unsigned int len,
-       unsigned kmalloc_flags);
+       gfp_t kmalloc_flags);
 static void gs_free_req(struct usb_ep *ep, struct usb_request *req);
 
 static struct gs_req_entry *gs_alloc_req_entry(struct usb_ep *ep, unsigned len,
-       unsigned kmalloc_flags);
+       gfp_t kmalloc_flags);
 static void gs_free_req_entry(struct usb_ep *ep, struct gs_req_entry *req);
 
-static int gs_alloc_ports(struct gs_dev *dev, unsigned kmalloc_flags);
+static int gs_alloc_ports(struct gs_dev *dev, gfp_t kmalloc_flags);
 static void gs_free_ports(struct gs_dev *dev);
 
 /* circular buffer */
-static struct gs_buf *gs_buf_alloc(unsigned int size, unsigned kmalloc_flags);
+static struct gs_buf *gs_buf_alloc(unsigned int size, gfp_t kmalloc_flags);
 static void gs_buf_free(struct gs_buf *gb);
 static void gs_buf_clear(struct gs_buf *gb);
 static unsigned int gs_buf_data_avail(struct gs_buf *gb);
@@ -2091,7 +2091,7 @@ static int gs_build_config_buf(u8 *buf, enum usb_device_speed speed,
  * usb_request or NULL if there is an error.
  */
 static struct usb_request *
-gs_alloc_req(struct usb_ep *ep, unsigned int len, unsigned kmalloc_flags)
+gs_alloc_req(struct usb_ep *ep, unsigned int len, gfp_t kmalloc_flags)
 {
        struct usb_request *req;
 
@@ -2132,7 +2132,7 @@ static void gs_free_req(struct usb_ep *ep, struct usb_request *req)
  * endpoint, buffer len, and kmalloc flags.
  */
 static struct gs_req_entry *
-gs_alloc_req_entry(struct usb_ep *ep, unsigned len, unsigned kmalloc_flags)
+gs_alloc_req_entry(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
 {
        struct gs_req_entry     *req;
 
@@ -2173,7 +2173,7 @@ static void gs_free_req_entry(struct usb_ep *ep, struct gs_req_entry *req)
  *
  * The device lock is normally held when calling this function.
  */
-static int gs_alloc_ports(struct gs_dev *dev, unsigned kmalloc_flags)
+static int gs_alloc_ports(struct gs_dev *dev, gfp_t kmalloc_flags)
 {
        int i;
        struct gs_port *port;
@@ -2255,7 +2255,7 @@ static void gs_free_ports(struct gs_dev *dev)
  *
  * Allocate a circular buffer and all associated memory.
  */
-static struct gs_buf *gs_buf_alloc(unsigned int size, unsigned kmalloc_flags)
+static struct gs_buf *gs_buf_alloc(unsigned int size, gfp_t kmalloc_flags)
 {
        struct gs_buf *gb;
 
index 6890e773b2a2a38f6ddf8469e0e141b47e8fb1ef..ec9c424f1d9706e59fa6a31612c96cacfdbd75ab 100644 (file)
@@ -612,7 +612,7 @@ static void source_sink_complete (struct usb_ep *ep, struct usb_request *req)
 }
 
 static struct usb_request *
-source_sink_start_ep (struct usb_ep *ep, unsigned gfp_flags)
+source_sink_start_ep (struct usb_ep *ep, gfp_t gfp_flags)
 {
        struct usb_request      *req;
        int                     status;
@@ -640,7 +640,7 @@ source_sink_start_ep (struct usb_ep *ep, unsigned gfp_flags)
 }
 
 static int
-set_source_sink_config (struct zero_dev *dev, unsigned gfp_flags)
+set_source_sink_config (struct zero_dev *dev, gfp_t gfp_flags)
 {
        int                     result = 0;
        struct usb_ep           *ep;
@@ -744,7 +744,7 @@ static void loopback_complete (struct usb_ep *ep, struct usb_request *req)
 }
 
 static int
-set_loopback_config (struct zero_dev *dev, unsigned gfp_flags)
+set_loopback_config (struct zero_dev *dev, gfp_t gfp_flags)
 {
        int                     result = 0;
        struct usb_ep           *ep;
@@ -845,7 +845,7 @@ static void zero_reset_config (struct zero_dev *dev)
  * by limiting configuration choices (like the pxa2xx).
  */
 static int
-zero_set_config (struct zero_dev *dev, unsigned number, unsigned gfp_flags)
+zero_set_config (struct zero_dev *dev, unsigned number, gfp_t gfp_flags)
 {
        int                     result = 0;
        struct usb_gadget       *gadget = dev->gadget;
index b948ffd94f4587ab3d07e6ccf1743a18a89e9ebd..f5eb9e7b5b1875ee8178404307ca45be1d5d5693 100644 (file)
@@ -983,7 +983,7 @@ static int ehci_urb_enqueue (
        struct usb_hcd  *hcd,
        struct usb_host_endpoint *ep,
        struct urb      *urb,
-       unsigned        mem_flags
+       gfp_t           mem_flags
 ) {
        struct ehci_hcd         *ehci = hcd_to_ehci (hcd);
        struct list_head        qtd_list;
index 5c38ad869485e8870f20d1834f78eb264cbe8104..91c2ab43cbcc1a58412751505400e1db045f1909 100644 (file)
@@ -45,7 +45,7 @@ static inline void ehci_qtd_init (struct ehci_qtd *qtd, dma_addr_t dma)
        INIT_LIST_HEAD (&qtd->qtd_list);
 }
 
-static struct ehci_qtd *ehci_qtd_alloc (struct ehci_hcd *ehci, int flags)
+static struct ehci_qtd *ehci_qtd_alloc (struct ehci_hcd *ehci, gfp_t flags)
 {
        struct ehci_qtd         *qtd;
        dma_addr_t              dma;
@@ -79,7 +79,7 @@ static void qh_destroy (struct kref *kref)
        dma_pool_free (ehci->qh_pool, qh, qh->qh_dma);
 }
 
-static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, int flags)
+static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags)
 {
        struct ehci_qh          *qh;
        dma_addr_t              dma;
@@ -161,7 +161,7 @@ static void ehci_mem_cleanup (struct ehci_hcd *ehci)
 }
 
 /* remember to add cleanup code (above) if you add anything here */
-static int ehci_mem_init (struct ehci_hcd *ehci, int flags)
+static int ehci_mem_init (struct ehci_hcd *ehci, gfp_t flags)
 {
        int i;
 
index 940d38ca7d91785534bffe0f8da2f5e015c563a2..5bb872c3496d70c3767d1b4f40a8fdd2979ba354 100644 (file)
@@ -477,7 +477,7 @@ qh_urb_transaction (
        struct ehci_hcd         *ehci,
        struct urb              *urb,
        struct list_head        *head,
-       int                     flags
+       gfp_t                   flags
 ) {
        struct ehci_qtd         *qtd, *qtd_prev;
        dma_addr_t              buf;
@@ -629,7 +629,7 @@ static struct ehci_qh *
 qh_make (
        struct ehci_hcd         *ehci,
        struct urb              *urb,
-       int                     flags
+       gfp_t                   flags
 ) {
        struct ehci_qh          *qh = ehci_qh_alloc (ehci, flags);
        u32                     info1 = 0, info2 = 0;
@@ -906,7 +906,7 @@ submit_async (
        struct usb_host_endpoint *ep,
        struct urb              *urb,
        struct list_head        *qtd_list,
-       unsigned                mem_flags
+       gfp_t                   mem_flags
 ) {
        struct ehci_qtd         *qtd;
        int                     epnum;
index ccc7300baa6d717a2bb1dab4140ba8d84337eb6d..f0c8aa1ccd5dc016aa8c61ea3b84944c99a25c1c 100644 (file)
@@ -589,7 +589,7 @@ static int intr_submit (
        struct usb_host_endpoint *ep,
        struct urb              *urb,
        struct list_head        *qtd_list,
-       unsigned                mem_flags
+       gfp_t                   mem_flags
 ) {
        unsigned                epnum;
        unsigned long           flags;
@@ -634,7 +634,7 @@ done:
 /* ehci_iso_stream ops work with both ITD and SITD */
 
 static struct ehci_iso_stream *
-iso_stream_alloc (unsigned mem_flags)
+iso_stream_alloc (gfp_t mem_flags)
 {
        struct ehci_iso_stream *stream;
 
@@ -851,7 +851,7 @@ iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
 /* ehci_iso_sched ops can be ITD-only or SITD-only */
 
 static struct ehci_iso_sched *
-iso_sched_alloc (unsigned packets, unsigned mem_flags)
+iso_sched_alloc (unsigned packets, gfp_t mem_flags)
 {
        struct ehci_iso_sched   *iso_sched;
        int                     size = sizeof *iso_sched;
@@ -924,7 +924,7 @@ itd_urb_transaction (
        struct ehci_iso_stream  *stream,
        struct ehci_hcd         *ehci,
        struct urb              *urb,
-       unsigned                mem_flags
+       gfp_t                   mem_flags
 )
 {
        struct ehci_itd         *itd;
@@ -1418,7 +1418,7 @@ itd_complete (
 /*-------------------------------------------------------------------------*/
 
 static int itd_submit (struct ehci_hcd *ehci, struct urb *urb,
-       unsigned mem_flags)
+       gfp_t mem_flags)
 {
        int                     status = -EINVAL;
        unsigned long           flags;
@@ -1529,7 +1529,7 @@ sitd_urb_transaction (
        struct ehci_iso_stream  *stream,
        struct ehci_hcd         *ehci,
        struct urb              *urb,
-       unsigned                mem_flags
+       gfp_t                   mem_flags
 )
 {
        struct ehci_sitd        *sitd;
@@ -1779,7 +1779,7 @@ sitd_complete (
 
 
 static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb,
-       unsigned mem_flags)
+       gfp_t mem_flags)
 {
        int                     status = -EINVAL;
        unsigned long           flags;
index e142056b0d2cd5fc8c46e267564cc1ed2318033b..2548d94fcd72eac9056019a6ba5efb51028065ba 100644 (file)
@@ -694,7 +694,7 @@ static int balance(struct isp116x *isp116x, u16 period, u16 load)
 
 static int isp116x_urb_enqueue(struct usb_hcd *hcd,
                               struct usb_host_endpoint *hep, struct urb *urb,
-                              unsigned mem_flags)
+                              gfp_t mem_flags)
 {
        struct isp116x *isp116x = hcd_to_isp116x(hcd);
        struct usb_device *udev = urb->dev;
index 67c1aa5eb1c175994faa6593a33b009d496164cb..f8da8c7af7c66b69ff8746b943f0266b1bc3b8ce 100644 (file)
@@ -180,7 +180,7 @@ static int ohci_urb_enqueue (
        struct usb_hcd  *hcd,
        struct usb_host_endpoint *ep,
        struct urb      *urb,
-       unsigned        mem_flags
+       gfp_t           mem_flags
 ) {
        struct ohci_hcd *ohci = hcd_to_ohci (hcd);
        struct ed       *ed;
index fd3c4d3714bd80578b3ae62f8c659141855b0452..9fb83dfb1eb44114fbdb12df95c3a83f2e606dad 100644 (file)
@@ -84,7 +84,7 @@ dma_to_td (struct ohci_hcd *hc, dma_addr_t td_dma)
 
 /* TDs ... */
 static struct td *
-td_alloc (struct ohci_hcd *hc, unsigned mem_flags)
+td_alloc (struct ohci_hcd *hc, gfp_t mem_flags)
 {
        dma_addr_t      dma;
        struct td       *td;
@@ -118,7 +118,7 @@ td_free (struct ohci_hcd *hc, struct td *td)
 
 /* EDs ... */
 static struct ed *
-ed_alloc (struct ohci_hcd *hc, unsigned mem_flags)
+ed_alloc (struct ohci_hcd *hc, gfp_t mem_flags)
 {
        dma_addr_t      dma;
        struct ed       *ed;
index d42a15d10a462de96a4ba16869990f4ae41f1057..cad858575cea783caab2656313fbbb455337a984 100644 (file)
@@ -818,7 +818,7 @@ static int sl811h_urb_enqueue(
        struct usb_hcd          *hcd,
        struct usb_host_endpoint *hep,
        struct urb              *urb,
-       unsigned                mem_flags
+       gfp_t                   mem_flags
 ) {
        struct sl811            *sl811 = hcd_to_sl811(hcd);
        struct usb_device       *udev = urb->dev;
index ea0d168a8c676a520b9309227db4b1b8960bc8a5..4e0fbe2c1a9ab9cd1b53175a46caae0b7541b0d3 100644 (file)
@@ -1164,7 +1164,7 @@ static struct urb *uhci_find_urb_ep(struct uhci_hcd *uhci, struct urb *urb)
 
 static int uhci_urb_enqueue(struct usb_hcd *hcd,
                struct usb_host_endpoint *ep,
-               struct urb *urb, unsigned mem_flags)
+               struct urb *urb, gfp_t mem_flags)
 {
        int ret;
        struct uhci_hcd *uhci = hcd_to_uhci(hcd);
index 03fb70ef2eb3ec14e60c5f32188a24d78d5295b4..0592cb5e6c4d65c6fdf7ecd6e7777bb145f1d4ab 100644 (file)
@@ -137,7 +137,7 @@ static void async_complete(struct urb *urb, struct pt_regs *ptregs)
 
 static struct uss720_async_request *submit_async_request(struct parport_uss720_private *priv,
                                                         __u8 request, __u8 requesttype, __u16 value, __u16 index,
-                                                        unsigned int mem_flags)
+                                                        gfp_t mem_flags)
 {
        struct usb_device *usbdev;
        struct uss720_async_request *rq;
@@ -204,7 +204,7 @@ static unsigned int kill_all_async_requests_priv(struct parport_uss720_private *
 
 /* --------------------------------------------------------------------- */
 
-static int get_1284_register(struct parport *pp, unsigned char reg, unsigned char *val, unsigned int mem_flags)
+static int get_1284_register(struct parport *pp, unsigned char reg, unsigned char *val, gfp_t mem_flags)
 {
        struct parport_uss720_private *priv;
        struct uss720_async_request *rq;
@@ -238,7 +238,7 @@ static int get_1284_register(struct parport *pp, unsigned char reg, unsigned cha
        return -EIO;
 }
 
-static int set_1284_register(struct parport *pp, unsigned char reg, unsigned char val, unsigned int mem_flags)
+static int set_1284_register(struct parport *pp, unsigned char reg, unsigned char val, gfp_t mem_flags)
 {
        struct parport_uss720_private *priv;
        struct uss720_async_request *rq;
index 861f00a4375010315c7d59e9092cebd75ab90624..252a34fbb42cc24931703fcdbac29d6657942421 100644 (file)
@@ -753,7 +753,7 @@ static int ax88772_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 }
 
 static struct sk_buff *ax88772_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
-                                       unsigned flags)
+                                       gfp_t flags)
 {
        int padlen;
        int headroom = skb_headroom(skb);
index c8763ae33c737a3729fabc233551a014dfbd0c27..c0f263b202a60ea70eeda768593d7ee8337b1190 100644 (file)
@@ -301,7 +301,7 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 }
 
 static struct sk_buff *
-genelink_tx_fixup(struct usbnet *dev, struct sk_buff *skb, unsigned flags)
+genelink_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
 {
        int     padlen;
        int     length = skb->len;
index e04b0ce3611a3e0b9999bdf6ecc8bacedf032fa9..c82655d3d448ebf31ff19137cd4b39e3f9ea4965 100644 (file)
@@ -477,13 +477,13 @@ static int kaweth_reset(struct kaweth_device *kaweth)
 }
 
 static void kaweth_usb_receive(struct urb *, struct pt_regs *regs);
-static int kaweth_resubmit_rx_urb(struct kaweth_device *, unsigned);
+static int kaweth_resubmit_rx_urb(struct kaweth_device *, gfp_t);
 
 /****************************************************************
        int_callback
 *****************************************************************/
 
-static void kaweth_resubmit_int_urb(struct kaweth_device *kaweth, int mf)
+static void kaweth_resubmit_int_urb(struct kaweth_device *kaweth, gfp_t mf)
 {
        int status;
 
@@ -550,7 +550,7 @@ static void kaweth_resubmit_tl(void *d)
  *     kaweth_resubmit_rx_urb
  ****************************************************************/
 static int kaweth_resubmit_rx_urb(struct kaweth_device *kaweth,
-                                               unsigned mem_flags)
+                                               gfp_t mem_flags)
 {
        int result;
 
index a4309c4a491b88b07fc2bcdcf4609df4ab7ab3d9..cee55f8cf64fb5b2ff97b9f146615bcbb91e2146 100644 (file)
@@ -500,7 +500,7 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 }
 
 static struct sk_buff *
-net1080_tx_fixup(struct usbnet *dev, struct sk_buff *skb, unsigned flags)
+net1080_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
 {
        int                     padlen;
        struct sk_buff          *skb2;
index 2ed2e5fb77780e2f8a46c487aecae6734aa109ac..b5a925dc1beb2623c97d78973944f5c3157fc1bd 100644 (file)
@@ -517,7 +517,7 @@ static int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 }
 
 static struct sk_buff *
-rndis_tx_fixup(struct usbnet *dev, struct sk_buff *skb, unsigned flags)
+rndis_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
 {
        struct rndis_data_hdr   *hdr;
        struct sk_buff          *skb2;
index 6c460918d54f53a81bb6b6c0cd5fd163796c66a1..fce81d73893393ca9db60554448370caf7ade939 100644 (file)
@@ -288,7 +288,7 @@ EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
 
 static void rx_complete (struct urb *urb, struct pt_regs *regs);
 
-static void rx_submit (struct usbnet *dev, struct urb *urb, unsigned flags)
+static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
 {
        struct sk_buff          *skb;
        struct skb_data         *entry;
index 7aa0abd1a9bd2c0a3386034c28b78dcdc95fa364..89fc4958eecf81f72c5f93128d5994168b7f3c43 100644 (file)
@@ -107,7 +107,7 @@ struct driver_info {
 
        /* fixup tx packet (add framing) */
        struct sk_buff  *(*tx_fixup)(struct usbnet *dev,
-                               struct sk_buff *skb, unsigned flags);
+                               struct sk_buff *skb, gfp_t flags);
 
        /* for new devices, use the descriptor-reading code instead */
        int             in;             /* rx endpoint */
index ee3b892aeabce231d83b5c712423ba8b8bcaf609..5d4b7d55b097c8937032e335243a51e510e0dfaa 100644 (file)
@@ -62,7 +62,7 @@
  */
 
 static struct sk_buff *
-zaurus_tx_fixup(struct usbnet *dev, struct sk_buff *skb, unsigned flags)
+zaurus_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
 {
        int                     padlen;
        struct sk_buff          *skb2;
index c4e479ee926aa75b3b53e27eb5744fca5c20859a..2f52261c7cc13eb0e23dd7f8430dfe9df3fe4b9f 100644 (file)
@@ -521,7 +521,7 @@ static int zd1201_setconfig(struct zd1201 *zd, int rid, void *buf, int len, int
        int reqlen;
        char seq=0;
        struct urb *urb;
-       unsigned int gfp_mask = wait ? GFP_NOIO : GFP_ATOMIC;
+       gfp_t gfp_mask = wait ? GFP_NOIO : GFP_ATOMIC;
 
        len += 4;                       /* first 4 are for header */
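Besides the type change, the zd1201 hunk above shows the usual rule for choosing the flags value itself: sleepable paths that must not recurse into I/O use GFP_NOIO, while paths that cannot block fall back to GFP_ATOMIC. A small sketch (variable names hypothetical):

    gfp_t gfp_mask = can_sleep ? GFP_NOIO : GFP_ATOMIC;
    void *buf = kmalloc(len, gfp_mask);   /* GFP_ATOMIC fails rather than sleeps */
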
 
index 321dbe91dc14e270450216cf4cc79562d89ca225..cde6fd8eb390a6b85798ce321bf5ced9b6abb9fb 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/ioport.h>
 #include <linux/list.h>
 
+#include <asm/sizes.h>
 #include <asm/hardware/amba.h>
 #include <asm/hardware/clock.h>
 
index 23c125128024adb92a3f6023790e0e8d3f7db402..0d576987ec670dae2349d0586defdeb58dc52356 100644 (file)
@@ -29,7 +29,7 @@ static int afs_file_release(struct inode *inode, struct file *file);
 
 static int afs_file_readpage(struct file *file, struct page *page);
 static int afs_file_invalidatepage(struct page *page, unsigned long offset);
-static int afs_file_releasepage(struct page *page, int gfp_flags);
+static int afs_file_releasepage(struct page *page, gfp_t gfp_flags);
 
 static ssize_t afs_file_write(struct file *file, const char __user *buf,
                              size_t size, loff_t *off);
@@ -279,7 +279,7 @@ static int afs_file_invalidatepage(struct page *page, unsigned long offset)
 /*
  * release a page and cleanup its private data
  */
-static int afs_file_releasepage(struct page *page, int gfp_flags)
+static int afs_file_releasepage(struct page *page, gfp_t gfp_flags)
 {
        struct cachefs_page *pageio;
 
index 7d81a93afd480c9d9f0335c208f130946b1dc259..460554b07ff91501739ae7b2f21ea14392ea5e51 100644 (file)
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -778,7 +778,7 @@ static int bio_map_kern_endio(struct bio *bio, unsigned int bytes_done, int err)
 
 
 static struct bio *__bio_map_kern(request_queue_t *q, void *data,
-                                 unsigned int len, unsigned int gfp_mask)
+                                 unsigned int len, gfp_t gfp_mask)
 {
        unsigned long kaddr = (unsigned long)data;
        unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -825,7 +825,7 @@ static struct bio *__bio_map_kern(request_queue_t *q, void *data,
  *     device. Returns an error pointer in case of error.
  */
 struct bio *bio_map_kern(request_queue_t *q, void *data, unsigned int len,
-                        unsigned int gfp_mask)
+                        gfp_t gfp_mask)
 {
        struct bio *bio;
 
index 1216c0d3c8ce179adecc466a772d06ea26f4ed8d..b1667986442f130b9fe523b85f174966100cd09e 100644 (file)
@@ -502,7 +502,7 @@ static void free_more_memory(void)
        yield();
 
        for_each_pgdat(pgdat) {
-               zones = pgdat->node_zonelists[GFP_NOFS&GFP_ZONEMASK].zones;
+               zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
                if (*zones)
                        try_to_free_pages(zones, GFP_NOFS);
        }
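gfp_zone() replaces the open-coded GFP_NOFS & GFP_ZONEMASK, so the translation from allocation flags to a zonelist index lives in one helper instead of being repeated at call sites. Only as a hedged sketch of what such a helper roughly reduces to at this point (the real definition lives in the gfp headers and may carry extra checks):

    static inline int example_gfp_zone(gfp_t gfp_flags)
    {
            return gfp_flags & GFP_ZONEMASK;   /* low bits select the zone */
    }
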
@@ -1571,7 +1571,7 @@ static inline void discard_buffer(struct buffer_head * bh)
  *
  * NOTE: @gfp_mask may go away, and this function may become non-blocking.
  */
-int try_to_release_page(struct page *page, int gfp_mask)
+int try_to_release_page(struct page *page, gfp_t gfp_mask)
 {
        struct address_space * const mapping = page->mapping;
 
index fb10386c59bed6b34a51af4fb930eb79c86ac586..e90512ed35a4eca72e839434a075d1a4b8886c24 100644 (file)
@@ -689,7 +689,7 @@ void shrink_dcache_anon(struct hlist_head *head)
  *
  * In this case we return -1 to tell the caller that we baled.
  */
-static int shrink_dcache_memory(int nr, unsigned int gfp_mask)
+static int shrink_dcache_memory(int nr, gfp_t gfp_mask)
 {
        if (nr) {
                if (!(gfp_mask & __GFP_FS))
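The cache-shrinker callbacks (dcache here, dquot, icache and mbcache below) get the same gfp_t treatment. Their shape is visible in the hunk above: when asked to scan, a shrinker first checks __GFP_FS so it never re-enters the filesystem from an allocation that forbids it (returning -1 to say it bailed), and otherwise reports how many objects remain reclaimable. A condensed sketch (names hypothetical):

    static int example_cache_count;

    static int example_shrink(int nr_to_scan, gfp_t gfp_mask)
    {
            if (nr_to_scan) {
                    if (!(gfp_mask & __GFP_FS))
                            return -1;          /* caller must not re-enter the fs */
                    /* ... free up to nr_to_scan cached objects here ... */
            }
            return example_cache_count;         /* objects still reclaimable */
    }
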
index b9732335bcdcd6006733a9561e9ef8ceb8769117..05f3327d64a3f7b93b064af0e323b34dbeff97af 100644 (file)
@@ -500,7 +500,7 @@ static void prune_dqcache(int count)
  * more memory
  */
 
-static int shrink_dqcache_memory(int nr, unsigned int gfp_mask)
+static int shrink_dqcache_memory(int nr, gfp_t gfp_mask)
 {
        if (nr) {
                spin_lock(&dq_list_lock);
index a04a575ad433c7143dd376ab96d99ae9a782ace8..d2208f7c87db81c30d8bdd3ddf965e887f693e4a 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -126,8 +126,7 @@ asmlinkage long sys_uselib(const char __user * library)
        struct nameidata nd;
        int error;
 
-       nd.intent.open.flags = FMODE_READ;
-       error = __user_walk(library, LOOKUP_FOLLOW|LOOKUP_OPEN, &nd);
+       error = __user_path_lookup_open(library, LOOKUP_FOLLOW, &nd, FMODE_READ);
        if (error)
                goto out;
 
@@ -139,7 +138,7 @@ asmlinkage long sys_uselib(const char __user * library)
        if (error)
                goto exit;
 
-       file = dentry_open(nd.dentry, nd.mnt, O_RDONLY);
+       file = nameidata_to_filp(&nd, O_RDONLY);
        error = PTR_ERR(file);
        if (IS_ERR(file))
                goto out;
@@ -167,6 +166,7 @@ asmlinkage long sys_uselib(const char __user * library)
 out:
        return error;
 exit:
+       release_open_intent(&nd);
        path_release(&nd);
        goto out;
 }
@@ -490,8 +490,7 @@ struct file *open_exec(const char *name)
        int err;
        struct file *file;
 
-       nd.intent.open.flags = FMODE_READ;
-       err = path_lookup(name, LOOKUP_FOLLOW|LOOKUP_OPEN, &nd);
+       err = path_lookup_open(name, LOOKUP_FOLLOW, &nd, FMODE_READ);
        file = ERR_PTR(err);
 
        if (!err) {
@@ -504,7 +503,7 @@ struct file *open_exec(const char *name)
                                err = -EACCES;
                        file = ERR_PTR(err);
                        if (!err) {
-                               file = dentry_open(nd.dentry, nd.mnt, O_RDONLY);
+                               file = nameidata_to_filp(&nd, O_RDONLY);
                                if (!IS_ERR(file)) {
                                        err = deny_write_access(file);
                                        if (err) {
@@ -516,6 +515,7 @@ out:
                                return file;
                        }
                }
+               release_open_intent(&nd);
                path_release(&nd);
        }
        goto out;
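Both exec.c call sites now use the intent-aware lookup helpers added in fs/namei.c later in this commit: the lookup pre-allocates the struct file, nameidata_to_filp() turns it into the opened file without a second lookup, and release_open_intent() frees the pre-allocated file on the pre-open error paths. A hedged sketch condensed from the two hunks above (function name made up, permission checks omitted):

    static struct file *example_open_for_read(const char *name)
    {
            struct nameidata nd;
            int error = path_lookup_open(name, LOOKUP_FOLLOW, &nd, FMODE_READ);

            if (error)
                    return ERR_PTR(error);
            /* permission checks on nd.dentry would go here; their error path is
             *         release_open_intent(&nd); path_release(&nd);              */
            return nameidata_to_filp(&nd, O_RDONLY);  /* reuses the pre-allocated file;
                                                         per the sys_uselib hunk above,
                                                         no extra cleanup if it fails   */
    }
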
index b5177c90d6f111e63fe3d13f498c4bd93306b22f..8b38f2232796fda9c0f25294654c49a8d25cde11 100644 (file)
@@ -1434,7 +1434,7 @@ static int ext3_invalidatepage(struct page *page, unsigned long offset)
        return journal_invalidatepage(journal, page, offset);
 }
 
-static int ext3_releasepage(struct page *page, int wait)
+static int ext3_releasepage(struct page *page, gfp_t wait)
 {
        journal_t *journal = EXT3_JOURNAL(page->mapping->host);
 
index f1570b9f9de30427f133670f02893a67726ed6ed..3f680c5675bf5c7e45a9805b6102752e8d0f0b1e 100644 (file)
@@ -46,7 +46,7 @@ static sector_t hfs_bmap(struct address_space *mapping, sector_t block)
        return generic_block_bmap(mapping, block, hfs_get_block);
 }
 
-static int hfs_releasepage(struct page *page, int mask)
+static int hfs_releasepage(struct page *page, gfp_t mask)
 {
        struct inode *inode = page->mapping->host;
        struct super_block *sb = inode->i_sb;
index d5642705f6336279088811e6e109a4e6629b0a8f..f205773ddfbebc661fce1278306234cfd1538945 100644 (file)
@@ -40,7 +40,7 @@ static sector_t hfsplus_bmap(struct address_space *mapping, sector_t block)
        return generic_block_bmap(mapping, block, hfsplus_get_block);
 }
 
-static int hfsplus_releasepage(struct page *page, int mask)
+static int hfsplus_releasepage(struct page *page, gfp_t mask)
 {
        struct inode *inode = page->mapping->host;
        struct super_block *sb = inode->i_sb;
index f80a79ff156b5c4feafe54d17fee693fdec6b71c..7d3316527767ce9896f5d299d0bb18a7baea6538 100644 (file)
@@ -475,7 +475,7 @@ static void prune_icache(int nr_to_scan)
  * This function is passed the number of inodes to scan, and it returns the
  * total number of remaining possibly-reclaimable inodes.
  */
-static int shrink_icache_memory(int nr, unsigned int gfp_mask)
+static int shrink_icache_memory(int nr, gfp_t gfp_mask)
 {
        if (nr) {
                /*
index 7ae2c4fe506bb5bd7b2ac21c6bbf80795741361d..e4b516ac4989ef3ddef852f5c764ef816e0c3a56 100644 (file)
@@ -1606,7 +1606,7 @@ int journal_blocks_per_page(struct inode *inode)
  * Simple support for retrying memory allocations.  Introduced to help to
  * debug different VM deadlock avoidance strategies. 
  */
-void * __jbd_kmalloc (const char *where, size_t size, int flags, int retry)
+void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry)
 {
        return kmalloc(size, flags | (retry ? __GFP_NOFAIL : 0));
 }
index 49bbc2be3d72937ba4719e656a140bf1c9ea7aba..13cb05bf60489e0005d635d66a63b47692ed72a6 100644 (file)
@@ -1621,7 +1621,7 @@ out:
  * while the data is part of a transaction.  Yes?
  */
 int journal_try_to_free_buffers(journal_t *journal, 
-                               struct page *page, int unused_gfp_mask)
+                               struct page *page, gfp_t unused_gfp_mask)
 {
        struct buffer_head *head;
        struct buffer_head *bh;
index 13d7e3f1feb4e0c8a0d18462fd5cfb657702da1f..eeb37d70e6509439498aa224b7f00f723d00270e 100644 (file)
@@ -198,7 +198,7 @@ static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
        }
 }
 
-static inline struct metapage *alloc_metapage(unsigned int gfp_mask)
+static inline struct metapage *alloc_metapage(gfp_t gfp_mask)
 {
        return mempool_alloc(metapage_mempool, gfp_mask);
 }
@@ -534,7 +534,7 @@ add_failed:
        return -EIO;
 }
 
-static int metapage_releasepage(struct page *page, int gfp_mask)
+static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
 {
        struct metapage *mp;
        int busy = 0;
index 82c77df81c5f1e2ed706d7f4be5206b121dd1454..c4c8601096e03f384160e2c9c923f6793a288735 100644 (file)
@@ -173,11 +173,10 @@ nlm_bind_host(struct nlm_host *host)
 
        /* If we've already created an RPC client, check whether
         * RPC rebind is required
-        * Note: why keep rebinding if we're on a tcp connection?
         */
        if ((clnt = host->h_rpcclnt) != NULL) {
                xprt = clnt->cl_xprt;
-               if (!xprt->stream && time_after_eq(jiffies, host->h_nextrebind)) {
+               if (time_after_eq(jiffies, host->h_nextrebind)) {
                        clnt->cl_port = 0;
                        host->h_nextrebind = jiffies + NLM_HOST_REBIND;
                        dprintk("lockd: next rebind in %ld jiffies\n",
@@ -189,7 +188,6 @@ nlm_bind_host(struct nlm_host *host)
                        goto forgetit;
 
                xprt_set_timeout(&xprt->timeout, 5, nlmsvc_timeout);
-               xprt->nocong = 1;       /* No congestion control for NLM */
                xprt->resvport = 1;     /* NLM requires a reserved port */
 
                /* Existing NLM servers accept AUTH_UNIX only */
index f7daa5f48949c41ea02b687e1521a440387b0592..a1e8b2248014c0aa1d7ae8b2766440f753929908 100644 (file)
@@ -316,21 +316,22 @@ static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
        /* POSIX-1996 leaves the case l->l_len < 0 undefined;
           POSIX-2001 defines it. */
        start += l->l_start;
-       end = start + l->l_len - 1;
-       if (l->l_len < 0) {
+       if (start < 0)
+               return -EINVAL;
+       fl->fl_end = OFFSET_MAX;
+       if (l->l_len > 0) {
+               end = start + l->l_len - 1;
+               fl->fl_end = end;
+       } else if (l->l_len < 0) {
                end = start - 1;
+               fl->fl_end = end;
                start += l->l_len;
+               if (start < 0)
+                       return -EINVAL;
        }
-
-       if (start < 0)
-               return -EINVAL;
-       if (l->l_len > 0 && end < 0)
-               return -EOVERFLOW;
-
        fl->fl_start = start;   /* we record the absolute position */
-       fl->fl_end = end;
-       if (l->l_len == 0)
-               fl->fl_end = OFFSET_MAX;
+       if (fl->fl_end < fl->fl_start)
+               return -EOVERFLOW;
        
        fl->fl_owner = current->files;
        fl->fl_pid = current->tgid;
@@ -362,14 +363,21 @@ static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
                return -EINVAL;
        }
 
-       if (((start += l->l_start) < 0) || (l->l_len < 0))
+       start += l->l_start;
+       if (start < 0)
                return -EINVAL;
-       fl->fl_end = start + l->l_len - 1;
-       if (l->l_len > 0 && fl->fl_end < 0)
-               return -EOVERFLOW;
+       fl->fl_end = OFFSET_MAX;
+       if (l->l_len > 0) {
+               fl->fl_end = start + l->l_len - 1;
+       } else if (l->l_len < 0) {
+               fl->fl_end = start - 1;
+               start += l->l_len;
+               if (start < 0)
+                       return -EINVAL;
+       }
        fl->fl_start = start;   /* we record the absolute position */
-       if (l->l_len == 0)
-               fl->fl_end = OFFSET_MAX;
+       if (fl->fl_end < fl->fl_start)
+               return -EOVERFLOW;
        
        fl->fl_owner = current->files;
        fl->fl_pid = current->tgid;
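The rewritten conversions make the POSIX l_len semantics explicit: a positive length extends forward from l_start, a negative length (POSIX-2001) covers the bytes just before l_start, and zero means "to end of file". Worked through with hypothetical numbers:

    l_start = 100, l_len =  10   ->   fl_start = 100, fl_end = 109
    l_start = 100, l_len = -10   ->   fl_start =  90, fl_end =  99
    l_start = 100, l_len =   0   ->   fl_start = 100, fl_end = OFFSET_MAX

A start that ends up negative returns -EINVAL, and fl_end < fl_start (in practice, overflow of start + l_len) returns -EOVERFLOW. The __posix_lock_file() hunk below is the other half of the same cleanup: adjacency checks compare against fl_start - 1 instead of fl_end + 1 so that fl_end == OFFSET_MAX cannot wrap negative.
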
@@ -829,12 +837,16 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request)
                /* Detect adjacent or overlapping regions (if same lock type)
                 */
                if (request->fl_type == fl->fl_type) {
+                       /* In all comparisons of start vs end, use
+                        * "start - 1" rather than "end + 1". If end
+                        * is OFFSET_MAX, end + 1 will become negative.
+                        */
                        if (fl->fl_end < request->fl_start - 1)
                                goto next_lock;
                        /* If the next lock in the list has entirely bigger
                         * addresses than the new one, insert the lock here.
                         */
-                       if (fl->fl_start > request->fl_end + 1)
+                       if (fl->fl_start - 1 > request->fl_end)
                                break;
 
                        /* If we come here, the new and old lock are of the
index b002a088857da50e26efcfc72d8f76e0d0b8379c..298997f174755649a7b3ca5b72bb9e2613a53464 100644 (file)
@@ -116,7 +116,7 @@ mb_cache_indexes(struct mb_cache *cache)
  * What the mbcache registers as to get shrunk dynamically.
  */
 
-static int mb_cache_shrink_fn(int nr_to_scan, unsigned int gfp_mask);
+static int mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask);
 
 
 static inline int
@@ -140,7 +140,7 @@ __mb_cache_entry_unhash(struct mb_cache_entry *ce)
 
 
 static inline void
-__mb_cache_entry_forget(struct mb_cache_entry *ce, int gfp_mask)
+__mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask)
 {
        struct mb_cache *cache = ce->e_cache;
 
@@ -193,7 +193,7 @@ forget:
  * Returns the number of objects which are present in the cache.
  */
 static int
-mb_cache_shrink_fn(int nr_to_scan, unsigned int gfp_mask)
+mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask)
 {
        LIST_HEAD(free_list);
        struct list_head *l, *ltmp;
index aa62dbda93ac01234670658a315c5472552238aa..aaaa810362344af7790e066a47a233100d491542 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/syscalls.h>
 #include <linux/mount.h>
 #include <linux/audit.h>
+#include <linux/file.h>
 #include <asm/namei.h>
 #include <asm/uaccess.h>
 
@@ -317,6 +318,18 @@ void path_release_on_umount(struct nameidata *nd)
        mntput_no_expire(nd->mnt);
 }
 
+/**
+ * release_open_intent - free up open intent resources
+ * @nd: pointer to nameidata
+ */
+void release_open_intent(struct nameidata *nd)
+{
+       if (nd->intent.open.file->f_dentry == NULL)
+               put_filp(nd->intent.open.file);
+       else
+               fput(nd->intent.open.file);
+}
+
 /*
  * Internal lookup() using the new generic dcache.
  * SMP-safe
@@ -750,6 +763,7 @@ static fastcall int __link_path_walk(const char * name, struct nameidata *nd)
                struct qstr this;
                unsigned int c;
 
+               nd->flags |= LOOKUP_CONTINUE;
                err = exec_permission_lite(inode, nd);
                if (err == -EAGAIN) { 
                        err = permission(inode, MAY_EXEC, nd);
@@ -802,7 +816,6 @@ static fastcall int __link_path_walk(const char * name, struct nameidata *nd)
                        if (err < 0)
                                break;
                }
-               nd->flags |= LOOKUP_CONTINUE;
                /* This does the actual lookups.. */
                err = do_lookup(nd, &this, &next);
                if (err)
@@ -1052,6 +1065,70 @@ out:
        return retval;
 }
 
+static int __path_lookup_intent_open(const char *name, unsigned int lookup_flags,
+               struct nameidata *nd, int open_flags, int create_mode)
+{
+       struct file *filp = get_empty_filp();
+       int err;
+
+       if (filp == NULL)
+               return -ENFILE;
+       nd->intent.open.file = filp;
+       nd->intent.open.flags = open_flags;
+       nd->intent.open.create_mode = create_mode;
+       err = path_lookup(name, lookup_flags|LOOKUP_OPEN, nd);
+       if (IS_ERR(nd->intent.open.file)) {
+               if (err == 0) {
+                       err = PTR_ERR(nd->intent.open.file);
+                       path_release(nd);
+               }
+       } else if (err != 0)
+               release_open_intent(nd);
+       return err;
+}
+
+/**
+ * path_lookup_open - lookup a file path with open intent
+ * @name: pointer to file name
+ * @lookup_flags: lookup intent flags
+ * @nd: pointer to nameidata
+ * @open_flags: open intent flags
+ */
+int path_lookup_open(const char *name, unsigned int lookup_flags,
+               struct nameidata *nd, int open_flags)
+{
+       return __path_lookup_intent_open(name, lookup_flags, nd,
+                       open_flags, 0);
+}
+
+/**
+ * path_lookup_create - lookup a file path with open + create intent
+ * @name: pointer to file name
+ * @lookup_flags: lookup intent flags
+ * @nd: pointer to nameidata
+ * @open_flags: open intent flags
+ * @create_mode: create intent flags
+ */
+int path_lookup_create(const char *name, unsigned int lookup_flags,
+               struct nameidata *nd, int open_flags, int create_mode)
+{
+       return __path_lookup_intent_open(name, lookup_flags|LOOKUP_CREATE, nd,
+                       open_flags, create_mode);
+}
+
+int __user_path_lookup_open(const char __user *name, unsigned int lookup_flags,
+               struct nameidata *nd, int open_flags)
+{
+       char *tmp = getname(name);
+       int err = PTR_ERR(tmp);
+
+       if (!IS_ERR(tmp)) {
+               err = __path_lookup_intent_open(tmp, lookup_flags, nd, open_flags, 0);
+               putname(tmp);
+       }
+       return err;
+}
+
 /*
  * Restricted form of lookup. Doesn't follow links, single-component only,
  * needs parent already locked. Doesn't follow mounts.
@@ -1416,27 +1493,27 @@ int may_open(struct nameidata *nd, int acc_mode, int flag)
  */
 int open_namei(const char * pathname, int flag, int mode, struct nameidata *nd)
 {
-       int acc_mode, error = 0;
+       int acc_mode, error;
        struct path path;
        struct dentry *dir;
        int count = 0;
 
        acc_mode = ACC_MODE(flag);
 
+       /* O_TRUNC implies we need access checks for write permissions */
+       if (flag & O_TRUNC)
+               acc_mode |= MAY_WRITE;
+
        /* Allow the LSM permission hook to distinguish append 
           access from general write access. */
        if (flag & O_APPEND)
                acc_mode |= MAY_APPEND;
 
-       /* Fill in the open() intent data */
-       nd->intent.open.flags = flag;
-       nd->intent.open.create_mode = mode;
-
        /*
         * The simplest case - just a plain lookup.
         */
        if (!(flag & O_CREAT)) {
-               error = path_lookup(pathname, lookup_flags(flag)|LOOKUP_OPEN, nd);
+               error = path_lookup_open(pathname, lookup_flags(flag), nd, flag);
                if (error)
                        return error;
                goto ok;
@@ -1445,7 +1522,7 @@ int open_namei(const char * pathname, int flag, int mode, struct nameidata *nd)
        /*
         * Create - we need to know the parent.
         */
-       error = path_lookup(pathname, LOOKUP_PARENT|LOOKUP_OPEN|LOOKUP_CREATE, nd);
+       error = path_lookup_create(pathname, LOOKUP_PARENT, nd, flag, mode);
        if (error)
                return error;
 
@@ -1520,6 +1597,8 @@ ok:
 exit_dput:
        dput_path(&path, nd);
 exit:
+       if (!IS_ERR(nd->intent.open.file))
+               release_open_intent(nd);
        path_release(nd);
        return error;
 
index 4a36839f0bbdc7adb31dfdae9d8a739bc6a1b0ab..44135af9894cc44bf733546d18513ce360c86fda 100644 (file)
@@ -142,7 +142,7 @@ static void nfs_msync_inode(struct inode *inode)
 /*
  * Basic procedure for returning a delegation to the server
  */
-int nfs_inode_return_delegation(struct inode *inode)
+int __nfs_inode_return_delegation(struct inode *inode)
 {
        struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
        struct nfs_inode *nfsi = NFS_I(inode);
index 3f6c45a29d6a4978d2a7a5bf39b8d2110b309564..8017846b561f9ece35baa6489432b6d721c96e42 100644 (file)
@@ -25,7 +25,7 @@ struct nfs_delegation {
 
 int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
 void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
-int nfs_inode_return_delegation(struct inode *inode);
+int __nfs_inode_return_delegation(struct inode *inode);
 int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid);
 
 struct inode *nfs_delegation_find_inode(struct nfs4_client *clp, const struct nfs_fh *fhandle);
@@ -47,11 +47,25 @@ static inline int nfs_have_delegation(struct inode *inode, int flags)
                return 1;
        return 0;
 }
+
+static inline int nfs_inode_return_delegation(struct inode *inode)
+{
+       int err = 0;
+
+       if (NFS_I(inode)->delegation != NULL)
+               err = __nfs_inode_return_delegation(inode);
+       return err;
+}
 #else
 static inline int nfs_have_delegation(struct inode *inode, int flags)
 {
        return 0;
 }
+
+static inline int nfs_inode_return_delegation(struct inode *inode)
+{
+       return 0;
+}
 #endif
 
 #endif
index 2df639f143e8065cdd647044a4283e28fe89d092..8272ed3fc70797160bbfc8e3bf764ba4e099b0b8 100644 (file)
@@ -532,6 +532,7 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
        my_entry.eof = 0;
        my_entry.fh = &fh;
        my_entry.fattr = &fattr;
+       nfs_fattr_init(&fattr);
        desc->entry = &my_entry;
 
        while(!desc->entry->eof) {
@@ -565,8 +566,6 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                }
        }
        unlock_kernel();
-       if (desc->error < 0)
-               return desc->error;
        if (res < 0)
                return res;
        return 0;
@@ -803,6 +802,7 @@ static int nfs_dentry_delete(struct dentry *dentry)
  */
 static void nfs_dentry_iput(struct dentry *dentry, struct inode *inode)
 {
+       nfs_inode_return_delegation(inode);
        if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
                lock_kernel();
                inode->i_nlink--;
@@ -853,12 +853,6 @@ static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, stru
        dentry->d_op = NFS_PROTO(dir)->dentry_ops;
 
        lock_kernel();
-       /* Revalidate parent directory attribute cache */
-       error = nfs_revalidate_inode(NFS_SERVER(dir), dir);
-       if (error < 0) {
-               res = ERR_PTR(error);
-               goto out_unlock;
-       }
 
        /* If we're doing an exclusive create, optimize away the lookup */
        if (nfs_is_exclusive_create(dir, nd))
@@ -916,7 +910,6 @@ static int is_atomic_open(struct inode *dir, struct nameidata *nd)
 static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
 {
        struct dentry *res = NULL;
-       struct inode *inode = NULL;
        int error;
 
        /* Check that we are indeed trying to open this file */
@@ -930,8 +923,10 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry
        dentry->d_op = NFS_PROTO(dir)->dentry_ops;
 
        /* Let vfs_create() deal with O_EXCL */
-       if (nd->intent.open.flags & O_EXCL)
-               goto no_entry;
+       if (nd->intent.open.flags & O_EXCL) {
+               d_add(dentry, NULL);
+               goto out;
+       }
 
        /* Open the file on the server */
        lock_kernel();
@@ -945,32 +940,30 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry
 
        if (nd->intent.open.flags & O_CREAT) {
                nfs_begin_data_update(dir);
-               inode = nfs4_atomic_open(dir, dentry, nd);
+               res = nfs4_atomic_open(dir, dentry, nd);
                nfs_end_data_update(dir);
        } else
-               inode = nfs4_atomic_open(dir, dentry, nd);
+               res = nfs4_atomic_open(dir, dentry, nd);
        unlock_kernel();
-       if (IS_ERR(inode)) {
-               error = PTR_ERR(inode);
+       if (IS_ERR(res)) {
+               error = PTR_ERR(res);
                switch (error) {
                        /* Make a negative dentry */
                        case -ENOENT:
-                               inode = NULL;
-                               break;
+                               res = NULL;
+                               goto out;
                        /* This turned out not to be a regular file */
+                       case -EISDIR:
+                       case -ENOTDIR:
+                               goto no_open;
                        case -ELOOP:
                                if (!(nd->intent.open.flags & O_NOFOLLOW))
                                        goto no_open;
-                       /* case -EISDIR: */
                        /* case -EINVAL: */
                        default:
-                               res = ERR_PTR(error);
                                goto out;
                }
-       }
-no_entry:
-       res = d_add_unique(dentry, inode);
-       if (res != NULL)
+       } else if (res != NULL)
                dentry = res;
        nfs_renew_times(dentry);
        nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
@@ -1014,7 +1007,7 @@ static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd)
         */
        lock_kernel();
        verifier = nfs_save_change_attribute(dir);
-       ret = nfs4_open_revalidate(dir, dentry, openflags);
+       ret = nfs4_open_revalidate(dir, dentry, openflags, nd);
        if (!ret)
                nfs_set_verifier(dentry, verifier);
        unlock_kernel();
@@ -1137,7 +1130,7 @@ static int nfs_create(struct inode *dir, struct dentry *dentry, int mode,
 
        lock_kernel();
        nfs_begin_data_update(dir);
-       error = NFS_PROTO(dir)->create(dir, dentry, &attr, open_flags);
+       error = NFS_PROTO(dir)->create(dir, dentry, &attr, open_flags, nd);
        nfs_end_data_update(dir);
        if (error != 0)
                goto out_err;
@@ -1332,6 +1325,7 @@ static int nfs_safe_remove(struct dentry *dentry)
 
        nfs_begin_data_update(dir);
        if (inode != NULL) {
+               nfs_inode_return_delegation(inode);
                nfs_begin_data_update(inode);
                error = NFS_PROTO(dir)->remove(dir, &dentry->d_name);
                /* The VFS may want to delete this inode */
@@ -1438,17 +1432,14 @@ nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
                old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
                dentry->d_parent->d_name.name, dentry->d_name.name);
 
-       /*
-        * Drop the dentry in advance to force a new lookup.
-        * Since nfs_proc_link doesn't return a file handle,
-        * we can't use the existing dentry.
-        */
        lock_kernel();
-       d_drop(dentry);
-
        nfs_begin_data_update(dir);
        nfs_begin_data_update(inode);
        error = NFS_PROTO(dir)->link(inode, dir, &dentry->d_name);
+       if (error == 0) {
+               atomic_inc(&inode->i_count);
+               d_instantiate(dentry, inode);
+       }
        nfs_end_data_update(inode);
        nfs_end_data_update(dir);
        unlock_kernel();
@@ -1512,9 +1503,11 @@ static int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
         */
        if (!new_inode)
                goto go_ahead;
-       if (S_ISDIR(new_inode->i_mode))
-               goto out;
-       else if (atomic_read(&new_dentry->d_count) > 2) {
+       if (S_ISDIR(new_inode->i_mode)) {
+               error = -EISDIR;
+               if (!S_ISDIR(old_inode->i_mode))
+                       goto out;
+       } else if (atomic_read(&new_dentry->d_count) > 2) {
                int err;
                /* copy the target dentry's name */
                dentry = d_alloc(new_dentry->d_parent,
@@ -1539,7 +1532,8 @@ static int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 #endif
                        goto out;
                }
-       }
+       } else
+               new_inode->i_nlink--;
 
 go_ahead:
        /*
@@ -1549,6 +1543,7 @@ go_ahead:
                nfs_wb_all(old_inode);
                shrink_dcache_parent(old_dentry);
        }
+       nfs_inode_return_delegation(old_inode);
 
        if (new_inode)
                d_delete(new_dentry);
index 6bdcfa95de94630771ecbe79393c0cfdfd4f22bd..57d3e77d97ee1eb2e9c291695078d8b06f902d24 100644 (file)
@@ -205,8 +205,8 @@ nfs_file_flush(struct file *file)
        if (!status) {
                status = ctx->error;
                ctx->error = 0;
-               if (!status && !nfs_have_delegation(inode, FMODE_READ))
-                       __nfs_revalidate_inode(NFS_SERVER(inode), inode);
+               if (!status)
+                       nfs_revalidate_inode(NFS_SERVER(inode), inode);
        }
        unlock_kernel();
        return status;
@@ -376,22 +376,31 @@ out_swapfile:
 
 static int do_getlk(struct file *filp, int cmd, struct file_lock *fl)
 {
+       struct file_lock *cfl;
        struct inode *inode = filp->f_mapping->host;
        int status = 0;
 
        lock_kernel();
-       /* Use local locking if mounted with "-onolock" */
-       if (!(NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM))
-               status = NFS_PROTO(inode)->lock(filp, cmd, fl);
-       else {
-               struct file_lock *cfl = posix_test_lock(filp, fl);
-
-               fl->fl_type = F_UNLCK;
-               if (cfl != NULL)
-                       memcpy(fl, cfl, sizeof(*fl));
+       /* Try local locking first */
+       cfl = posix_test_lock(filp, fl);
+       if (cfl != NULL) {
+               locks_copy_lock(fl, cfl);
+               goto out;
        }
+
+       if (nfs_have_delegation(inode, FMODE_READ))
+               goto out_noconflict;
+
+       if (NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM)
+               goto out_noconflict;
+
+       status = NFS_PROTO(inode)->lock(filp, cmd, fl);
+out:
        unlock_kernel();
        return status;
+out_noconflict:
+       fl->fl_type = F_UNLCK;
+       goto out;
 }
 
 static int do_vfs_lock(struct file *file, struct file_lock *fl)
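do_getlk() now resolves F_GETLK in a fixed order, visible in the hunk above: test local POSIX state first, and only fall through to the server when nothing local can answer. Summarized:

    1. posix_test_lock()                conflicting local lock  -> copy it into fl, done
    2. nfs_have_delegation(FMODE_READ)  read delegation held    -> report F_UNLCK
    3. NFS_MOUNT_NONLM ("-onolock")     local locking only      -> report F_UNLCK
    4. otherwise                        NFS_PROTO(inode)->lock(filp, cmd, fl)
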
index d4eadeea128e94f738c013eabe1bc69167645092..f2781ca42761c885a56ad2ea70effd4ca5f4599a 100644 (file)
@@ -358,6 +358,35 @@ out_no_root:
        return no_root_error;
 }
 
+static void nfs_init_timeout_values(struct rpc_timeout *to, int proto, unsigned int timeo, unsigned int retrans)
+{
+       to->to_initval = timeo * HZ / 10;
+       to->to_retries = retrans;
+       if (!to->to_retries)
+               to->to_retries = 2;
+
+       switch (proto) {
+       case IPPROTO_TCP:
+               if (!to->to_initval)
+                       to->to_initval = 60 * HZ;
+               if (to->to_initval > NFS_MAX_TCP_TIMEOUT)
+                       to->to_initval = NFS_MAX_TCP_TIMEOUT;
+               to->to_increment = to->to_initval;
+               to->to_maxval = to->to_initval + (to->to_increment * to->to_retries);
+               to->to_exponential = 0;
+               break;
+       case IPPROTO_UDP:
+       default:
+               if (!to->to_initval)
+                       to->to_initval = 11 * HZ / 10;
+               if (to->to_initval > NFS_MAX_UDP_TIMEOUT)
+                       to->to_initval = NFS_MAX_UDP_TIMEOUT;
+               to->to_maxval = NFS_MAX_UDP_TIMEOUT;
+               to->to_exponential = 1;
+               break;
+       }
+}
+
 /*
  * Create an RPC client handle.
  */
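
The nfs_init_timeout_values() helper added above folds the mount options into RPC timeout parameters: timeo arrives in tenths of a second (hence the * HZ / 10), retrans defaults to 2, TCP retransmissions back off linearly from the initial value, and UDP ones back off exponentially up to the protocol's ceiling. A minimal user-space sketch of the same arithmetic follows; HZ and the two clamp values are placeholder assumptions standing in for the kernel's NFS_MAX_TCP_TIMEOUT/NFS_MAX_UDP_TIMEOUT constants, not their real values.

#include <stdio.h>

#define HZ              100           /* assumed tick rate, for illustration only */
#define MAX_TCP_TIMEOUT (600 * HZ)    /* stand-in for NFS_MAX_TCP_TIMEOUT */
#define MAX_UDP_TIMEOUT (60 * HZ)     /* stand-in for NFS_MAX_UDP_TIMEOUT */

struct timeo_params {
	unsigned long initval;    /* first timeout, in ticks */
	unsigned long maxval;     /* ceiling for the backed-off timeout */
	unsigned long increment;  /* linear step (TCP only) */
	unsigned int  retries;
	int           exponential;
};

/* Same arithmetic as the hunk above: timeo is in tenths of a second,
 * retrans defaults to 2; TCP backs off linearly, UDP exponentially. */
static void init_timeo(struct timeo_params *to, int tcp,
		       unsigned int timeo, unsigned int retrans)
{
	to->initval = timeo * HZ / 10;
	to->retries = retrans ? retrans : 2;

	if (tcp) {
		if (!to->initval)
			to->initval = 60 * HZ;
		if (to->initval > MAX_TCP_TIMEOUT)
			to->initval = MAX_TCP_TIMEOUT;
		to->increment = to->initval;
		to->maxval = to->initval + to->increment * to->retries;
		to->exponential = 0;
	} else {
		if (!to->initval)
			to->initval = 11 * HZ / 10;
		if (to->initval > MAX_UDP_TIMEOUT)
			to->initval = MAX_UDP_TIMEOUT;
		to->increment = 0;
		to->maxval = MAX_UDP_TIMEOUT;
		to->exponential = 1;
	}
}

int main(void)
{
	struct timeo_params to;

	/* e.g. "timeo=7,retrans=3" over UDP: 0.7 s initial timeout */
	init_timeo(&to, 0, 7, 3);
	printf("udp: initval=%lu maxval=%lu retries=%u exponential=%d\n",
	       to.initval, to.maxval, to.retries, to.exponential);

	/* TCP with defaults (timeo=0 -> 60 s initial, linear back-off) */
	init_timeo(&to, 1, 0, 0);
	printf("tcp: initval=%lu maxval=%lu increment=%lu retries=%u\n",
	       to.initval, to.maxval, to.increment, to.retries);
	return 0;
}
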
@@ -367,22 +396,12 @@ nfs_create_client(struct nfs_server *server, const struct nfs_mount_data *data)
        struct rpc_timeout      timeparms;
        struct rpc_xprt         *xprt = NULL;
        struct rpc_clnt         *clnt = NULL;
-       int                     tcp   = (data->flags & NFS_MOUNT_TCP);
-
-       /* Initialize timeout values */
-       timeparms.to_initval = data->timeo * HZ / 10;
-       timeparms.to_retries = data->retrans;
-       timeparms.to_maxval  = tcp ? RPC_MAX_TCP_TIMEOUT : RPC_MAX_UDP_TIMEOUT;
-       timeparms.to_exponential = 1;
+       int                     proto = (data->flags & NFS_MOUNT_TCP) ? IPPROTO_TCP : IPPROTO_UDP;
 
-       if (!timeparms.to_initval)
-               timeparms.to_initval = (tcp ? 600 : 11) * HZ / 10;
-       if (!timeparms.to_retries)
-               timeparms.to_retries = 5;
+       nfs_init_timeout_values(&timeparms, proto, data->timeo, data->retrans);
 
        /* create transport and client */
-       xprt = xprt_create_proto(tcp ? IPPROTO_TCP : IPPROTO_UDP,
-                                &server->addr, &timeparms);
+       xprt = xprt_create_proto(proto, &server->addr, &timeparms);
        if (IS_ERR(xprt)) {
                dprintk("%s: cannot create RPC transport. Error = %ld\n",
                                __FUNCTION__, PTR_ERR(xprt));
@@ -576,7 +595,6 @@ static int nfs_show_options(struct seq_file *m, struct vfsmount *mnt)
                { NFS_MOUNT_SOFT, ",soft", ",hard" },
                { NFS_MOUNT_INTR, ",intr", "" },
                { NFS_MOUNT_POSIX, ",posix", "" },
-               { NFS_MOUNT_TCP, ",tcp", ",udp" },
                { NFS_MOUNT_NOCTO, ",nocto", "" },
                { NFS_MOUNT_NOAC, ",noac", "" },
                { NFS_MOUNT_NONLM, ",nolock", ",lock" },
@@ -585,6 +603,8 @@ static int nfs_show_options(struct seq_file *m, struct vfsmount *mnt)
        };
        struct proc_nfs_info *nfs_infop;
        struct nfs_server *nfss = NFS_SB(mnt->mnt_sb);
+       char buf[12];
+       char *proto;
 
        seq_printf(m, ",v%d", nfss->rpc_ops->version);
        seq_printf(m, ",rsize=%d", nfss->rsize);
@@ -603,6 +623,18 @@ static int nfs_show_options(struct seq_file *m, struct vfsmount *mnt)
                else
                        seq_puts(m, nfs_infop->nostr);
        }
+       switch (nfss->client->cl_xprt->prot) {
+               case IPPROTO_TCP:
+                       proto = "tcp";
+                       break;
+               case IPPROTO_UDP:
+                       proto = "udp";
+                       break;
+               default:
+                       snprintf(buf, sizeof(buf), "%u", nfss->client->cl_xprt->prot);
+                       proto = buf;
+       }
+       seq_printf(m, ",proto=%s", proto);
        seq_puts(m, ",addr=");
        seq_escape(m, nfss->hostname, " \t\n\\");
        return 0;
@@ -753,7 +785,8 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
                else
                        init_special_inode(inode, inode->i_mode, fattr->rdev);
 
-               nfsi->read_cache_jiffies = fattr->timestamp;
+               nfsi->read_cache_jiffies = fattr->time_start;
+               nfsi->last_updated = jiffies;
                inode->i_atime = fattr->atime;
                inode->i_mtime = fattr->mtime;
                inode->i_ctime = fattr->ctime;
@@ -821,6 +854,11 @@ nfs_setattr(struct dentry *dentry, struct iattr *attr)
                        filemap_fdatawait(inode->i_mapping);
                nfs_wb_all(inode);
        }
+       /*
+        * Return any delegations if we're going to change ACLs
+        */
+       if ((attr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0)
+               nfs_inode_return_delegation(inode);
        error = NFS_PROTO(inode)->setattr(dentry, &fattr, attr);
        if (error == 0)
                nfs_refresh_inode(inode, &fattr);
@@ -1019,15 +1057,11 @@ int nfs_open(struct inode *inode, struct file *filp)
        ctx->mode = filp->f_mode;
        nfs_file_set_open_context(filp, ctx);
        put_nfs_open_context(ctx);
-       if ((filp->f_mode & FMODE_WRITE) != 0)
-               nfs_begin_data_update(inode);
        return 0;
 }
 
 int nfs_release(struct inode *inode, struct file *filp)
 {
-       if ((filp->f_mode & FMODE_WRITE) != 0)
-               nfs_end_data_update(inode);
        nfs_file_clear_open_context(filp);
        return 0;
 }
@@ -1083,14 +1117,15 @@ __nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
                goto out;
        }
 
+       spin_lock(&inode->i_lock);
        status = nfs_update_inode(inode, &fattr, verifier);
        if (status) {
+               spin_unlock(&inode->i_lock);
                dfprintk(PAGECACHE, "nfs_revalidate_inode: (%s/%Ld) refresh failed, error=%d\n",
                         inode->i_sb->s_id,
                         (long long)NFS_FILEID(inode), status);
                goto out;
        }
-       spin_lock(&inode->i_lock);
        cache_validity = nfsi->cache_validity;
        nfsi->cache_validity &= ~NFS_INO_REVAL_PAGECACHE;
 
@@ -1098,7 +1133,7 @@ __nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
         * We may need to keep the attributes marked as invalid if
         * we raced with nfs_end_attr_update().
         */
-       if (verifier == nfsi->cache_change_attribute)
+       if (time_after_eq(verifier, nfsi->cache_change_attribute))
                nfsi->cache_validity &= ~(NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ATIME);
        spin_unlock(&inode->i_lock);
 
@@ -1165,7 +1200,7 @@ void nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping)
                if (S_ISDIR(inode->i_mode)) {
                        memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf));
                        /* This ensures we revalidate child dentries */
-                       nfsi->cache_change_attribute++;
+                       nfsi->cache_change_attribute = jiffies;
                }
                spin_unlock(&inode->i_lock);
 
@@ -1197,20 +1232,19 @@ void nfs_end_data_update(struct inode *inode)
        struct nfs_inode *nfsi = NFS_I(inode);
 
        if (!nfs_have_delegation(inode, FMODE_READ)) {
-               /* Mark the attribute cache for revalidation */
-               spin_lock(&inode->i_lock);
-               nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
-               /* Directories and symlinks: invalidate page cache too */
-               if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
+               /* Directories and symlinks: invalidate page cache */
+               if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) {
+                       spin_lock(&inode->i_lock);
                        nfsi->cache_validity |= NFS_INO_INVALID_DATA;
-               spin_unlock(&inode->i_lock);
+                       spin_unlock(&inode->i_lock);
+               }
        }
-       nfsi->cache_change_attribute ++;
+       nfsi->cache_change_attribute = jiffies;
        atomic_dec(&nfsi->data_updates);
 }
 
 /**
- * nfs_refresh_inode - verify consistency of the inode attribute cache
+ * nfs_check_inode_attributes - verify consistency of the inode attribute cache
  * @inode - pointer to inode
  * @fattr - updated attributes
  *
@@ -1218,13 +1252,12 @@ void nfs_end_data_update(struct inode *inode)
  * so that fattr carries weak cache consistency data, then it may
  * also update the ctime/mtime/change_attribute.
  */
-int nfs_refresh_inode(struct inode *inode, struct nfs_fattr *fattr)
+static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fattr)
 {
        struct nfs_inode *nfsi = NFS_I(inode);
        loff_t cur_size, new_isize;
        int data_unstable;
 
-       spin_lock(&inode->i_lock);
 
        /* Are we in the process of updating data on the server? */
        data_unstable = nfs_caches_unstable(inode);
@@ -1288,11 +1321,67 @@ int nfs_refresh_inode(struct inode *inode, struct nfs_fattr *fattr)
        if (!timespec_equal(&inode->i_atime, &fattr->atime))
                nfsi->cache_validity |= NFS_INO_INVALID_ATIME;
 
-       nfsi->read_cache_jiffies = fattr->timestamp;
-       spin_unlock(&inode->i_lock);
+       nfsi->read_cache_jiffies = fattr->time_start;
        return 0;
 }
 
+/**
+ * nfs_refresh_inode - try to update the inode attribute cache
+ * @inode - pointer to inode
+ * @fattr - updated attributes
+ *
+ * Check that an RPC call that returned attributes has not overlapped with
+ * other recent updates of the inode metadata, then decide whether it is
+ * safe to do a full update of the inode attributes, or whether just to
+ * call nfs_check_inode_attributes.
+ */
+int nfs_refresh_inode(struct inode *inode, struct nfs_fattr *fattr)
+{
+       struct nfs_inode *nfsi = NFS_I(inode);
+       int status;
+
+       if ((fattr->valid & NFS_ATTR_FATTR) == 0)
+               return 0;
+       spin_lock(&inode->i_lock);
+       nfsi->cache_validity &= ~NFS_INO_REVAL_PAGECACHE;
+       if (nfs_verify_change_attribute(inode, fattr->time_start))
+               nfsi->cache_validity &= ~(NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ATIME);
+       if (time_after(fattr->time_start, nfsi->last_updated))
+               status = nfs_update_inode(inode, fattr, fattr->time_start);
+       else
+               status = nfs_check_inode_attributes(inode, fattr);
+
+       spin_unlock(&inode->i_lock);
+       return status;
+}
+
+/**
+ * nfs_post_op_update_inode - try to update the inode attribute cache
+ * @inode - pointer to inode
+ * @fattr - updated attributes
+ *
+ * After an operation that has changed the inode metadata, mark the
+ * attribute cache as being invalid, then try to update it.
+ */
+int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+{
+       struct nfs_inode *nfsi = NFS_I(inode);
+       int status = 0;
+
+       spin_lock(&inode->i_lock);
+       if (unlikely((fattr->valid & NFS_ATTR_FATTR) == 0)) {
+               nfsi->cache_validity |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS;
+               goto out;
+       }
+       status = nfs_update_inode(inode, fattr, fattr->time_start);
+       if (time_after_eq(fattr->time_start, nfsi->cache_change_attribute))
+               nfsi->cache_validity &= ~(NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ATIME|NFS_INO_REVAL_PAGECACHE);
+       nfsi->cache_change_attribute = jiffies;
+out:
+       spin_unlock(&inode->i_lock);
+       return status;
+}
+
 /*
  * Many nfs protocol calls return the new file attributes after
  * an operation.  Here we update the inode to reflect the state
@@ -1328,20 +1417,17 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr, unsign
                goto out_err;
        }
 
-       spin_lock(&inode->i_lock);
-
        /*
         * Make sure the inode's type hasn't changed.
         */
-       if ((inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT)) {
-               spin_unlock(&inode->i_lock);
+       if ((inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT))
                goto out_changed;
-       }
 
        /*
         * Update the read time so we don't revalidate too often.
         */
-       nfsi->read_cache_jiffies = fattr->timestamp;
+       nfsi->read_cache_jiffies = fattr->time_start;
+       nfsi->last_updated = jiffies;
 
        /* Are we racing with known updates of the metadata on the server? */
        data_unstable = ! (nfs_verify_change_attribute(inode, verifier) ||
@@ -1354,7 +1440,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr, unsign
                /* Do we perhaps have any outstanding writes? */
                if (nfsi->npages == 0) {
                        /* No, but did we race with nfs_end_data_update()? */
-                       if (verifier  ==  nfsi->cache_change_attribute) {
+                       if (time_after_eq(verifier,  nfsi->cache_change_attribute)) {
                                inode->i_size = new_isize;
                                invalid |= NFS_INO_INVALID_DATA;
                        }
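
Several hunks above replace exact equality tests on cache_change_attribute with time_after_eq(), because the field now holds a jiffies timestamp rather than a monotonically bumped counter. Comparing a wrapping tick counter with a plain ">=" misorders values across the wrap point; the signed-difference trick used by the kernel's time_after()/time_after_eq() macros does not. The small stand-alone illustration below mirrors that idea (the macro bodies are a sketch of the technique, not the kernel's exact definitions).

#include <stdio.h>

typedef unsigned long jiffies_t;

#define time_after(a, b)     ((long)(b) - (long)(a) < 0)
#define time_after_eq(a, b)  ((long)(a) - (long)(b) >= 0)

int main(void)
{
	jiffies_t before_wrap = (jiffies_t)-10;  /* shortly before the counter wraps */
	jiffies_t after_wrap  = 5;               /* shortly after it wrapped past 0 */

	/* A plain ">=" gets this wrong: numerically, after_wrap < before_wrap. */
	printf("plain >=      : %d\n", after_wrap >= before_wrap);              /* 0 */
	printf("time_after_eq : %d\n", time_after_eq(after_wrap, before_wrap)); /* 1 */
	return 0;
}
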
@@ -1430,7 +1516,6 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr, unsign
        if (!nfs_have_delegation(inode, FMODE_READ))
                nfsi->cache_validity |= invalid;
 
-       spin_unlock(&inode->i_lock);
        return 0;
  out_changed:
        /*
@@ -1639,8 +1724,7 @@ static void nfs4_clear_inode(struct inode *inode)
        struct nfs_inode *nfsi = NFS_I(inode);
 
        /* If we are holding a delegation, return it! */
-       if (nfsi->delegation != NULL)
-               nfs_inode_return_delegation(inode);
+       nfs_inode_return_delegation(inode);
        /* First call standard NFS clear_inode() code */
        nfs_clear_inode(inode);
        /* Now clear out any remaining state */
@@ -1669,7 +1753,7 @@ static int nfs4_fill_super(struct super_block *sb, struct nfs4_mount_data *data,
        struct rpc_clnt *clnt = NULL;
        struct rpc_timeout timeparms;
        rpc_authflavor_t authflavour;
-       int proto, err = -EIO;
+       int err = -EIO;
 
        sb->s_blocksize_bits = 0;
        sb->s_blocksize = 0;
@@ -1687,30 +1771,8 @@ static int nfs4_fill_super(struct super_block *sb, struct nfs4_mount_data *data,
        server->acdirmax = data->acdirmax*HZ;
 
        server->rpc_ops = &nfs_v4_clientops;
-       /* Initialize timeout values */
-
-       timeparms.to_initval = data->timeo * HZ / 10;
-       timeparms.to_retries = data->retrans;
-       timeparms.to_exponential = 1;
-       if (!timeparms.to_retries)
-               timeparms.to_retries = 5;
 
-       proto = data->proto;
-       /* Which IP protocol do we use? */
-       switch (proto) {
-       case IPPROTO_TCP:
-               timeparms.to_maxval  = RPC_MAX_TCP_TIMEOUT;
-               if (!timeparms.to_initval)
-                       timeparms.to_initval = 600 * HZ / 10;
-               break;
-       case IPPROTO_UDP:
-               timeparms.to_maxval  = RPC_MAX_UDP_TIMEOUT;
-               if (!timeparms.to_initval)
-                       timeparms.to_initval = 11 * HZ / 10;
-               break;
-       default:
-               return -EINVAL;
-       }
+       nfs_init_timeout_values(&timeparms, data->proto, data->timeo, data->retrans);
 
        clp = nfs4_get_client(&server->addr.sin_addr);
        if (!clp) {
@@ -1735,7 +1797,7 @@ static int nfs4_fill_super(struct super_block *sb, struct nfs4_mount_data *data,
 
        down_write(&clp->cl_sem);
        if (IS_ERR(clp->cl_rpcclient)) {
-               xprt = xprt_create_proto(proto, &server->addr, &timeparms);
+               xprt = xprt_create_proto(data->proto, &server->addr, &timeparms);
                if (IS_ERR(xprt)) {
                        up_write(&clp->cl_sem);
                        err = PTR_ERR(xprt);
index d91b69044a4d0d9ace47d2759cec997b6b3760fb..59049e864ca7818288559a91625526a45ec081db 100644 (file)
@@ -143,7 +143,6 @@ xdr_decode_fattr(u32 *p, struct nfs_fattr *fattr)
                fattr->mode = (fattr->mode & ~S_IFMT) | S_IFIFO;
                fattr->rdev = 0;
        }
-       fattr->timestamp = jiffies;
        return p;
 }
 
index edc95514046d50415b127988fc36ae577b8b3d97..92c870d19ccdbe61a44d4dcb110c83ef9067e3da 100644 (file)
@@ -78,7 +78,7 @@ nfs3_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
        int     status;
 
        dprintk("%s: call  fsinfo\n", __FUNCTION__);
-       info->fattr->valid = 0;
+       nfs_fattr_init(info->fattr);
        status = rpc_call(server->client_sys, NFS3PROC_FSINFO, fhandle, info, 0);
        dprintk("%s: reply fsinfo: %d\n", __FUNCTION__, status);
        if (!(info->fattr->valid & NFS_ATTR_FATTR)) {
@@ -98,7 +98,7 @@ nfs3_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
        int     status;
 
        dprintk("NFS call  getattr\n");
-       fattr->valid = 0;
+       nfs_fattr_init(fattr);
        status = rpc_call(server->client, NFS3PROC_GETATTR,
                          fhandle, fattr, 0);
        dprintk("NFS reply getattr: %d\n", status);
@@ -117,7 +117,7 @@ nfs3_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
        int     status;
 
        dprintk("NFS call  setattr\n");
-       fattr->valid = 0;
+       nfs_fattr_init(fattr);
        status = rpc_call(NFS_CLIENT(inode), NFS3PROC_SETATTR, &arg, fattr, 0);
        if (status == 0)
                nfs_setattr_update_inode(inode, sattr);
@@ -143,8 +143,8 @@ nfs3_proc_lookup(struct inode *dir, struct qstr *name,
        int                     status;
 
        dprintk("NFS call  lookup %s\n", name->name);
-       dir_attr.valid = 0;
-       fattr->valid = 0;
+       nfs_fattr_init(&dir_attr);
+       nfs_fattr_init(fattr);
        status = rpc_call(NFS_CLIENT(dir), NFS3PROC_LOOKUP, &arg, &res, 0);
        if (status >= 0 && !(fattr->valid & NFS_ATTR_FATTR))
                status = rpc_call(NFS_CLIENT(dir), NFS3PROC_GETATTR,
@@ -174,7 +174,6 @@ static int nfs3_proc_access(struct inode *inode, struct nfs_access_entry *entry)
        int status;
 
        dprintk("NFS call  access\n");
-       fattr.valid = 0;
 
        if (mode & MAY_READ)
                arg.access |= NFS3_ACCESS_READ;
@@ -189,6 +188,7 @@ static int nfs3_proc_access(struct inode *inode, struct nfs_access_entry *entry)
                if (mode & MAY_EXEC)
                        arg.access |= NFS3_ACCESS_EXECUTE;
        }
+       nfs_fattr_init(&fattr);
        status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
        nfs_refresh_inode(inode, &fattr);
        if (status == 0) {
@@ -217,7 +217,7 @@ static int nfs3_proc_readlink(struct inode *inode, struct page *page,
        int                     status;
 
        dprintk("NFS call  readlink\n");
-       fattr.valid = 0;
+       nfs_fattr_init(&fattr);
        status = rpc_call(NFS_CLIENT(inode), NFS3PROC_READLINK,
                          &args, &fattr, 0);
        nfs_refresh_inode(inode, &fattr);
@@ -240,7 +240,7 @@ static int nfs3_proc_read(struct nfs_read_data *rdata)
 
        dprintk("NFS call  read %d @ %Ld\n", rdata->args.count,
                        (long long) rdata->args.offset);
-       fattr->valid = 0;
+       nfs_fattr_init(fattr);
        status = rpc_call_sync(NFS_CLIENT(inode), &msg, flags);
        if (status >= 0)
                nfs_refresh_inode(inode, fattr);
@@ -263,10 +263,10 @@ static int nfs3_proc_write(struct nfs_write_data *wdata)
 
        dprintk("NFS call  write %d @ %Ld\n", wdata->args.count,
                        (long long) wdata->args.offset);
-       fattr->valid = 0;
+       nfs_fattr_init(fattr);
        status = rpc_call_sync(NFS_CLIENT(inode), &msg, rpcflags);
        if (status >= 0)
-               nfs_refresh_inode(inode, fattr);
+               nfs_post_op_update_inode(inode, fattr);
        dprintk("NFS reply write: %d\n", status);
        return status < 0? status : wdata->res.count;
 }
@@ -285,10 +285,10 @@ static int nfs3_proc_commit(struct nfs_write_data *cdata)
 
        dprintk("NFS call  commit %d @ %Ld\n", cdata->args.count,
                        (long long) cdata->args.offset);
-       fattr->valid = 0;
+       nfs_fattr_init(fattr);
        status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
        if (status >= 0)
-               nfs_refresh_inode(inode, fattr);
+               nfs_post_op_update_inode(inode, fattr);
        dprintk("NFS reply commit: %d\n", status);
        return status;
 }
@@ -299,7 +299,7 @@ static int nfs3_proc_commit(struct nfs_write_data *cdata)
  */
 static int
 nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
-                int flags)
+                int flags, struct nameidata *nd)
 {
        struct nfs_fh           fhandle;
        struct nfs_fattr        fattr;
@@ -329,10 +329,10 @@ nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
        sattr->ia_mode &= ~current->fs->umask;
 
 again:
-       dir_attr.valid = 0;
-       fattr.valid = 0;
+       nfs_fattr_init(&dir_attr);
+       nfs_fattr_init(&fattr);
        status = rpc_call(NFS_CLIENT(dir), NFS3PROC_CREATE, &arg, &res, 0);
-       nfs_refresh_inode(dir, &dir_attr);
+       nfs_post_op_update_inode(dir, &dir_attr);
 
        /* If the server doesn't support the exclusive creation semantics,
         * try again with simple 'guarded' mode. */
@@ -401,9 +401,9 @@ nfs3_proc_remove(struct inode *dir, struct qstr *name)
        int                     status;
 
        dprintk("NFS call  remove %s\n", name->name);
-       dir_attr.valid = 0;
+       nfs_fattr_init(&dir_attr);
        status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
-       nfs_refresh_inode(dir, &dir_attr);
+       nfs_post_op_update_inode(dir, &dir_attr);
        dprintk("NFS reply remove: %d\n", status);
        return status;
 }
@@ -422,7 +422,7 @@ nfs3_proc_unlink_setup(struct rpc_message *msg, struct dentry *dir, struct qstr
        ptr->arg.fh = NFS_FH(dir->d_inode);
        ptr->arg.name = name->name;
        ptr->arg.len = name->len;
-       ptr->res.valid = 0;
+       nfs_fattr_init(&ptr->res);
        msg->rpc_proc = &nfs3_procedures[NFS3PROC_REMOVE];
        msg->rpc_argp = &ptr->arg;
        msg->rpc_resp = &ptr->res;
@@ -439,7 +439,7 @@ nfs3_proc_unlink_done(struct dentry *dir, struct rpc_task *task)
                return 1;
        if (msg->rpc_argp) {
                dir_attr = (struct nfs_fattr*)msg->rpc_resp;
-               nfs_refresh_inode(dir->d_inode, dir_attr);
+               nfs_post_op_update_inode(dir->d_inode, dir_attr);
                kfree(msg->rpc_argp);
        }
        return 0;
@@ -465,11 +465,11 @@ nfs3_proc_rename(struct inode *old_dir, struct qstr *old_name,
        int                     status;
 
        dprintk("NFS call  rename %s -> %s\n", old_name->name, new_name->name);
-       old_dir_attr.valid = 0;
-       new_dir_attr.valid = 0;
+       nfs_fattr_init(&old_dir_attr);
+       nfs_fattr_init(&new_dir_attr);
        status = rpc_call(NFS_CLIENT(old_dir), NFS3PROC_RENAME, &arg, &res, 0);
-       nfs_refresh_inode(old_dir, &old_dir_attr);
-       nfs_refresh_inode(new_dir, &new_dir_attr);
+       nfs_post_op_update_inode(old_dir, &old_dir_attr);
+       nfs_post_op_update_inode(new_dir, &new_dir_attr);
        dprintk("NFS reply rename: %d\n", status);
        return status;
 }
@@ -491,11 +491,11 @@ nfs3_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
        int                     status;
 
        dprintk("NFS call  link %s\n", name->name);
-       dir_attr.valid = 0;
-       fattr.valid = 0;
+       nfs_fattr_init(&dir_attr);
+       nfs_fattr_init(&fattr);
        status = rpc_call(NFS_CLIENT(inode), NFS3PROC_LINK, &arg, &res, 0);
-       nfs_refresh_inode(dir, &dir_attr);
-       nfs_refresh_inode(inode, &fattr);
+       nfs_post_op_update_inode(dir, &dir_attr);
+       nfs_post_op_update_inode(inode, &fattr);
        dprintk("NFS reply link: %d\n", status);
        return status;
 }
@@ -524,10 +524,10 @@ nfs3_proc_symlink(struct inode *dir, struct qstr *name, struct qstr *path,
        if (path->len > NFS3_MAXPATHLEN)
                return -ENAMETOOLONG;
        dprintk("NFS call  symlink %s -> %s\n", name->name, path->name);
-       dir_attr.valid = 0;
-       fattr->valid = 0;
+       nfs_fattr_init(&dir_attr);
+       nfs_fattr_init(fattr);
        status = rpc_call(NFS_CLIENT(dir), NFS3PROC_SYMLINK, &arg, &res, 0);
-       nfs_refresh_inode(dir, &dir_attr);
+       nfs_post_op_update_inode(dir, &dir_attr);
        dprintk("NFS reply symlink: %d\n", status);
        return status;
 }
@@ -552,13 +552,13 @@ nfs3_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
        int status;
 
        dprintk("NFS call  mkdir %s\n", dentry->d_name.name);
-       dir_attr.valid = 0;
-       fattr.valid = 0;
 
        sattr->ia_mode &= ~current->fs->umask;
 
+       nfs_fattr_init(&dir_attr);
+       nfs_fattr_init(&fattr);
        status = rpc_call(NFS_CLIENT(dir), NFS3PROC_MKDIR, &arg, &res, 0);
-       nfs_refresh_inode(dir, &dir_attr);
+       nfs_post_op_update_inode(dir, &dir_attr);
        if (status != 0)
                goto out;
        status = nfs_instantiate(dentry, &fhandle, &fattr);
@@ -582,9 +582,9 @@ nfs3_proc_rmdir(struct inode *dir, struct qstr *name)
        int                     status;
 
        dprintk("NFS call  rmdir %s\n", name->name);
-       dir_attr.valid = 0;
+       nfs_fattr_init(&dir_attr);
        status = rpc_call(NFS_CLIENT(dir), NFS3PROC_RMDIR, &arg, &dir_attr, 0);
-       nfs_refresh_inode(dir, &dir_attr);
+       nfs_post_op_update_inode(dir, &dir_attr);
        dprintk("NFS reply rmdir: %d\n", status);
        return status;
 }
@@ -634,7 +634,7 @@ nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
        dprintk("NFS call  readdir%s %d\n",
                        plus? "plus" : "", (unsigned int) cookie);
 
-       dir_attr.valid = 0;
+       nfs_fattr_init(&dir_attr);
        status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
        nfs_refresh_inode(dir, &dir_attr);
        dprintk("NFS reply readdir: %d\n", status);
@@ -676,10 +676,10 @@ nfs3_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
 
        sattr->ia_mode &= ~current->fs->umask;
 
-       dir_attr.valid = 0;
-       fattr.valid = 0;
+       nfs_fattr_init(&dir_attr);
+       nfs_fattr_init(&fattr);
        status = rpc_call(NFS_CLIENT(dir), NFS3PROC_MKNOD, &arg, &res, 0);
-       nfs_refresh_inode(dir, &dir_attr);
+       nfs_post_op_update_inode(dir, &dir_attr);
        if (status != 0)
                goto out;
        status = nfs_instantiate(dentry, &fh, &fattr);
@@ -698,7 +698,7 @@ nfs3_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
        int     status;
 
        dprintk("NFS call  fsstat\n");
-       stat->fattr->valid = 0;
+       nfs_fattr_init(stat->fattr);
        status = rpc_call(server->client, NFS3PROC_FSSTAT, fhandle, stat, 0);
        dprintk("NFS reply statfs: %d\n", status);
        return status;
@@ -711,7 +711,7 @@ nfs3_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
        int     status;
 
        dprintk("NFS call  fsinfo\n");
-       info->fattr->valid = 0;
+       nfs_fattr_init(info->fattr);
        status = rpc_call(server->client_sys, NFS3PROC_FSINFO, fhandle, info, 0);
        dprintk("NFS reply fsinfo: %d\n", status);
        return status;
@@ -724,7 +724,7 @@ nfs3_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
        int     status;
 
        dprintk("NFS call  pathconf\n");
-       info->fattr->valid = 0;
+       nfs_fattr_init(info->fattr);
        status = rpc_call(server->client, NFS3PROC_PATHCONF, fhandle, info, 0);
        dprintk("NFS reply pathconf: %d\n", status);
        return status;
@@ -735,7 +735,7 @@ extern u32 *nfs3_decode_dirent(u32 *, struct nfs_entry *, int);
 static void
 nfs3_read_done(struct rpc_task *task)
 {
-       struct nfs_write_data *data = (struct nfs_write_data *) task->tk_calldata;
+       struct nfs_read_data *data = (struct nfs_read_data *) task->tk_calldata;
 
        if (nfs3_async_handle_jukebox(task))
                return;
@@ -775,7 +775,7 @@ nfs3_write_done(struct rpc_task *task)
                return;
        data = (struct nfs_write_data *)task->tk_calldata;
        if (task->tk_status >= 0)
-               nfs_refresh_inode(data->inode, data->res.fattr);
+               nfs_post_op_update_inode(data->inode, data->res.fattr);
        nfs_writeback_done(task);
 }
 
@@ -819,7 +819,7 @@ nfs3_commit_done(struct rpc_task *task)
                return;
        data = (struct nfs_write_data *)task->tk_calldata;
        if (task->tk_status >= 0)
-               nfs_refresh_inode(data->inode, data->res.fattr);
+               nfs_post_op_update_inode(data->inode, data->res.fattr);
        nfs_commit_done(task);
 }
 
index db4a904810a460f0dd6090b12c65c7e5526ac499..0498bd36602cba22cef6cf1c760e645dfaa72318 100644 (file)
@@ -174,7 +174,6 @@ xdr_decode_fattr(u32 *p, struct nfs_fattr *fattr)
 
        /* Update the mode bits */
        fattr->valid |= (NFS_ATTR_FATTR | NFS_ATTR_FATTR_V3);
-       fattr->timestamp = jiffies;
        return p;
 }
 
index ec1a22d7b876694199c5d5b23fb3dc22b92b646a..78a53f5a9f18ae629606e1b9ddabb441baa72f28 100644 (file)
@@ -92,26 +92,51 @@ struct nfs4_client {
        unsigned char           cl_id_uniquifier;
 };
 
+/*
+ * struct rpc_sequence ensures that RPC calls are sent in the exact
+ * order that they appear on the list.
+ */
+struct rpc_sequence {
+       struct rpc_wait_queue   wait;   /* RPC call delay queue */
+       spinlock_t lock;                /* Protects the list */
+       struct list_head list;          /* Defines sequence of RPC calls */
+};
+
+#define NFS_SEQID_CONFIRMED 1
+struct nfs_seqid_counter {
+       struct rpc_sequence *sequence;
+       int flags;
+       u32 counter;
+};
+
+struct nfs_seqid {
+       struct nfs_seqid_counter *sequence;
+       struct list_head list;
+};
+
+static inline void nfs_confirm_seqid(struct nfs_seqid_counter *seqid, int status)
+{
+       if (seqid_mutating_err(-status))
+               seqid->flags |= NFS_SEQID_CONFIRMED;
+}
+
 /*
  * NFS4 state_owners and lock_owners are simply labels for ordered
  * sequences of RPC calls. Their sole purpose is to provide once-only
  * semantics by allowing the server to identify replayed requests.
- *
- * The ->so_sema is held during all state_owner seqid-mutating operations:
- * OPEN, OPEN_DOWNGRADE, and CLOSE. Its purpose is to properly serialize
- * so_seqid.
  */
 struct nfs4_state_owner {
+       spinlock_t           so_lock;
        struct list_head     so_list;    /* per-clientid list of state_owners */
        struct nfs4_client   *so_client;
        u32                  so_id;      /* 32-bit identifier, unique */
-       struct semaphore     so_sema;
-       u32                  so_seqid;   /* protected by so_sema */
        atomic_t             so_count;
 
        struct rpc_cred      *so_cred;   /* Associated cred */
        struct list_head     so_states;
        struct list_head     so_delegations;
+       struct nfs_seqid_counter so_seqid;
+       struct rpc_sequence  so_sequence;
 };
 
 /*
@@ -132,7 +157,7 @@ struct nfs4_lock_state {
        fl_owner_t              ls_owner;       /* POSIX lock owner */
 #define NFS_LOCK_INITIALIZED 1
        int                     ls_flags;
-       u32                     ls_seqid;
+       struct nfs_seqid_counter        ls_seqid;
        u32                     ls_id;
        nfs4_stateid            ls_stateid;
        atomic_t                ls_count;
@@ -153,7 +178,6 @@ struct nfs4_state {
        struct inode *inode;            /* Pointer to the inode */
 
        unsigned long flags;            /* Do we hold any locks? */
-       struct semaphore lock_sema;     /* Serializes file locking operations */
        spinlock_t state_lock;          /* Protects the lock_states list */
 
        nfs4_stateid stateid;
@@ -191,8 +215,8 @@ extern int nfs4_proc_setclientid_confirm(struct nfs4_client *);
 extern int nfs4_proc_async_renew(struct nfs4_client *);
 extern int nfs4_proc_renew(struct nfs4_client *);
 extern int nfs4_do_close(struct inode *inode, struct nfs4_state *state, mode_t mode);
-extern struct inode *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *);
-extern int nfs4_open_revalidate(struct inode *, struct dentry *, int);
+extern struct dentry *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *);
+extern int nfs4_open_revalidate(struct inode *, struct dentry *, int, struct nameidata *);
 
 extern struct nfs4_state_recovery_ops nfs4_reboot_recovery_ops;
 extern struct nfs4_state_recovery_ops nfs4_network_partition_recovery_ops;
@@ -224,12 +248,17 @@ extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state
 extern void nfs4_put_open_state(struct nfs4_state *);
 extern void nfs4_close_state(struct nfs4_state *, mode_t);
 extern struct nfs4_state *nfs4_find_state(struct inode *, struct rpc_cred *, mode_t mode);
-extern void nfs4_increment_seqid(int status, struct nfs4_state_owner *sp);
 extern void nfs4_schedule_state_recovery(struct nfs4_client *);
+extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
 extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
-extern void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *ls);
 extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t);
 
+extern struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter);
+extern int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task);
+extern void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid);
+extern void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid);
+extern void nfs_free_seqid(struct nfs_seqid *seqid);
+
 extern const nfs4_stateid zero_stateid;
 
 /* nfs4xdr.c */
index 9701ca8c942855a719ccd98559fd878c60ee636c..933e13b383f8f45d1f548d585127409bfe05d69e 100644 (file)
@@ -47,6 +47,7 @@
 #include <linux/nfs_page.h>
 #include <linux/smp_lock.h>
 #include <linux/namei.h>
+#include <linux/mount.h>
 
 #include "nfs4_fs.h"
 #include "delegation.h"
 #define NFS4_POLL_RETRY_MIN    (1*HZ)
 #define NFS4_POLL_RETRY_MAX    (15*HZ)
 
+static int _nfs4_proc_open_confirm(struct rpc_clnt *clnt, const struct nfs_fh *fh, struct nfs4_state_owner *sp, nfs4_stateid *stateid, struct nfs_seqid *seqid);
 static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
-static int nfs4_async_handle_error(struct rpc_task *, struct nfs_server *);
+static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *);
 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry);
-static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception);
+static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, struct nfs4_exception *exception);
 extern u32 *nfs4_decode_dirent(u32 *p, struct nfs_entry *entry, int plus);
 extern struct rpc_procinfo nfs4_procedures[];
 
@@ -185,8 +187,26 @@ static void update_changeattr(struct inode *inode, struct nfs4_change_info *cinf
 {
        struct nfs_inode *nfsi = NFS_I(inode);
 
+       spin_lock(&inode->i_lock);
+       nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
        if (cinfo->before == nfsi->change_attr && cinfo->atomic)
                nfsi->change_attr = cinfo->after;
+       spin_unlock(&inode->i_lock);
+}
+
+/* Helper for asynchronous RPC calls */
+static int nfs4_call_async(struct rpc_clnt *clnt, rpc_action tk_begin,
+               rpc_action tk_exit, void *calldata)
+{
+       struct rpc_task *task;
+
+       if (!(task = rpc_new_task(clnt, tk_exit, RPC_TASK_ASYNC)))
+               return -ENOMEM;
+
+       task->tk_calldata = calldata;
+       task->tk_action = tk_begin;
+       rpc_execute(task);
+       return 0;
 }
 
 static void update_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, int open_flags)
@@ -195,6 +215,7 @@ static void update_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid,
 
        open_flags &= (FMODE_READ|FMODE_WRITE);
        /* Protect against nfs4_find_state() */
+       spin_lock(&state->owner->so_lock);
        spin_lock(&inode->i_lock);
        state->state |= open_flags;
        /* NB! List reordering - see the reclaim code for why.  */
@@ -204,12 +225,12 @@ static void update_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid,
                state->nreaders++;
        memcpy(&state->stateid, stateid, sizeof(state->stateid));
        spin_unlock(&inode->i_lock);
+       spin_unlock(&state->owner->so_lock);
 }
 
 /*
  * OPEN_RECLAIM:
  *     reclaim state on the server after a reboot.
- *     Assumes caller is holding the sp->so_sem
  */
 static int _nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
 {
@@ -218,7 +239,6 @@ static int _nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *st
        struct nfs_delegation *delegation = NFS_I(inode)->delegation;
        struct nfs_openargs o_arg = {
                .fh = NFS_FH(inode),
-               .seqid = sp->so_seqid,
                .id = sp->so_id,
                .open_flags = state->state,
                .clientid = server->nfs4_state->cl_clientid,
@@ -245,8 +265,13 @@ static int _nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *st
                }
                o_arg.u.delegation_type = delegation->type;
        }
+       o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid);
+       if (o_arg.seqid == NULL)
+               return -ENOMEM;
        status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
-       nfs4_increment_seqid(status, sp);
+       /* Confirm the sequence as being established */
+       nfs_confirm_seqid(&sp->so_seqid, status);
+       nfs_increment_open_seqid(status, o_arg.seqid);
        if (status == 0) {
                memcpy(&state->stateid, &o_res.stateid, sizeof(state->stateid));
                if (o_res.delegation_type != 0) {
@@ -256,6 +281,7 @@ static int _nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *st
                                nfs_async_inode_return_delegation(inode, &o_res.stateid);
                }
        }
+       nfs_free_seqid(o_arg.seqid);
        clear_bit(NFS_DELEGATED_STATE, &state->flags);
        /* Ensure we update the inode attributes */
        NFS_CACHEINV(inode);
@@ -302,23 +328,35 @@ static int _nfs4_open_delegation_recall(struct dentry *dentry, struct nfs4_state
        };
        int status = 0;
 
-       down(&sp->so_sema);
        if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
                goto out;
        if (state->state == 0)
                goto out;
-       arg.seqid = sp->so_seqid;
+       arg.seqid = nfs_alloc_seqid(&sp->so_seqid);
+       status = -ENOMEM;
+       if (arg.seqid == NULL)
+               goto out;
        arg.open_flags = state->state;
        memcpy(arg.u.delegation.data, state->stateid.data, sizeof(arg.u.delegation.data));
        status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
-       nfs4_increment_seqid(status, sp);
+       nfs_increment_open_seqid(status, arg.seqid);
+       if (status != 0)
+               goto out_free;
+       if(res.rflags & NFS4_OPEN_RESULT_CONFIRM) {
+               status = _nfs4_proc_open_confirm(server->client, NFS_FH(inode),
+                               sp, &res.stateid, arg.seqid);
+               if (status != 0)
+                       goto out_free;
+       }
+       nfs_confirm_seqid(&sp->so_seqid, 0);
        if (status >= 0) {
                memcpy(state->stateid.data, res.stateid.data,
                                sizeof(state->stateid.data));
                clear_bit(NFS_DELEGATED_STATE, &state->flags);
        }
+out_free:
+       nfs_free_seqid(arg.seqid);
 out:
-       up(&sp->so_sema);
        dput(parent);
        return status;
 }
@@ -345,11 +383,11 @@ int nfs4_open_delegation_recall(struct dentry *dentry, struct nfs4_state *state)
        return err;
 }
 
-static inline int _nfs4_proc_open_confirm(struct rpc_clnt *clnt, const struct nfs_fh *fh, struct nfs4_state_owner *sp, nfs4_stateid *stateid)
+static int _nfs4_proc_open_confirm(struct rpc_clnt *clnt, const struct nfs_fh *fh, struct nfs4_state_owner *sp, nfs4_stateid *stateid, struct nfs_seqid *seqid)
 {
        struct nfs_open_confirmargs arg = {
                .fh             = fh,
-               .seqid          = sp->so_seqid,
+               .seqid          = seqid,
                .stateid        = *stateid,
        };
        struct nfs_open_confirmres res;
@@ -362,7 +400,9 @@ static inline int _nfs4_proc_open_confirm(struct rpc_clnt *clnt, const struct nf
        int status;
 
        status = rpc_call_sync(clnt, &msg, RPC_TASK_NOINTR);
-       nfs4_increment_seqid(status, sp);
+       /* Confirm the sequence as being established */
+       nfs_confirm_seqid(&sp->so_seqid, status);
+       nfs_increment_open_seqid(status, seqid);
        if (status >= 0)
                memcpy(stateid, &res.stateid, sizeof(*stateid));
        return status;
@@ -380,21 +420,41 @@ static int _nfs4_proc_open(struct inode *dir, struct nfs4_state_owner  *sp, stru
        int status;
 
        /* Update sequence id. The caller must serialize! */
-       o_arg->seqid = sp->so_seqid;
        o_arg->id = sp->so_id;
        o_arg->clientid = sp->so_client->cl_clientid;
 
        status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
-       nfs4_increment_seqid(status, sp);
+       if (status == 0) {
+               /* OPEN on anything except a regular file is disallowed in NFSv4 */
+               switch (o_res->f_attr->mode & S_IFMT) {
+                       case S_IFREG:
+                               break;
+                       case S_IFLNK:
+                               status = -ELOOP;
+                               break;
+                       case S_IFDIR:
+                               status = -EISDIR;
+                               break;
+                       default:
+                               status = -ENOTDIR;
+               }
+       }
+
+       nfs_increment_open_seqid(status, o_arg->seqid);
        if (status != 0)
                goto out;
-       update_changeattr(dir, &o_res->cinfo);
+       if (o_arg->open_flags & O_CREAT) {
+               update_changeattr(dir, &o_res->cinfo);
+               nfs_post_op_update_inode(dir, o_res->dir_attr);
+       } else
+               nfs_refresh_inode(dir, o_res->dir_attr);
        if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
                status = _nfs4_proc_open_confirm(server->client, &o_res->fh,
-                               sp, &o_res->stateid);
+                               sp, &o_res->stateid, o_arg->seqid);
                if (status != 0)
                        goto out;
        }
+       nfs_confirm_seqid(&sp->so_seqid, 0);
        if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
                status = server->rpc_ops->getattr(server, &o_res->fh, o_res->f_attr);
 out:
@@ -441,9 +501,7 @@ static int _nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st
        struct inode *inode = state->inode;
        struct nfs_server *server = NFS_SERVER(dir);
        struct nfs_delegation *delegation = NFS_I(inode)->delegation;
-       struct nfs_fattr        f_attr = {
-               .valid = 0,
-       };
+       struct nfs_fattr f_attr, dir_attr;
        struct nfs_openargs o_arg = {
                .fh = NFS_FH(dir),
                .open_flags = state->state,
@@ -453,6 +511,7 @@ static int _nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st
        };
        struct nfs_openres o_res = {
                .f_attr = &f_attr,
+               .dir_attr = &dir_attr,
                .server = server,
        };
        int status = 0;
@@ -465,6 +524,12 @@ static int _nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st
                set_bit(NFS_DELEGATED_STATE, &state->flags);
                goto out;
        }
+       o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid);
+       status = -ENOMEM;
+       if (o_arg.seqid == NULL)
+               goto out;
+       nfs_fattr_init(&f_attr);
+       nfs_fattr_init(&dir_attr);
        status = _nfs4_proc_open(dir, sp, &o_arg, &o_res);
        if (status != 0)
                goto out_nodeleg;
@@ -490,6 +555,7 @@ static int _nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st
                        nfs_inode_reclaim_delegation(inode, sp->so_cred, &o_res);
        }
 out_nodeleg:
+       nfs_free_seqid(o_arg.seqid);
        clear_bit(NFS_DELEGATED_STATE, &state->flags);
 out:
        dput(parent);
@@ -564,7 +630,6 @@ static int _nfs4_open_delegated(struct inode *inode, int flags, struct rpc_cred
                dprintk("%s: nfs4_get_state_owner failed!\n", __FUNCTION__);
                goto out_err;
        }
-       down(&sp->so_sema);
        state = nfs4_get_open_state(inode, sp);
        if (state == NULL)
                goto out_err;
@@ -589,7 +654,6 @@ static int _nfs4_open_delegated(struct inode *inode, int flags, struct rpc_cred
        set_bit(NFS_DELEGATED_STATE, &state->flags);
        update_open_stateid(state, &delegation->stateid, open_flags);
 out_ok:
-       up(&sp->so_sema);
        nfs4_put_state_owner(sp);
        up_read(&nfsi->rwsem);
        up_read(&clp->cl_sem);
@@ -600,11 +664,12 @@ out_err:
        if (sp != NULL) {
                if (state != NULL)
                        nfs4_put_open_state(state);
-               up(&sp->so_sema);
                nfs4_put_state_owner(sp);
        }
        up_read(&nfsi->rwsem);
        up_read(&clp->cl_sem);
+       if (err != -EACCES)
+               nfs_inode_return_delegation(inode);
        return err;
 }
 
@@ -635,9 +700,7 @@ static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, int flags, st
        struct nfs4_client *clp = server->nfs4_state;
        struct inode *inode = NULL;
        int                     status;
-       struct nfs_fattr        f_attr = {
-               .valid          = 0,
-       };
+       struct nfs_fattr f_attr, dir_attr;
        struct nfs_openargs o_arg = {
                .fh             = NFS_FH(dir),
                .open_flags     = flags,
@@ -648,6 +711,7 @@ static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, int flags, st
        };
        struct nfs_openres o_res = {
                .f_attr         = &f_attr,
+               .dir_attr       = &dir_attr,
                .server         = server,
        };
 
@@ -665,8 +729,12 @@ static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, int flags, st
        } else
                o_arg.u.attrs = sattr;
        /* Serialization for the sequence id */
-       down(&sp->so_sema);
 
+       o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid);
+       if (o_arg.seqid == NULL)
+               return -ENOMEM;
+       nfs_fattr_init(&f_attr);
+       nfs_fattr_init(&dir_attr);
        status = _nfs4_proc_open(dir, sp, &o_arg, &o_res);
        if (status != 0)
                goto out_err;
@@ -681,7 +749,7 @@ static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, int flags, st
        update_open_stateid(state, &o_res.stateid, flags);
        if (o_res.delegation_type != 0)
                nfs_inode_set_delegation(inode, cred, &o_res);
-       up(&sp->so_sema);
+       nfs_free_seqid(o_arg.seqid);
        nfs4_put_state_owner(sp);
        up_read(&clp->cl_sem);
        *res = state;
@@ -690,7 +758,7 @@ out_err:
        if (sp != NULL) {
                if (state != NULL)
                        nfs4_put_open_state(state);
-               up(&sp->so_sema);
+               nfs_free_seqid(o_arg.seqid);
                nfs4_put_state_owner(sp);
        }
        /* Note: clp->cl_sem must be released before nfs4_put_open_state()! */
@@ -718,7 +786,7 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir, struct dentry *dentry,
                 * It is actually a sign of a bug on the client or on the server.
                 *
                 * If we receive a BAD_SEQID error in the particular case of
-                * doing an OPEN, we assume that nfs4_increment_seqid() will
+                * doing an OPEN, we assume that nfs_increment_open_seqid() will
                 * have unhashed the old state_owner for us, and that we can
                 * therefore safely retry using a new one. We should still warn
                 * the user though...
@@ -728,6 +796,16 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir, struct dentry *dentry,
                        exception.retry = 1;
                        continue;
                }
+               /*
+                * BAD_STATEID on OPEN means that the server cancelled our
+                * state before it received the OPEN_CONFIRM.
+                * Recover by retrying the request as per the discussion
+                * on Page 181 of RFC3530.
+                */
+               if (status == -NFS4ERR_BAD_STATEID) {
+                       exception.retry = 1;
+                       continue;
+               }
                res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir),
                                        status, &exception));
        } while (exception.retry);
@@ -755,7 +833,7 @@ static int _nfs4_do_setattr(struct nfs_server *server, struct nfs_fattr *fattr,
         };
        int status;
 
-        fattr->valid = 0;
+       nfs_fattr_init(fattr);
 
        if (state != NULL) {
                msg.rpc_cred = state->owner->so_cred;
@@ -787,19 +865,30 @@ struct nfs4_closedata {
        struct nfs4_state *state;
        struct nfs_closeargs arg;
        struct nfs_closeres res;
+       struct nfs_fattr fattr;
 };
 
+static void nfs4_free_closedata(struct nfs4_closedata *calldata)
+{
+       struct nfs4_state *state = calldata->state;
+       struct nfs4_state_owner *sp = state->owner;
+
+       nfs4_put_open_state(calldata->state);
+       nfs_free_seqid(calldata->arg.seqid);
+       nfs4_put_state_owner(sp);
+       kfree(calldata);
+}
+
 static void nfs4_close_done(struct rpc_task *task)
 {
        struct nfs4_closedata *calldata = (struct nfs4_closedata *)task->tk_calldata;
        struct nfs4_state *state = calldata->state;
-       struct nfs4_state_owner *sp = state->owner;
        struct nfs_server *server = NFS_SERVER(calldata->inode);
 
         /* hmm. we are done with the inode, and in the process of freeing
         * the state_owner. we keep this around to process errors
         */
-       nfs4_increment_seqid(task->tk_status, sp);
+       nfs_increment_open_seqid(task->tk_status, calldata->arg.seqid);
        switch (task->tk_status) {
                case 0:
                        memcpy(&state->stateid, &calldata->res.stateid,
@@ -816,25 +905,49 @@ static void nfs4_close_done(struct rpc_task *task)
                                return;
                        }
        }
+       nfs_refresh_inode(calldata->inode, calldata->res.fattr);
        state->state = calldata->arg.open_flags;
-       nfs4_put_open_state(state);
-       up(&sp->so_sema);
-       nfs4_put_state_owner(sp);
-       up_read(&server->nfs4_state->cl_sem);
-       kfree(calldata);
+       nfs4_free_closedata(calldata);
 }
 
-static inline int nfs4_close_call(struct rpc_clnt *clnt, struct nfs4_closedata *calldata)
+static void nfs4_close_begin(struct rpc_task *task)
 {
+       struct nfs4_closedata *calldata = (struct nfs4_closedata *)task->tk_calldata;
+       struct nfs4_state *state = calldata->state;
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
                .rpc_argp = &calldata->arg,
                .rpc_resp = &calldata->res,
-               .rpc_cred = calldata->state->owner->so_cred,
+               .rpc_cred = state->owner->so_cred,
        };
-       if (calldata->arg.open_flags != 0)
+       int mode = 0;
+       int status;
+
+       status = nfs_wait_on_sequence(calldata->arg.seqid, task);
+       if (status != 0)
+               return;
+       /* Don't reorder reads */
+       smp_rmb();
+       /* Recalculate the new open mode in case someone reopened the file
+        * while we were waiting in line to be scheduled.
+        */
+       if (state->nreaders != 0)
+               mode |= FMODE_READ;
+       if (state->nwriters != 0)
+               mode |= FMODE_WRITE;
+       if (test_bit(NFS_DELEGATED_STATE, &state->flags))
+               state->state = mode;
+       if (mode == state->state) {
+               nfs4_free_closedata(calldata);
+               task->tk_exit = NULL;
+               rpc_exit(task, 0);
+               return;
+       }
+       nfs_fattr_init(calldata->res.fattr);
+       if (mode != 0)
                msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
-       return rpc_call_async(clnt, &msg, 0, nfs4_close_done, calldata);
+       calldata->arg.open_flags = mode;
+       rpc_call_setup(task, &msg, 0);
 }
 
 /* 
@@ -850,40 +963,57 @@ static inline int nfs4_close_call(struct rpc_clnt *clnt, struct nfs4_closedata *
  */
 int nfs4_do_close(struct inode *inode, struct nfs4_state *state, mode_t mode) 
 {
+       struct nfs_server *server = NFS_SERVER(inode);
        struct nfs4_closedata *calldata;
-       int status;
+       int status = -ENOMEM;
 
-       /* Tell caller we're done */
-       if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
-               state->state = mode;
-               return 0;
-       }
-       calldata = (struct nfs4_closedata *)kmalloc(sizeof(*calldata), GFP_KERNEL);
+       calldata = kmalloc(sizeof(*calldata), GFP_KERNEL);
        if (calldata == NULL)
-               return -ENOMEM;
+               goto out;
        calldata->inode = inode;
        calldata->state = state;
        calldata->arg.fh = NFS_FH(inode);
+       calldata->arg.stateid = &state->stateid;
        /* Serialization for the sequence id */
-       calldata->arg.seqid = state->owner->so_seqid;
-       calldata->arg.open_flags = mode;
-       memcpy(&calldata->arg.stateid, &state->stateid,
-                       sizeof(calldata->arg.stateid));
-       status = nfs4_close_call(NFS_SERVER(inode)->client, calldata);
-       /*
-        * Return -EINPROGRESS on success in order to indicate to the
-        * caller that an asynchronous RPC call has been launched, and
-        * that it will release the semaphores on completion.
-        */
-       return (status == 0) ? -EINPROGRESS : status;
+       calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid);
+       if (calldata->arg.seqid == NULL)
+               goto out_free_calldata;
+       calldata->arg.bitmask = server->attr_bitmask;
+       calldata->res.fattr = &calldata->fattr;
+       calldata->res.server = server;
+
+       status = nfs4_call_async(server->client, nfs4_close_begin,
+                       nfs4_close_done, calldata);
+       if (status == 0)
+               goto out;
+
+       nfs_free_seqid(calldata->arg.seqid);
+out_free_calldata:
+       kfree(calldata);
+out:
+       return status;
 }
 
-struct inode *
+static void nfs4_intent_set_file(struct nameidata *nd, struct dentry *dentry, struct nfs4_state *state)
+{
+       struct file *filp;
+
+       filp = lookup_instantiate_filp(nd, dentry, NULL);
+       if (!IS_ERR(filp)) {
+               struct nfs_open_context *ctx;
+               ctx = (struct nfs_open_context *)filp->private_data;
+               ctx->state = state;
+       } else
+               nfs4_close_state(state, nd->intent.open.flags);
+}
+
+struct dentry *
 nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
 {
        struct iattr attr;
        struct rpc_cred *cred;
        struct nfs4_state *state;
+       struct dentry *res;
 
        if (nd->flags & LOOKUP_CREATE) {
                attr.ia_mode = nd->intent.open.create_mode;
@@ -897,16 +1027,23 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
 
        cred = rpcauth_lookupcred(NFS_SERVER(dir)->client->cl_auth, 0);
        if (IS_ERR(cred))
-               return (struct inode *)cred;
+               return (struct dentry *)cred;
        state = nfs4_do_open(dir, dentry, nd->intent.open.flags, &attr, cred);
        put_rpccred(cred);
-       if (IS_ERR(state))
-               return (struct inode *)state;
-       return state->inode;
+       if (IS_ERR(state)) {
+               if (PTR_ERR(state) == -ENOENT)
+                       d_add(dentry, NULL);
+               return (struct dentry *)state;
+       }
+       res = d_add_unique(dentry, state->inode);
+       if (res != NULL)
+               dentry = res;
+       nfs4_intent_set_file(nd, dentry, state);
+       return res;
 }
 
 int
-nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags)
+nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags, struct nameidata *nd)
 {
        struct rpc_cred *cred;
        struct nfs4_state *state;
@@ -919,18 +1056,30 @@ nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags)
        if (IS_ERR(state))
                state = nfs4_do_open(dir, dentry, openflags, NULL, cred);
        put_rpccred(cred);
-       if (state == ERR_PTR(-ENOENT) && dentry->d_inode == 0)
-               return 1;
-       if (IS_ERR(state))
-               return 0;
+       if (IS_ERR(state)) {
+               switch (PTR_ERR(state)) {
+                       case -EPERM:
+                       case -EACCES:
+                       case -EDQUOT:
+                       case -ENOSPC:
+                       case -EROFS:
+                               lookup_instantiate_filp(nd, (struct dentry *)state, NULL);
+                               return 1;
+                       case -ENOENT:
+                               if (dentry->d_inode == NULL)
+                                       return 1;
+               }
+               goto out_drop;
+       }
        inode = state->inode;
+       iput(inode);
        if (inode == dentry->d_inode) {
-               iput(inode);
+               nfs4_intent_set_file(nd, dentry, state);
                return 1;
        }
-       d_drop(dentry);
        nfs4_close_state(state, openflags);
-       iput(inode);
+out_drop:
+       d_drop(dentry);
        return 0;
 }
 
@@ -974,13 +1123,12 @@ static int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fh
 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
                struct nfs_fsinfo *info)
 {
-       struct nfs_fattr *      fattr = info->fattr;
        struct nfs4_lookup_root_arg args = {
                .bitmask = nfs4_fattr_bitmap,
        };
        struct nfs4_lookup_res res = {
                .server = server,
-               .fattr = fattr,
+               .fattr = info->fattr,
                .fh = fhandle,
        };
        struct rpc_message msg = {
@@ -988,7 +1136,7 @@ static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
                .rpc_argp = &args,
                .rpc_resp = &res,
        };
-       fattr->valid = 0;
+       nfs_fattr_init(info->fattr);
        return rpc_call_sync(server->client, &msg, 0);
 }
 
@@ -1051,7 +1199,7 @@ static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
                q.len = p - q.name;
 
                do {
-                       fattr->valid = 0;
+                       nfs_fattr_init(fattr);
                        status = nfs4_handle_exception(server,
                                        rpc_call_sync(server->client, &msg, 0),
                                        &exception);
@@ -1088,7 +1236,7 @@ static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
                .rpc_resp = &res,
        };
        
-       fattr->valid = 0;
+       nfs_fattr_init(fattr);
        return rpc_call_sync(server->client, &msg, 0);
 }
 
@@ -1130,7 +1278,7 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
        struct nfs4_state *state;
        int status;
 
-       fattr->valid = 0;
+       nfs_fattr_init(fattr);
        
        cred = rpcauth_lookupcred(NFS_SERVER(inode)->client->cl_auth, 0);
        if (IS_ERR(cred))
@@ -1176,7 +1324,7 @@ static int _nfs4_proc_lookup(struct inode *dir, struct qstr *name,
                .rpc_resp = &res,
        };
        
-       fattr->valid = 0;
+       nfs_fattr_init(fattr);
        
        dprintk("NFS call  lookup %s\n", name->name);
        status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
@@ -1325,7 +1473,7 @@ static int _nfs4_proc_read(struct nfs_read_data *rdata)
        dprintk("NFS call  read %d @ %Ld\n", rdata->args.count,
                        (long long) rdata->args.offset);
 
-       fattr->valid = 0;
+       nfs_fattr_init(fattr);
        status = rpc_call_sync(server->client, &msg, flags);
        if (!status)
                renew_lease(server, timestamp);
@@ -1362,7 +1510,7 @@ static int _nfs4_proc_write(struct nfs_write_data *wdata)
        dprintk("NFS call  write %d @ %Ld\n", wdata->args.count,
                        (long long) wdata->args.offset);
 
-       fattr->valid = 0;
+       nfs_fattr_init(fattr);
        status = rpc_call_sync(server->client, &msg, rpcflags);
        dprintk("NFS reply write: %d\n", status);
        return status;
@@ -1396,7 +1544,7 @@ static int _nfs4_proc_commit(struct nfs_write_data *cdata)
        dprintk("NFS call  commit %d @ %Ld\n", cdata->args.count,
                        (long long) cdata->args.offset);
 
-       fattr->valid = 0;
+       nfs_fattr_init(fattr);
        status = rpc_call_sync(server->client, &msg, 0);
        dprintk("NFS reply commit: %d\n", status);
        return status;
@@ -1431,7 +1579,7 @@ static int nfs4_proc_commit(struct nfs_write_data *cdata)
 
 static int
 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
-                 int flags)
+                 int flags, struct nameidata *nd)
 {
        struct nfs4_state *state;
        struct rpc_cred *cred;
@@ -1453,24 +1601,30 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
                struct nfs_fattr fattr;
                status = nfs4_do_setattr(NFS_SERVER(dir), &fattr,
                                     NFS_FH(state->inode), sattr, state);
-               if (status == 0) {
+               if (status == 0)
                        nfs_setattr_update_inode(state->inode, sattr);
-                       goto out;
-               }
-       } else if (flags != 0)
-               goto out;
-       nfs4_close_state(state, flags);
+       }
+       if (status == 0 && nd != NULL && (nd->flags & LOOKUP_OPEN))
+               nfs4_intent_set_file(nd, dentry, state);
+       else
+               nfs4_close_state(state, flags);
 out:
        return status;
 }
 
 static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
 {
+       struct nfs_server *server = NFS_SERVER(dir);
        struct nfs4_remove_arg args = {
                .fh = NFS_FH(dir),
                .name = name,
+               .bitmask = server->attr_bitmask,
+       };
+       struct nfs_fattr dir_attr;
+       struct nfs4_remove_res  res = {
+               .server = server,
+               .dir_attr = &dir_attr,
        };
-       struct nfs4_change_info res;
        struct rpc_message msg = {
                .rpc_proc       = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
                .rpc_argp       = &args,
@@ -1478,9 +1632,12 @@ static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
        };
        int                     status;
 
-       status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
-       if (status == 0)
-               update_changeattr(dir, &res);
+       nfs_fattr_init(res.dir_attr);
+       status = rpc_call_sync(server->client, &msg, 0);
+       if (status == 0) {
+               update_changeattr(dir, &res.cinfo);
+               nfs_post_op_update_inode(dir, res.dir_attr);
+       }
        return status;
 }
 
@@ -1498,12 +1655,14 @@ static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
 
 struct unlink_desc {
        struct nfs4_remove_arg  args;
-       struct nfs4_change_info res;
+       struct nfs4_remove_res  res;
+       struct nfs_fattr dir_attr;
 };
 
 static int nfs4_proc_unlink_setup(struct rpc_message *msg, struct dentry *dir,
                struct qstr *name)
 {
+       struct nfs_server *server = NFS_SERVER(dir->d_inode);
        struct unlink_desc *up;
 
        up = (struct unlink_desc *) kmalloc(sizeof(*up), GFP_KERNEL);
@@ -1512,6 +1671,9 @@ static int nfs4_proc_unlink_setup(struct rpc_message *msg, struct dentry *dir,
        
        up->args.fh = NFS_FH(dir->d_inode);
        up->args.name = name;
+       up->args.bitmask = server->attr_bitmask;
+       up->res.server = server;
+       up->res.dir_attr = &up->dir_attr;
        
        msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
        msg->rpc_argp = &up->args;
@@ -1526,7 +1688,8 @@ static int nfs4_proc_unlink_done(struct dentry *dir, struct rpc_task *task)
        
        if (msg->rpc_resp != NULL) {
                up = container_of(msg->rpc_resp, struct unlink_desc, res);
-               update_changeattr(dir->d_inode, &up->res);
+               update_changeattr(dir->d_inode, &up->res.cinfo);
+               nfs_post_op_update_inode(dir->d_inode, up->res.dir_attr);
                kfree(up);
                msg->rpc_resp = NULL;
                msg->rpc_argp = NULL;
@@ -1537,13 +1700,20 @@ static int nfs4_proc_unlink_done(struct dentry *dir, struct rpc_task *task)
 static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
                struct inode *new_dir, struct qstr *new_name)
 {
+       struct nfs_server *server = NFS_SERVER(old_dir);
        struct nfs4_rename_arg arg = {
                .old_dir = NFS_FH(old_dir),
                .new_dir = NFS_FH(new_dir),
                .old_name = old_name,
                .new_name = new_name,
+               .bitmask = server->attr_bitmask,
+       };
+       struct nfs_fattr old_fattr, new_fattr;
+       struct nfs4_rename_res res = {
+               .server = server,
+               .old_fattr = &old_fattr,
+               .new_fattr = &new_fattr,
        };
-       struct nfs4_rename_res res = { };
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME],
                .rpc_argp = &arg,
@@ -1551,11 +1721,15 @@ static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
        };
        int                     status;
        
-       status = rpc_call_sync(NFS_CLIENT(old_dir), &msg, 0);
+       nfs_fattr_init(res.old_fattr);
+       nfs_fattr_init(res.new_fattr);
+       status = rpc_call_sync(server->client, &msg, 0);
 
        if (!status) {
                update_changeattr(old_dir, &res.old_cinfo);
+               nfs_post_op_update_inode(old_dir, res.old_fattr);
                update_changeattr(new_dir, &res.new_cinfo);
+               nfs_post_op_update_inode(new_dir, res.new_fattr);
        }
        return status;
 }
@@ -1576,22 +1750,34 @@ static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
 
 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
 {
+       struct nfs_server *server = NFS_SERVER(inode);
        struct nfs4_link_arg arg = {
                .fh     = NFS_FH(inode),
                .dir_fh = NFS_FH(dir),
                .name   = name,
+               .bitmask = server->attr_bitmask,
+       };
+       struct nfs_fattr fattr, dir_attr;
+       struct nfs4_link_res res = {
+               .server = server,
+               .fattr = &fattr,
+               .dir_attr = &dir_attr,
        };
-       struct nfs4_change_info cinfo = { };
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
                .rpc_argp = &arg,
-               .rpc_resp = &cinfo,
+               .rpc_resp = &res,
        };
        int                     status;
 
-       status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
-       if (!status)
-               update_changeattr(dir, &cinfo);
+       nfs_fattr_init(res.fattr);
+       nfs_fattr_init(res.dir_attr);
+       status = rpc_call_sync(server->client, &msg, 0);
+       if (!status) {
+               update_changeattr(dir, &res.cinfo);
+               nfs_post_op_update_inode(dir, res.dir_attr);
+               nfs_refresh_inode(inode, res.fattr);
+       }
 
        return status;
 }
@@ -1613,6 +1799,7 @@ static int _nfs4_proc_symlink(struct inode *dir, struct qstr *name,
                struct nfs_fattr *fattr)
 {
        struct nfs_server *server = NFS_SERVER(dir);
+       struct nfs_fattr dir_fattr;
        struct nfs4_create_arg arg = {
                .dir_fh = NFS_FH(dir),
                .server = server,
@@ -1625,6 +1812,7 @@ static int _nfs4_proc_symlink(struct inode *dir, struct qstr *name,
                .server = server,
                .fh = fhandle,
                .fattr = fattr,
+               .dir_fattr = &dir_fattr,
        };
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK],
@@ -1636,11 +1824,13 @@ static int _nfs4_proc_symlink(struct inode *dir, struct qstr *name,
        if (path->len > NFS4_MAXPATHLEN)
                return -ENAMETOOLONG;
        arg.u.symlink = path;
-       fattr->valid = 0;
+       nfs_fattr_init(fattr);
+       nfs_fattr_init(&dir_fattr);
        
        status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
        if (!status)
                update_changeattr(dir, &res.dir_cinfo);
+       nfs_post_op_update_inode(dir, res.dir_fattr);
        return status;
 }
 
@@ -1664,7 +1854,7 @@ static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
 {
        struct nfs_server *server = NFS_SERVER(dir);
        struct nfs_fh fhandle;
-       struct nfs_fattr fattr;
+       struct nfs_fattr fattr, dir_fattr;
        struct nfs4_create_arg arg = {
                .dir_fh = NFS_FH(dir),
                .server = server,
@@ -1677,6 +1867,7 @@ static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
                .server = server,
                .fh = &fhandle,
                .fattr = &fattr,
+               .dir_fattr = &dir_fattr,
        };
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE],
@@ -1685,11 +1876,13 @@ static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
        };
        int                     status;
 
-       fattr.valid = 0;
+       nfs_fattr_init(&fattr);
+       nfs_fattr_init(&dir_fattr);
        
        status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
        if (!status) {
                update_changeattr(dir, &res.dir_cinfo);
+               nfs_post_op_update_inode(dir, res.dir_fattr);
                status = nfs_instantiate(dentry, &fhandle, &fattr);
        }
        return status;
@@ -1762,7 +1955,7 @@ static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
 {
        struct nfs_server *server = NFS_SERVER(dir);
        struct nfs_fh fh;
-       struct nfs_fattr fattr;
+       struct nfs_fattr fattr, dir_fattr;
        struct nfs4_create_arg arg = {
                .dir_fh = NFS_FH(dir),
                .server = server,
@@ -1774,6 +1967,7 @@ static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
                .server = server,
                .fh = &fh,
                .fattr = &fattr,
+               .dir_fattr = &dir_fattr,
        };
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE],
@@ -1783,7 +1977,8 @@ static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
        int                     status;
        int                     mode = sattr->ia_mode;
 
-       fattr.valid = 0;
+       nfs_fattr_init(&fattr);
+       nfs_fattr_init(&dir_fattr);
 
        BUG_ON(!(sattr->ia_valid & ATTR_MODE));
        BUG_ON(!S_ISFIFO(mode) && !S_ISBLK(mode) && !S_ISCHR(mode) && !S_ISSOCK(mode));
@@ -1805,6 +2000,7 @@ static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
        status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
        if (status == 0) {
                update_changeattr(dir, &res.dir_cinfo);
+               nfs_post_op_update_inode(dir, res.dir_fattr);
                status = nfs_instantiate(dentry, &fh, &fattr);
        }
        return status;
@@ -1836,7 +2032,7 @@ static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
                .rpc_resp = fsstat,
        };
 
-       fsstat->fattr->valid = 0;
+       nfs_fattr_init(fsstat->fattr);
        return rpc_call_sync(server->client, &msg, 0);
 }
 
@@ -1883,7 +2079,7 @@ static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, str
 
 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
 {
-       fsinfo->fattr->valid = 0;
+       nfs_fattr_init(fsinfo->fattr);
        return nfs4_do_fsinfo(server, fhandle, fsinfo);
 }
 
@@ -1906,7 +2102,7 @@ static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle
                return 0;
        }
 
-       pathconf->fattr->valid = 0;
+       nfs_fattr_init(pathconf->fattr);
        return rpc_call_sync(server->client, &msg, 0);
 }
 
@@ -1973,8 +2169,10 @@ nfs4_write_done(struct rpc_task *task)
                rpc_restart_call(task);
                return;
        }
-       if (task->tk_status >= 0)
+       if (task->tk_status >= 0) {
                renew_lease(NFS_SERVER(inode), data->timestamp);
+               nfs_post_op_update_inode(inode, data->res.fattr);
+       }
        /* Call back common NFS writeback processing */
        nfs_writeback_done(task);
 }
@@ -1990,6 +2188,7 @@ nfs4_proc_write_setup(struct nfs_write_data *data, int how)
                .rpc_cred = data->cred,
        };
        struct inode *inode = data->inode;
+       struct nfs_server *server = NFS_SERVER(inode);
        int stable;
        int flags;
        
@@ -2001,6 +2200,8 @@ nfs4_proc_write_setup(struct nfs_write_data *data, int how)
        } else
                stable = NFS_UNSTABLE;
        data->args.stable = stable;
+       data->args.bitmask = server->attr_bitmask;
+       data->res.server = server;
 
        data->timestamp   = jiffies;
 
@@ -2022,6 +2223,8 @@ nfs4_commit_done(struct rpc_task *task)
                rpc_restart_call(task);
                return;
        }
+       if (task->tk_status >= 0)
+               nfs_post_op_update_inode(inode, data->res.fattr);
        /* Call back common NFS writeback processing */
        nfs_commit_done(task);
 }
@@ -2037,8 +2240,12 @@ nfs4_proc_commit_setup(struct nfs_write_data *data, int how)
                .rpc_cred = data->cred,
        };      
        struct inode *inode = data->inode;
+       struct nfs_server *server = NFS_SERVER(inode);
        int flags;
        
+       data->args.bitmask = server->attr_bitmask;
+       data->res.server = server;
+
        /* Set the initial flags for the task.  */
        flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
 
@@ -2106,65 +2313,6 @@ nfs4_proc_renew(struct nfs4_client *clp)
        return 0;
 }
 
-/*
- * We will need to arrange for the VFS layer to provide an atomic open.
- * Until then, this open method is prone to inefficiency and race conditions
- * due to the lookup, potential create, and open VFS calls from sys_open()
- * placed on the wire.
- */
-static int
-nfs4_proc_file_open(struct inode *inode, struct file *filp)
-{
-       struct dentry *dentry = filp->f_dentry;
-       struct nfs_open_context *ctx;
-       struct nfs4_state *state = NULL;
-       struct rpc_cred *cred;
-       int status = -ENOMEM;
-
-       dprintk("nfs4_proc_file_open: starting on (%.*s/%.*s)\n",
-                              (int)dentry->d_parent->d_name.len,
-                              dentry->d_parent->d_name.name,
-                              (int)dentry->d_name.len, dentry->d_name.name);
-
-
-       /* Find our open stateid */
-       cred = rpcauth_lookupcred(NFS_SERVER(inode)->client->cl_auth, 0);
-       if (IS_ERR(cred))
-               return PTR_ERR(cred);
-       ctx = alloc_nfs_open_context(dentry, cred);
-       put_rpccred(cred);
-       if (unlikely(ctx == NULL))
-               return -ENOMEM;
-       status = -EIO; /* ERACE actually */
-       state = nfs4_find_state(inode, cred, filp->f_mode);
-       if (unlikely(state == NULL))
-               goto no_state;
-       ctx->state = state;
-       nfs4_close_state(state, filp->f_mode);
-       ctx->mode = filp->f_mode;
-       nfs_file_set_open_context(filp, ctx);
-       put_nfs_open_context(ctx);
-       if (filp->f_mode & FMODE_WRITE)
-               nfs_begin_data_update(inode);
-       return 0;
-no_state:
-       printk(KERN_WARNING "NFS: v4 raced in function %s\n", __FUNCTION__);
-       put_nfs_open_context(ctx);
-       return status;
-}
-
-/*
- * Release our state
- */
-static int
-nfs4_proc_file_release(struct inode *inode, struct file *filp)
-{
-       if (filp->f_mode & FMODE_WRITE)
-               nfs_end_data_update(inode);
-       nfs_file_clear_open_context(filp);
-       return 0;
-}
-
 static inline int nfs4_server_supports_acls(struct nfs_server *server)
 {
        return (server->caps & NFS_CAP_ACLS)
@@ -2285,7 +2433,7 @@ static inline ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size
                        return -ENOMEM;
                args.acl_pages[0] = localpage;
                args.acl_pgbase = 0;
-               args.acl_len = PAGE_SIZE;
+               resp_len = args.acl_len = PAGE_SIZE;
        } else {
                resp_buf = buf;
                buf_to_pages(buf, buflen, args.acl_pages, &args.acl_pgbase);
@@ -2345,6 +2493,7 @@ static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen
 
        if (!nfs4_server_supports_acls(server))
                return -EOPNOTSUPP;
+       nfs_inode_return_delegation(inode);
        buf_to_pages(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
        ret = rpc_call_sync(NFS_SERVER(inode)->client, &msg, 0);
        if (ret == 0)
@@ -2353,7 +2502,7 @@ static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen
 }
 
 static int
-nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server)
+nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server)
 {
        struct nfs4_client *clp = server->nfs4_state;
 
@@ -2431,7 +2580,7 @@ static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
 /* This is the error handling routine for processes that are allowed
  * to sleep.
  */
-int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
+int nfs4_handle_exception(const struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
 {
        struct nfs4_client *clp = server->nfs4_state;
        int ret = errorcode;
@@ -2632,7 +2781,6 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock
 
        down_read(&clp->cl_sem);
        nlo.clientid = clp->cl_clientid;
-       down(&state->lock_sema);
        status = nfs4_set_lock_state(state, request);
        if (status != 0)
                goto out;
@@ -2659,7 +2807,6 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock
                status = 0;
        }
 out:
-       up(&state->lock_sema);
        up_read(&clp->cl_sem);
        return status;
 }
@@ -2696,79 +2843,149 @@ static int do_vfs_lock(struct file *file, struct file_lock *fl)
        return res;
 }
 
-static int _nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
+struct nfs4_unlockdata {
+       struct nfs_lockargs arg;
+       struct nfs_locku_opargs luargs;
+       struct nfs_lockres res;
+       struct nfs4_lock_state *lsp;
+       struct nfs_open_context *ctx;
+       atomic_t refcount;
+       struct completion completion;
+};
+
+static void nfs4_locku_release_calldata(struct nfs4_unlockdata *calldata)
 {
-       struct inode *inode = state->inode;
-       struct nfs_server *server = NFS_SERVER(inode);
-       struct nfs4_client *clp = server->nfs4_state;
-       struct nfs_lockargs arg = {
-               .fh = NFS_FH(inode),
-               .type = nfs4_lck_type(cmd, request),
-               .offset = request->fl_start,
-               .length = nfs4_lck_length(request),
-       };
-       struct nfs_lockres res = {
-               .server = server,
-       };
+       if (atomic_dec_and_test(&calldata->refcount)) {
+               nfs_free_seqid(calldata->luargs.seqid);
+               nfs4_put_lock_state(calldata->lsp);
+               put_nfs_open_context(calldata->ctx);
+               kfree(calldata);
+       }
+}
+
+static void nfs4_locku_complete(struct nfs4_unlockdata *calldata)
+{
+       complete(&calldata->completion);
+       nfs4_locku_release_calldata(calldata);
+}
+
+static void nfs4_locku_done(struct rpc_task *task)
+{
+       struct nfs4_unlockdata *calldata = (struct nfs4_unlockdata *)task->tk_calldata;
+
+       nfs_increment_lock_seqid(task->tk_status, calldata->luargs.seqid);
+       switch (task->tk_status) {
+               case 0:
+                       memcpy(calldata->lsp->ls_stateid.data,
+                                       calldata->res.u.stateid.data,
+                                       sizeof(calldata->lsp->ls_stateid.data));
+                       break;
+               case -NFS4ERR_STALE_STATEID:
+               case -NFS4ERR_EXPIRED:
+                       nfs4_schedule_state_recovery(calldata->res.server->nfs4_state);
+                       break;
+               default:
+                       if (nfs4_async_handle_error(task, calldata->res.server) == -EAGAIN) {
+                               rpc_restart_call(task);
+                               return;
+                       }
+       }
+       nfs4_locku_complete(calldata);
+}
+
+static void nfs4_locku_begin(struct rpc_task *task)
+{
+       struct nfs4_unlockdata *calldata = (struct nfs4_unlockdata *)task->tk_calldata;
        struct rpc_message msg = {
                .rpc_proc       = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
-               .rpc_argp       = &arg,
-               .rpc_resp       = &res,
-               .rpc_cred       = state->owner->so_cred,
+               .rpc_argp       = &calldata->arg,
+               .rpc_resp       = &calldata->res,
+               .rpc_cred       = calldata->lsp->ls_state->owner->so_cred,
        };
+       int status;
+
+       status = nfs_wait_on_sequence(calldata->luargs.seqid, task);
+       if (status != 0)
+               return;
+       if ((calldata->lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0) {
+               nfs4_locku_complete(calldata);
+               task->tk_exit = NULL;
+               rpc_exit(task, 0);
+               return;
+       }
+       rpc_call_setup(task, &msg, 0);
+}
+
+static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
+{
+       struct nfs4_unlockdata *calldata;
+       struct inode *inode = state->inode;
+       struct nfs_server *server = NFS_SERVER(inode);
        struct nfs4_lock_state *lsp;
-       struct nfs_locku_opargs luargs;
        int status;
-                       
-       down_read(&clp->cl_sem);
-       down(&state->lock_sema);
+
        status = nfs4_set_lock_state(state, request);
        if (status != 0)
-               goto out;
+               return status;
        lsp = request->fl_u.nfs4_fl.owner;
        /* We might have lost the locks! */
        if ((lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0)
-               goto out;
-       luargs.seqid = lsp->ls_seqid;
-       memcpy(&luargs.stateid, &lsp->ls_stateid, sizeof(luargs.stateid));
-       arg.u.locku = &luargs;
-       status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
-       nfs4_increment_lock_seqid(status, lsp);
-
-       if (status == 0)
-               memcpy(&lsp->ls_stateid,  &res.u.stateid, 
-                               sizeof(lsp->ls_stateid));
-out:
-       up(&state->lock_sema);
+               return 0;
+       calldata = kmalloc(sizeof(*calldata), GFP_KERNEL);
+       if (calldata == NULL)
+               return -ENOMEM;
+       calldata->luargs.seqid = nfs_alloc_seqid(&lsp->ls_seqid);
+       if (calldata->luargs.seqid == NULL) {
+               kfree(calldata);
+               return -ENOMEM;
+       }
+       calldata->luargs.stateid = &lsp->ls_stateid;
+       calldata->arg.fh = NFS_FH(inode);
+       calldata->arg.type = nfs4_lck_type(cmd, request);
+       calldata->arg.offset = request->fl_start;
+       calldata->arg.length = nfs4_lck_length(request);
+       calldata->arg.u.locku = &calldata->luargs;
+       calldata->res.server = server;
+       calldata->lsp = lsp;
+       atomic_inc(&lsp->ls_count);
+
+       /* Ensure we don't close file until we're done freeing locks! */
+       calldata->ctx = get_nfs_open_context((struct nfs_open_context*)request->fl_file->private_data);
+
+       atomic_set(&calldata->refcount, 2);
+       init_completion(&calldata->completion);
+
+       status = nfs4_call_async(NFS_SERVER(inode)->client, nfs4_locku_begin,
+                       nfs4_locku_done, calldata);
        if (status == 0)
-               do_vfs_lock(request->fl_file, request);
-       up_read(&clp->cl_sem);
+               wait_for_completion_interruptible(&calldata->completion);
+       do_vfs_lock(request->fl_file, request);
+       nfs4_locku_release_calldata(calldata);
        return status;
 }
 
-static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
-{
-       struct nfs4_exception exception = { };
-       int err;
-
-       do {
-               err = nfs4_handle_exception(NFS_SERVER(state->inode),
-                               _nfs4_proc_unlck(state, cmd, request),
-                               &exception);
-       } while (exception.retry);
-       return err;
-}
-
 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *request, int reclaim)
 {
        struct inode *inode = state->inode;
        struct nfs_server *server = NFS_SERVER(inode);
        struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner;
+       struct nfs_lock_opargs largs = {
+               .lock_stateid = &lsp->ls_stateid,
+               .open_stateid = &state->stateid,
+               .lock_owner = {
+                       .clientid = server->nfs4_state->cl_clientid,
+                       .id = lsp->ls_id,
+               },
+               .reclaim = reclaim,
+       };
        struct nfs_lockargs arg = {
                .fh = NFS_FH(inode),
                .type = nfs4_lck_type(cmd, request),
                .offset = request->fl_start,
                .length = nfs4_lck_length(request),
+               .u = {
+                       .lock = &largs,
+               },
        };
        struct nfs_lockres res = {
                .server = server,
@@ -2779,53 +2996,39 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *r
                .rpc_resp       = &res,
                .rpc_cred       = state->owner->so_cred,
        };
-       struct nfs_lock_opargs largs = {
-               .reclaim = reclaim,
-               .new_lock_owner = 0,
-       };
-       int status;
+       int status = -ENOMEM;
 
-       if (!(lsp->ls_flags & NFS_LOCK_INITIALIZED)) {
+       largs.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid);
+       if (largs.lock_seqid == NULL)
+               return -ENOMEM;
+       if (!(lsp->ls_seqid.flags & NFS_SEQID_CONFIRMED)) {
                struct nfs4_state_owner *owner = state->owner;
-               struct nfs_open_to_lock otl = {
-                       .lock_owner = {
-                               .clientid = server->nfs4_state->cl_clientid,
-                       },
-               };
-
-               otl.lock_seqid = lsp->ls_seqid;
-               otl.lock_owner.id = lsp->ls_id;
-               memcpy(&otl.open_stateid, &state->stateid, sizeof(otl.open_stateid));
-               largs.u.open_lock = &otl;
+
+               largs.open_seqid = nfs_alloc_seqid(&owner->so_seqid);
+               if (largs.open_seqid == NULL)
+                       goto out;
                largs.new_lock_owner = 1;
-               arg.u.lock = &largs;
-               down(&owner->so_sema);
-               otl.open_seqid = owner->so_seqid;
                status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
-               /* increment open_owner seqid on success, and 
-               * seqid mutating errors */
-               nfs4_increment_seqid(status, owner);
-               up(&owner->so_sema);
-               if (status == 0) {
-                       lsp->ls_flags |= NFS_LOCK_INITIALIZED;
-                       lsp->ls_seqid++;
+               /* increment open seqid on success, and seqid mutating errors */
+               if (largs.new_lock_owner != 0) {
+                       nfs_increment_open_seqid(status, largs.open_seqid);
+                       if (status == 0)
+                               nfs_confirm_seqid(&lsp->ls_seqid, 0);
                }
-       } else {
-               struct nfs_exist_lock el = {
-                       .seqid = lsp->ls_seqid,
-               };
-               memcpy(&el.stateid, &lsp->ls_stateid, sizeof(el.stateid));
-               largs.u.exist_lock = &el;
-               arg.u.lock = &largs;
+               nfs_free_seqid(largs.open_seqid);
+       } else
                status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
-               /* increment seqid on success, and * seqid mutating errors*/
-               nfs4_increment_lock_seqid(status, lsp);
-       }
+       /* increment lock seqid on success, and seqid mutating errors*/
+       nfs_increment_lock_seqid(status, largs.lock_seqid);
        /* save the returned stateid. */
-       if (status == 0)
-               memcpy(&lsp->ls_stateid, &res.u.stateid, sizeof(nfs4_stateid));
-       else if (status == -NFS4ERR_DENIED)
+       if (status == 0) {
+               memcpy(lsp->ls_stateid.data, res.u.stateid.data,
+                               sizeof(lsp->ls_stateid.data));
+               lsp->ls_flags |= NFS_LOCK_INITIALIZED;
+       } else if (status == -NFS4ERR_DENIED)
                status = -EAGAIN;
+out:
+       nfs_free_seqid(largs.lock_seqid);
        return status;
 }
 
@@ -2865,11 +3068,9 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
        int status;
 
        down_read(&clp->cl_sem);
-       down(&state->lock_sema);
        status = nfs4_set_lock_state(state, request);
        if (status == 0)
                status = _nfs4_do_setlk(state, cmd, request, 0);
-       up(&state->lock_sema);
        if (status == 0) {
                /* Note: we always want to sleep here! */
                request->fl_flags |= FL_SLEEP;
@@ -3024,8 +3225,8 @@ struct nfs_rpc_ops        nfs_v4_clientops = {
        .read_setup     = nfs4_proc_read_setup,
        .write_setup    = nfs4_proc_write_setup,
        .commit_setup   = nfs4_proc_commit_setup,
-       .file_open      = nfs4_proc_file_open,
-       .file_release   = nfs4_proc_file_release,
+       .file_open      = nfs_open,
+       .file_release   = nfs_release,
        .lock           = nfs4_proc_lock,
        .clear_acl_cache = nfs4_zap_acl_attr,
 };
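
The hunks above convert the NFSv4 open and lock seqids from bare integers into per-RPC struct nfs_seqid objects: each CLOSE or LOCKU allocates one with nfs_alloc_seqid(), waits its turn on the owner's rpc_sequence via nfs_wait_on_sequence(), and releases it with nfs_free_seqid(), which wakes the next waiter. The sketch below is a minimal user-space analogue of that serialization, assuming nothing beyond POSIX threads; every name in it is hypothetical and none of it is kernel code.

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-in for the shared rpc_sequence: one request in
 * flight at a time, counter bumped only when the reply is allowed to
 * mutate the seqid. */
struct sequence_example {
        pthread_mutex_t lock;
        pthread_cond_t  wait;       /* stands in for the RPC wait queue */
        unsigned int    counter;    /* the seqid sent on the wire       */
        int             busy;       /* non-zero while a call is queued  */
};

static unsigned int seq_begin(struct sequence_example *seq)
{
        unsigned int seqid;

        pthread_mutex_lock(&seq->lock);
        while (seq->busy)           /* analogue of nfs_wait_on_sequence() */
                pthread_cond_wait(&seq->wait, &seq->lock);
        seq->busy = 1;
        seqid = seq->counter;
        pthread_mutex_unlock(&seq->lock);
        return seqid;
}

static void seq_end(struct sequence_example *seq, int server_status)
{
        pthread_mutex_lock(&seq->lock);
        /* analogue of nfs_increment_seqid(): some errors must not bump it */
        if (server_status != -1)    /* -1 stands in for BAD_SEQID and friends */
                seq->counter++;
        seq->busy = 0;
        pthread_cond_signal(&seq->wait);   /* analogue of rpc_wake_up_next() */
        pthread_mutex_unlock(&seq->lock);
}

int main(void)
{
        struct sequence_example seq = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .wait = PTHREAD_COND_INITIALIZER,
        };

        printf("sending CLOSE with seqid %u\n", seq_begin(&seq));
        seq_end(&seq, 0);           /* success: the next caller sees seqid 1 */
        return 0;
}

In the patch itself the waiting happens inside the RPC state machine (rpc_sleep_on() in nfs_wait_on_sequence(), rpc_wake_up_next() in nfs_free_seqid()), so an asynchronous task that is not at the head of the queue simply goes back to sleep instead of blocking a thread as this analogue does.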
index afe587d82f1e71ebc8eabab97c1b9e45bdf67365..2d5a6a2b9dec780616ff5691258a86bc9cea45f6 100644
@@ -264,13 +264,16 @@ nfs4_alloc_state_owner(void)
 {
        struct nfs4_state_owner *sp;
 
-       sp = kmalloc(sizeof(*sp),GFP_KERNEL);
+       sp = kzalloc(sizeof(*sp),GFP_KERNEL);
        if (!sp)
                return NULL;
-       init_MUTEX(&sp->so_sema);
-       sp->so_seqid = 0;                 /* arbitrary */
+       spin_lock_init(&sp->so_lock);
        INIT_LIST_HEAD(&sp->so_states);
        INIT_LIST_HEAD(&sp->so_delegations);
+       rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue");
+       sp->so_seqid.sequence = &sp->so_sequence;
+       spin_lock_init(&sp->so_sequence.lock);
+       INIT_LIST_HEAD(&sp->so_sequence.list);
        atomic_set(&sp->so_count, 1);
        return sp;
 }
@@ -359,7 +362,6 @@ nfs4_alloc_open_state(void)
        memset(state->stateid.data, 0, sizeof(state->stateid.data));
        atomic_set(&state->count, 1);
        INIT_LIST_HEAD(&state->lock_states);
-       init_MUTEX(&state->lock_sema);
        spin_lock_init(&state->state_lock);
        return state;
 }
@@ -437,21 +439,23 @@ nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
        if (state)
                goto out;
        new = nfs4_alloc_open_state();
+       spin_lock(&owner->so_lock);
        spin_lock(&inode->i_lock);
        state = __nfs4_find_state_byowner(inode, owner);
        if (state == NULL && new != NULL) {
                state = new;
-               /* Caller *must* be holding owner->so_sem */
-               /* Note: The reclaim code dictates that we add stateless
-                * and read-only stateids to the end of the list */
-               list_add_tail(&state->open_states, &owner->so_states);
                state->owner = owner;
                atomic_inc(&owner->so_count);
                list_add(&state->inode_states, &nfsi->open_states);
                state->inode = igrab(inode);
                spin_unlock(&inode->i_lock);
+               /* Note: The reclaim code dictates that we add stateless
+                * and read-only stateids to the end of the list */
+               list_add_tail(&state->open_states, &owner->so_states);
+               spin_unlock(&owner->so_lock);
        } else {
                spin_unlock(&inode->i_lock);
+               spin_unlock(&owner->so_lock);
                if (new)
                        nfs4_free_open_state(new);
        }
@@ -461,19 +465,21 @@ out:
 
 /*
  * Beware! Caller must be holding exactly one
- * reference to clp->cl_sem and owner->so_sema!
+ * reference to clp->cl_sem!
  */
 void nfs4_put_open_state(struct nfs4_state *state)
 {
        struct inode *inode = state->inode;
        struct nfs4_state_owner *owner = state->owner;
 
-       if (!atomic_dec_and_lock(&state->count, &inode->i_lock))
+       if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
                return;
+       spin_lock(&inode->i_lock);
        if (!list_empty(&state->inode_states))
                list_del(&state->inode_states);
-       spin_unlock(&inode->i_lock);
        list_del(&state->open_states);
+       spin_unlock(&inode->i_lock);
+       spin_unlock(&owner->so_lock);
        iput(inode);
        BUG_ON (state->state != 0);
        nfs4_free_open_state(state);
@@ -481,20 +487,17 @@ void nfs4_put_open_state(struct nfs4_state *state)
 }
 
 /*
- * Beware! Caller must be holding no references to clp->cl_sem!
- * of owner->so_sema!
+ * Close the current file.
  */
 void nfs4_close_state(struct nfs4_state *state, mode_t mode)
 {
        struct inode *inode = state->inode;
        struct nfs4_state_owner *owner = state->owner;
-       struct nfs4_client *clp = owner->so_client;
        int newstate;
 
        atomic_inc(&owner->so_count);
-       down_read(&clp->cl_sem);
-       down(&owner->so_sema);
        /* Protect against nfs4_find_state() */
+       spin_lock(&owner->so_lock);
        spin_lock(&inode->i_lock);
        if (mode & FMODE_READ)
                state->nreaders--;
@@ -507,6 +510,7 @@ void nfs4_close_state(struct nfs4_state *state, mode_t mode)
                list_move_tail(&state->open_states, &owner->so_states);
        }
        spin_unlock(&inode->i_lock);
+       spin_unlock(&owner->so_lock);
        newstate = 0;
        if (state->state != 0) {
                if (state->nreaders)
@@ -515,14 +519,16 @@ void nfs4_close_state(struct nfs4_state *state, mode_t mode)
                        newstate |= FMODE_WRITE;
                if (state->state == newstate)
                        goto out;
-               if (nfs4_do_close(inode, state, newstate) == -EINPROGRESS)
+               if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
+                       state->state = newstate;
+                       goto out;
+               }
+               if (nfs4_do_close(inode, state, newstate) == 0)
                        return;
        }
 out:
        nfs4_put_open_state(state);
-       up(&owner->so_sema);
        nfs4_put_state_owner(owner);
-       up_read(&clp->cl_sem);
 }
 
 /*
@@ -546,19 +552,16 @@ __nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
  * Return a compatible lock_state. If no initialized lock_state structure
  * exists, return an uninitialized one.
  *
- * The caller must be holding state->lock_sema
  */
 static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
 {
        struct nfs4_lock_state *lsp;
        struct nfs4_client *clp = state->owner->so_client;
 
-       lsp = kmalloc(sizeof(*lsp), GFP_KERNEL);
+       lsp = kzalloc(sizeof(*lsp), GFP_KERNEL);
        if (lsp == NULL)
                return NULL;
-       lsp->ls_flags = 0;
-       lsp->ls_seqid = 0;      /* arbitrary */
-       memset(lsp->ls_stateid.data, 0, sizeof(lsp->ls_stateid.data));
+       lsp->ls_seqid.sequence = &state->owner->so_sequence;
        atomic_set(&lsp->ls_count, 1);
        lsp->ls_owner = fl_owner;
        spin_lock(&clp->cl_lock);
@@ -572,7 +575,7 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
  * Return a compatible lock_state. If no initialized lock_state structure
  * exists, return an uninitialized one.
  *
- * The caller must be holding state->lock_sema and clp->cl_sem
+ * The caller must be holding clp->cl_sem
  */
 static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
 {
@@ -605,7 +608,7 @@ static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_
  * Release reference to lock_state, and free it if we see that
  * it is no longer in use
  */
-static void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
+void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
 {
        struct nfs4_state *state;
 
@@ -673,29 +676,94 @@ void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t f
        nfs4_put_lock_state(lsp);
 }
 
-/*
-* Called with state->lock_sema and clp->cl_sem held.
-*/
-void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *lsp)
+struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter)
 {
-       if (status == NFS_OK || seqid_mutating_err(-status))
-               lsp->ls_seqid++;
+       struct nfs_seqid *new;
+
+       new = kmalloc(sizeof(*new), GFP_KERNEL);
+       if (new != NULL) {
+               new->sequence = counter;
+               INIT_LIST_HEAD(&new->list);
+       }
+       return new;
+}
+
+void nfs_free_seqid(struct nfs_seqid *seqid)
+{
+       struct rpc_sequence *sequence = seqid->sequence->sequence;
+
+       if (!list_empty(&seqid->list)) {
+               spin_lock(&sequence->lock);
+               list_del(&seqid->list);
+               spin_unlock(&sequence->lock);
+       }
+       rpc_wake_up_next(&sequence->wait);
+       kfree(seqid);
 }
 
 /*
-* Called with sp->so_sema and clp->cl_sem held.
-*
-* Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
-* failed with a seqid incrementing error -
-* see comments nfs_fs.h:seqid_mutating_error()
-*/
-void nfs4_increment_seqid(int status, struct nfs4_state_owner *sp)
-{
-       if (status == NFS_OK || seqid_mutating_err(-status))
-               sp->so_seqid++;
-       /* If the server returns BAD_SEQID, unhash state_owner here */
-       if (status == -NFS4ERR_BAD_SEQID)
+ * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
+ * failed with a seqid incrementing error -
+ * see comments nfs_fs.h:seqid_mutating_error()
+ */
+static inline void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
+{
+       switch (status) {
+               case 0:
+                       break;
+               case -NFS4ERR_BAD_SEQID:
+               case -NFS4ERR_STALE_CLIENTID:
+               case -NFS4ERR_STALE_STATEID:
+               case -NFS4ERR_BAD_STATEID:
+               case -NFS4ERR_BADXDR:
+               case -NFS4ERR_RESOURCE:
+               case -NFS4ERR_NOFILEHANDLE:
+                       /* Non-seqid mutating errors */
+                       return;
+       };
+       /*
+        * Note: no locking needed as we are guaranteed to be first
+        * on the sequence list
+        */
+       seqid->sequence->counter++;
+}
+
+void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
+{
+       if (status == -NFS4ERR_BAD_SEQID) {
+               struct nfs4_state_owner *sp = container_of(seqid->sequence,
+                               struct nfs4_state_owner, so_seqid);
                nfs4_drop_state_owner(sp);
+       }
+       return nfs_increment_seqid(status, seqid);
+}
+
+/*
+ * Increment the seqid if the LOCK/LOCKU succeeded, or
+ * failed with a seqid incrementing error -
+ * see comments nfs_fs.h:seqid_mutating_error()
+ */
+void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
+{
+       return nfs_increment_seqid(status, seqid);
+}
+
+int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
+{
+       struct rpc_sequence *sequence = seqid->sequence->sequence;
+       int status = 0;
+
+       if (sequence->list.next == &seqid->list)
+               goto out;
+       spin_lock(&sequence->lock);
+       if (!list_empty(&sequence->list)) {
+               rpc_sleep_on(&sequence->wait, task, NULL, NULL);
+               status = -EAGAIN;
+       } else
+               list_add(&seqid->list, &sequence->list);
+       spin_unlock(&sequence->lock);
+out:
+       return status;
 }
 
 static int reclaimer(void *);
@@ -791,8 +859,6 @@ static int nfs4_reclaim_open_state(struct nfs4_state_recovery_ops *ops, struct n
                if (state->state == 0)
                        continue;
                status = ops->recover_open(sp, state);
-               list_for_each_entry(lock, &state->lock_states, ls_locks)
-                       lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
                if (status >= 0) {
                        status = nfs4_reclaim_locks(ops, state);
                        if (status < 0)
@@ -831,6 +897,28 @@ out_err:
        return status;
 }
 
+static void nfs4_state_mark_reclaim(struct nfs4_client *clp)
+{
+       struct nfs4_state_owner *sp;
+       struct nfs4_state *state;
+       struct nfs4_lock_state *lock;
+
+       /* Reset all sequence ids to zero */
+       list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
+               sp->so_seqid.counter = 0;
+               sp->so_seqid.flags = 0;
+               spin_lock(&sp->so_lock);
+               list_for_each_entry(state, &sp->so_states, open_states) {
+                       list_for_each_entry(lock, &state->lock_states, ls_locks) {
+                               lock->ls_seqid.counter = 0;
+                               lock->ls_seqid.flags = 0;
+                               lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
+                       }
+               }
+               spin_unlock(&sp->so_lock);
+       }
+}
+
 static int reclaimer(void *ptr)
 {
        struct reclaimer_args *args = (struct reclaimer_args *)ptr;
@@ -864,6 +952,7 @@ restart_loop:
                default:
                        ops = &nfs4_network_partition_recovery_ops;
        };
+       nfs4_state_mark_reclaim(clp);
        status = __nfs4_init_client(clp);
        if (status)
                goto out_error;
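
The state-management hunks above drop the so_sema and lock_sema semaphores in favour of the owner's so_lock spinlock, which is now taken before inode->i_lock whenever both the owner's and the inode's open-state lists are updated; nfs4_put_open_state() relies on the same ordering through atomic_dec_and_lock() on so_lock. Below is a small sketch of that lock ordering, using hypothetical user-space stand-ins rather than the kernel structures.

#include <pthread.h>

/* Hypothetical stand-ins for struct nfs4_state_owner and struct inode */
struct owner_example { pthread_mutex_t so_lock; int nr_states; };
struct vnode_example { pthread_mutex_t i_lock;  int nr_states; };

/* Owner lock first, then inode lock: the ordering the hunks above use */
static void attach_state(struct owner_example *o, struct vnode_example *v)
{
        pthread_mutex_lock(&o->so_lock);
        pthread_mutex_lock(&v->i_lock);
        v->nr_states++;             /* hang the state off the inode ...  */
        pthread_mutex_unlock(&v->i_lock);
        o->nr_states++;             /* ... then off the owner's list     */
        pthread_mutex_unlock(&o->so_lock);
}

static void detach_state(struct owner_example *o, struct vnode_example *v)
{
        pthread_mutex_lock(&o->so_lock);
        pthread_mutex_lock(&v->i_lock);
        v->nr_states--;
        pthread_mutex_unlock(&v->i_lock);
        o->nr_states--;
        pthread_mutex_unlock(&o->so_lock);
}

int main(void)
{
        struct owner_example o = { .so_lock = PTHREAD_MUTEX_INITIALIZER };
        struct vnode_example v = { .i_lock  = PTHREAD_MUTEX_INITIALIZER };

        attach_state(&o, &v);
        detach_state(&o, &v);
        return 0;
}

Because both paths take the locks in the same order, the lookup in nfs4_get_open_state() and the teardown in nfs4_put_open_state() can run concurrently without deadlocking.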
index 6c564ef9489ef833fb293eb776aee4ec75db1290..fbbace8a30c4da649507d66827cec057f0c7df40 100644
@@ -95,6 +95,8 @@ static int nfs_stat_to_errno(int);
 #define decode_getattr_maxsz    (op_decode_hdr_maxsz + nfs4_fattr_maxsz)
 #define encode_savefh_maxsz     (op_encode_hdr_maxsz)
 #define decode_savefh_maxsz     (op_decode_hdr_maxsz)
+#define encode_restorefh_maxsz  (op_encode_hdr_maxsz)
+#define decode_restorefh_maxsz  (op_decode_hdr_maxsz)
 #define encode_fsinfo_maxsz    (op_encode_hdr_maxsz + 2)
 #define decode_fsinfo_maxsz    (op_decode_hdr_maxsz + 11)
 #define encode_renew_maxsz     (op_encode_hdr_maxsz + 3)
@@ -157,16 +159,20 @@ static int nfs_stat_to_errno(int);
                                op_decode_hdr_maxsz + 2)
 #define NFS4_enc_write_sz      (compound_encode_hdr_maxsz + \
                                encode_putfh_maxsz + \
-                               op_encode_hdr_maxsz + 8)
+                               op_encode_hdr_maxsz + 8 + \
+                               encode_getattr_maxsz)
 #define NFS4_dec_write_sz      (compound_decode_hdr_maxsz + \
                                decode_putfh_maxsz + \
-                               op_decode_hdr_maxsz + 4)
+                               op_decode_hdr_maxsz + 4 + \
+                               decode_getattr_maxsz)
 #define NFS4_enc_commit_sz     (compound_encode_hdr_maxsz + \
                                encode_putfh_maxsz + \
-                               op_encode_hdr_maxsz + 3)
+                               op_encode_hdr_maxsz + 3 + \
+                               encode_getattr_maxsz)
 #define NFS4_dec_commit_sz     (compound_decode_hdr_maxsz + \
                                decode_putfh_maxsz + \
-                               op_decode_hdr_maxsz + 2)
+                               op_decode_hdr_maxsz + 2 + \
+                               decode_getattr_maxsz)
 #define NFS4_enc_open_sz        (compound_encode_hdr_maxsz + \
                                 encode_putfh_maxsz + \
                                 op_encode_hdr_maxsz + \
@@ -196,17 +202,21 @@ static int nfs_stat_to_errno(int);
 #define NFS4_enc_open_downgrade_sz \
                                (compound_encode_hdr_maxsz + \
                                 encode_putfh_maxsz + \
-                                op_encode_hdr_maxsz + 7)
+                                op_encode_hdr_maxsz + 7 + \
+                               encode_getattr_maxsz)
 #define NFS4_dec_open_downgrade_sz \
                                (compound_decode_hdr_maxsz + \
                                 decode_putfh_maxsz + \
-                                op_decode_hdr_maxsz + 4)
+                                op_decode_hdr_maxsz + 4 + \
+                               decode_getattr_maxsz)
 #define NFS4_enc_close_sz       (compound_encode_hdr_maxsz + \
                                 encode_putfh_maxsz + \
-                                op_encode_hdr_maxsz + 5)
+                                op_encode_hdr_maxsz + 5 + \
+                               encode_getattr_maxsz)
 #define NFS4_dec_close_sz       (compound_decode_hdr_maxsz + \
                                 decode_putfh_maxsz + \
-                                op_decode_hdr_maxsz + 4)
+                                op_decode_hdr_maxsz + 4 + \
+                               decode_getattr_maxsz)
 #define NFS4_enc_setattr_sz     (compound_encode_hdr_maxsz + \
                                 encode_putfh_maxsz + \
                                 op_encode_hdr_maxsz + 4 + \
@@ -300,30 +310,44 @@ static int nfs_stat_to_errno(int);
                                decode_getfh_maxsz)
 #define NFS4_enc_remove_sz     (compound_encode_hdr_maxsz + \
                                encode_putfh_maxsz + \
-                               encode_remove_maxsz)
+                               encode_remove_maxsz + \
+                               encode_getattr_maxsz)
 #define NFS4_dec_remove_sz     (compound_decode_hdr_maxsz + \
                                decode_putfh_maxsz + \
-                               op_decode_hdr_maxsz + 5)
+                               op_decode_hdr_maxsz + 5 + \
+                               decode_getattr_maxsz)
 #define NFS4_enc_rename_sz     (compound_encode_hdr_maxsz + \
                                encode_putfh_maxsz + \
                                encode_savefh_maxsz + \
                                encode_putfh_maxsz + \
-                               encode_rename_maxsz)
+                               encode_rename_maxsz + \
+                               encode_getattr_maxsz + \
+                               encode_restorefh_maxsz + \
+                               encode_getattr_maxsz)
 #define NFS4_dec_rename_sz     (compound_decode_hdr_maxsz + \
                                decode_putfh_maxsz + \
                                decode_savefh_maxsz + \
                                decode_putfh_maxsz + \
-                               decode_rename_maxsz)
+                               decode_rename_maxsz + \
+                               decode_getattr_maxsz + \
+                               decode_restorefh_maxsz + \
+                               decode_getattr_maxsz)
 #define NFS4_enc_link_sz       (compound_encode_hdr_maxsz + \
                                encode_putfh_maxsz + \
                                encode_savefh_maxsz + \
                                encode_putfh_maxsz + \
-                               encode_link_maxsz)
+                               encode_link_maxsz + \
+                               decode_getattr_maxsz + \
+                               encode_restorefh_maxsz + \
+                               decode_getattr_maxsz)
 #define NFS4_dec_link_sz       (compound_decode_hdr_maxsz + \
                                decode_putfh_maxsz + \
                                decode_savefh_maxsz + \
                                decode_putfh_maxsz + \
-                               decode_link_maxsz)
+                               decode_link_maxsz + \
+                               decode_getattr_maxsz + \
+                               decode_restorefh_maxsz + \
+                               decode_getattr_maxsz)
 #define NFS4_enc_symlink_sz    (compound_encode_hdr_maxsz + \
                                encode_putfh_maxsz + \
                                encode_symlink_maxsz + \
@@ -336,14 +360,20 @@ static int nfs_stat_to_errno(int);
                                decode_getfh_maxsz)
 #define NFS4_enc_create_sz     (compound_encode_hdr_maxsz + \
                                encode_putfh_maxsz + \
+                               encode_savefh_maxsz + \
                                encode_create_maxsz + \
+                               encode_getfh_maxsz + \
                                encode_getattr_maxsz + \
-                               encode_getfh_maxsz)
+                               encode_restorefh_maxsz + \
+                               encode_getattr_maxsz)
 #define NFS4_dec_create_sz     (compound_decode_hdr_maxsz + \
                                decode_putfh_maxsz + \
+                               decode_savefh_maxsz + \
                                decode_create_maxsz + \
+                               decode_getfh_maxsz + \
                                decode_getattr_maxsz + \
-                               decode_getfh_maxsz)
+                               decode_restorefh_maxsz + \
+                               decode_getattr_maxsz)
 #define NFS4_enc_pathconf_sz   (compound_encode_hdr_maxsz + \
                                encode_putfh_maxsz + \
                                encode_getattr_maxsz)
@@ -602,10 +632,10 @@ static int encode_close(struct xdr_stream *xdr, const struct nfs_closeargs *arg)
 {
        uint32_t *p;
 
-       RESERVE_SPACE(8+sizeof(arg->stateid.data));
+       RESERVE_SPACE(8+sizeof(arg->stateid->data));
        WRITE32(OP_CLOSE);
-       WRITE32(arg->seqid);
-       WRITEMEM(arg->stateid.data, sizeof(arg->stateid.data));
+       WRITE32(arg->seqid->sequence->counter);
+       WRITEMEM(arg->stateid->data, sizeof(arg->stateid->data));
        
        return 0;
 }
@@ -729,22 +759,18 @@ static int encode_lock(struct xdr_stream *xdr, const struct nfs_lockargs *arg)
        WRITE64(arg->length);
        WRITE32(opargs->new_lock_owner);
        if (opargs->new_lock_owner){
-               struct nfs_open_to_lock *ol = opargs->u.open_lock;
-
                RESERVE_SPACE(40);
-               WRITE32(ol->open_seqid);
-               WRITEMEM(&ol->open_stateid, sizeof(ol->open_stateid));
-               WRITE32(ol->lock_seqid);
-               WRITE64(ol->lock_owner.clientid);
+               WRITE32(opargs->open_seqid->sequence->counter);
+               WRITEMEM(opargs->open_stateid->data, sizeof(opargs->open_stateid->data));
+               WRITE32(opargs->lock_seqid->sequence->counter);
+               WRITE64(opargs->lock_owner.clientid);
                WRITE32(4);
-               WRITE32(ol->lock_owner.id);
+               WRITE32(opargs->lock_owner.id);
        }
        else {
-               struct nfs_exist_lock *el = opargs->u.exist_lock;
-
                RESERVE_SPACE(20);
-               WRITEMEM(&el->stateid, sizeof(el->stateid));
-               WRITE32(el->seqid);
+               WRITEMEM(opargs->lock_stateid->data, sizeof(opargs->lock_stateid->data));
+               WRITE32(opargs->lock_seqid->sequence->counter);
        }
 
        return 0;
@@ -775,8 +801,8 @@ static int encode_locku(struct xdr_stream *xdr, const struct nfs_lockargs *arg)
        RESERVE_SPACE(44);
        WRITE32(OP_LOCKU);
        WRITE32(arg->type);
-       WRITE32(opargs->seqid);
-       WRITEMEM(&opargs->stateid, sizeof(opargs->stateid));
+       WRITE32(opargs->seqid->sequence->counter);
+       WRITEMEM(opargs->stateid->data, sizeof(opargs->stateid->data));
        WRITE64(arg->offset);
        WRITE64(arg->length);
 
@@ -826,7 +852,7 @@ static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_opena
  */
        RESERVE_SPACE(8);
        WRITE32(OP_OPEN);
-       WRITE32(arg->seqid);
+       WRITE32(arg->seqid->sequence->counter);
        encode_share_access(xdr, arg->open_flags);
        RESERVE_SPACE(16);
        WRITE64(arg->clientid);
@@ -941,7 +967,7 @@ static int encode_open_confirm(struct xdr_stream *xdr, const struct nfs_open_con
        RESERVE_SPACE(8+sizeof(arg->stateid.data));
        WRITE32(OP_OPEN_CONFIRM);
        WRITEMEM(arg->stateid.data, sizeof(arg->stateid.data));
-       WRITE32(arg->seqid);
+       WRITE32(arg->seqid->sequence->counter);
 
        return 0;
 }
@@ -950,10 +976,10 @@ static int encode_open_downgrade(struct xdr_stream *xdr, const struct nfs_closea
 {
        uint32_t *p;
 
-       RESERVE_SPACE(8+sizeof(arg->stateid.data));
+       RESERVE_SPACE(8+sizeof(arg->stateid->data));
        WRITE32(OP_OPEN_DOWNGRADE);
-       WRITEMEM(arg->stateid.data, sizeof(arg->stateid.data));
-       WRITE32(arg->seqid);
+       WRITEMEM(arg->stateid->data, sizeof(arg->stateid->data));
+       WRITE32(arg->seqid->sequence->counter);
        encode_share_access(xdr, arg->open_flags);
        return 0;
 }
@@ -1116,6 +1142,17 @@ static int encode_renew(struct xdr_stream *xdr, const struct nfs4_client *client
        return 0;
 }
 
+static int
+encode_restorefh(struct xdr_stream *xdr)
+{
+       uint32_t *p;
+
+       RESERVE_SPACE(4);
+       WRITE32(OP_RESTOREFH);
+
+       return 0;
+}
+
 static int
 encode_setacl(struct xdr_stream *xdr, struct nfs_setaclargs *arg)
 {
@@ -1296,14 +1333,18 @@ static int nfs4_xdr_enc_remove(struct rpc_rqst *req, uint32_t *p, const struct n
 {
        struct xdr_stream xdr;
        struct compound_hdr hdr = {
-               .nops = 2,
+               .nops = 3,
        };
        int status;
 
        xdr_init_encode(&xdr, &req->rq_snd_buf, p);
        encode_compound_hdr(&xdr, &hdr);
-       if ((status = encode_putfh(&xdr, args->fh)) == 0)
-               status = encode_remove(&xdr, args->name);
+       if ((status = encode_putfh(&xdr, args->fh)) != 0)
+               goto out;
+       if ((status = encode_remove(&xdr, args->name)) != 0)
+               goto out;
+       status = encode_getfattr(&xdr, args->bitmask);
+out:
        return status;
 }
 
@@ -1314,7 +1355,7 @@ static int nfs4_xdr_enc_rename(struct rpc_rqst *req, uint32_t *p, const struct n
 {
        struct xdr_stream xdr;
        struct compound_hdr hdr = {
-               .nops = 4,
+               .nops = 7,
        };
        int status;
 
@@ -1326,7 +1367,13 @@ static int nfs4_xdr_enc_rename(struct rpc_rqst *req, uint32_t *p, const struct n
                goto out;
        if ((status = encode_putfh(&xdr, args->new_dir)) != 0)
                goto out;
-       status = encode_rename(&xdr, args->old_name, args->new_name);
+       if ((status = encode_rename(&xdr, args->old_name, args->new_name)) != 0)
+               goto out;
+       if ((status = encode_getfattr(&xdr, args->bitmask)) != 0)
+               goto out;
+       if ((status = encode_restorefh(&xdr)) != 0)
+               goto out;
+       status = encode_getfattr(&xdr, args->bitmask);
 out:
        return status;
 }
@@ -1338,7 +1385,7 @@ static int nfs4_xdr_enc_link(struct rpc_rqst *req, uint32_t *p, const struct nfs
 {
        struct xdr_stream xdr;
        struct compound_hdr hdr = {
-               .nops = 4,
+               .nops = 7,
        };
        int status;
 
@@ -1350,7 +1397,13 @@ static int nfs4_xdr_enc_link(struct rpc_rqst *req, uint32_t *p, const struct nfs
                goto out;
        if ((status = encode_putfh(&xdr, args->dir_fh)) != 0)
                goto out;
-       status = encode_link(&xdr, args->name);
+       if ((status = encode_link(&xdr, args->name)) != 0)
+               goto out;
+       if ((status = encode_getfattr(&xdr, args->bitmask)) != 0)
+               goto out;
+       if ((status = encode_restorefh(&xdr)) != 0)
+               goto out;
+       status = encode_getfattr(&xdr, args->bitmask);
 out:
        return status;
 }
@@ -1362,7 +1415,7 @@ static int nfs4_xdr_enc_create(struct rpc_rqst *req, uint32_t *p, const struct n
 {
        struct xdr_stream xdr;
        struct compound_hdr hdr = {
-               .nops = 4,
+               .nops = 7,
        };
        int status;
 
@@ -1370,10 +1423,16 @@ static int nfs4_xdr_enc_create(struct rpc_rqst *req, uint32_t *p, const struct n
        encode_compound_hdr(&xdr, &hdr);
        if ((status = encode_putfh(&xdr, args->dir_fh)) != 0)
                goto out;
+       if ((status = encode_savefh(&xdr)) != 0)
+               goto out;
        if ((status = encode_create(&xdr, args)) != 0)
                goto out;
        if ((status = encode_getfh(&xdr)) != 0)
                goto out;
+       if ((status = encode_getfattr(&xdr, args->bitmask)) != 0)
+               goto out;
+       if ((status = encode_restorefh(&xdr)) != 0)
+               goto out;
        status = encode_getfattr(&xdr, args->bitmask);
 out:
        return status;
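With the added SAVEFH/RESTOREFH pair and the second GETATTR, the CREATE request now carries seven operations and brings back post-op attributes for both the new object and its parent directory. The resulting order, read straight off the encode calls above, is:

/* NFS4_enc_create compound, nops = 7 (per the encode sequence above) */
static const char *nfs4_create_ops[] = {
        "PUTFH",        /* current FH = parent directory        */
        "SAVEFH",       /* stash the directory FH               */
        "CREATE",       /* current FH becomes the new object    */
        "GETFH",        /* hand back the new object's FH        */
        "GETATTR",      /* post-op attributes of the new object */
        "RESTOREFH",    /* back to the directory FH             */
        "GETATTR",      /* post-op attributes of the directory  */
};

The RENAME and LINK encoders grow the same way, from 4 to 7 ops, by appending GETATTR, RESTOREFH, GETATTR after the operation proper.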
@@ -1412,7 +1471,7 @@ static int nfs4_xdr_enc_close(struct rpc_rqst *req, uint32_t *p, struct nfs_clos
 {
         struct xdr_stream xdr;
         struct compound_hdr hdr = {
-                .nops   = 2,
+                .nops   = 3,
         };
         int status;
 
@@ -1422,6 +1481,9 @@ static int nfs4_xdr_enc_close(struct rpc_rqst *req, uint32_t *p, struct nfs_clos
         if(status)
                 goto out;
         status = encode_close(&xdr, args);
+       if (status != 0)
+               goto out;
+       status = encode_getfattr(&xdr, args->bitmask);
 out:
         return status;
 }
@@ -1433,13 +1495,19 @@ static int nfs4_xdr_enc_open(struct rpc_rqst *req, uint32_t *p, struct nfs_opena
 {
        struct xdr_stream xdr;
        struct compound_hdr hdr = {
-               .nops = 4,
+               .nops = 7,
        };
        int status;
 
+       status = nfs_wait_on_sequence(args->seqid, req->rq_task);
+       if (status != 0)
+               goto out;
        xdr_init_encode(&xdr, &req->rq_snd_buf, p);
        encode_compound_hdr(&xdr, &hdr);
        status = encode_putfh(&xdr, args->fh);
+       if (status)
+               goto out;
+       status = encode_savefh(&xdr);
        if (status)
                goto out;
        status = encode_open(&xdr, args);
@@ -1449,6 +1517,12 @@ static int nfs4_xdr_enc_open(struct rpc_rqst *req, uint32_t *p, struct nfs_opena
        if (status)
                goto out;
        status = encode_getfattr(&xdr, args->bitmask);
+       if (status)
+               goto out;
+       status = encode_restorefh(&xdr);
+       if (status)
+               goto out;
+       status = encode_getfattr(&xdr, args->bitmask);
 out:
        return status;
 }
@@ -1464,6 +1538,9 @@ static int nfs4_xdr_enc_open_confirm(struct rpc_rqst *req, uint32_t *p, struct n
        };
        int status;
 
+       status = nfs_wait_on_sequence(args->seqid, req->rq_task);
+       if (status != 0)
+               goto out;
        xdr_init_encode(&xdr, &req->rq_snd_buf, p);
        encode_compound_hdr(&xdr, &hdr);
        status = encode_putfh(&xdr, args->fh);
@@ -1485,6 +1562,9 @@ static int nfs4_xdr_enc_open_noattr(struct rpc_rqst *req, uint32_t *p, struct nf
        };
        int status;
 
+       status = nfs_wait_on_sequence(args->seqid, req->rq_task);
+       if (status != 0)
+               goto out;
        xdr_init_encode(&xdr, &req->rq_snd_buf, p);
        encode_compound_hdr(&xdr, &hdr);
        status = encode_putfh(&xdr, args->fh);
@@ -1502,7 +1582,7 @@ static int nfs4_xdr_enc_open_downgrade(struct rpc_rqst *req, uint32_t *p, struct
 {
        struct xdr_stream xdr;
        struct compound_hdr hdr = {
-               .nops   = 2,
+               .nops   = 3,
        };
        int status;
 
@@ -1512,6 +1592,9 @@ static int nfs4_xdr_enc_open_downgrade(struct rpc_rqst *req, uint32_t *p, struct
        if (status)
                goto out;
        status = encode_open_downgrade(&xdr, args);
+       if (status != 0)
+               goto out;
+       status = encode_getfattr(&xdr, args->bitmask);
 out:
        return status;
 }
@@ -1525,8 +1608,15 @@ static int nfs4_xdr_enc_lock(struct rpc_rqst *req, uint32_t *p, struct nfs_locka
        struct compound_hdr hdr = {
                .nops   = 2,
        };
+       struct nfs_lock_opargs *opargs = args->u.lock;
        int status;
 
+       status = nfs_wait_on_sequence(opargs->lock_seqid, req->rq_task);
+       if (status != 0)
+               goto out;
+       /* Do we need to do an open_to_lock_owner? */
+       if (opargs->lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)
+               opargs->new_lock_owner = 0;
        xdr_init_encode(&xdr, &req->rq_snd_buf, p);
        encode_compound_hdr(&xdr, &hdr);
        status = encode_putfh(&xdr, args->fh);
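A lock request is now serialized on the lock owner's seqid before anything is encoded, and once the server has confirmed that lock owner the open_to_lock_owner branch of encode_lock() is skipped by clearing new_lock_owner. A hedged sketch of that test, using stand-in structures whose layout is only inferred from the field accesses above (the real nfs_seqid definitions live in the NFS headers, not in this diff):

/* Stand-in types, inferred from usage; not the kernel's definitions. */
#define NFS_SEQID_CONFIRMED  1                  /* assumed flag bit */

struct nfs_seqid_counter { unsigned int flags; int counter; };
struct nfs_seqid         { struct nfs_seqid_counter *sequence; };

/* encode side: open_to_lock_owner only for an unconfirmed lock owner */
static int need_open_to_lock_owner(const struct nfs_seqid *lock_seqid)
{
        return (lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED) == 0;
}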
@@ -1713,7 +1803,7 @@ static int nfs4_xdr_enc_write(struct rpc_rqst *req, uint32_t *p, struct nfs_writ
 {
        struct xdr_stream xdr;
        struct compound_hdr hdr = {
-               .nops = 2,
+               .nops = 3,
        };
        int status;
 
@@ -1723,6 +1813,9 @@ static int nfs4_xdr_enc_write(struct rpc_rqst *req, uint32_t *p, struct nfs_writ
        if (status)
                goto out;
        status = encode_write(&xdr, args);
+       if (status)
+               goto out;
+       status = encode_getfattr(&xdr, args->bitmask);
 out:
        return status;
 }
@@ -1734,7 +1827,7 @@ static int nfs4_xdr_enc_commit(struct rpc_rqst *req, uint32_t *p, struct nfs_wri
 {
        struct xdr_stream xdr;
        struct compound_hdr hdr = {
-               .nops = 2,
+               .nops = 3,
        };
        int status;
 
@@ -1744,6 +1837,9 @@ static int nfs4_xdr_enc_commit(struct rpc_rqst *req, uint32_t *p, struct nfs_wri
        if (status)
                goto out;
        status = encode_commit(&xdr, args);
+       if (status)
+               goto out;
+       status = encode_getfattr(&xdr, args->bitmask);
 out:
        return status;
 }
@@ -2670,8 +2766,7 @@ static int decode_server_caps(struct xdr_stream *xdr, struct nfs4_server_caps_re
                goto xdr_error;
        status = verify_attr_len(xdr, savep, attrlen);
 xdr_error:
-       if (status != 0)
-               printk(KERN_NOTICE "%s: xdr error %d!\n", __FUNCTION__, -status);
+       dprintk("%s: xdr returned %d!\n", __FUNCTION__, -status);
        return status;
 }
        
@@ -2704,8 +2799,7 @@ static int decode_statfs(struct xdr_stream *xdr, struct nfs_fsstat *fsstat)
 
        status = verify_attr_len(xdr, savep, attrlen);
 xdr_error:
-       if (status != 0)
-               printk(KERN_NOTICE "%s: xdr error %d!\n", __FUNCTION__, -status);
+       dprintk("%s: xdr returned %d!\n", __FUNCTION__, -status);
        return status;
 }
 
@@ -2730,8 +2824,7 @@ static int decode_pathconf(struct xdr_stream *xdr, struct nfs_pathconf *pathconf
 
        status = verify_attr_len(xdr, savep, attrlen);
 xdr_error:
-       if (status != 0)
-               printk(KERN_NOTICE "%s: xdr error %d!\n", __FUNCTION__, -status);
+       dprintk("%s: xdr returned %d!\n", __FUNCTION__, -status);
        return status;
 }
 
@@ -2787,13 +2880,10 @@ static int decode_getfattr(struct xdr_stream *xdr, struct nfs_fattr *fattr, cons
                goto xdr_error;
        if ((status = decode_attr_time_modify(xdr, bitmap, &fattr->mtime)) != 0)
                goto xdr_error;
-       if ((status = verify_attr_len(xdr, savep, attrlen)) == 0) {
+       if ((status = verify_attr_len(xdr, savep, attrlen)) == 0)
                fattr->valid = NFS_ATTR_FATTR | NFS_ATTR_FATTR_V3 | NFS_ATTR_FATTR_V4;
-               fattr->timestamp = jiffies;
-       }
 xdr_error:
-       if (status != 0)
-               printk(KERN_NOTICE "%s: xdr error %d!\n", __FUNCTION__, -status);
+       dprintk("%s: xdr returned %d\n", __FUNCTION__, -status);
        return status;
 }
 
@@ -2826,8 +2916,7 @@ static int decode_fsinfo(struct xdr_stream *xdr, struct nfs_fsinfo *fsinfo)
 
        status = verify_attr_len(xdr, savep, attrlen);
 xdr_error:
-       if (status != 0)
-               printk(KERN_NOTICE "%s: xdr error %d!\n", __FUNCTION__, -status);
+       dprintk("%s: xdr returned %d!\n", __FUNCTION__, -status);
        return status;
 }
 
@@ -2890,8 +2979,8 @@ static int decode_lock(struct xdr_stream *xdr, struct nfs_lockres *res)
 
        status = decode_op_hdr(xdr, OP_LOCK);
        if (status == 0) {
-               READ_BUF(sizeof(nfs4_stateid));
-               COPYMEM(&res->u.stateid, sizeof(res->u.stateid));
+               READ_BUF(sizeof(res->u.stateid.data));
+               COPYMEM(res->u.stateid.data, sizeof(res->u.stateid.data));
        } else if (status == -NFS4ERR_DENIED)
                return decode_lock_denied(xdr, &res->u.denied);
        return status;
@@ -2913,8 +3002,8 @@ static int decode_locku(struct xdr_stream *xdr, struct nfs_lockres *res)
 
        status = decode_op_hdr(xdr, OP_LOCKU);
        if (status == 0) {
-               READ_BUF(sizeof(nfs4_stateid));
-               COPYMEM(&res->u.stateid, sizeof(res->u.stateid));
+               READ_BUF(sizeof(res->u.stateid.data));
+               COPYMEM(res->u.stateid.data, sizeof(res->u.stateid.data));
        }
        return status;
 }
@@ -2994,7 +3083,7 @@ static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res)
         p += bmlen;
        return decode_delegation(xdr, res);
 xdr_error:
-       printk(KERN_NOTICE "%s: xdr error!\n", __FUNCTION__);
+       dprintk("%s: Bitmap too large! Length = %u\n", __FUNCTION__, bmlen);
        return -EIO;
 }
 
@@ -3208,6 +3297,12 @@ static int decode_renew(struct xdr_stream *xdr)
        return decode_op_hdr(xdr, OP_RENEW);
 }
 
+static int
+decode_restorefh(struct xdr_stream *xdr)
+{
+       return decode_op_hdr(xdr, OP_RESTOREFH);
+}
+
 static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
                size_t *acl_len)
 {
@@ -3243,7 +3338,8 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
                if (attrlen <= *acl_len)
                        xdr_read_pages(xdr, attrlen);
                *acl_len = attrlen;
-       }
+       } else
+               status = -EOPNOTSUPP;
 
 out:
        return status;
@@ -3352,6 +3448,9 @@ static int nfs4_xdr_dec_open_downgrade(struct rpc_rqst *rqstp, uint32_t *p, stru
         if (status)
                 goto out;
         status = decode_open_downgrade(&xdr, res);
+       if (status != 0)
+               goto out;
+       decode_getfattr(&xdr, res->fattr, res->server);
 out:
         return status;
 }
@@ -3424,7 +3523,7 @@ out:
 /*
  * Decode REMOVE response
  */
-static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, uint32_t *p, struct nfs4_change_info *cinfo)
+static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, uint32_t *p, struct nfs4_remove_res *res)
 {
        struct xdr_stream xdr;
        struct compound_hdr hdr;
@@ -3433,8 +3532,11 @@ static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, uint32_t *p, struct nfs4_
        xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
        if ((status = decode_compound_hdr(&xdr, &hdr)) != 0)
                goto out;
-       if ((status = decode_putfh(&xdr)) == 0)
-               status = decode_remove(&xdr, cinfo);
+       if ((status = decode_putfh(&xdr)) != 0)
+               goto out;
+       if ((status = decode_remove(&xdr, &res->cinfo)) != 0)
+               goto out;
+       decode_getfattr(&xdr, res->dir_attr, res->server);
 out:
        return status;
 }
@@ -3457,7 +3559,14 @@ static int nfs4_xdr_dec_rename(struct rpc_rqst *rqstp, uint32_t *p, struct nfs4_
                goto out;
        if ((status = decode_putfh(&xdr)) != 0)
                goto out;
-       status = decode_rename(&xdr, &res->old_cinfo, &res->new_cinfo);
+       if ((status = decode_rename(&xdr, &res->old_cinfo, &res->new_cinfo)) != 0)
+               goto out;
+       /* Current FH is target directory */
+       if (decode_getfattr(&xdr, res->new_fattr, res->server) != 0)
+               goto out;
+       if ((status = decode_restorefh(&xdr)) != 0)
+               goto out;
+       decode_getfattr(&xdr, res->old_fattr, res->server);
 out:
        return status;
 }
@@ -3465,7 +3574,7 @@ out:
 /*
  * Decode LINK response
  */
-static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, uint32_t *p, struct nfs4_change_info *cinfo)
+static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, uint32_t *p, struct nfs4_link_res *res)
 {
        struct xdr_stream xdr;
        struct compound_hdr hdr;
@@ -3480,7 +3589,17 @@ static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, uint32_t *p, struct nfs4_ch
                goto out;
        if ((status = decode_putfh(&xdr)) != 0)
                goto out;
-       status = decode_link(&xdr, cinfo);
+       if ((status = decode_link(&xdr, &res->cinfo)) != 0)
+               goto out;
+       /*
+        * Note order: OP_LINK leaves the directory as the current
+        *             filehandle.
+        */
+       if (decode_getfattr(&xdr, res->dir_attr, res->server) != 0)
+               goto out;
+       if ((status = decode_restorefh(&xdr)) != 0)
+               goto out;
+       decode_getfattr(&xdr, res->fattr, res->server);
 out:
        return status;
 }
@@ -3499,13 +3618,17 @@ static int nfs4_xdr_dec_create(struct rpc_rqst *rqstp, uint32_t *p, struct nfs4_
                goto out;
        if ((status = decode_putfh(&xdr)) != 0)
                goto out;
+       if ((status = decode_savefh(&xdr)) != 0)
+               goto out;
        if ((status = decode_create(&xdr,&res->dir_cinfo)) != 0)
                goto out;
        if ((status = decode_getfh(&xdr, res->fh)) != 0)
                goto out;
-       status = decode_getfattr(&xdr, res->fattr, res->server);
-       if (status == NFS4ERR_DELAY)
-               status = 0;
+       if (decode_getfattr(&xdr, res->fattr, res->server) != 0)
+               goto out;
+       if ((status = decode_restorefh(&xdr)) != 0)
+               goto out;
+       decode_getfattr(&xdr, res->dir_fattr, res->server);
 out:
        return status;
 }
@@ -3623,6 +3746,15 @@ static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp, uint32_t *p, struct nfs_cl
         if (status)
                 goto out;
         status = decode_close(&xdr, res);
+       if (status != 0)
+               goto out;
+       /*
+        * Note: Server may do delete on close for this file
+        *      in which case the getattr call will fail with
+        *      an ESTALE error. Shouldn't be a problem,
+        *      though, since fattr->valid will remain unset.
+        */
+       decode_getfattr(&xdr, res->fattr, res->server);
 out:
         return status;
 }
@@ -3643,15 +3775,20 @@ static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, uint32_t *p, struct nfs_ope
         status = decode_putfh(&xdr);
         if (status)
                 goto out;
+        status = decode_savefh(&xdr);
+       if (status)
+               goto out;
         status = decode_open(&xdr, res);
         if (status)
                 goto out;
        status = decode_getfh(&xdr, &res->fh);
         if (status)
                goto out;
-       status = decode_getfattr(&xdr, res->f_attr, res->server);
-       if (status == NFS4ERR_DELAY)
-               status = 0;
+       if (decode_getfattr(&xdr, res->f_attr, res->server) != 0)
+               goto out;
+       if ((status = decode_restorefh(&xdr)) != 0)
+               goto out;
+       decode_getfattr(&xdr, res->dir_attr, res->server);
 out:
         return status;
 }
@@ -3869,6 +4006,9 @@ static int nfs4_xdr_dec_write(struct rpc_rqst *rqstp, uint32_t *p, struct nfs_wr
        if (status)
                goto out;
        status = decode_write(&xdr, res);
+       if (status)
+               goto out;
+       decode_getfattr(&xdr, res->fattr, res->server);
        if (!status)
                status = res->count;
 out:
@@ -3892,6 +4032,9 @@ static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, uint32_t *p, struct nfs_w
        if (status)
                goto out;
        status = decode_commit(&xdr, res);
+       if (status)
+               goto out;
+       decode_getfattr(&xdr, res->fattr, res->server);
 out:
        return status;
 }
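On the decode side the new trailing GETATTRs are treated as best effort: decode_getfattr() is called without folding its return value into status, so a server that cannot return post-op attributes (for instance the delete-on-close ESTALE case noted above for CLOSE) does not turn a successful operation into an error. The tail of each decoder therefore looks like:

/* pattern used by the dec_write/dec_commit/dec_close paths above */
status = decode_commit(&xdr, res);
if (status)
        goto out;
decode_getfattr(&xdr, res->fattr, res->server);  /* result deliberately ignored */
out:
return status;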
index be23c3fb9260051b8069ad4afbf2257bed1ca502..a48a003242c006c94f8823fdc081d3c03289289e 100644 (file)
@@ -61,7 +61,7 @@ nfs_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
        int status;
 
        dprintk("%s: call getattr\n", __FUNCTION__);
-       fattr->valid = 0;
+       nfs_fattr_init(fattr);
        status = rpc_call(server->client_sys, NFSPROC_GETATTR, fhandle, fattr, 0);
        dprintk("%s: reply getattr: %d\n", __FUNCTION__, status);
        if (status)
@@ -93,7 +93,7 @@ nfs_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
        int     status;
 
        dprintk("NFS call  getattr\n");
-       fattr->valid = 0;
+       nfs_fattr_init(fattr);
        status = rpc_call(server->client, NFSPROC_GETATTR,
                                fhandle, fattr, 0);
        dprintk("NFS reply getattr: %d\n", status);
@@ -112,7 +112,7 @@ nfs_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
        int     status;
 
        dprintk("NFS call  setattr\n");
-       fattr->valid = 0;
+       nfs_fattr_init(fattr);
        status = rpc_call(NFS_CLIENT(inode), NFSPROC_SETATTR, &arg, fattr, 0);
        if (status == 0)
                nfs_setattr_update_inode(inode, sattr);
@@ -136,7 +136,7 @@ nfs_proc_lookup(struct inode *dir, struct qstr *name,
        int                     status;
 
        dprintk("NFS call  lookup %s\n", name->name);
-       fattr->valid = 0;
+       nfs_fattr_init(fattr);
        status = rpc_call(NFS_CLIENT(dir), NFSPROC_LOOKUP, &arg, &res, 0);
        dprintk("NFS reply lookup: %d\n", status);
        return status;
@@ -174,7 +174,7 @@ static int nfs_proc_read(struct nfs_read_data *rdata)
 
        dprintk("NFS call  read %d @ %Ld\n", rdata->args.count,
                        (long long) rdata->args.offset);
-       fattr->valid = 0;
+       nfs_fattr_init(fattr);
        status = rpc_call_sync(NFS_CLIENT(inode), &msg, flags);
        if (status >= 0) {
                nfs_refresh_inode(inode, fattr);
@@ -203,10 +203,10 @@ static int nfs_proc_write(struct nfs_write_data *wdata)
 
        dprintk("NFS call  write %d @ %Ld\n", wdata->args.count,
                        (long long) wdata->args.offset);
-       fattr->valid = 0;
+       nfs_fattr_init(fattr);
        status = rpc_call_sync(NFS_CLIENT(inode), &msg, flags);
        if (status >= 0) {
-               nfs_refresh_inode(inode, fattr);
+               nfs_post_op_update_inode(inode, fattr);
                wdata->res.count = wdata->args.count;
                wdata->verf.committed = NFS_FILE_SYNC;
        }
@@ -216,7 +216,7 @@ static int nfs_proc_write(struct nfs_write_data *wdata)
 
 static int
 nfs_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
-               int flags)
+               int flags, struct nameidata *nd)
 {
        struct nfs_fh           fhandle;
        struct nfs_fattr        fattr;
@@ -232,7 +232,7 @@ nfs_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
        };
        int                     status;
 
-       fattr.valid = 0;
+       nfs_fattr_init(&fattr);
        dprintk("NFS call  create %s\n", dentry->d_name.name);
        status = rpc_call(NFS_CLIENT(dir), NFSPROC_CREATE, &arg, &res, 0);
        if (status == 0)
@@ -273,12 +273,13 @@ nfs_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
                sattr->ia_size = new_encode_dev(rdev);/* get out your barf bag */
        }
 
-       fattr.valid = 0;
+       nfs_fattr_init(&fattr);
        status = rpc_call(NFS_CLIENT(dir), NFSPROC_CREATE, &arg, &res, 0);
+       nfs_mark_for_revalidate(dir);
 
        if (status == -EINVAL && S_ISFIFO(mode)) {
                sattr->ia_mode = mode;
-               fattr.valid = 0;
+               nfs_fattr_init(&fattr);
                status = rpc_call(NFS_CLIENT(dir), NFSPROC_CREATE, &arg, &res, 0);
        }
        if (status == 0)
@@ -305,6 +306,7 @@ nfs_proc_remove(struct inode *dir, struct qstr *name)
 
        dprintk("NFS call  remove %s\n", name->name);
        status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
+       nfs_mark_for_revalidate(dir);
 
        dprintk("NFS reply remove: %d\n", status);
        return status;
@@ -331,8 +333,10 @@ nfs_proc_unlink_done(struct dentry *dir, struct rpc_task *task)
 {
        struct rpc_message *msg = &task->tk_msg;
        
-       if (msg->rpc_argp)
+       if (msg->rpc_argp) {
+               nfs_mark_for_revalidate(dir->d_inode);
                kfree(msg->rpc_argp);
+       }
        return 0;
 }
 
@@ -352,6 +356,8 @@ nfs_proc_rename(struct inode *old_dir, struct qstr *old_name,
 
        dprintk("NFS call  rename %s -> %s\n", old_name->name, new_name->name);
        status = rpc_call(NFS_CLIENT(old_dir), NFSPROC_RENAME, &arg, NULL, 0);
+       nfs_mark_for_revalidate(old_dir);
+       nfs_mark_for_revalidate(new_dir);
        dprintk("NFS reply rename: %d\n", status);
        return status;
 }
@@ -369,6 +375,7 @@ nfs_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
 
        dprintk("NFS call  link %s\n", name->name);
        status = rpc_call(NFS_CLIENT(inode), NFSPROC_LINK, &arg, NULL, 0);
+       nfs_mark_for_revalidate(dir);
        dprintk("NFS reply link: %d\n", status);
        return status;
 }
@@ -391,9 +398,10 @@ nfs_proc_symlink(struct inode *dir, struct qstr *name, struct qstr *path,
        if (path->len > NFS2_MAXPATHLEN)
                return -ENAMETOOLONG;
        dprintk("NFS call  symlink %s -> %s\n", name->name, path->name);
-       fattr->valid = 0;
+       nfs_fattr_init(fattr);
        fhandle->size = 0;
        status = rpc_call(NFS_CLIENT(dir), NFSPROC_SYMLINK, &arg, NULL, 0);
+       nfs_mark_for_revalidate(dir);
        dprintk("NFS reply symlink: %d\n", status);
        return status;
 }
@@ -416,8 +424,9 @@ nfs_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
        int                     status;
 
        dprintk("NFS call  mkdir %s\n", dentry->d_name.name);
-       fattr.valid = 0;
+       nfs_fattr_init(&fattr);
        status = rpc_call(NFS_CLIENT(dir), NFSPROC_MKDIR, &arg, &res, 0);
+       nfs_mark_for_revalidate(dir);
        if (status == 0)
                status = nfs_instantiate(dentry, &fhandle, &fattr);
        dprintk("NFS reply mkdir: %d\n", status);
@@ -436,6 +445,7 @@ nfs_proc_rmdir(struct inode *dir, struct qstr *name)
 
        dprintk("NFS call  rmdir %s\n", name->name);
        status = rpc_call(NFS_CLIENT(dir), NFSPROC_RMDIR, &arg, NULL, 0);
+       nfs_mark_for_revalidate(dir);
        dprintk("NFS reply rmdir: %d\n", status);
        return status;
 }
@@ -484,7 +494,7 @@ nfs_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
        int     status;
 
        dprintk("NFS call  statfs\n");
-       stat->fattr->valid = 0;
+       nfs_fattr_init(stat->fattr);
        status = rpc_call(server->client, NFSPROC_STATFS, fhandle, &fsinfo, 0);
        dprintk("NFS reply statfs: %d\n", status);
        if (status)
@@ -507,7 +517,7 @@ nfs_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
        int     status;
 
        dprintk("NFS call  fsinfo\n");
-       info->fattr->valid = 0;
+       nfs_fattr_init(info->fattr);
        status = rpc_call(server->client, NFSPROC_STATFS, fhandle, &fsinfo, 0);
        dprintk("NFS reply fsinfo: %d\n", status);
        if (status)
@@ -579,7 +589,7 @@ nfs_write_done(struct rpc_task *task)
        struct nfs_write_data *data = (struct nfs_write_data *) task->tk_calldata;
 
        if (task->tk_status >= 0)
-               nfs_refresh_inode(data->inode, data->res.fattr);
+               nfs_post_op_update_inode(data->inode, data->res.fattr);
        nfs_writeback_done(task);
 }
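Throughout the NFSv2 proc routines the reply fattr is now primed with nfs_fattr_init() instead of just zeroing the valid mask, every namespace-changing call marks the parent directory for revalidation, and writes push the returned attributes into the inode via nfs_post_op_update_inode(). As a sketch only, an nfs_fattr_init()-style helper plausibly does no more than the following (both field names are assumptions; the real helper is introduced elsewhere in this patch series):

/* Sketch under stated assumptions, not the kernel's definition. */
static inline void nfs_fattr_init_sketch(struct nfs_fattr *fattr)
{
        fattr->valid = 0;            /* nothing received from the server yet */
        fattr->time_start = jiffies; /* assumed: remember when the call started */
}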
 
index 9758ebd49905a704146fffc23443890b38b8e318..43b03b19731b52bfeabfe0b063e9bc70e9d07e2d 100644 (file)
@@ -215,6 +215,7 @@ static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
        data->res.fattr   = &data->fattr;
        data->res.count   = count;
        data->res.eof     = 0;
+       nfs_fattr_init(&data->fattr);
 
        NFS_PROTO(inode)->read_setup(data);
 
index 5130eda231d7a0513e887a3138e26aef3b1f5cbc..819a65f5071f8e9482e0cff5114e4574a940fd1b 100644 (file)
@@ -870,6 +870,7 @@ static void nfs_write_rpcsetup(struct nfs_page *req,
        data->res.fattr   = &data->fattr;
        data->res.count   = count;
        data->res.verf    = &data->verf;
+       nfs_fattr_init(&data->fattr);
 
        NFS_PROTO(inode)->write_setup(data, how);
 
@@ -1237,6 +1238,7 @@ static void nfs_commit_rpcsetup(struct list_head *head,
        data->res.count   = 0;
        data->res.fattr   = &data->fattr;
        data->res.verf    = &data->verf;
+       nfs_fattr_init(&data->fattr);
        
        NFS_PROTO(inode)->commit_setup(data, how);
 
index f0d90cf0495c490316d427e309c38695ceb4e005..8d06ec911fd9df99762741ecb2ae7cf6b75ab8b0 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -739,7 +739,8 @@ asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group)
 }
 
 static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt,
-                                       int flags, struct file *f)
+                                       int flags, struct file *f,
+                                       int (*open)(struct inode *, struct file *))
 {
        struct inode *inode;
        int error;
@@ -761,11 +762,14 @@ static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt,
        f->f_op = fops_get(inode->i_fop);
        file_move(f, &inode->i_sb->s_files);
 
-       if (f->f_op && f->f_op->open) {
-               error = f->f_op->open(inode,f);
+       if (!open && f->f_op)
+               open = f->f_op->open;
+       if (open) {
+               error = open(inode, f);
                if (error)
                        goto cleanup_all;
        }
+
        f->f_flags &= ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC);
 
        file_ra_state_init(&f->f_ra, f->f_mapping->host->i_mapping);
@@ -814,28 +818,75 @@ struct file *filp_open(const char * filename, int flags, int mode)
 {
        int namei_flags, error;
        struct nameidata nd;
-       struct file *f;
 
        namei_flags = flags;
        if ((namei_flags+1) & O_ACCMODE)
                namei_flags++;
-       if (namei_flags & O_TRUNC)
-               namei_flags |= 2;
-
-       error = -ENFILE;
-       f = get_empty_filp();
-       if (f == NULL)
-               return ERR_PTR(error);
 
        error = open_namei(filename, namei_flags, mode, &nd);
        if (!error)
-               return __dentry_open(nd.dentry, nd.mnt, flags, f);
+               return nameidata_to_filp(&nd, flags);
 
-       put_filp(f);
        return ERR_PTR(error);
 }
 EXPORT_SYMBOL(filp_open);
 
+/**
+ * lookup_instantiate_filp - instantiates the open intent filp
+ * @nd: pointer to nameidata
+ * @dentry: pointer to dentry
+ * @open: open callback
+ *
+ * Helper for filesystems that want to use lookup open intents and pass back
+ * a fully instantiated struct file to the caller.
+ * This function is meant to be called from within a filesystem's
+ * lookup method.
+ * Note that in case of error, nd->intent.open.file is destroyed, but the
+ * path information remains valid.
+ * If the open callback is set to NULL, then the standard f_op->open()
+ * filesystem callback is substituted.
+ */
+struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry,
+               int (*open)(struct inode *, struct file *))
+{
+       if (IS_ERR(nd->intent.open.file))
+               goto out;
+       if (IS_ERR(dentry))
+               goto out_err;
+       nd->intent.open.file = __dentry_open(dget(dentry), mntget(nd->mnt),
+                                            nd->intent.open.flags - 1,
+                                            nd->intent.open.file,
+                                            open);
+out:
+       return nd->intent.open.file;
+out_err:
+       release_open_intent(nd);
+       nd->intent.open.file = (struct file *)dentry;
+       goto out;
+}
+EXPORT_SYMBOL_GPL(lookup_instantiate_filp);
+
+/**
+ * nameidata_to_filp - convert a nameidata to an open filp.
+ * @nd: pointer to nameidata
+ * @flags: open flags
+ *
+ * Note that this function destroys the original nameidata
+ */
+struct file *nameidata_to_filp(struct nameidata *nd, int flags)
+{
+       struct file *filp;
+
+       /* Pick up the filp from the open intent */
+       filp = nd->intent.open.file;
+       /* Has the filesystem initialised the file for us? */
+       if (filp->f_dentry == NULL)
+               filp = __dentry_open(nd->dentry, nd->mnt, flags, filp, NULL);
+       else
+               path_release(nd);
+       return filp;
+}
+
 struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags)
 {
        int error;
@@ -846,7 +897,7 @@ struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags)
        if (f == NULL)
                return ERR_PTR(error);
 
-       return __dentry_open(dentry, mnt, flags, f);
+       return __dentry_open(dentry, mnt, flags, f, NULL);
 }
 EXPORT_SYMBOL(dentry_open);
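The open path is now split in two: open_namei() resolves the name and, via the open intent, a filesystem can finish the open itself from inside ->lookup() by calling lookup_instantiate_filp(); if it does not, nameidata_to_filp() opens nd->dentry the traditional way. A hypothetical usage sketch based only on the prototypes added above (example_open() and example_lookup() are made-up names, and error handling is omitted):

/* Hypothetical filesystem code; only lookup_instantiate_filp() is from this patch. */
static int example_open(struct inode *inode, struct file *filp)
{
        return 0;       /* filesystem-specific open work would go here */
}

static struct dentry *example_lookup(struct inode *dir, struct dentry *dentry,
                                     struct nameidata *nd)
{
        /* ... resolve dentry against dir ... */
        if (nd && (nd->flags & LOOKUP_OPEN))
                lookup_instantiate_filp(nd, dentry, example_open);
        return NULL;
}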
 
index 77e178f13162abef0305852554dbfa73d2a99ff0..1e848648a322b158cce120d70c36966349f10cff 100644 (file)
@@ -430,7 +430,7 @@ void del_gendisk(struct gendisk *disk)
        disk->flags &= ~GENHD_FL_UP;
        unlink_gendisk(disk);
        disk_stat_set_all(disk, 0);
-       disk->stamp = disk->stamp_idle = 0;
+       disk->stamp = 0;
 
        devfs_remove_disk(disk);
 
index 2706e2adffab2df85da09f8884f68f38d5b9a747..45829889dcdc0a13dd93bcd523b305c5756f71dc 100644 (file)
@@ -2022,7 +2022,7 @@ static int get_neighbors(struct tree_balance *p_s_tb, int n_h)
 }
 
 #ifdef CONFIG_REISERFS_CHECK
-void *reiserfs_kmalloc(size_t size, int flags, struct super_block *s)
+void *reiserfs_kmalloc(size_t size, gfp_t flags, struct super_block *s)
 {
        void *vp;
        static size_t malloced;
index d76ee6c4f9b8874f1a26acdaa37a9fe9d693e490..5f82352b97e179c263366e550ab8f7564ebf1d16 100644 (file)
@@ -2842,7 +2842,7 @@ static int reiserfs_set_page_dirty(struct page *page)
  * even in -o notail mode, we can't be sure an old mount without -o notail
  * didn't create files with tails.
  */
-static int reiserfs_releasepage(struct page *page, int unused_gfp_flags)
+static int reiserfs_releasepage(struct page *page, gfp_t unused_gfp_flags)
 {
        struct inode *inode = page->mapping->host;
        struct reiserfs_journal *j = SB_JOURNAL(inode->i_sb);
index 87ac9dc8b381e8325ec2e26a1411c2e6d9449cc9..72e120798677a0ff20852c4a528bc8c4c5c9aa12 100644 (file)
@@ -453,7 +453,7 @@ static struct page *reiserfs_get_page(struct inode *dir, unsigned long n)
        struct page *page;
        /* We can deadlock if we try to free dentries,
           and an unlink/rmdir has just occurred - GFP_NOFS avoids this */
-       mapping->flags = (mapping->flags & ~__GFP_BITS_MASK) | GFP_NOFS;
+       mapping_set_gfp_mask(mapping, GFP_NOFS);
        page = read_cache_page(mapping, n,
                               (filler_t *) mapping->a_ops->readpage, NULL);
        if (!IS_ERR(page)) {
index d2653b589b1c9d431d225e0d83de88fd95320565..3c92162dc7285280ab22682abf57fd4c4a647442 100644 (file)
 
 
 void *
-kmem_alloc(size_t size, gfp_t flags)
+kmem_alloc(size_t size, unsigned int __nocast flags)
 {
-       int             retries = 0;
-       unsigned int    lflags = kmem_flags_convert(flags);
-       void            *ptr;
+       int     retries = 0;
+       gfp_t   lflags = kmem_flags_convert(flags);
+       void    *ptr;
 
        do {
                if (size < MAX_SLAB_SIZE || retries > MAX_VMALLOCS)
@@ -67,7 +67,7 @@ kmem_alloc(size_t size, gfp_t flags)
 }
 
 void *
-kmem_zalloc(size_t size, gfp_t flags)
+kmem_zalloc(size_t size, unsigned int __nocast flags)
 {
        void    *ptr;
 
@@ -90,7 +90,7 @@ kmem_free(void *ptr, size_t size)
 
 void *
 kmem_realloc(void *ptr, size_t newsize, size_t oldsize,
-            gfp_t flags)
+            unsigned int __nocast flags)
 {
        void    *new;
 
@@ -105,11 +105,11 @@ kmem_realloc(void *ptr, size_t newsize, size_t oldsize,
 }
 
 void *
-kmem_zone_alloc(kmem_zone_t *zone, gfp_t flags)
+kmem_zone_alloc(kmem_zone_t *zone, unsigned int __nocast flags)
 {
-       int             retries = 0;
-       unsigned int    lflags = kmem_flags_convert(flags);
-       void            *ptr;
+       int     retries = 0;
+       gfp_t   lflags = kmem_flags_convert(flags);
+       void    *ptr;
 
        do {
                ptr = kmem_cache_alloc(zone, lflags);
@@ -124,7 +124,7 @@ kmem_zone_alloc(kmem_zone_t *zone, gfp_t flags)
 }
 
 void *
-kmem_zone_zalloc(kmem_zone_t *zone, gfp_t flags)
+kmem_zone_zalloc(kmem_zone_t *zone, unsigned int __nocast flags)
 {
        void    *ptr;
 
index ee7010f085bc1926e33f1ec98abd40c09bde76b7..f4bb78c268c0948e64b7d30122825e7adc7edd8b 100644 (file)
@@ -81,9 +81,9 @@ typedef unsigned long xfs_pflags_t;
        *(NSTATEP) = *(OSTATEP);        \
 } while (0)
 
-static __inline unsigned int kmem_flags_convert(gfp_t flags)
+static __inline gfp_t kmem_flags_convert(unsigned int __nocast flags)
 {
-       unsigned int    lflags = __GFP_NOWARN;  /* we'll report problems, if need be */
+       gfp_t lflags = __GFP_NOWARN;    /* we'll report problems, if need be */
 
 #ifdef DEBUG
        if (unlikely(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL))) {
@@ -125,16 +125,16 @@ kmem_zone_destroy(kmem_zone_t *zone)
                BUG();
 }
 
-extern void        *kmem_zone_zalloc(kmem_zone_t *, gfp_t);
-extern void        *kmem_zone_alloc(kmem_zone_t *, gfp_t);
+extern void        *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast);
+extern void        *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast);
 
-extern void        *kmem_alloc(size_t, gfp_t);
-extern void        *kmem_realloc(void *, size_t, size_t, gfp_t);
-extern void        *kmem_zalloc(size_t, gfp_t);
+extern void        *kmem_alloc(size_t, unsigned int __nocast);
+extern void        *kmem_realloc(void *, size_t, size_t, unsigned int __nocast);
+extern void        *kmem_zalloc(size_t, unsigned int __nocast);
 extern void         kmem_free(void *, size_t);
 
 typedef struct shrinker *kmem_shaker_t;
-typedef int (*kmem_shake_func_t)(int, unsigned int);
+typedef int (*kmem_shake_func_t)(int, gfp_t);
 
 static __inline kmem_shaker_t
 kmem_shake_register(kmem_shake_func_t sfunc)
@@ -149,7 +149,7 @@ kmem_shake_deregister(kmem_shaker_t shrinker)
 }
 
 static __inline int
-kmem_shake_allow(unsigned int gfp_mask)
+kmem_shake_allow(gfp_t gfp_mask)
 {
        return (gfp_mask & __GFP_WAIT);
 }
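These hunks move the XFS allocation wrappers back to taking the XFS-private KM_* flags as `unsigned int __nocast` rather than gfp_t, and it is kmem_flags_convert() that turns them into real page-allocator flags. A rough sketch of that mapping (simplified: the real function, partly visible above, also warns about unknown flags under DEBUG and consults the process fs-transaction state):

/* Illustrative mapping from KM_* flags to gfp_t; simplified sketch only. */
static inline gfp_t km_flags_to_gfp(unsigned int km_flags)
{
        gfp_t lflags = __GFP_NOWARN;    /* XFS reports allocation problems itself */

        if (km_flags & KM_NOSLEEP)
                lflags |= GFP_ATOMIC;
        else {
                lflags |= GFP_KERNEL;
                if (km_flags & KM_NOFS)
                        lflags &= ~__GFP_FS;
        }
        return lflags;
}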
index c6c077978fe38bbc662e11de0d40cf0846a5dcfc..7aa398724706a9d1d8d6e2dada0fd7126acae9d4 100644 (file)
@@ -1296,7 +1296,7 @@ linvfs_invalidate_page(
 STATIC int
 linvfs_release_page(
        struct page             *page,
-       int                     gfp_mask)
+       gfp_t                   gfp_mask)
 {
        struct inode            *inode = page->mapping->host;
        int                     dirty, delalloc, unmapped, unwritten;
index e82cf72ac599a55aad2539c6fe0158f2788b15d0..ba4767c04adfbf0fc3895a04086370f9f35b4d2b 100644 (file)
@@ -64,7 +64,7 @@
 
 STATIC kmem_cache_t *pagebuf_zone;
 STATIC kmem_shaker_t pagebuf_shake;
-STATIC int xfsbufd_wakeup(int, unsigned int);
+STATIC int xfsbufd_wakeup(int, gfp_t);
 STATIC void pagebuf_delwri_queue(xfs_buf_t *, int);
 
 STATIC struct workqueue_struct *xfslogd_workqueue;
@@ -383,7 +383,7 @@ _pagebuf_lookup_pages(
        size_t                  blocksize = bp->pb_target->pbr_bsize;
        size_t                  size = bp->pb_count_desired;
        size_t                  nbytes, offset;
-       int                     gfp_mask = pb_to_gfp(flags);
+       gfp_t                   gfp_mask = pb_to_gfp(flags);
        unsigned short          page_count, i;
        pgoff_t                 first;
        loff_t                  end;
@@ -1749,8 +1749,8 @@ STATIC int xfsbufd_force_sleep;
 
 STATIC int
 xfsbufd_wakeup(
-       int                     priority,
-       unsigned int            mask)
+       int             priority,
+       gfp_t           mask)
 {
        if (xfsbufd_force_sleep)
                return 0;
index c675f282d6ad9d8090ddefeb42d25343635b161a..680f7ecbb28f7df56d7e85a2089f45d454a4f751 100644 (file)
@@ -31,7 +31,7 @@
 #else  /* no PCI - no IOMMU. */
 
 void *dma_alloc_coherent(struct device *dev, size_t size,
-                        dma_addr_t *dma_handle, int gfp);
+                        dma_addr_t *dma_handle, gfp_t gfp);
 int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
               enum dma_data_direction direction);
 
index 0e9b7e18af05650cd80aca9eec46b4791ed8a686..002227924b9fa09c48a80b909ecb7329d219cce2 100644 (file)
 #error You must include hardware.h not this file
 #endif /* __ASM_ARCH_HARDWARE_H */
 
+/* Chip selects */
+#define AAEC_CS0       0x00000000
+#define AAEC_CS1       0x10000000
+#define AAEC_CS2       0x20000000
+#define AAEC_CS3       0x30000000
+
+/* Flash */
+#define AAEC_FLASH_BASE        AAEC_CS0
+#define AAEC_FLASH_SIZE        SZ_64M
+
 /* Interrupt controller */
 #define IRQ_BASE       __REG(0x80000500)
 #define IRQ_INTSR      __REG(0x80000500)       /* Int Status Register */
 #define POWER_STFCLR   __REG(0x8000041c) /* NbFlg, RSTFlg, PFFlg, CLDFlg Clear */
 #define POWER_CLKSET   __REG(0x80000420) /* Clock Speed Control */
 
+/* GPIO Registers */
+#define AAEC_GPIO_PHYS 0x80000e00
+
+#define AAEC_GPIO_PADR         __REG(AAEC_GPIO_PHYS + 0x00)
+#define AAEC_GPIO_PBDR         __REG(AAEC_GPIO_PHYS + 0x04)
+#define AAEC_GPIO_PCDR         __REG(AAEC_GPIO_PHYS + 0x08)
+#define AAEC_GPIO_PDDR         __REG(AAEC_GPIO_PHYS + 0x0c)
+#define AAEC_GPIO_PADDR                __REG(AAEC_GPIO_PHYS + 0x10)
+#define AAEC_GPIO_PBDDR                __REG(AAEC_GPIO_PHYS + 0x14)
+#define AAEC_GPIO_PCDDR                __REG(AAEC_GPIO_PHYS + 0x18)
+#define AAEC_GPIO_PDDDR                __REG(AAEC_GPIO_PHYS + 0x1c)
+#define AAEC_GPIO_PEDR         __REG(AAEC_GPIO_PHYS + 0x20)
+#define AAEC_GPIO_PEDDR                __REG(AAEC_GPIO_PHYS + 0x24)
+#define AAEC_GPIO_KSCAN                __REG(AAEC_GPIO_PHYS + 0x28)
+#define AAEC_GPIO_PINMUX       __REG(AAEC_GPIO_PHYS + 0x2c)
+#define AAEC_GPIO_PFDR         __REG(AAEC_GPIO_PHYS + 0x30)
+#define AAEC_GPIO_PFDDR                __REG(AAEC_GPIO_PHYS + 0x34)
+#define AAEC_GPIO_PGDR         __REG(AAEC_GPIO_PHYS + 0x38)
+#define AAEC_GPIO_PGDDR                __REG(AAEC_GPIO_PHYS + 0x3c)
+#define AAEC_GPIO_PHDR         __REG(AAEC_GPIO_PHYS + 0x40)
+#define AAEC_GPIO_PHDDR                __REG(AAEC_GPIO_PHYS + 0x44)
+#define AAEC_GPIO_RAZ          __REG(AAEC_GPIO_PHYS + 0x48)
+#define AAEC_GPIO_INTTYPE1     __REG(AAEC_GPIO_PHYS + 0x4c)
+#define AAEC_GPIO_INTTYPE2     __REG(AAEC_GPIO_PHYS + 0x50)
+#define AAEC_GPIO_FEOI         __REG(AAEC_GPIO_PHYS + 0x54)
+#define AAEC_GPIO_INTEN                __REG(AAEC_GPIO_PHYS + 0x58)
+#define AAEC_GPIO_INTSTATUS    __REG(AAEC_GPIO_PHYS + 0x5c)
+#define AAEC_GPIO_RAWINTSTATUS __REG(AAEC_GPIO_PHYS + 0x60)
+#define AAEC_GPIO_DB           __REG(AAEC_GPIO_PHYS + 0x64)
+#define AAEC_GPIO_PAPINDR      __REG(AAEC_GPIO_PHYS + 0x68)
+#define AAEC_GPIO_PBPINDR      __REG(AAEC_GPIO_PHYS + 0x6c)
+#define AAEC_GPIO_PCPINDR      __REG(AAEC_GPIO_PHYS + 0x70)
+#define AAEC_GPIO_PDPINDR      __REG(AAEC_GPIO_PHYS + 0x74)
+#define AAEC_GPIO_PEPINDR      __REG(AAEC_GPIO_PHYS + 0x78)
+#define AAEC_GPIO_PFPINDR      __REG(AAEC_GPIO_PHYS + 0x7c)
+#define AAEC_GPIO_PGPINDR      __REG(AAEC_GPIO_PHYS + 0x80)
+#define AAEC_GPIO_PHPINDR      __REG(AAEC_GPIO_PHYS + 0x84)
+
+#define AAEC_GPIO_PINMUX_PE0CON                (1 << 0)
+#define AAEC_GPIO_PINMUX_PD0CON                (1 << 1)
+#define AAEC_GPIO_PINMUX_CODECON       (1 << 2)
+#define AAEC_GPIO_PINMUX_UART3CON      (1 << 3)
+
+/* LCD Controller */
+#define AAEC_CLCD_PHYS 0x80003000
+
 #endif /* __ARM_ARCH_AAEC2000_H */
diff --git a/include/asm-arm/arch-aaec2000/aaed2000.h b/include/asm-arm/arch-aaec2000/aaed2000.h
new file mode 100644 (file)
index 0000000..bc76d2b
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ *  linux/include/asm-arm/arch-aaec2000/aaed2000.h
+ *
+ *  AAED-2000 specific bits definition
+ *
+ *  Copyright (c) 2005 Nicolas Bellido Y Ortega
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARCH_AAED2000_H
+#define __ASM_ARCH_AAED2000_H
+
+/* External GPIOs. */
+
+#define EXT_GPIO_PBASE AAEC_CS3
+#define EXT_GPIO_VBASE 0xf8100000
+#define EXT_GPIO_LENGTH        0x00001000
+
+#define __ext_gpio_p2v(x)      ((x) - EXT_GPIO_PBASE + EXT_GPIO_VBASE)
+#define __ext_gpio_v2p(x)      ((x) + EXT_GPIO_PBASE - EXT_GPIO_VBASE)
+
+#define __EXT_GPIO_REG(x)      (*((volatile u32 *)__ext_gpio_p2v(x)))
+#define __EXT_GPIO_PREG(x)     (__ext_gpio_v2p((u32)&(x)))
+
+#define AAED_EXT_GPIO  __EXT_GPIO_REG(EXT_GPIO_PBASE)
+
+#define AAED_EGPIO_KBD_SCAN    0x00003fff /* Keyboard scan data */
+#define AAED_EGPIO_PWR_INT     0x00008fff /* Smart battery charger interrupt */
+#define AAED_EGPIO_SWITCHED    0x000f0000 /* DIP Switches */
+#define AAED_EGPIO_USB_VBUS    0x00400000 /* USB Vbus sense */
+#define AAED_EGPIO_LCD_PWR_EN  0x02000000 /* LCD and backlight PWR enable */
+#define AAED_EGPIO_nLED0       0x20000000 /* LED 0 */
+#define AAED_EGPIO_nLED1       0x20000000 /* LED 1 */
+#define AAED_EGPIO_nLED2       0x20000000 /* LED 2 */
+
+
+#endif /* __ARM_ARCH_AAED2000_H */
index 4c37219e030e6483296a514a54e3aea8ce43ca7b..153506fd06ed0a78caa630383952f6817d54d153 100644 (file)
@@ -11,7 +11,8 @@
 #ifndef __ASM_ARCH_HARDWARE_H
 #define __ASM_ARCH_HARDWARE_H
 
-#include <linux/config.h>
+#include <asm/sizes.h>
+#include <asm/arch/aaec2000.h>
 
 /* The kernel is loaded at physical address 0xf8000000.
  * We map the IO space a bit after
index c58a8d10425a3f8304cb9fbe854cde84b52cd2d6..8d67907fd4f05815733d9bd93fb5ae54f9399f3b 100644 (file)
@@ -6,6 +6,8 @@
 #ifndef __ASM_ARM_ARCH_IO_H
 #define __ASM_ARM_ARCH_IO_H
 
+#include <asm/hardware.h>
+
 #define IO_SPACE_LIMIT 0xffffffff
 
 /*
index f0113bc75630bd424ecbf61cb269b23059e380fd..89a33287f4fe6159aa56bbf54ecbc771ea7934b5 100644 (file)
@@ -10,6 +10,8 @@
 #ifndef __ASM_ARM_ARCH_IO_H
 #define __ASM_ARM_ARCH_IO_H
 
+#include <asm/hardware.h>
+
 #define IO_SPACE_LIMIT 0xffffffff
 
 /*
index 1386871e1a5afa61505ebdf14b3632e11dadbeda..f864c367c934426c96ac45bd77a18c17f2b4efa1 100644 (file)
 #define CEIVA_PB0_BLK_BTN      (1<<0)
 #endif // #if defined (CONFIG_ARCH_CEIVA)
 
+#if defined (CONFIG_MACH_MP1000)
+/* NOR FLASH */
+#define MP1000_NIO_BASE                0xf9000000      /* virtual */
+#define MP1000_NIO_START       CS0_PHYS_BASE   /* physical */
+#define MP1000_NIO_SIZE                0x00400000
+
+/* DSP Interface */
+#define MP1000_DSP_BASE                0xfa000000      /* virtual */
+#define MP1000_DSP_START       CS1_PHYS_BASE   /* physical */
+#define MP1000_DSP_SIZE                0x00100000
+
+/* LCD, DAA/DSP, RTC, DAA RW Reg all in CS2 */
+#define MP1000_LIO_BASE                0xfb000000      /* virtual */
+#define MP1000_LIO_START       CS2_PHYS_BASE   /* physical */
+#define MP1000_LIO_SIZE                0x00100000
+
+/* NAND FLASH */
+#define MP1000_FIO_BASE                0xfc000000      /* virtual */
+#define MP1000_FIO_START       CS3_PHYS_BASE   /* physical */
+#define MP1000_FIO_SIZE                0x00800000
+
+/* Ethernet */
+#define MP1000_EIO_BASE                0xfd000000      /* virtual      */
+#define MP1000_EIO_START       CS4_PHYS_BASE   /* physical     */
+#define MP1000_EIO_SIZE                0x00100000
+
+#define        MP1000_LCD_OFFSET       0x00000000      /* LCD offset in CS2 */
+#define        MP1000_DDD_OFFSET       0x00001000      /* DAA/DAI/DSP sft reset offst*/
+#define        MP1000_RTC_OFFSET       0x00002000      /* RTC offset in CS2 */
+#define        MP1000_DAA_OFFSET       0x00003000      /* DAA RW reg offset in CS2 */
+
+/* IDE */
+#define MP1000_IDE_BASE                0xfe000000      /* virtual */
+#define MP1000_IDE_START       CS5_PHYS_BASE      /* physical */
+#define MP1000_IDE_SIZE                0x00100000      /* actually it's only 0x1000 */
+
+#define IRQ_HARDDISK IRQ_EINT2
+
+/*
+ * IDE registers definition
+ */
+
+#define IDE_CONTROL_BASE               (MP1000_IDE_BASE + 0x1000)
+#define IDE_BASE_OFF                   (MP1000_IDE_BASE)
+
+#define IDE_WRITE_DEVICE_DATA          (IDE_BASE_OFF + 0x0)
+#define IDE_FEATURES_REGISTER          (IDE_BASE_OFF + 0x2)
+#define IDE_SECTOR_COUNT_REGISTER      (IDE_BASE_OFF + 0x4)
+#define IDE_SECTOR_NUMBER_REGISTER     (IDE_BASE_OFF + 0x6)
+#define IDE_CYLINDER_LOW_REGISTER      (IDE_BASE_OFF + 0x8)
+#define IDE_CYLINDER_HIGH_REGISTER     (IDE_BASE_OFF + 0xa)
+#define IDE_DEVICE_HEAD_REGISTER       (IDE_BASE_OFF + 0xc)
+#define IDE_COMMAND_DATA_REGISTER      (IDE_BASE_OFF + 0xe)
+#define IDE_DEVICE_CONTROL_REGISTER    (IDE_CONTROL_BASE + 0xc)
+
+#define IDE_IRQ                      IRQ_EINT2
+
+
+#define RTC_PORT(x)    (MP1000_LIO_BASE+0x2000 + (x*2))
+#define RTC_ALWAYS_BCD 0
+
+/*
+// Definitions of the bit fields in the HwPortA register for the
+// MP1000 board.
+*/
+#define HwPortAKeyboardRow1                     0x00000001
+#define HwPortAKeyboardRow2                     0x00000002
+#define HwPortAKeyboardRow3                     0x00000004
+#define HwPortAKeyboardRow4                     0x00000008
+#define HwPortAKeyboardRow5                     0x00000010
+#define HwPortAKeyboardRow6                     0x00000020
+#define HwPortALCDEnable                        0x00000040
+#define HwPortAOffhook                         0x00000080
+
+/*
+// Definitions of the bit fields in the HwPortB register for the
+// MP1000 board.
+*/
+#define HwPortBL3Mode                           0x00000001
+#define HwPortBL3Clk                            0x00000002
+#define HwPortBSClk                             0x00000001
+#define HwPortBSData                            0x00000002
+#define HwPortBL3Data                           0x00000004
+#define HwPortBMute                             0x00000008
+#define HwPortBQD0                              0x00000010
+#define HwPortBQD1                              0x00000020
+#define HwPortBQD2                              0x00000040
+#define HwPortBQD3                              0x00000080
+
+/*
+// Definitions of the bit fields in the HwPortD register for the
+// MP1000 board.
+*/
+#define HwPortDLED1                             0x00000001
+#define HwPortDLED2                             0x00000002
+#define HwPortDLED3                             0x00000004
+#define HwPortDLED4                             0x00000008
+#define HwPortDLED5                             0x00000010
+#define HwPortDEECS                             0x00000020
+#define HwPortBRTS                              0x00000040
+#define HwPortBRI                               0x00000080
+
+
+/*
+// Definitions of the bit fields in the HwPortE register for the
+// MP1000 board.
+*/
+
+#define HwPortECLE                              0x00000001
+#define HwPortESepromDOut                       0x00000001
+#define HwPortEALE                              0x00000002
+#define HwPortESepromDIn                        0x00000002
+#define HwPortENANDCS                           0x00000004
+#define HwPortESepromCLK                        0x00000004
+
+#endif // #if defined (CONFIG_MACH_MP1000)
+
 #endif
index 14d7e8da5453ff6ce7ed714e8405325ef30b9ebe..62613b0e2d9604dd0f6a919d6e7e47fa2aa05401 100644 (file)
@@ -20,6 +20,8 @@
 #ifndef __ASM_ARM_ARCH_IO_H
 #define __ASM_ARM_ARCH_IO_H
 
+#include <asm/hardware.h>
+
 #define IO_SPACE_LIMIT 0xffffffff
 
 #define __io(a)                        ((void __iomem *)(a))
diff --git a/include/asm-arm/arch-clps711x/mp1000-seprom.h b/include/asm-arm/arch-clps711x/mp1000-seprom.h
new file mode 100644 (file)
index 0000000..3e5566c
--- /dev/null
@@ -0,0 +1,77 @@
+#ifndef MP1000_SEPROM_H
+#define MP1000_SEPROM_H
+
+/*
+ * mp1000-seprom.h
+ *
+ *
+ *  This file contains the Serial EEPROM definitions for the MP1000 board
+ *
+ *  Copyright (C) 2005 Comdial Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#define COMMAND_ERASE          (0x1C0)
+#define COMMAND_ERASE_ALL      (0x120)
+#define COMMAND_WRITE_DISABLE  (0x100)
+#define COMMAND_WRITE_ENABLE   (0x130)
+#define COMMAND_READ           (0x180)
+#define COMMAND_WRITE          (0x140)
+#define COMMAND_WRITE_ALL      (0x110)
+
+//
+// Serial EEPROM data format
+//
+
+#define PACKED __attribute__ ((packed))
+
+typedef struct _EEPROM {
+       union {
+               unsigned char eprom_byte_data[128];
+               unsigned short eprom_short_data[64];
+               struct {
+                       unsigned char version PACKED;   // EEPROM Version "1" for now
+                       unsigned char box_id PACKED;    // Box ID (Standalone, SOHO, embedded, etc)
+                       unsigned char major_hw_version PACKED;  // Major Hardware version (Hex)
+                       unsigned char minor_hw_version PACKED;  // Minor Hardware Version (Hex)
+                       unsigned char mfg_id[3] PACKED; // Manufacturer ID (3 character Alphabetic)
+                       unsigned char mfg_serial_number[10] PACKED;     // Manufacturer Serial number
+                       unsigned char mfg_date[3] PACKED;       // Date of Mfg (Formatted YY:MM:DD)
+                       unsigned char country PACKED;   // Country of deployment
+                       unsigned char mac_Address[6] PACKED;    // MAC Address
+                       unsigned char oem_string[20] PACKED;    // OEM ID string
+                       unsigned short feature_bits1 PACKED;    // Feature Bits 1
+                       unsigned short feature_bits2 PACKED;    // Feature Bits 2
+                       unsigned char filler[75] PACKED;                // Unused/Undefined     "0" initialized
+                       unsigned short checksum PACKED;         // byte accumulated short checksum
+               } eprom_struct;
+       } variant;
+}  eeprom_struct;
+
+/* These settings must be mutually exclusive */
+#define        FEATURE_BITS1_DRAMSIZE_16MEG    0x0001  /* 0 signifies 4 MEG system */
+#define        FEATURE_BITS1_DRAMSIZE_8MEG     0x0002  /* 1 in bit 1 = 8MEG system */
+#define        FEATURE_BITS1_DRAMSIZE_64MEG    0x0004  /* 1 in bit 2 = 64MEG system */
+
+#define FEATURE_BITS1_CPUIS90MEG     0x0010
+
+extern void seprom_init(void);
+extern eeprom_struct* get_seprom_ptr(void);
+extern unsigned char* get_eeprom_mac_address(void);
+
+#endif /* MP1000_SEPROM_H */
+
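The header above only declares the EEPROM layout and three access helpers, so a board file would typically read the part once at init time and then pull individual fields out of the cached structure. A small usage sketch based solely on those prototypes (the include path and the printk consumer are assumptions for illustration):

#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/arch/mp1000-seprom.h>     /* assumed include path for this header */

static void __init mp1000_report_seprom(void)
{
        eeprom_struct *ee;
        unsigned char *mac;

        seprom_init();                   /* read the serial EEPROM into RAM     */
        ee  = get_seprom_ptr();          /* cached copy of the whole structure  */
        mac = get_eeprom_mac_address();  /* 6-byte MAC address field            */

        printk(KERN_INFO "MP1000: EEPROM v%u, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
               ee->variant.eprom_struct.version,
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
}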
index 70576b17f9224a15384cabe7217d8576c1374bbb..776f9d377057d002082257902721e3f3d52f6cf0 100644 (file)
@@ -14,6 +14,8 @@
 #ifndef __ASM_ARM_ARCH_IO_H
 #define __ASM_ARM_ARCH_IO_H
 
+#include <asm/hardware.h>
+
 #define IO_SPACE_LIMIT 0xffff
 
 /*
index 1f0afa2576218053b007813c7c6e9517fb89b198..9fe100c9d6be44f6675909a3da39fab51019c4e3 100644 (file)
@@ -20,6 +20,8 @@
 #ifndef __ASM_ARM_ARCH_IO_H
 #define __ASM_ARM_ARCH_IO_H
 
+#include <asm/hardware.h>
+
 #define IO_SPACE_LIMIT 0xffff
 
 
index 68814828c9a74f24cf752ac3064663cb8e5569c7..d3ccfd8172b716d1070c76273bfa11bef9580ca5 100644 (file)
@@ -14,7 +14,7 @@
 #ifndef __ASM_ARM_ARCH_IO_H
 #define __ASM_ARM_ARCH_IO_H
 
-#include <asm/arch/hardware.h>
+#include <asm/hardware.h>
 
 #define IO_SPACE_LIMIT 0xffffffff
 
index 28a4cca6a4cb30b2b730735e7ca1dab3e622158f..b191cdd05576affa5236b7cfb394bbe452e2e25a 100644 (file)
@@ -20,6 +20,8 @@
 #ifndef __ASM_ARM_ARCH_IO_H
 #define __ASM_ARM_ARCH_IO_H
 
+#include <asm/hardware.h>
+
 #define IO_SPACE_LIMIT 0xffffffff
 
 #define __io(a)                ((void __iomem *)(a))
index be2716eeaa026153bb8eed94ac460198d3558a94..6f0947bc500d1de07e441afebece4f86dfd9b201 100644 (file)
 #define IO_SIZE                        0x0B000000                 // How much?
 #define IO_START               INTEGRATOR_HDR_BASE        // PA of IO
 
-/*
- * Similar to above, but for PCI addresses (memory, IO, Config and the
- * V3 chip itself).  WARNING: this has to mirror definitions in platform.h
- */
-#define PCI_MEMORY_VADDR        0xe8000000
-#define PCI_CONFIG_VADDR        0xec000000
-#define PCI_V3_VADDR            0xed000000
-#define PCI_IO_VADDR            0xee000000
-
 #define PCIO_BASE              PCI_IO_VADDR
 #define PCIMEM_BASE            PCI_MEMORY_VADDR
 
index fbea8be67d265ea470acdd0532c17ca38f8699da..31f2deab51b0d54d3cd8f891b0a56da01f0d3e34 100644 (file)
 
 #define IO_SPACE_LIMIT 0xffff
 
+/*
+ * WARNING: this has to mirror definitions in platform.h
+ */
+#define PCI_MEMORY_VADDR        0xe8000000
+#define PCI_CONFIG_VADDR        0xec000000
+#define PCI_V3_VADDR            0xed000000
+#define PCI_IO_VADDR            0xee000000
+
 #define __io(a)                        ((void __iomem *)(PCI_IO_VADDR + (a)))
 #define __mem_pci(a)           (a)
 #define __mem_isa(a)           ((a) + PCI_MEMORY_VADDR)
index 2761dfd8694dc0c84ec02fb89b34dafb1c1c3c28..f39046a6ab141decf904c44316ce10000be2f37f 100644 (file)
@@ -11,6 +11,8 @@
 #ifndef __ASM_ARM_ARCH_IO_H
 #define __ASM_ARM_ARCH_IO_H
 
+#include <asm/hardware.h>
+
 #define IO_SPACE_LIMIT 0xffffffff
 
 #define __io(p)                        ((void __iomem *)(p))
index 3241cd6f0778e0384e188a945a8747f705f68750..7fbcdf9931eecfd7ac2d7e088a5fd56befee8616 100644 (file)
@@ -15,6 +15,8 @@
 #ifndef __ASM_ARM_ARCH_IO_H
 #define __ASM_ARM_ARCH_IO_H
 
+#include <asm/hardware.h>
+
 #define IO_SPACE_LIMIT         0xffffffff
 #define __mem_pci(a)           (a)
 
index 32aece069869b52c0aede2617fdbec8ee898fda1..def089d693d2534bbee460abe0ac3275b9a9c53e 100644 (file)
 #define        WDT_RESET_ENABLE                0x01000000
 
 
+/*
+ * MSF registers.  The IXP2400 and IXP2800 have somewhat different MSF
+ * units, but the registers that differ between the two don't overlap,
+ * so we can have one register list for both.
+ */
+#define IXP2000_MSF_REG(x)                     ((volatile unsigned long*)(IXP2000_MSF_VIRT_BASE + (x)))
+#define IXP2000_MSF_RX_CONTROL                 IXP2000_MSF_REG(0x0000)
+#define IXP2000_MSF_TX_CONTROL                 IXP2000_MSF_REG(0x0004)
+#define IXP2000_MSF_INTERRUPT_STATUS           IXP2000_MSF_REG(0x0008)
+#define IXP2000_MSF_INTERRUPT_ENABLE           IXP2000_MSF_REG(0x000c)
+#define IXP2000_MSF_CSIX_TYPE_MAP              IXP2000_MSF_REG(0x0010)
+#define IXP2000_MSF_FC_EGRESS_STATUS           IXP2000_MSF_REG(0x0014)
+#define IXP2000_MSF_FC_INGRESS_STATUS          IXP2000_MSF_REG(0x0018)
+#define IXP2000_MSF_HWM_CONTROL                        IXP2000_MSF_REG(0x0024)
+#define IXP2000_MSF_FC_STATUS_OVERRIDE         IXP2000_MSF_REG(0x0028)
+#define IXP2000_MSF_CLOCK_CONTROL              IXP2000_MSF_REG(0x002c)
+#define IXP2000_MSF_RX_PORT_MAP                        IXP2000_MSF_REG(0x0040)
+#define IXP2000_MSF_RBUF_ELEMENT_DONE          IXP2000_MSF_REG(0x0044)
+#define IXP2000_MSF_RX_MPHY_POLL_LIMIT         IXP2000_MSF_REG(0x0048)
+#define IXP2000_MSF_RX_CALENDAR_LENGTH         IXP2000_MSF_REG(0x0048)
+#define IXP2000_MSF_RX_THREAD_FREELIST_TIMEOUT_0       IXP2000_MSF_REG(0x0050)
+#define IXP2000_MSF_RX_THREAD_FREELIST_TIMEOUT_1       IXP2000_MSF_REG(0x0054)
+#define IXP2000_MSF_RX_THREAD_FREELIST_TIMEOUT_2       IXP2000_MSF_REG(0x0058)
+#define IXP2000_MSF_TX_SEQUENCE_0              IXP2000_MSF_REG(0x0060)
+#define IXP2000_MSF_TX_SEQUENCE_1              IXP2000_MSF_REG(0x0064)
+#define IXP2000_MSF_TX_SEQUENCE_2              IXP2000_MSF_REG(0x0068)
+#define IXP2000_MSF_TX_MPHY_POLL_LIMIT         IXP2000_MSF_REG(0x0070)
+#define IXP2000_MSF_TX_CALENDAR_LENGTH         IXP2000_MSF_REG(0x0070)
+#define IXP2000_MSF_RX_UP_CONTROL_0            IXP2000_MSF_REG(0x0080)
+#define IXP2000_MSF_RX_UP_CONTROL_1            IXP2000_MSF_REG(0x0084)
+#define IXP2000_MSF_RX_UP_CONTROL_2            IXP2000_MSF_REG(0x0088)
+#define IXP2000_MSF_RX_UP_CONTROL_3            IXP2000_MSF_REG(0x008c)
+#define IXP2000_MSF_TX_UP_CONTROL_0            IXP2000_MSF_REG(0x0090)
+#define IXP2000_MSF_TX_UP_CONTROL_1            IXP2000_MSF_REG(0x0094)
+#define IXP2000_MSF_TX_UP_CONTROL_2            IXP2000_MSF_REG(0x0098)
+#define IXP2000_MSF_TX_UP_CONTROL_3            IXP2000_MSF_REG(0x009c)
+#define IXP2000_MSF_TRAIN_DATA                 IXP2000_MSF_REG(0x00a0)
+#define IXP2000_MSF_TRAIN_CALENDAR             IXP2000_MSF_REG(0x00a4)
+#define IXP2000_MSF_TRAIN_FLOW_CONTROL         IXP2000_MSF_REG(0x00a8)
+#define IXP2000_MSF_TX_CALENDAR_0              IXP2000_MSF_REG(0x1000)
+#define IXP2000_MSF_RX_PORT_CALENDAR_STATUS    IXP2000_MSF_REG(0x1400)
+
+
 #endif                         /* _IXP2000_H_ */
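An access sketch, illustrative only and not taken from this diff: every IXP2000_MSF_* name above expands, via IXP2000_MSF_REG(), to a volatile pointer into the statically mapped MSF window, so driver code touches the hardware by plain dereference.

	/* mask a set of MSF interrupt sources; read-modify-write of the enable register */
	static void ixp2000_msf_mask_irqs_example(unsigned long bits)
	{
		unsigned long enable = *IXP2000_MSF_INTERRUPT_ENABLE;

		*IXP2000_MSF_INTERRUPT_ENABLE = enable & ~bits;
	}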
index fc012a39e2cb7f8e465ff54d9f672ff1bea1aac8..cab8ad0adf09e3dcb60176b93aa1ef7c2ff7c720 100644 (file)
@@ -10,7 +10,7 @@
 #ifndef __ASM_ARM_ARCH_IO_H
 #define __ASM_ARM_ARCH_IO_H
 
-#include <asm/arch/hardware.h>
+#include <asm/hardware.h>
 
 #define IO_SPACE_LIMIT 0xffffffff
 
index c13bdd9add926453f510c3ee1a323a7559e34b15..bbcd4335f44196ea518d6c484a8c0254531cd91b 100644 (file)
@@ -11,6 +11,8 @@
 #ifndef __ASM_ARCH_IO_H
 #define __ASM_ARCH_IO_H
 
+#include <asm/hardware.h>
+
 #define IO_SPACE_LIMIT 0xffffffff
 
 /* No ISA or PCI bus on this machine. */
index 11fbf629bf755410a10efee0b52f1711b584b931..3d5bcd54508215ac5036cc7038743d933c35ae19 100644 (file)
@@ -34,6 +34,8 @@
 #ifndef __ASM_ARM_ARCH_IO_H
 #define __ASM_ARM_ARCH_IO_H
 
+#include <asm/hardware.h>
+
 #define IO_SPACE_LIMIT 0xffffffff
 
 /*
index cf35721cfa453f4a853520fce7e42b3e6298a5f3..3e70bd95472ca8baa912995791b62c6e3bd8e545 100644 (file)
 
 #ifndef __ASSEMBLY__
 
-# define __REG(x)      (*((volatile unsigned long *)io_p2v(x)))
+# define __REG(x)      (*((volatile u32 *)io_p2v(x)))
 
 /* With indexed regs we don't want to feed the index through io_p2v()
    especially if it is a variable, otherwise horrible code will result. */
 # define __REG2(x,y)   \
-       (*(volatile unsigned long *)((unsigned long)&__REG(x) + (y)))
+       (*(volatile u32 *)((u32)&__REG(x) + (y)))
 
 # define __PREG(x)     (io_v2p((u32)&(x)))
 
index c3bdbe44e21f020080795f6ab45a166aeafc8af2..eb2dd58d397fa187690093a618afcf49c83f7221 100644 (file)
@@ -6,6 +6,8 @@
 #ifndef __ASM_ARM_ARCH_IO_H
 #define __ASM_ARM_ARCH_IO_H
 
+#include <asm/hardware.h>
+
 #define IO_SPACE_LIMIT 0xffffffff
 
 /*
diff --git a/include/asm-arm/arch-pxa/irda.h b/include/asm-arm/arch-pxa/irda.h
new file mode 100644 (file)
index 0000000..748406f
--- /dev/null
@@ -0,0 +1,17 @@
+#ifndef ASMARM_ARCH_IRDA_H
+#define ASMARM_ARCH_IRDA_H
+
+/* board specific transceiver capabilities */
+
+#define IR_OFF         1
+#define IR_SIRMODE     2
+#define IR_FIRMODE     4
+
+struct pxaficp_platform_data {
+       int transceiver_cap;
+       void (*transceiver_mode)(struct device *dev, int mode);
+};
+
+extern void pxa_set_ficp_info(struct pxaficp_platform_data *info);
+
+#endif
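Board-side sketch (an assumption, not taken from this diff): a PXA machine file would describe its IrDA transceiver with this structure and register it during machine init. The mode-switching body below is a placeholder for board-specific GPIO handling.

	static void example_irda_transceiver_mode(struct device *dev, int mode)
	{
		if (mode & IR_OFF) {
			/* board-specific: power the transceiver down */
		} else if (mode & IR_FIRMODE) {
			/* board-specific: select the fast (FIR) receive path */
		} else {
			/* IR_SIRMODE: select the slow (SIR) receive path */
		}
	}

	static struct pxaficp_platform_data example_ficp_info = {
		.transceiver_cap  = IR_SIRMODE | IR_FIRMODE,
		.transceiver_mode = example_irda_transceiver_mode,
	};

	/* from the board's init_machine hook: */
	pxa_set_ficp_info(&example_ficp_info);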
index 3af7165ab0d738702eff0d17e43f3348d5158750..a75a2470f4f59a1e2f23bc5d9e8a5b7707b016f9 100644 (file)
 #define STDLL          __REG(0x40700000)  /* Divisor Latch Low Register (DLAB = 1) (read/write) */
 #define STDLH          __REG(0x40700004)  /* Divisor Latch High Register (DLAB = 1) (read/write) */
 
+/* Hardware UART (HWUART) */
+#define HWUART         HWRBR
+#define HWRBR          __REG(0x41600000)  /* Receive Buffer Register (read only) */
+#define HWTHR          __REG(0x41600000)  /* Transmit Holding Register (write only) */
+#define HWIER          __REG(0x41600004)  /* Interrupt Enable Register (read/write) */
+#define HWIIR          __REG(0x41600008)  /* Interrupt ID Register (read only) */
+#define HWFCR          __REG(0x41600008)  /* FIFO Control Register (write only) */
+#define HWLCR          __REG(0x4160000C)  /* Line Control Register (read/write) */
+#define HWMCR          __REG(0x41600010)  /* Modem Control Register (read/write) */
+#define HWLSR          __REG(0x41600014)  /* Line Status Register (read only) */
+#define HWMSR          __REG(0x41600018)  /* Modem Status Register (read only) */
+#define HWSPR          __REG(0x4160001C)  /* Scratch Pad Register (read/write) */
+#define HWISR          __REG(0x41600020)  /* Infrared Selection Register (read/write) */
+#define HWFOR          __REG(0x41600024)  /* Receive FIFO Occupancy Register (read only) */
+#define HWABR          __REG(0x41600028)  /* Auto-Baud Control Register (read/write) */
+#define HWACR          __REG(0x4160002C)  /* Auto-Baud Count Register (read only) */
+#define HWDLL          __REG(0x41600000)  /* Divisor Latch Low Register (DLAB = 1) (read/write) */
+#define HWDLH          __REG(0x41600004)  /* Divisor Latch High Register (DLAB = 1) (read/write) */
+
 #define IER_DMAE       (1 << 7)        /* DMA Requests Enable */
 #define IER_UUE                (1 << 6)        /* UART Unit Enable */
 #define IER_NRZE       (1 << 5)        /* NRZ coding Enable */
 #define ICCR0_LBM      (1 << 1)        /* Loopback mode */
 #define ICCR0_ITR      (1 << 0)        /* IrDA transmission */
 
-#ifdef CONFIG_PXA27x
 #define ICCR2_RXP       (1 << 3)       /* Receive Pin Polarity select */
 #define ICCR2_TXP       (1 << 2)       /* Transmit Pin Polarity select */
 #define ICCR2_TRIG     (3 << 0)        /* Receive FIFO Trigger threshold */
 #define ICCR2_TRIG_8    (0 << 0)       /*      >= 8 bytes */
 #define ICCR2_TRIG_16   (1 << 0)       /*      >= 16 bytes */
 #define ICCR2_TRIG_32   (2 << 0)       /*      >= 32 bytes */
-#endif
 
 #ifdef CONFIG_PXA27x
 #define ICSR0_EOC      (1 << 6)        /* DMA End of Descriptor Chain */
 #define GPIO40_FFDTR           40      /* FFUART data terminal Ready */
 #define GPIO41_FFRTS           41      /* FFUART request to send */
 #define GPIO42_BTRXD           42      /* BTUART receive data */
+#define GPIO42_HWRXD           42      /* HWUART receive data */
 #define GPIO43_BTTXD           43      /* BTUART transmit data */
+#define GPIO43_HWTXD           43      /* HWUART transmit data */
 #define GPIO44_BTCTS           44      /* BTUART clear to send */
+#define GPIO44_HWCTS           44      /* HWUART clear to send */
 #define GPIO45_BTRTS           45      /* BTUART request to send */
+#define GPIO45_HWRTS           45      /* HWUART request to send */
 #define GPIO45_AC97_SYSCLK     45      /* AC97 System Clock */
 #define GPIO46_ICPRXD          46      /* ICP receive data */
 #define GPIO46_STRXD           46      /* STD_UART receive data */
 #define GPIO40_FFDTR_MD                (40 | GPIO_ALT_FN_2_OUT)
 #define GPIO41_FFRTS_MD                (41 | GPIO_ALT_FN_2_OUT)
 #define GPIO42_BTRXD_MD                (42 | GPIO_ALT_FN_1_IN)
+#define GPIO42_HWRXD_MD                (42 | GPIO_ALT_FN_3_IN)
 #define GPIO43_BTTXD_MD                (43 | GPIO_ALT_FN_2_OUT)
+#define GPIO43_HWTXD_MD                (43 | GPIO_ALT_FN_3_OUT)
 #define GPIO44_BTCTS_MD                (44 | GPIO_ALT_FN_1_IN)
+#define GPIO44_HWCTS_MD                (44 | GPIO_ALT_FN_3_IN)
 #define GPIO45_BTRTS_MD                (45 | GPIO_ALT_FN_2_OUT)
+#define GPIO45_HWRTS_MD                (45 | GPIO_ALT_FN_3_OUT)
 #define GPIO45_SYSCLK_AC97_MD          (45 | GPIO_ALT_FN_1_OUT)
 #define GPIO46_ICPRXD_MD       (46 | GPIO_ALT_FN_1_IN)
 #define GPIO46_STRXD_MD                (46 | GPIO_ALT_FN_2_IN)
 #define GPIO47_ICPTXD_MD       (47 | GPIO_ALT_FN_2_OUT)
 #define GPIO47_STTXD_MD                (47 | GPIO_ALT_FN_1_OUT)
 #define GPIO48_nPOE_MD         (48 | GPIO_ALT_FN_2_OUT)
+#define GPIO48_HWTXD_MD         (48 | GPIO_ALT_FN_1_OUT)
+#define GPIO48_nPOE_MD          (48 | GPIO_ALT_FN_2_OUT)
+#define GPIO49_HWRXD_MD                (49 | GPIO_ALT_FN_1_IN)
 #define GPIO49_nPWE_MD         (49 | GPIO_ALT_FN_2_OUT)
 #define GPIO50_nPIOR_MD                (50 | GPIO_ALT_FN_2_OUT)
+#define GPIO50_HWCTS_MD         (50 | GPIO_ALT_FN_1_IN)
+#define GPIO51_HWRTS_MD         (51 | GPIO_ALT_FN_1_OUT)
 #define GPIO51_nPIOW_MD                (51 | GPIO_ALT_FN_2_OUT)
 #define GPIO52_nPCE_1_MD       (52 | GPIO_ALT_FN_2_OUT)
 #define GPIO53_nPCE_2_MD       (53 | GPIO_ALT_FN_2_OUT)
 #define CKEN7_BTUART   (1 << 7)        /* BTUART Unit Clock Enable */
 #define CKEN6_FFUART   (1 << 6)        /* FFUART Unit Clock Enable */
 #define CKEN5_STUART   (1 << 5)        /* STUART Unit Clock Enable */
+#define CKEN4_HWUART   (1 << 4)        /* HWUART Unit Clock Enable */
 #define CKEN4_SSP3     (1 << 4)        /* SSP3 Unit Clock Enable */
 #define CKEN3_SSP      (1 << 3)        /* SSP Unit Clock Enable */
 #define CKEN3_SSP2     (1 << 3)        /* SSP2 Unit Clock Enable */
 
 #endif
 
+/* PWRMODE register M field values */
+
+#define PWRMODE_IDLE           0x1
+#define PWRMODE_STANDBY                0x2
+#define PWRMODE_SLEEP          0x3
+#define PWRMODE_DEEPSLEEP      0x7
+
 #endif
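Usage sketch (an assumption, mirroring what the PXA sleep/standby assembly does): the M field values above are written to the PWRMODE register in coprocessor 14 to drop the CPU into the corresponding low-power state.

	static inline void pxa_enter_standby_example(void)
	{
		unsigned long mode = PWRMODE_STANDBY;

		/* write PWRMODE[M]; the core halts here until a wake-up event */
		asm volatile("mcr p14, 0, %0, c7, c0, 0" : : "r" (mode) : "memory");
	}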
index 4428d3eb743281ae8d337caab406f64f266914d6..fe38090444e0b4c3de7828a69434588b5bb5065f 100644 (file)
@@ -12,6 +12,7 @@
 #define FFUART         ((volatile unsigned long *)0x40100000)
 #define BTUART         ((volatile unsigned long *)0x40200000)
 #define STUART         ((volatile unsigned long *)0x40700000)
+#define HWUART         ((volatile unsigned long *)0x41600000)
 
 #define UART           FFUART
 
index 24453c405a87f56effde8b12ec6ae0cf17616b2a..b4da08d7a336f6a3de5bef5d480629860d0f5697 100644 (file)
@@ -13,6 +13,8 @@
 #ifndef __ASM_ARM_ARCH_IO_H
 #define __ASM_ARM_ARCH_IO_H
 
+#include <asm/hardware.h>
+
 #define IO_SPACE_LIMIT 0xffffffff
 
 /*
index ac57bc887d82ff793a42b373fc931f39fab2d39d..4790491ba9d0b71de50e96635859181fed4e4b9a 100644 (file)
@@ -13,6 +13,7 @@
  *     07-Sep-2004     RTP     Created file
  *     03-Nov-2004     BJD     Updated and minor cleanups
  *     03-Aug-2005     RTP     Renamed to fb.h
+ *     26-Oct-2005     BJD     Changed name of platdata init
 */
 
 #ifndef __ASM_ARM_FB_H
@@ -64,6 +65,6 @@ struct s3c2410fb_mach_info {
        unsigned long   lpcsel;
 };
 
-void __init set_s3c2410fb_info(struct s3c2410fb_mach_info *hard_s3c2410fb_info);
+extern void __init s3c24xx_fb_set_platdata(struct s3c2410fb_mach_info *);
 
 #endif /* __ASM_ARM_FB_H */
index 4bf272ed9add61debe8b01c6b8dad75d3fac53f8..16fbc8afffd907cb7f333c3bc57fb98b02fcc09d 100644 (file)
@@ -15,6 +15,8 @@
 #ifndef __ASM_ARM_ARCH_IO_H
 #define __ASM_ARM_ARCH_IO_H
 
+#include <asm/hardware.h>
+
 #define IO_SPACE_LIMIT 0xffffffff
 
 /*
index 2053cbacffc3d8c8c40523f63a2fb465dcb88888..cb33d57c146c09fab2268f0daf9aae493098a09a 100644 (file)
@@ -20,6 +20,7 @@
  *    18-11-2004     BJD     Added S3C2440 AC97 controls
  *    10-Mar-2005    LCVR    Changed S3C2410_VA to S3C24XX_VA
  *    28-Mar-2005    LCVR    Fixed definition of GPB10
+ *    26-Oct-2005    BJD     Added generic configuration types
 */
 
 
 /* general configuration options */
 
 #define S3C2410_GPIO_LEAVE   (0xFFFFFFFF)
+#define S3C2410_GPIO_INPUT   (0xFFFFFFF0)
+#define S3C2410_GPIO_OUTPUT  (0xFFFFFFF1)
+#define S3C2410_GPIO_IRQ     (0xFFFFFFF2)      /* not available for all */
+#define S3C2410_GPIO_SFN2    (0xFFFFFFF2)      /* not available on A */
+#define S3C2410_GPIO_SFN3    (0xFFFFFFF3)      /* not available on A */
 
 /* configure GPIO ports A..G */
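A usage sketch, assuming the existing s3c2410_gpio_cfgpin()/s3c2410_gpio_setpin() helpers and a placeholder pin name: the generic values added above let board code configure a pin without looking up that pin's bank-specific function constant.

	/* e.g. in board init code: */
	s3c2410_gpio_cfgpin(S3C2410_GPB5, S3C2410_GPIO_OUTPUT);	/* drive it as an output */
	s3c2410_gpio_setpin(S3C2410_GPB5, 1);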
 
index 19c3b1e186bb6f8774704f138f10cb814fce3af4..28711aaa4968b5daa5882f905780ed120793fdee 100644 (file)
 #define UNCACHEABLE_ADDR       0xfa050000
 
 
-/*
- * We requires absolute addresses i.e. (PCMCIA_IO_0_BASE + 0x3f8) for 
- * in*()/out*() macros to be usable for all cases.
- */
-#define PCIO_BASE              0
-
-
 /*
  * SA1100 internal I/O mappings
  *
index 7d969ffbd3bb04965a276e83c5cd032df392ea11..9d4fe6cf205b1fb9e272e19091c8a902b59aa8ec 100644 (file)
 #ifndef __ASM_ARM_ARCH_IO_H
 #define __ASM_ARM_ARCH_IO_H
 
+#include <asm/hardware.h>
+
 #define IO_SPACE_LIMIT 0xffffffff
 
 /*
  * We don't actually have real ISA nor PCI buses, but there is so many 
  * drivers out there that might just work if we fake them...
  */
-#define __io(a)                        ((void __iomem *)(PCIO_BASE + (a)))
+static inline void __iomem *__io(unsigned long addr)
+{
+       return (void __iomem *)addr;
+}
+#define __io(a)                        __io(a)
 #define __mem_pci(a)           (a)
 #define __mem_isa(a)           (a)
 
index 6f52118ba1a47a9da2cb8bbeb00266e7967d244a..0f0612f79b2b0924476c0433add323028aa0372e 100644 (file)
@@ -4,6 +4,7 @@
  * Copyright (c) 1999 Nicolas Pitre <nico@cam.org>
  */
 #include <linux/config.h>
+#include <asm/hardware.h>
 
 static inline void arch_idle(void)
 {
index 5e6ed0038b2b438667a35b1f4e60b3883e08c1e0..87ffa27f296247344d96d4835deea07534a36eaa 100644 (file)
@@ -11,6 +11,8 @@
 #ifndef __ASM_ARM_ARCH_IO_H
 #define __ASM_ARM_ARCH_IO_H
 
+#include <asm/hardware.h>
+
 #define IO_SPACE_LIMIT 0xffffffff
 
 /*
index d62ade4e4cbb24facf3b95a3774e4ec508ba6001..e3e8541ee63b07f57d915e5409e279d0f09ce2c6 100644 (file)
@@ -70,7 +70,7 @@ static inline int dma_mapping_error(dma_addr_t dma_addr)
  * device-viewed address.
  */
 extern void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, int gfp);
+dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);
 
 /**
  * dma_free_coherent - free memory allocated by dma_alloc_coherent
@@ -117,7 +117,7 @@ int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
  * device-viewed address.
  */
 extern void *
-dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, int gfp);
+dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);
 
 #define dma_free_writecombine(dev,size,cpu_addr,handle) \
        dma_free_coherent(dev,size,cpu_addr,handle)
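Caller-side sketch, not part of this change: with the prototypes above, the allocation-flags argument is the typed gfp_t rather than a bare int, so callers keep passing GFP_* constants as before and sparse can type-check them.

	static void *example_alloc_dma_buffer(struct device *dev, size_t size,
					      dma_addr_t *dma)
	{
		/* may sleep; use GFP_ATOMIC from interrupt context instead */
		return dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
	}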
index 5c4ae8f5dbb076ad2e46ea8be9c48acb1bc56879..2e6799632f124ff6e43d86cddfec5dec1aa7ad89 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/types.h>
 #include <asm/byteorder.h>
 #include <asm/memory.h>
-#include <asm/arch/hardware.h>
 
 /*
  * ISA I/O bus memory addresses are 1:1 with the physical address.
index 4fa95084a8c0459f58b87a9e36f75019069fb0c6..7273c6fd95b53d39a3bae2e882c134e789030fcb 100644 (file)
@@ -48,10 +48,10 @@ struct machine_desc {
  * Set of macros to define architecture features.  This is built into
  * a table by the linker.
  */
-#define MACHINE_START(_type,_name)             \
-const struct machine_desc __mach_desc_##_type  \
+#define MACHINE_START(_type,_name)                     \
+static const struct machine_desc __mach_desc_##_type   \
  __attribute__((__section__(".arch.info.init"))) = {   \
-       .nr             = MACH_TYPE_##_type,    \
+       .nr             = MACH_TYPE_##_type,            \
        .name           = _name,
 
 #define MACHINE_END                            \
index 9ac47cf8d2e4e8fa416e516b55473b5ce7e2c144..0619522bd92684e599c349e11809ac62403bc349 100644 (file)
@@ -11,7 +11,7 @@
  */
 struct map_desc {
        unsigned long virtual;
-       unsigned long physical;
+       unsigned long pfn;
        unsigned long length;
        unsigned int type;
 };
@@ -27,6 +27,9 @@ struct meminfo;
 #define MT_ROM                 6
 #define MT_IXP2000_DEVICE      7
 
+#define        __phys_to_pfn(paddr)    (paddr >> PAGE_SHIFT)
+#define        __pfn_to_phys(pfn)      (pfn << PAGE_SHIFT)
+
 extern void create_memmap_holes(struct meminfo *);
 extern void memtable_init(struct meminfo *);
 extern void iotable_init(struct map_desc *, int);
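Sketch of the new convention (the addresses below are placeholders): a machine's static I/O table now carries a page frame number, so entries are built with __phys_to_pfn() rather than a raw physical address and are still registered through iotable_init().

	static struct map_desc example_io_desc[] __initdata = {
		{
			.virtual = 0xf1000000,			/* placeholder VA */
			.pfn     = __phys_to_pfn(0x40000000),	/* placeholder PA */
			.length  = 0x00100000,			/* 1MB */
			.type    = MT_DEVICE,
		},
	};

	static void __init example_map_io(void)
	{
		iotable_init(example_io_desc, ARRAY_SIZE(example_io_desc));
	}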
index 0b5c3fdaefe1f14941d9e20b52feb7dab2eb847d..8eff51349ae75235f59764974031bf73f9705210 100644 (file)
 
 #ifdef CONFIG_PCI
 void *dma_alloc_coherent(struct device *dev, size_t size,
-                          dma_addr_t *dma_handle, int flag);
+                          dma_addr_t *dma_handle, gfp_t flag);
 
 void dma_free_coherent(struct device *dev, size_t size,
                         void *vaddr, dma_addr_t dma_handle);
 #else
 static inline void *
 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-                   int flag)
+                   gfp_t flag)
 {
         BUG();
         return NULL;
index 0206ab35eae08babea7c12f7c4fdf170b17b71e9..5003e017fd1ecae1905950c1fb49f75e8aca37c1 100644 (file)
@@ -13,7 +13,7 @@
 extern unsigned long __nongprelbss dma_coherent_mem_start;
 extern unsigned long __nongprelbss dma_coherent_mem_end;
 
-void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, int gfp);
+void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp);
 void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle);
 
 /*
index b4efe5e3591a5b22a7f511b606c13c4d3cf4b0ce..1168451c275fb1603892630bf3510a6899dace0b 100644 (file)
@@ -32,7 +32,7 @@ extern void pcibios_set_master(struct pci_dev *dev);
 extern void pcibios_penalize_isa_irq(int irq);
 
 #ifdef CONFIG_MMU
-extern void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle);
+extern void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle);
 extern void consistent_free(void *vaddr);
 extern void consistent_sync(void *vaddr, size_t size, int direction);
 extern void consistent_sync_page(struct page *page, unsigned long offset,
index fd9de9502dff09f52df54458f3d5fd0265372298..a7f1a55ce6b0a07b2bf7850e4dda29c3eb88d67d 100644 (file)
@@ -6,7 +6,7 @@
 
 static inline void *
 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-                  int flag)
+                  gfp_t flag)
 {
        BUG();
        return NULL;
index 79e89a7db5665563aa265523c756ea4763fe3e8a..a2f6ac5aef7d9d2a4a0cafb70fcafac6aef0e1ee 100644 (file)
@@ -37,7 +37,7 @@ typedef int ia64_mv_pci_legacy_write_t (struct pci_bus *, u16 port, u32 val,
 
 /* DMA-mapping interface: */
 typedef void ia64_mv_dma_init (void);
-typedef void *ia64_mv_dma_alloc_coherent (struct device *, size_t, dma_addr_t *, int);
+typedef void *ia64_mv_dma_alloc_coherent (struct device *, size_t, dma_addr_t *, gfp_t);
 typedef void ia64_mv_dma_free_coherent (struct device *, size_t, void *, dma_addr_t);
 typedef dma_addr_t ia64_mv_dma_map_single (struct device *, void *, size_t, int);
 typedef void ia64_mv_dma_unmap_single (struct device *, dma_addr_t, size_t, int);
index 3a2db28834b68d84bcd47d9c751cb703706363bc..a7fa0302bda7e0a5258a506254a701bf37d59fed 100644 (file)
@@ -8,7 +8,7 @@
 
 static inline void *
 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-                  int flag)
+                  gfp_t flag)
 {
        return (void *)NULL;
 }
index af28dc88930b40aa59c6c6102e46ac7d5e62ec09..43288634c38a7bbca2793d295f801de22ffaf2ea 100644 (file)
@@ -5,13 +5,13 @@
 #include <asm/cache.h>
 
 void *dma_alloc_noncoherent(struct device *dev, size_t size,
-                          dma_addr_t *dma_handle, int flag);
+                          dma_addr_t *dma_handle, gfp_t flag);
 
 void dma_free_noncoherent(struct device *dev, size_t size,
                         void *vaddr, dma_addr_t dma_handle);
 
 void *dma_alloc_coherent(struct device *dev, size_t size,
-                          dma_addr_t *dma_handle, int flag);
+                          dma_addr_t *dma_handle, gfp_t flag);
 
 void dma_free_coherent(struct device *dev, size_t size,
                         void *vaddr, dma_addr_t dma_handle);
index ac3dfc7af5b0e9b78a4d663e8d8855fe2096eb9c..fcec52bafb25ddd01c4c6589a33d5183c3cbe325 100644 (file)
@@ -128,26 +128,26 @@ struct hpc3_ethregs {
        volatile u32 rx_gfptr;  /* current GIO fifo ptr */
        volatile u32 rx_dfptr;  /* current device fifo ptr */
        u32 _unused1;           /* padding */
-       volatile u32 rx_reset;  /* reset register */
-#define HPC3_ERXRST_CRESET 0x1 /* Reset dma channel and external controller */
-#define HPC3_ERXRST_CLRIRQ 0x2 /* Clear channel interrupt */
-#define HPC3_ERXRST_LBACK  0x4 /* Enable diagnostic loopback mode of Seeq8003 */
-
-       volatile u32 rx_dconfig;        /* DMA configuration register */
-#define HPC3_ERXDCFG_D1    0x0000f /* Cycles to spend in D1 state for PIO */
-#define HPC3_ERXDCFG_D2    0x000f0 /* Cycles to spend in D2 state for PIO */
-#define HPC3_ERXDCFG_D3    0x00f00 /* Cycles to spend in D3 state for PIO */
-#define HPC3_ERXDCFG_WCTRL 0x01000 /* Enable writes of desc into ex ctrl port */
-#define HPC3_ERXDCFG_FRXDC 0x02000 /* Clear eop stat bits upon rxdc, hw seeq fix */
-#define HPC3_ERXDCFG_FEOP  0x04000 /* Bad packet marker timeout enable */
-#define HPC3_ERXDCFG_FIRQ  0x08000 /* Another bad packet timeout enable */
-#define HPC3_ERXDCFG_PTO   0x30000 /* Programmed timeout value for above two */
-
-       volatile u32 rx_pconfig;        /* PIO configuration register */
-#define HPC3_ERXPCFG_P1    0x000f /* Cycles to spend in P1 state for PIO */
-#define HPC3_ERXPCFG_P2    0x00f0 /* Cycles to spend in P2 state for PIO */
-#define HPC3_ERXPCFG_P3    0x0f00 /* Cycles to spend in P3 state for PIO */
-#define HPC3_ERXPCFG_TST   0x1000 /* Diagnistic ram test feature bit */
+       volatile u32 reset;     /* reset register */
+#define HPC3_ERST_CRESET 0x1   /* Reset dma channel and external controller */
+#define HPC3_ERST_CLRIRQ 0x2   /* Clear channel interrupt */
+#define HPC3_ERST_LBACK  0x4   /* Enable diagnostic loopback mode of Seeq8003 */
+
+       volatile u32 dconfig;    /* DMA configuration register */
+#define HPC3_EDCFG_D1    0x0000f /* Cycles to spend in D1 state for PIO */
+#define HPC3_EDCFG_D2    0x000f0 /* Cycles to spend in D2 state for PIO */
+#define HPC3_EDCFG_D3    0x00f00 /* Cycles to spend in D3 state for PIO */
+#define HPC3_EDCFG_WCTRL 0x01000 /* Enable writes of desc into ex ctrl port */
+#define HPC3_EDCFG_FRXDC 0x02000 /* Clear eop stat bits upon rxdc, hw seeq fix */
+#define HPC3_EDCFG_FEOP  0x04000 /* Bad packet marker timeout enable */
+#define HPC3_EDCFG_FIRQ  0x08000 /* Another bad packet timeout enable */
+#define HPC3_EDCFG_PTO   0x30000 /* Programmed timeout value for above two */
+
+       volatile u32 pconfig;   /* PIO configuration register */
+#define HPC3_EPCFG_P1    0x000f /* Cycles to spend in P1 state for PIO */
+#define HPC3_EPCFG_P2    0x00f0 /* Cycles to spend in P2 state for PIO */
+#define HPC3_EPCFG_P3    0x0f00 /* Cycles to spend in P3 state for PIO */
+#define HPC3_EPCFG_TST   0x1000 /* Diagnostic ram test feature bit */
 
        u32 _unused2[0x1000/4 - 8];     /* padding */
 
index 4db84f969e9eb7543d7234365a8a9a89058e0028..74d4ac6f2151ebfdece63746f6d93fd1e3bde8c3 100644 (file)
@@ -9,8 +9,8 @@
 /* See Documentation/DMA-mapping.txt */
 struct hppa_dma_ops {
        int  (*dma_supported)(struct device *dev, u64 mask);
-       void *(*alloc_consistent)(struct device *dev, size_t size, dma_addr_t *iova, int flag);
-       void *(*alloc_noncoherent)(struct device *dev, size_t size, dma_addr_t *iova, int flag);
+       void *(*alloc_consistent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag);
+       void *(*alloc_noncoherent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag);
        void (*free_consistent)(struct device *dev, size_t size, void *vaddr, dma_addr_t iova);
        dma_addr_t (*map_single)(struct device *dev, void *addr, size_t size, enum dma_data_direction direction);
        void (*unmap_single)(struct device *dev, dma_addr_t iova, size_t size, enum dma_data_direction direction);
@@ -49,14 +49,14 @@ extern struct hppa_dma_ops *hppa_dma_ops;
 
 static inline void *
 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-                  int flag)
+                  gfp_t flag)
 {
        return hppa_dma_ops->alloc_consistent(dev, size, dma_handle, flag);
 }
 
 static inline void *
 dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-                     int flag)
+                     gfp_t flag)
 {
        return hppa_dma_ops->alloc_noncoherent(dev, size, dma_handle, flag);
 }
index 061bfcac1bf1c359802f9f755b776d92b3b78e22..6e9635114433a94e2a08b934487f6dd82e0b8cf5 100644 (file)
@@ -19,7 +19,7 @@
  * allocate the space "normally" and use the cache management functions
  * to ensure it is consistent.
  */
-extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, int gfp);
+extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp);
 extern void __dma_free_coherent(size_t size, void *vaddr);
 extern void __dma_sync(void *vaddr, size_t size, int direction);
 extern void __dma_sync_page(struct page *page, unsigned long offset,
index 80d164c1529efb121726108bf7d24fc44d8bbcde..d3fa5c2b889d270139407f9cba31bddfaf58ab82 100644 (file)
@@ -9,7 +9,7 @@
 extern struct bus_type pci_bus_type;
 
 /* arch/sh/mm/consistent.c */
-extern void *consistent_alloc(int gfp, size_t size, dma_addr_t *handle);
+extern void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle);
 extern void consistent_free(void *vaddr, size_t size);
 extern void consistent_sync(void *vaddr, size_t size, int direction);
 
@@ -26,7 +26,7 @@ static inline int dma_set_mask(struct device *dev, u64 mask)
 }
 
 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-                        dma_addr_t *dma_handle, int flag)
+                        dma_addr_t *dma_handle, gfp_t flag)
 {
        if (sh_mv.mv_consistent_alloc) {
                void *ret;
index 5771f4baa47871fdd4623e20a631ed1ff076e3c2..3f18aa18051693e78f810c85eb71f3d0a55ebfa6 100644 (file)
@@ -64,7 +64,7 @@ struct sh_machine_vector
 
        void (*mv_heartbeat)(void);
 
-       void *(*mv_consistent_alloc)(struct device *, size_t, dma_addr_t *, int);
+       void *(*mv_consistent_alloc)(struct device *, size_t, dma_addr_t *, gfp_t);
        int (*mv_consistent_free)(struct device *, size_t, void *, dma_addr_t);
 };
 
index b8d26fe677f4f92cbedd09fff90ea70d1af44c4b..cc9a2e86f5b413d566f81c07781596cc4a7b5b65 100644 (file)
@@ -25,7 +25,7 @@ static inline int dma_set_mask(struct device *dev, u64 mask)
 }
 
 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-                        dma_addr_t *dma_handle, int flag)
+                        dma_addr_t *dma_handle, gfp_t flag)
 {
        return consistent_alloc(NULL, size, dma_handle);
 }
index 2dc5bb8effa6ff1d3d592f53a86e3c76f1f43de2..d7c3b0f0a90103de71853fbdaa5aedf380197c49 100644 (file)
@@ -8,7 +8,7 @@
 #else
 
 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-                        dma_addr_t *dma_handle, int flag)
+                        dma_addr_t *dma_handle, gfp_t flag)
 {
        BUG();
        return NULL;
index 1c5da41653a44c1ed21dd6d5d554958ea67b51cc..c7d5804ba76df1a1e458746cea57a828cbe93ac2 100644 (file)
@@ -10,7 +10,7 @@
 struct device;
 
 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-                        dma_addr_t *dma_handle, int flag)
+                        dma_addr_t *dma_handle, gfp_t flag)
 {
        BUG();
        return NULL;
index 13e6291f71511dbb05248ce644a29f3e6597c8d0..babd2989511465887973098e1619e6d20a067669 100644 (file)
@@ -19,7 +19,7 @@ dma_set_mask(struct device *dev, u64 dma_mask)
 
 static inline void *
 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-                  int flag)
+                  gfp_t flag)
 {
        BUG();
        return((void *) 0);
index 2c192abe9aeb0d29ac9ce1946214bc697af7c170..0229814af31e6bedad5d66381187d21f31f85796 100644 (file)
@@ -115,7 +115,7 @@ extern unsigned long uml_physmem;
 #define pfn_valid(pfn) ((pfn) < max_mapnr)
 #define virt_addr_valid(v) pfn_valid(phys_to_pfn(__pa(v)))
 
-extern struct page *arch_validate(struct page *page, int mask, int order);
+extern struct page *arch_validate(struct page *page, gfp_t mask, int order);
 #define HAVE_ARCH_VALIDATE
 
 extern void arch_free_page(struct page *page, int order);
index e784fdc524f1ac7be2dce7930995cccab58dc0f6..54a380efed413fda6d8c0bf155d4f2ace1a6753c 100644 (file)
@@ -17,7 +17,7 @@ extern dma_addr_t bad_dma_address;
        (swiotlb ? swiotlb_dma_mapping_error(x) : ((x) == bad_dma_address))
 
 void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-                        unsigned gfp);
+                        gfp_t gfp);
 void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
                         dma_addr_t dma_handle);
 
index 36293061f4ed6b1416aac1604417d73fbd157765..7cbfd10ecc3c1c49ba70fde51d1a7efefeb264cf 100644 (file)
@@ -27,7 +27,7 @@ extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
                         int nents, int direction);
 extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr);
 extern void *swiotlb_alloc_coherent (struct device *hwdev, size_t size,
-                                    dma_addr_t *dma_handle, int flags);
+                                    dma_addr_t *dma_handle, gfp_t flags);
 extern void swiotlb_free_coherent (struct device *hwdev, size_t size,
                                   void *vaddr, dma_addr_t dma_handle);
 
index e86a206f12093313335e326af2fb9af63fb24d42..c425f10d086a500c9ca45ca63df857bef16e3f09 100644 (file)
@@ -28,7 +28,7 @@ extern void consistent_sync(void*, size_t, int);
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
 void *dma_alloc_coherent(struct device *dev, size_t size,
-                          dma_addr_t *dma_handle, int flag);
+                          dma_addr_t *dma_handle, gfp_t flag);
 
 void dma_free_coherent(struct device *dev, size_t size,
                         void *vaddr, dma_addr_t dma_handle);
index a5b74efab0679ae2a1f17dd6329596bf89c531ea..d2873b732bb1255ac08515d21e3ca832f601a058 100644 (file)
@@ -42,13 +42,18 @@ enum {
        ATA_SECT_SIZE           = 512,
 
        ATA_ID_WORDS            = 256,
-       ATA_ID_PROD_OFS         = 27,
-       ATA_ID_FW_REV_OFS       = 23,
        ATA_ID_SERNO_OFS        = 10,
-       ATA_ID_MAJOR_VER        = 80,
-       ATA_ID_PIO_MODES        = 64,
+       ATA_ID_FW_REV_OFS       = 23,
+       ATA_ID_PROD_OFS         = 27,
+       ATA_ID_OLD_PIO_MODES    = 51,
+       ATA_ID_FIELD_VALID      = 53,
        ATA_ID_MWDMA_MODES      = 63,
+       ATA_ID_PIO_MODES        = 64,
+       ATA_ID_EIDE_DMA_MIN     = 65,
+       ATA_ID_EIDE_PIO         = 67,
+       ATA_ID_EIDE_PIO_IORDY   = 68,
        ATA_ID_UDMA_MODES       = 88,
+       ATA_ID_MAJOR_VER        = 80,
        ATA_ID_PIO4             = (1 << 1),
 
        ATA_PCI_CTL_OFS         = 2,
@@ -128,10 +133,15 @@ enum {
        ATA_CMD_PIO_READ_EXT    = 0x24,
        ATA_CMD_PIO_WRITE       = 0x30,
        ATA_CMD_PIO_WRITE_EXT   = 0x34,
+       ATA_CMD_READ_MULTI      = 0xC4,
+       ATA_CMD_READ_MULTI_EXT  = 0x29,
+       ATA_CMD_WRITE_MULTI     = 0xC5,
+       ATA_CMD_WRITE_MULTI_EXT = 0x39,
        ATA_CMD_SET_FEATURES    = 0xEF,
        ATA_CMD_PACKET          = 0xA0,
        ATA_CMD_VERIFY          = 0x40,
        ATA_CMD_VERIFY_EXT      = 0x42,
+       ATA_CMD_INIT_DEV_PARAMS = 0x91,
 
        /* SETFEATURES stuff */
        SETFEATURES_XFER        = 0x03,
@@ -146,14 +156,14 @@ enum {
        XFER_MW_DMA_2           = 0x22,
        XFER_MW_DMA_1           = 0x21,
        XFER_MW_DMA_0           = 0x20,
+       XFER_SW_DMA_2           = 0x12,
+       XFER_SW_DMA_1           = 0x11,
+       XFER_SW_DMA_0           = 0x10,
        XFER_PIO_4              = 0x0C,
        XFER_PIO_3              = 0x0B,
        XFER_PIO_2              = 0x0A,
        XFER_PIO_1              = 0x09,
        XFER_PIO_0              = 0x08,
-       XFER_SW_DMA_2           = 0x12,
-       XFER_SW_DMA_1           = 0x11,
-       XFER_SW_DMA_0           = 0x10,
        XFER_PIO_SLOW           = 0x00,
 
        /* ATAPI stuff */
@@ -181,6 +191,7 @@ enum {
        ATA_TFLAG_ISADDR        = (1 << 1), /* enable r/w to nsect/lba regs */
        ATA_TFLAG_DEVICE        = (1 << 2), /* enable r/w to device reg */
        ATA_TFLAG_WRITE         = (1 << 3), /* data dir: host->dev==1 (write) */
+       ATA_TFLAG_LBA           = (1 << 4), /* enable LBA */
 };
 
 enum ata_tf_protocols {
@@ -250,7 +261,19 @@ struct ata_taskfile {
          ((u64) (id)[(n) + 1] << 16) | \
          ((u64) (id)[(n) + 0]) )
 
-static inline int atapi_cdb_len(u16 *dev_id)
+static inline int ata_id_current_chs_valid(const u16 *id)
+{
+       /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command 
+          has not been issued to the device then the values of 
+          id[54] to id[56] are vendor specific. */
+       return (id[53] & 0x01) && /* Current translation valid */
+               id[54] &&  /* cylinders in current translation */
+               id[55] &&  /* heads in current translation */
+               id[55] <= 16 &&
+               id[56];    /* sectors in current translation */
+}
+
+static inline int atapi_cdb_len(const u16 *dev_id)
 {
        u16 tmp = dev_id[0] & 0x3;
        switch (tmp) {
@@ -260,7 +283,7 @@ static inline int atapi_cdb_len(u16 *dev_id)
        }
 }
 
-static inline int is_atapi_taskfile(struct ata_taskfile *tf)
+static inline int is_atapi_taskfile(const struct ata_taskfile *tf)
 {
        return (tf->protocol == ATA_PROT_ATAPI) ||
               (tf->protocol == ATA_PROT_ATAPI_NODATA) ||
index b2a2509bd7ea35b02874d034e5928fbea18ac8c1..da3c01955f3d8eea8cacd69d9ef9485517493bf2 100644 (file)
@@ -260,11 +260,11 @@ extern int audit_filter_user(struct netlink_skb_parms *cb, int type);
 #ifdef CONFIG_AUDIT
 /* These are defined in audit.c */
                                /* Public API */
-extern void                audit_log(struct audit_context *ctx, int gfp_mask,
+extern void                audit_log(struct audit_context *ctx, gfp_t gfp_mask,
                                      int type, const char *fmt, ...)
                                      __attribute__((format(printf,4,5)));
 
-extern struct audit_buffer *audit_log_start(struct audit_context *ctx, int gfp_mask, int type);
+extern struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, int type);
 extern void                audit_log_format(struct audit_buffer *ab,
                                             const char *fmt, ...)
                            __attribute__((format(printf,2,3)));
index 3344b4e8e43a1acc8e90e6de6600c6269aa14ff2..685fd3720df5b2105c86d3be588eb9cf0061b360 100644 (file)
@@ -301,7 +301,7 @@ extern struct bio *bio_map_user_iov(struct request_queue *,
                                    struct sg_iovec *, int, int);
 extern void bio_unmap_user(struct bio *);
 extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
-                               unsigned int);
+                               gfp_t);
 extern void bio_set_pages_dirty(struct bio *bio);
 extern void bio_check_pages_dirty(struct bio *bio);
 extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int);
index efdc9b5bc05c8687380200f8a871975c91709860..025a7f084dbd82b2caf7a290736287bc3f012d37 100644 (file)
@@ -96,8 +96,8 @@ struct io_context {
 
 void put_io_context(struct io_context *ioc);
 void exit_io_context(void);
-struct io_context *current_io_context(int gfp_flags);
-struct io_context *get_io_context(int gfp_flags);
+struct io_context *current_io_context(gfp_t gfp_flags);
+struct io_context *get_io_context(gfp_t gfp_flags);
 void copy_io_context(struct io_context **pdst, struct io_context **psrc);
 void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);
 
@@ -107,9 +107,9 @@ typedef void (rq_end_io_fn)(struct request *);
 struct request_list {
        int count[2];
        int starved[2];
+       int elvpriv;
        mempool_t *rq_pool;
        wait_queue_head_t wait[2];
-       wait_queue_head_t drain;
 };
 
 #define BLK_MAX_CDB    16
@@ -203,6 +203,7 @@ struct request {
 enum rq_flag_bits {
        __REQ_RW,               /* not set, read. set, write */
        __REQ_FAILFAST,         /* no low level driver retries */
+       __REQ_SORTED,           /* elevator knows about this request */
        __REQ_SOFTBARRIER,      /* may not be passed by ioscheduler */
        __REQ_HARDBARRIER,      /* may not be passed by drive either */
        __REQ_CMD,              /* is a regular fs rw request */
@@ -210,6 +211,7 @@ enum rq_flag_bits {
        __REQ_STARTED,          /* drive already may have started this one */
        __REQ_DONTPREP,         /* don't call prep for this one */
        __REQ_QUEUED,           /* uses queueing */
+       __REQ_ELVPRIV,          /* elevator private data attached */
        /*
         * for ATA/ATAPI devices
         */
@@ -235,6 +237,7 @@ enum rq_flag_bits {
 
 #define REQ_RW         (1 << __REQ_RW)
 #define REQ_FAILFAST   (1 << __REQ_FAILFAST)
+#define REQ_SORTED     (1 << __REQ_SORTED)
 #define REQ_SOFTBARRIER        (1 << __REQ_SOFTBARRIER)
 #define REQ_HARDBARRIER        (1 << __REQ_HARDBARRIER)
 #define REQ_CMD                (1 << __REQ_CMD)
@@ -242,6 +245,7 @@ enum rq_flag_bits {
 #define REQ_STARTED    (1 << __REQ_STARTED)
 #define REQ_DONTPREP   (1 << __REQ_DONTPREP)
 #define REQ_QUEUED     (1 << __REQ_QUEUED)
+#define REQ_ELVPRIV    (1 << __REQ_ELVPRIV)
 #define REQ_PC         (1 << __REQ_PC)
 #define REQ_BLOCK_PC   (1 << __REQ_BLOCK_PC)
 #define REQ_SENSE      (1 << __REQ_SENSE)
@@ -332,6 +336,12 @@ struct request_queue
        prepare_flush_fn        *prepare_flush_fn;
        end_flush_fn            *end_flush_fn;
 
+       /*
+        * Dispatch queue sorting
+        */
+       sector_t                end_sector;
+       struct request          *boundary_rq;
+
        /*
         * Auto-unplugging state
         */
@@ -354,7 +364,7 @@ struct request_queue
         * queue needs bounce pages for pages above this limit
         */
        unsigned long           bounce_pfn;
-       unsigned int            bounce_gfp;
+       gfp_t                   bounce_gfp;
 
        /*
         * various queue flags, see QUEUE_* below
@@ -405,8 +415,6 @@ struct request_queue
        unsigned int            sg_reserved_size;
        int                     node;
 
-       struct list_head        drain_list;
-
        /*
         * reserved for flush operations
         */
@@ -434,7 +442,7 @@ enum {
 #define QUEUE_FLAG_DEAD                5       /* queue being torn down */
 #define QUEUE_FLAG_REENTER     6       /* Re-entrancy avoidance */
 #define QUEUE_FLAG_PLUGGED     7       /* queue is plugged */
-#define QUEUE_FLAG_DRAIN       8       /* draining queue for sched switch */
+#define QUEUE_FLAG_ELVSWITCH   8       /* don't use elevator, just do FIFO */
 #define QUEUE_FLAG_FLUSH       9       /* doing barrier flush sequence */
 
 #define blk_queue_plugged(q)   test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
@@ -454,6 +462,7 @@ enum {
 #define blk_pm_request(rq)     \
        ((rq)->flags & (REQ_PM_SUSPEND | REQ_PM_RESUME))
 
+#define blk_sorted_rq(rq)      ((rq)->flags & REQ_SORTED)
 #define blk_barrier_rq(rq)     ((rq)->flags & REQ_HARDBARRIER)
 #define blk_barrier_preflush(rq)       ((rq)->flags & REQ_BAR_PREFLUSH)
 #define blk_barrier_postflush(rq)      ((rq)->flags & REQ_BAR_POSTFLUSH)
@@ -550,7 +559,7 @@ extern void generic_make_request(struct bio *bio);
 extern void blk_put_request(struct request *);
 extern void blk_end_sync_rq(struct request *rq);
 extern void blk_attempt_remerge(request_queue_t *, struct request *);
-extern struct request *blk_get_request(request_queue_t *, int, int);
+extern struct request *blk_get_request(request_queue_t *, int, gfp_t);
 extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
 extern void blk_requeue_request(request_queue_t *, struct request *);
 extern void blk_plug_device(request_queue_t *);
@@ -565,7 +574,7 @@ extern void blk_run_queue(request_queue_t *);
 extern void blk_queue_activity_fn(request_queue_t *, activity_fn *, void *);
 extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned int);
 extern int blk_rq_unmap_user(struct bio *, unsigned int);
-extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, unsigned int);
+extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, gfp_t);
 extern int blk_rq_map_user_iov(request_queue_t *, struct request *, struct sg_iovec *, int);
 extern int blk_execute_rq(request_queue_t *, struct gendisk *,
                          struct request *, int);
@@ -611,12 +620,21 @@ extern void end_request(struct request *req, int uptodate);
 
 static inline void blkdev_dequeue_request(struct request *req)
 {
-       BUG_ON(list_empty(&req->queuelist));
+       elv_dequeue_request(req->q, req);
+}
 
-       list_del_init(&req->queuelist);
+/*
+ * This should be in elevator.h, but that requires pulling in rq and q
+ */
+static inline void elv_dispatch_add_tail(struct request_queue *q,
+                                        struct request *rq)
+{
+       if (q->last_merge == rq)
+               q->last_merge = NULL;
 
-       if (req->rl)
-               elv_remove_request(req->q, req);
+       q->end_sector = rq_end_sector(rq);
+       q->boundary_rq = rq;
+       list_add_tail(&rq->queuelist, &q->queue_head);
 }
 
 /*
@@ -650,12 +668,10 @@ extern void blk_dump_rq_flags(struct request *, char *);
 extern void generic_unplug_device(request_queue_t *);
 extern void __generic_unplug_device(request_queue_t *);
 extern long nr_blockdev_pages(void);
-extern void blk_wait_queue_drained(request_queue_t *, int);
-extern void blk_finish_queue_drain(request_queue_t *);
 
 int blk_get_queue(request_queue_t *);
-request_queue_t *blk_alloc_queue(int gfp_mask);
-request_queue_t *blk_alloc_queue_node(int,int);
+request_queue_t *blk_alloc_queue(gfp_t);
+request_queue_t *blk_alloc_queue_node(gfp_t, int);
 #define blk_put_queue(q) blk_cleanup_queue((q))
 
 /*
index 6a1d154c082523dc934021afe1e36445a5fc75ac..88af42f5e04a8fc6ebdc963f4ccf51a01f689dae 100644 (file)
@@ -188,7 +188,7 @@ extern int buffer_heads_over_limit;
  * Generic address_space_operations implementations for buffer_head-backed
  * address_spaces.
  */
-int try_to_release_page(struct page * page, int gfp_mask);
+int try_to_release_page(struct page * page, gfp_t gfp_mask);
 int block_invalidatepage(struct page *page, unsigned long offset);
 int block_write_full_page(struct page *page, get_block_t *get_block,
                                struct writeback_control *wbc);
index 04fa7dff079c42225eb4caf97d4f29cfe65963e8..300d704bdb9a5416b781b955f0c7172a6451d269 100644 (file)
@@ -37,8 +37,6 @@
 #include <linux/cycx_x25.h>
 #endif
 
-#define        is_digit(ch) (((ch)>=(unsigned)'0'&&(ch)<=(unsigned)'9')?1:0)
-
 /* Adapter Data Space.
  * This structure is needed because we handle multiple cards, otherwise
  * static data would do it.
index 6621df86a7487745fca4b447900b59431f5d3b2e..12fe6b0bfcff8e45de8b52b2710543a4a7da7c20 100644 (file)
@@ -60,6 +60,5 @@ extern int cycx_peek(struct cycx_hw *hw, u32 addr, void *buf, u32 len);
 extern int cycx_poke(struct cycx_hw *hw, u32 addr, void *buf, u32 len);
 extern int cycx_exec(void __iomem *addr);
 
-extern void cycx_inten(struct cycx_hw *hw);
 extern void cycx_intr(struct cycx_hw *hw);
 #endif /* _CYCX_DRV_H */
index ea6bbc2d7407e9fb48de7c67756e4463bba82970..a74c27e460bae3d0baf94b0c3a89d5a6c55352ac 100644 (file)
@@ -8,18 +8,17 @@ typedef void (elevator_merge_req_fn) (request_queue_t *, struct request *, struc
 
 typedef void (elevator_merged_fn) (request_queue_t *, struct request *);
 
-typedef struct request *(elevator_next_req_fn) (request_queue_t *);
+typedef int (elevator_dispatch_fn) (request_queue_t *, int);
 
-typedef void (elevator_add_req_fn) (request_queue_t *, struct request *, int);
+typedef void (elevator_add_req_fn) (request_queue_t *, struct request *);
 typedef int (elevator_queue_empty_fn) (request_queue_t *);
-typedef void (elevator_remove_req_fn) (request_queue_t *, struct request *);
-typedef void (elevator_requeue_req_fn) (request_queue_t *, struct request *);
 typedef struct request *(elevator_request_list_fn) (request_queue_t *, struct request *);
 typedef void (elevator_completed_req_fn) (request_queue_t *, struct request *);
 typedef int (elevator_may_queue_fn) (request_queue_t *, int, struct bio *);
 
-typedef int (elevator_set_req_fn) (request_queue_t *, struct request *, struct bio *, int);
+typedef int (elevator_set_req_fn) (request_queue_t *, struct request *, struct bio *, gfp_t);
 typedef void (elevator_put_req_fn) (request_queue_t *, struct request *);
+typedef void (elevator_activate_req_fn) (request_queue_t *, struct request *);
 typedef void (elevator_deactivate_req_fn) (request_queue_t *, struct request *);
 
 typedef int (elevator_init_fn) (request_queue_t *, elevator_t *);
@@ -31,10 +30,9 @@ struct elevator_ops
        elevator_merged_fn *elevator_merged_fn;
        elevator_merge_req_fn *elevator_merge_req_fn;
 
-       elevator_next_req_fn *elevator_next_req_fn;
+       elevator_dispatch_fn *elevator_dispatch_fn;
        elevator_add_req_fn *elevator_add_req_fn;
-       elevator_remove_req_fn *elevator_remove_req_fn;
-       elevator_requeue_req_fn *elevator_requeue_req_fn;
+       elevator_activate_req_fn *elevator_activate_req_fn;
        elevator_deactivate_req_fn *elevator_deactivate_req_fn;
 
        elevator_queue_empty_fn *elevator_queue_empty_fn;
@@ -81,15 +79,15 @@ struct elevator_queue
 /*
  * block elevator interface
  */
+extern void elv_dispatch_sort(request_queue_t *, struct request *);
 extern void elv_add_request(request_queue_t *, struct request *, int, int);
 extern void __elv_add_request(request_queue_t *, struct request *, int, int);
 extern int elv_merge(request_queue_t *, struct request **, struct bio *);
 extern void elv_merge_requests(request_queue_t *, struct request *,
                               struct request *);
 extern void elv_merged_request(request_queue_t *, struct request *);
-extern void elv_remove_request(request_queue_t *, struct request *);
+extern void elv_dequeue_request(request_queue_t *, struct request *);
 extern void elv_requeue_request(request_queue_t *, struct request *);
-extern void elv_deactivate_request(request_queue_t *, struct request *);
 extern int elv_queue_empty(request_queue_t *);
 extern struct request *elv_next_request(struct request_queue *q);
 extern struct request *elv_former_request(request_queue_t *, struct request *);
@@ -98,7 +96,7 @@ extern int elv_register_queue(request_queue_t *q);
 extern void elv_unregister_queue(request_queue_t *q);
 extern int elv_may_queue(request_queue_t *, int, struct bio *);
 extern void elv_completed_request(request_queue_t *, struct request *);
-extern int elv_set_request(request_queue_t *, struct request *, struct bio *, int);
+extern int elv_set_request(request_queue_t *, struct request *, struct bio *, gfp_t);
 extern void elv_put_request(request_queue_t *, struct request *);
 
 /*
@@ -142,4 +140,6 @@ enum {
        ELV_MQUEUE_MUST,
 };
 
+#define rq_end_sector(rq)      ((rq)->sector + (rq)->nr_sectors)
+
 #endif
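Scheduler-side sketch, an assumption loosely modelled on a FIFO/no-op policy (the elevator_data layout here is illustrative): under the reworked interface a scheduler no longer hands back a "next request" but instead moves requests onto the dispatch list from its ->elevator_dispatch_fn, typically via elv_dispatch_sort() or the elv_dispatch_add_tail() helper added in blkdev.h above.

	static int example_dispatch(request_queue_t *q, int force)
	{
		struct list_head *fifo = q->elevator->elevator_data;	/* assumed private data */
		int dispatched = 0;

		while (!list_empty(fifo)) {
			struct request *rq;

			rq = list_entry(fifo->next, struct request, queuelist);
			list_del_init(&rq->queuelist);
			elv_dispatch_add_tail(q, rq);
			dispatched++;
		}
		return dispatched;
	}

	static void example_add_request(request_queue_t *q, struct request *rq)
	{
		struct list_head *fifo = q->elevator->elevator_data;

		list_add_tail(&rq->queuelist, fifo);
	}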
index e0b77c5af9a02aadab2a1fcd2e967c92c402ce16..f83d997c55820101385553e780fa5216d2be25fc 100644 (file)
@@ -320,7 +320,7 @@ struct address_space_operations {
        /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
        sector_t (*bmap)(struct address_space *, sector_t);
        int (*invalidatepage) (struct page *, unsigned long);
-       int (*releasepage) (struct page *, int);
+       int (*releasepage) (struct page *, gfp_t);
        ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
                        loff_t offset, unsigned long nr_segs);
        struct page* (*get_xip_page)(struct address_space *, sector_t,
index 01796c41c951619f6afb9e7b3132ea28aec81b86..142e1c1e06899423948cb1e71d8db34cb33dc437 100644 (file)
@@ -119,7 +119,7 @@ struct gendisk {
        int policy;
 
        atomic_t sync_io;               /* RAID */
-       unsigned long stamp, stamp_idle;
+       unsigned long stamp;
        int in_flight;
 #ifdef CONFIG_SMP
        struct disk_stats *dkstats;
index 3010e172394dc148539fdb3c5476310d16012769..c3779432a7239b5e2b85494f594d69db3adb6fb8 100644 (file)
@@ -12,8 +12,8 @@ struct vm_area_struct;
  * GFP bitmasks..
  */
 /* Zone modifiers in GFP_ZONEMASK (see linux/mmzone.h - low two bits) */
-#define __GFP_DMA      0x01u
-#define __GFP_HIGHMEM  0x02u
+#define __GFP_DMA      ((__force gfp_t)0x01u)
+#define __GFP_HIGHMEM  ((__force gfp_t)0x02u)
 
 /*
  * Action modifiers - doesn't change the zoning
@@ -26,24 +26,24 @@ struct vm_area_struct;
  *
  * __GFP_NORETRY: The VM implementation must not retry indefinitely.
  */
-#define __GFP_WAIT     0x10u   /* Can wait and reschedule? */
-#define __GFP_HIGH     0x20u   /* Should access emergency pools? */
-#define __GFP_IO       0x40u   /* Can start physical IO? */
-#define __GFP_FS       0x80u   /* Can call down to low-level FS? */
-#define __GFP_COLD     0x100u  /* Cache-cold page required */
-#define __GFP_NOWARN   0x200u  /* Suppress page allocation failure warning */
-#define __GFP_REPEAT   0x400u  /* Retry the allocation.  Might fail */
-#define __GFP_NOFAIL   0x800u  /* Retry for ever.  Cannot fail */
-#define __GFP_NORETRY  0x1000u /* Do not retry.  Might fail */
-#define __GFP_NO_GROW  0x2000u /* Slab internal usage */
-#define __GFP_COMP     0x4000u /* Add compound page metadata */
-#define __GFP_ZERO     0x8000u /* Return zeroed page on success */
-#define __GFP_NOMEMALLOC 0x10000u /* Don't use emergency reserves */
-#define __GFP_NORECLAIM  0x20000u /* No realy zone reclaim during allocation */
-#define __GFP_HARDWALL   0x40000u /* Enforce hardwall cpuset memory allocs */
+#define __GFP_WAIT     ((__force gfp_t)0x10u)  /* Can wait and reschedule? */
+#define __GFP_HIGH     ((__force gfp_t)0x20u)  /* Should access emergency pools? */
+#define __GFP_IO       ((__force gfp_t)0x40u)  /* Can start physical IO? */
+#define __GFP_FS       ((__force gfp_t)0x80u)  /* Can call down to low-level FS? */
+#define __GFP_COLD     ((__force gfp_t)0x100u) /* Cache-cold page required */
+#define __GFP_NOWARN   ((__force gfp_t)0x200u) /* Suppress page allocation failure warning */
+#define __GFP_REPEAT   ((__force gfp_t)0x400u) /* Retry the allocation.  Might fail */
+#define __GFP_NOFAIL   ((__force gfp_t)0x800u) /* Retry for ever.  Cannot fail */
+#define __GFP_NORETRY  ((__force gfp_t)0x1000u)/* Do not retry.  Might fail */
+#define __GFP_NO_GROW  ((__force gfp_t)0x2000u)/* Slab internal usage */
+#define __GFP_COMP     ((__force gfp_t)0x4000u)/* Add compound page metadata */
+#define __GFP_ZERO     ((__force gfp_t)0x8000u)/* Return zeroed page on success */
+#define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
+#define __GFP_NORECLAIM  ((__force gfp_t)0x20000u) /* No realy zone reclaim during allocation */
+#define __GFP_HARDWALL   ((__force gfp_t)0x40000u) /* Enforce hardwall cpuset memory allocs */
 
 #define __GFP_BITS_SHIFT 20    /* Room for 20 __GFP_FOO bits */
-#define __GFP_BITS_MASK ((1 << __GFP_BITS_SHIFT) - 1)
+#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /* if you forget to add the bitmask here kernel will crash, period */
 #define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
@@ -64,6 +64,7 @@ struct vm_area_struct;
 
 #define GFP_DMA                __GFP_DMA
 
+#define gfp_zone(mask) ((__force int)((mask) & (__force gfp_t)GFP_ZONEMASK))
 
 /*
  * There is only one page-allocator function, and two main namespaces to
@@ -94,7 +95,7 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
                return NULL;
 
        return __alloc_pages(gfp_mask, order,
-               NODE_DATA(nid)->node_zonelists + (gfp_mask & GFP_ZONEMASK));
+               NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_mask));
 }
 
 #ifdef CONFIG_NUMA
index bdc286ec947c13b2017a9dcc3dd5f49f0ed60533..b4af45aad25df5bf8fb669983bae3eacf76c9d62 100644 (file)
@@ -492,7 +492,7 @@ static inline int i2o_dma_map_sg(struct i2o_controller *c,
  *     Returns 0 on success or -ENOMEM on failure.
  */
 static inline int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr,
-                               size_t len, unsigned int gfp_mask)
+                               size_t len, gfp_t gfp_mask)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        int dma_64 = 0;
@@ -551,7 +551,7 @@ static inline void i2o_dma_free(struct device *dev, struct i2o_dma *addr)
  *     Returns the 0 on success or negative error code on failure.
  */
 static inline int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr,
-                                 size_t len, unsigned int gfp_mask)
+                                 size_t len, gfp_t gfp_mask)
 {
        i2o_dma_free(dev, addr);
 
index 2ef0b21517fbd20f56fe85c43aa5c7c14786c2c8..1c7a0dd5536aca7e8d2a1527ec60e48b0d26736c 100644 (file)
@@ -7,8 +7,8 @@
 /* ported to the Alpha architecture 02/20/96 (just used the HZ macro) */
 
 #define TR_RETRY_INTERVAL      (30*HZ) /* 500 on PC = 5 s */
-#define TR_RST_TIME            (HZ/20) /* 5 on PC = 50 ms */
-#define TR_BUSY_INTERVAL       (HZ/5)  /* 5 on PC = 200 ms */
+#define TR_RST_TIME            (msecs_to_jiffies(50))  /* 5 on PC = 50 ms */
+#define TR_BUSY_INTERVAL       (msecs_to_jiffies(200)) /* 5 on PC = 200 ms */
 #define TR_SPIN_INTERVAL       (3*HZ)  /* 3 seconds before init timeout */
 
 #define TR_ISA 1
index 3d5de45f961b050d9780795c0bb07a34c866911c..7fb3ff9c7b0e4763b15b3ebf1aad0148a64f8e00 100644 (file)
@@ -71,7 +71,7 @@ struct idr {
  */
 
 void *idr_find(struct idr *idp, int id);
-int idr_pre_get(struct idr *idp, unsigned gfp_mask);
+int idr_pre_get(struct idr *idp, gfp_t gfp_mask);
 int idr_get_new(struct idr *idp, void *ptr, int *id);
 int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id);
 void idr_remove(struct idr *idp, int id);
index 0856548a2a08b2eadab1b6e9de56dfc667993673..a8b1a2071838f70794fc7605b0b44f5def7f44d4 100644 (file)
@@ -84,6 +84,7 @@
 #define ARPHRD_IEEE802_TR 800          /* Magic type ident for TR      */
 #define ARPHRD_IEEE80211 801           /* IEEE 802.11                  */
 #define ARPHRD_IEEE80211_PRISM 802     /* IEEE 802.11 + Prism2 header  */
+#define ARPHRD_IEEE80211_RADIOTAP 803  /* IEEE 802.11 + radiotap header */
 
 #define ARPHRD_VOID      0xFFFF        /* Void type, nothing is known */
 #define ARPHRD_NONE      0xFFFE        /* zero header length */
index ff853b3173c6545a189da4c1ddd04a055c5218a0..be197eb900777167fedecdce824be0c2275a78d5 100644 (file)
@@ -69,7 +69,7 @@ extern int journal_enable_debug;
 #define jbd_debug(f, a...)     /**/
 #endif
 
-extern void * __jbd_kmalloc (const char *where, size_t size, int flags, int retry);
+extern void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry);
 #define jbd_kmalloc(size, flags) \
        __jbd_kmalloc(__FUNCTION__, (size), (flags), journal_oom_retry)
 #define jbd_rep_kmalloc(size, flags) \
@@ -890,7 +890,7 @@ extern int   journal_forget (handle_t *, struct buffer_head *);
 extern void     journal_sync_buffer (struct buffer_head *);
 extern int      journal_invalidatepage(journal_t *,
                                struct page *, unsigned long);
-extern int      journal_try_to_free_buffers(journal_t *, struct page *, int);
+extern int      journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
 extern int      journal_stop(handle_t *);
 extern int      journal_flush (journal_t *);
 extern void     journal_lock_updates (journal_t *);
index 3b22304f12fd9f542f9a09f10e7bcde9531a9fd7..7f7403aa4a417c4fbf7ab8ad5740dff8aadb61f8 100644 (file)
@@ -65,7 +65,7 @@ extern void kobject_unregister(struct kobject *);
 extern struct kobject * kobject_get(struct kobject *);
 extern void kobject_put(struct kobject *);
 
-extern char * kobject_get_path(struct kobject *, int);
+extern char * kobject_get_path(struct kobject *, gfp_t);
 
 struct kobj_type {
        void (*release)(struct kobject *);
index ceee1fc42c600d0aa0d045edb37db8c253fec457..00a8a5738858bd4e3a13e53cd93c3ec03196f22b 100644 (file)
@@ -91,12 +91,13 @@ enum {
        ATA_SHT_EMULATED        = 1,
        ATA_SHT_CMD_PER_LUN     = 1,
        ATA_SHT_THIS_ID         = -1,
-       ATA_SHT_USE_CLUSTERING  = 0,
+       ATA_SHT_USE_CLUSTERING  = 1,
 
        /* struct ata_device stuff */
        ATA_DFLAG_LBA48         = (1 << 0), /* device supports LBA48 */
        ATA_DFLAG_PIO           = (1 << 1), /* device currently in PIO mode */
        ATA_DFLAG_LOCK_SECTORS  = (1 << 2), /* don't adjust max_sectors */
+       ATA_DFLAG_LBA           = (1 << 3), /* device supports LBA */
 
        ATA_DEV_UNKNOWN         = 0,    /* unknown device */
        ATA_DEV_ATA             = 1,    /* ATA device */
@@ -154,17 +155,21 @@ enum {
        ATA_SHIFT_UDMA          = 0,
        ATA_SHIFT_MWDMA         = 8,
        ATA_SHIFT_PIO           = 11,
+       
+       /* Masks for port functions */
+       ATA_PORT_PRIMARY        = (1 << 0),
+       ATA_PORT_SECONDARY      = (1 << 1),
 };
 
-enum pio_task_states {
-       PIO_ST_UNKNOWN,
-       PIO_ST_IDLE,
-       PIO_ST_POLL,
-       PIO_ST_TMOUT,
-       PIO_ST,
-       PIO_ST_LAST,
-       PIO_ST_LAST_POLL,
-       PIO_ST_ERR,
+enum hsm_task_states {
+       HSM_ST_UNKNOWN,
+       HSM_ST_IDLE,
+       HSM_ST_POLL,
+       HSM_ST_TMOUT,
+       HSM_ST,
+       HSM_ST_LAST,
+       HSM_ST_LAST_POLL,
+       HSM_ST_ERR,
 };
 
 /* forward declarations */
@@ -197,7 +202,7 @@ struct ata_ioports {
 struct ata_probe_ent {
        struct list_head        node;
        struct device           *dev;
-       struct ata_port_operations      *port_ops;
+       const struct ata_port_operations *port_ops;
        Scsi_Host_Template      *sht;
        struct ata_ioports      port[ATA_MAX_PORTS];
        unsigned int            n_ports;
@@ -220,7 +225,7 @@ struct ata_host_set {
        void __iomem            *mmio_base;
        unsigned int            n_ports;
        void                    *private_data;
-       struct ata_port_operations *ops;
+       const struct ata_port_operations *ops;
        struct ata_port *       ports[0];
 };
 
@@ -278,15 +283,18 @@ struct ata_device {
        u8                      xfer_mode;
        unsigned int            xfer_shift;     /* ATA_SHIFT_xxx */
 
-       /* cache info about current transfer mode */
-       u8                      xfer_protocol;  /* taskfile xfer protocol */
-       u8                      read_cmd;       /* opcode to use on read */
-       u8                      write_cmd;      /* opcode to use on write */
+       unsigned int            multi_count;    /* sectors count for
+                                                  READ/WRITE MULTIPLE */
+
+       /* for CHS addressing */
+       u16                     cylinders;      /* Number of cylinders */
+       u16                     heads;          /* Number of heads */
+       u16                     sectors;        /* Number of sectors per track */
 };
 
 struct ata_port {
        struct Scsi_Host        *host;  /* our co-allocated scsi host */
-       struct ata_port_operations      *ops;
+       const struct ata_port_operations *ops;
        unsigned long           flags;  /* ATA_FLAG_xxx */
        unsigned int            id;     /* unique id req'd by scsi midlyr */
        unsigned int            port_no; /* unique port #; from zero */
@@ -319,7 +327,7 @@ struct ata_port {
        struct work_struct      packet_task;
 
        struct work_struct      pio_task;
-       unsigned int            pio_task_state;
+       unsigned int            hsm_task_state;
        unsigned long           pio_task_timeout;
 
        void                    *private_data;
@@ -333,10 +341,10 @@ struct ata_port_operations {
        void (*set_piomode) (struct ata_port *, struct ata_device *);
        void (*set_dmamode) (struct ata_port *, struct ata_device *);
 
-       void (*tf_load) (struct ata_port *ap, struct ata_taskfile *tf);
+       void (*tf_load) (struct ata_port *ap, const struct ata_taskfile *tf);
        void (*tf_read) (struct ata_port *ap, struct ata_taskfile *tf);
 
-       void (*exec_command)(struct ata_port *ap, struct ata_taskfile *tf);
+       void (*exec_command)(struct ata_port *ap, const struct ata_taskfile *tf);
        u8   (*check_status)(struct ata_port *ap);
        u8   (*check_altstatus)(struct ata_port *ap);
        u8   (*check_err)(struct ata_port *ap);
@@ -377,9 +385,22 @@ struct ata_port_info {
        unsigned long           pio_mask;
        unsigned long           mwdma_mask;
        unsigned long           udma_mask;
-       struct ata_port_operations      *port_ops;
+       const struct ata_port_operations *port_ops;
+};
+
+struct ata_timing {
+       unsigned short mode;            /* ATA mode */
+       unsigned short setup;           /* t1 */
+       unsigned short act8b;           /* t2 for 8-bit I/O */
+       unsigned short rec8b;           /* t2i for 8-bit I/O */
+       unsigned short cyc8b;           /* t0 for 8-bit I/O */
+       unsigned short active;          /* t2 or tD */
+       unsigned short recover;         /* t2i or tK */
+       unsigned short cycle;           /* t0 */
+       unsigned short udma;            /* t2CYCTYP/2 */
 };
 
+#define FIT(v,vmin,vmax)       max_t(short,min_t(short,v,vmax),vmin)
 
 extern void ata_port_probe(struct ata_port *);
 extern void __sata_phy_reset(struct ata_port *ap);
@@ -392,7 +413,7 @@ extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_i
                             unsigned int n_ports);
 extern void ata_pci_remove_one (struct pci_dev *pdev);
 #endif /* CONFIG_PCI */
-extern int ata_device_add(struct ata_probe_ent *ent);
+extern int ata_device_add(const struct ata_probe_ent *ent);
 extern void ata_host_set_remove(struct ata_host_set *host_set);
 extern int ata_scsi_detect(Scsi_Host_Template *sht);
 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
@@ -400,19 +421,21 @@ extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmn
 extern int ata_scsi_error(struct Scsi_Host *host);
 extern int ata_scsi_release(struct Scsi_Host *host);
 extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
+extern int ata_ratelimit(void);
+
 /*
  * Default driver ops implementations
  */
-extern void ata_tf_load(struct ata_port *ap, struct ata_taskfile *tf);
+extern void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
 extern void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
-extern void ata_tf_to_fis(struct ata_taskfile *tf, u8 *fis, u8 pmp);
-extern void ata_tf_from_fis(u8 *fis, struct ata_taskfile *tf);
+extern void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp);
+extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf);
 extern void ata_noop_dev_select (struct ata_port *ap, unsigned int device);
 extern void ata_std_dev_select (struct ata_port *ap, unsigned int device);
 extern u8 ata_check_status(struct ata_port *ap);
 extern u8 ata_altstatus(struct ata_port *ap);
 extern u8 ata_chk_err(struct ata_port *ap);
-extern void ata_exec_command(struct ata_port *ap, struct ata_taskfile *tf);
+extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf);
 extern int ata_port_start (struct ata_port *ap);
 extern void ata_port_stop (struct ata_port *ap);
 extern void ata_host_stop (struct ata_host_set *host_set);
@@ -423,8 +446,8 @@ extern void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf,
                unsigned int buflen);
 extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
                 unsigned int n_elem);
-extern unsigned int ata_dev_classify(struct ata_taskfile *tf);
-extern void ata_dev_id_string(u16 *id, unsigned char *s,
+extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
+extern void ata_dev_id_string(const u16 *id, unsigned char *s,
                              unsigned int ofs, unsigned int len);
 extern void ata_dev_config(struct ata_port *ap, unsigned int i);
 extern void ata_bmdma_setup (struct ata_queued_cmd *qc);
@@ -441,6 +464,32 @@ extern int ata_std_bios_param(struct scsi_device *sdev,
                              sector_t capacity, int geom[]);
 extern int ata_scsi_slave_config(struct scsi_device *sdev);
 
+/*
+ * Timing helpers
+ */
+extern int ata_timing_compute(struct ata_device *, unsigned short,
+                             struct ata_timing *, int, int);
+extern void ata_timing_merge(const struct ata_timing *,
+                            const struct ata_timing *, struct ata_timing *,
+                            unsigned int);
+
+enum {
+       ATA_TIMING_SETUP        = (1 << 0),
+       ATA_TIMING_ACT8B        = (1 << 1),
+       ATA_TIMING_REC8B        = (1 << 2),
+       ATA_TIMING_CYC8B        = (1 << 3),
+       ATA_TIMING_8BIT         = ATA_TIMING_ACT8B | ATA_TIMING_REC8B |
+                                 ATA_TIMING_CYC8B,
+       ATA_TIMING_ACTIVE       = (1 << 4),
+       ATA_TIMING_RECOVER      = (1 << 5),
+       ATA_TIMING_CYCLE        = (1 << 6),
+       ATA_TIMING_UDMA         = (1 << 7),
+       ATA_TIMING_ALL          = ATA_TIMING_SETUP | ATA_TIMING_ACT8B |
+                                 ATA_TIMING_REC8B | ATA_TIMING_CYC8B |
+                                 ATA_TIMING_ACTIVE | ATA_TIMING_RECOVER |
+                                 ATA_TIMING_CYCLE | ATA_TIMING_UDMA,
+};
+
 
 #ifdef CONFIG_PCI
 struct pci_bits {
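
An illustrative sketch of how the new timing helpers might be combined (the clamp range is assumed, not taken from the patch): merge selected fields of two ata_timing records, then bound one result with FIT().

static void example_timing(const struct ata_timing *a, const struct ata_timing *b,
			   struct ata_timing *out)
{
	/* merge only the 8-bit and cycle fields of the two records */
	ata_timing_merge(a, b, out, ATA_TIMING_8BIT | ATA_TIMING_CYCLE);
	out->active = FIT(out->active, 30, 330);	/* clamp to an assumed legal range */
}
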
@@ -452,8 +501,8 @@ struct pci_bits {
 
 extern void ata_pci_host_stop (struct ata_host_set *host_set);
 extern struct ata_probe_ent *
-ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port);
-extern int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits);
+ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int portmask);
+extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
 
 #endif /* CONFIG_PCI */
 
@@ -463,7 +512,7 @@ static inline unsigned int ata_tag_valid(unsigned int tag)
        return (tag < ATA_MAX_QUEUE) ? 1 : 0;
 }
 
-static inline unsigned int ata_dev_present(struct ata_device *dev)
+static inline unsigned int ata_dev_present(const struct ata_device *dev)
 {
        return ((dev->class == ATA_DEV_ATA) ||
                (dev->class == ATA_DEV_ATAPI));
@@ -662,7 +711,7 @@ static inline unsigned int sata_dev_present(struct ata_port *ap)
        return ((scr_read(ap, SCR_STATUS) & 0xf) == 0x3) ? 1 : 0;
 }
 
-static inline int ata_try_flush_cache(struct ata_device *dev)
+static inline int ata_try_flush_cache(const struct ata_device *dev)
 {
        return ata_id_wcache_enabled(dev->id) ||
               ata_id_has_flush(dev->id) ||
index 53fa51595443e81d2878ab22902f0815d1933844..40f63c9879d26798bbfc073753cc58ecd9de306b 100644 (file)
@@ -52,7 +52,7 @@ struct loop_device {
        unsigned        lo_blocksize;
        void            *key_data; 
 
-       int             old_gfp_mask;
+       gfp_t           old_gfp_mask;
 
        spinlock_t              lo_lock;
        struct bio              *lo_bio;
index 9263d2db2d670f41d24a2c9ec707c6a98db09ee8..99e044b4efc6d62ea92e79f79a2b74ec8b8559ad 100644 (file)
@@ -22,7 +22,7 @@ struct mb_cache_entry {
 };
 
 struct mb_cache_op {
-       int (*free)(struct mb_cache_entry *, int);
+       int (*free)(struct mb_cache_entry *, gfp_t);
 };
 
 /* Functions on caches */
index 9b8d0476988ad3bb1aa9bfeeae74b9d8b1a6f57f..68f5a0f392dd969a9e063a2e835ecced5ba1c61f 100644 (file)
@@ -158,6 +158,7 @@ extern int mii_link_ok (struct mii_if_info *mii);
 extern int mii_nway_restart (struct mii_if_info *mii);
 extern int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd);
 extern int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd);
+extern int mii_check_gmii_support(struct mii_if_info *mii);
 extern void mii_check_link (struct mii_if_info *mii);
 extern unsigned int mii_check_media (struct mii_if_info *mii,
                                     unsigned int ok_to_print,
index 097b3a3c693d58c924dd7409db1dd295ebfe8a23..e1649578fb0ca4cec40f93d1b358993f8bb69d23 100644 (file)
@@ -747,7 +747,7 @@ extern unsigned long do_mremap(unsigned long addr,
  * The callback will be passed nr_to_scan == 0 when the VM is querying the
  * cache size, so a fastpath for that case is appropriate.
  */
-typedef int (*shrinker_t)(int nr_to_scan, unsigned int gfp_mask);
+typedef int (*shrinker_t)(int nr_to_scan, gfp_t gfp_mask);
 
 /*
  * Add an aging callback.  The int is the number of 'seeks' it takes
index 5ed471b58f4f0c83904101ee6cb734bb104a47ad..7519eb4191e7d962e6a34e9831cb86441531ffdb 100644 (file)
@@ -302,7 +302,7 @@ void get_zone_counts(unsigned long *active, unsigned long *inactive,
 void build_all_zonelists(void);
 void wakeup_kswapd(struct zone *zone, int order);
 int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
-               int alloc_type, int can_try_harder, int gfp_high);
+               int alloc_type, int can_try_harder, gfp_t gfp_high);
 
 #ifdef CONFIG_HAVE_MEMORY_PRESENT
 void memory_present(int nid, unsigned long start, unsigned long end);
index 7db67b008cac425a089de3777a95210ccc50e586..1c975d0d9e949b628eb4fa35ef5001c763fb529c 100644 (file)
@@ -8,6 +8,7 @@ struct vfsmount;
 struct open_intent {
        int     flags;
        int     create_mode;
+       struct file *file;
 };
 
 enum { MAX_NESTED_LINKS = 5 };
@@ -65,6 +66,13 @@ extern int FASTCALL(link_path_walk(const char *, struct nameidata *));
 extern void path_release(struct nameidata *);
 extern void path_release_on_umount(struct nameidata *);
 
+extern int __user_path_lookup_open(const char __user *, unsigned lookup_flags, struct nameidata *nd, int open_flags);
+extern int path_lookup_open(const char *, unsigned lookup_flags, struct nameidata *, int open_flags);
+extern struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry,
+               int (*open)(struct inode *, struct file *));
+extern struct file *nameidata_to_filp(struct nameidata *nd, int flags);
+extern void release_open_intent(struct nameidata *);
+
 extern struct dentry * lookup_one_len(const char *, struct dentry *, int);
 extern struct dentry * lookup_hash(struct qstr *, struct dentry *);
 
index 368e4c825ff1b5006adfe8a962083500a6776e0c..a9281b24c40b8bfa748899dad4c5e17574e4aaaf 100644 (file)
@@ -873,11 +873,9 @@ static inline void netif_rx_complete(struct net_device *dev)
 
 static inline void netif_poll_disable(struct net_device *dev)
 {
-       while (test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state)) {
+       while (test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state))
                /* No hurry. */
-               current->state = TASK_INTERRUPTIBLE;
-               schedule_timeout(1);
-       }
+               schedule_timeout_interruptible(1);
 }
 
 static inline void netif_poll_enable(struct net_device *dev)
index 9a6047ff1b25b198a869678463ed0561cbf417cc..325fe7ae49bb7be58270b261c52640de10085535 100644 (file)
 #define NFS_MAX_FILE_IO_BUFFER_SIZE    32768
 #define NFS_DEF_FILE_IO_BUFFER_SIZE    4096
 
+/* Default timeout values */
+#define NFS_MAX_UDP_TIMEOUT    (60*HZ)
+#define NFS_MAX_TCP_TIMEOUT    (600*HZ)
+
 /*
  * superblock magic number for NFS
  */
@@ -137,6 +141,7 @@ struct nfs_inode {
        unsigned long           attrtimeo_timestamp;
        __u64                   change_attr;            /* v4 only */
 
+       unsigned long           last_updated;
        /* "Generation counter" for the attribute cache. This is
         * bumped whenever we update the metadata on the
         * server.
@@ -236,13 +241,17 @@ static inline int nfs_caches_unstable(struct inode *inode)
        return atomic_read(&NFS_I(inode)->data_updates) != 0;
 }
 
+static inline void nfs_mark_for_revalidate(struct inode *inode)
+{
+       spin_lock(&inode->i_lock);
+       NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS;
+       spin_unlock(&inode->i_lock);
+}
+
 static inline void NFS_CACHEINV(struct inode *inode)
 {
-       if (!nfs_caches_unstable(inode)) {
-               spin_lock(&inode->i_lock);
-               NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS;
-               spin_unlock(&inode->i_lock);
-       }
+       if (!nfs_caches_unstable(inode))
+               nfs_mark_for_revalidate(inode);
 }
 
 static inline int nfs_server_capable(struct inode *inode, int cap)
@@ -276,7 +285,7 @@ static inline long nfs_save_change_attribute(struct inode *inode)
 static inline int nfs_verify_change_attribute(struct inode *inode, unsigned long chattr)
 {
        return !nfs_caches_unstable(inode)
-               && chattr == NFS_I(inode)->cache_change_attribute;
+               && time_after_eq(chattr, NFS_I(inode)->cache_change_attribute);
 }
 
 /*
@@ -286,6 +295,7 @@ extern void nfs_zap_caches(struct inode *);
 extern struct inode *nfs_fhget(struct super_block *, struct nfs_fh *,
                                struct nfs_fattr *);
 extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *);
+extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr);
 extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
 extern int nfs_permission(struct inode *, int, struct nameidata *);
 extern int nfs_access_get_cached(struct inode *, struct rpc_cred *, struct nfs_access_entry *);
@@ -312,6 +322,12 @@ extern void nfs_file_clear_open_context(struct file *filp);
 /* linux/net/ipv4/ipconfig.c: trims ip addr off front of name, too. */
 extern u32 root_nfs_parse_addr(char *name); /*__init*/
 
+static inline void nfs_fattr_init(struct nfs_fattr *fattr)
+{
+       fattr->valid = 0;
+       fattr->time_start = jiffies;
+}
+
 /*
  * linux/fs/nfs/file.c
  */
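
A hedged sketch of how the new inode helpers are meant to bracket an RPC call; the surrounding proc call is assumed:

static void example_post_op(struct inode *inode, struct nfs_fattr *fattr)
{
	nfs_fattr_init(fattr);		/* clear valid bits, stamp time_start */
	/* ... issue the RPC; the server reply fills *fattr ... */
	nfs_mark_for_revalidate(inode);	/* force attribute revalidation on next use */
}
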
index a2bf6914ff1b3aa1c444ef1478ac078a44070db6..40718669b9c896e45939e6490f38d843d67623ea 100644 (file)
@@ -41,7 +41,7 @@ struct nfs_fattr {
        __u32                   bitmap[2];      /* NFSv4 returned attribute bitmap */
        __u64                   change_attr;    /* NFSv4 change attribute */
        __u64                   pre_change_attr;/* pre-op NFSv4 change attribute */
-       unsigned long           timestamp;
+       unsigned long           time_start;
 };
 
 #define NFS_ATTR_WCC           0x0001          /* pre-op WCC data    */
@@ -96,12 +96,13 @@ struct nfs4_change_info {
        u64                     after;
 };
 
+struct nfs_seqid;
 /*
  * Arguments to the open call.
  */
 struct nfs_openargs {
        const struct nfs_fh *   fh;
-       __u32                   seqid;
+       struct nfs_seqid *      seqid;
        int                     open_flags;
        __u64                   clientid;
        __u32                   id;
@@ -123,6 +124,7 @@ struct nfs_openres {
        struct nfs4_change_info cinfo;
        __u32                   rflags;
        struct nfs_fattr *      f_attr;
+       struct nfs_fattr *      dir_attr;
        const struct nfs_server *server;
        int                     delegation_type;
        nfs4_stateid            delegation;
@@ -136,7 +138,7 @@ struct nfs_openres {
 struct nfs_open_confirmargs {
        const struct nfs_fh *   fh;
        nfs4_stateid            stateid;
-       __u32                   seqid;
+       struct nfs_seqid *      seqid;
 };
 
 struct nfs_open_confirmres {
@@ -148,13 +150,16 @@ struct nfs_open_confirmres {
  */
 struct nfs_closeargs {
        struct nfs_fh *         fh;
-       nfs4_stateid            stateid;
-       __u32                   seqid;
+       nfs4_stateid *          stateid;
+       struct nfs_seqid *      seqid;
        int                     open_flags;
+       const u32 *             bitmask;
 };
 
 struct nfs_closeres {
        nfs4_stateid            stateid;
+       struct nfs_fattr *      fattr;
+       const struct nfs_server *server;
 };
 /*
  * Arguments to the lock, lockt, and locku calls.
@@ -164,30 +169,19 @@ struct nfs_lowner {
        u32                     id;
 };
 
-struct nfs_open_to_lock {
-       __u32                   open_seqid;
-       nfs4_stateid            open_stateid;
-       __u32                   lock_seqid;
-       struct nfs_lowner       lock_owner;
-};
-
-struct nfs_exist_lock {
-       nfs4_stateid            stateid;
-       __u32                   seqid;
-};
-
 struct nfs_lock_opargs {
+       struct nfs_seqid *      lock_seqid;
+       nfs4_stateid *          lock_stateid;
+       struct nfs_seqid *      open_seqid;
+       nfs4_stateid *          open_stateid;
+       struct nfs_lowner       lock_owner;
        __u32                   reclaim;
        __u32                   new_lock_owner;
-       union {
-               struct nfs_open_to_lock *open_lock;
-               struct nfs_exist_lock   *exist_lock;
-       } u;
 };
 
 struct nfs_locku_opargs {
-       __u32                   seqid;
-       nfs4_stateid            stateid;
+       struct nfs_seqid *      seqid;
+       nfs4_stateid *          stateid;
 };
 
 struct nfs_lockargs {
@@ -262,6 +256,7 @@ struct nfs_writeargs {
        enum nfs3_stable_how    stable;
        unsigned int            pgbase;
        struct page **          pages;
+       const u32 *             bitmask;
 };
 
 struct nfs_writeverf {
@@ -273,6 +268,7 @@ struct nfs_writeres {
        struct nfs_fattr *      fattr;
        struct nfs_writeverf *  verf;
        __u32                   count;
+       const struct nfs_server *server;
 };
 
 /*
@@ -550,6 +546,7 @@ struct nfs4_create_res {
        struct nfs_fh *                 fh;
        struct nfs_fattr *              fattr;
        struct nfs4_change_info         dir_cinfo;
+       struct nfs_fattr *              dir_fattr;
 };
 
 struct nfs4_fsinfo_arg {
@@ -571,8 +568,17 @@ struct nfs4_link_arg {
        const struct nfs_fh *           fh;
        const struct nfs_fh *           dir_fh;
        const struct qstr *             name;
+       const u32 *                     bitmask;
+};
+
+struct nfs4_link_res {
+       const struct nfs_server *       server;
+       struct nfs_fattr *              fattr;
+       struct nfs4_change_info         cinfo;
+       struct nfs_fattr *              dir_attr;
 };
 
+
 struct nfs4_lookup_arg {
        const struct nfs_fh *           dir_fh;
        const struct qstr *             name;
@@ -619,6 +625,13 @@ struct nfs4_readlink {
 struct nfs4_remove_arg {
        const struct nfs_fh *           fh;
        const struct qstr *             name;
+       const u32 *                     bitmask;
+};
+
+struct nfs4_remove_res {
+       const struct nfs_server *       server;
+       struct nfs4_change_info         cinfo;
+       struct nfs_fattr *              dir_attr;
 };
 
 struct nfs4_rename_arg {
@@ -626,11 +639,15 @@ struct nfs4_rename_arg {
        const struct nfs_fh *           new_dir;
        const struct qstr *             old_name;
        const struct qstr *             new_name;
+       const u32 *                     bitmask;
 };
 
 struct nfs4_rename_res {
+       const struct nfs_server *       server;
        struct nfs4_change_info         old_cinfo;
+       struct nfs_fattr *              old_fattr;
        struct nfs4_change_info         new_cinfo;
+       struct nfs_fattr *              new_fattr;
 };
 
 struct nfs4_setclientid {
@@ -722,7 +739,7 @@ struct nfs_rpc_ops {
        int     (*write)   (struct nfs_write_data *);
        int     (*commit)  (struct nfs_write_data *);
        int     (*create)  (struct inode *, struct dentry *,
-                           struct iattr *, int);
+                           struct iattr *, int, struct nameidata *);
        int     (*remove)  (struct inode *, struct qstr *);
        int     (*unlink_setup)  (struct rpc_message *,
                            struct dentry *, struct qstr *);
index acbf31c154f8576a6658c9f848c2bf234ba789f0..ba6c310a055fa123c0169dfd47ecdbdd11dcfb19 100644 (file)
 
 static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
 {
-       return mapping->flags & __GFP_BITS_MASK;
+       return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
 }
 
 /*
  * This is non-atomic.  Only to be used before the mapping is activated.
  * Probably needs a barrier...
  */
-static inline void mapping_set_gfp_mask(struct address_space *m, int mask)
+static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
 {
-       m->flags = (m->flags & ~__GFP_BITS_MASK) | mask;
+       m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
+                               (__force unsigned long)mask;
 }
 
 /*
@@ -69,7 +70,7 @@ extern struct page * find_lock_page(struct address_space *mapping,
 extern struct page * find_trylock_page(struct address_space *mapping,
                                unsigned long index);
 extern struct page * find_or_create_page(struct address_space *mapping,
-                               unsigned long index, unsigned int gfp_mask);
+                               unsigned long index, gfp_t gfp_mask);
 unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
                        unsigned int nr_pages, struct page **pages);
 unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
@@ -92,9 +93,9 @@ extern int read_cache_pages(struct address_space *mapping,
                struct list_head *pages, filler_t *filler, void *data);
 
 int add_to_page_cache(struct page *page, struct address_space *mapping,
-                               unsigned long index, int gfp_mask);
+                               unsigned long index, gfp_t gfp_mask);
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
-                               unsigned long index, int gfp_mask);
+                               unsigned long index, gfp_t gfp_mask);
 extern void remove_from_page_cache(struct page *page);
 extern void __remove_from_page_cache(struct page *page);
 
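
A sketch of a hypothetical caller under the typed interface: read the mapping's mask, restrict it, and store it back (block drivers use a similar pattern to avoid recursing into the filesystem):

static void example_restrict_mapping(struct address_space *mapping)
{
	gfp_t gfp = mapping_gfp_mask(mapping);

	mapping_set_gfp_mask(mapping, gfp & ~(__GFP_FS | __GFP_IO));
}
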
index 045d4761febce8557b542cbffb9cfc04776b65f8..9f0f9281f42a01628adbdb80f317172486eca583 100644 (file)
@@ -24,7 +24,7 @@
 
 struct radix_tree_root {
        unsigned int            height;
-       unsigned int            gfp_mask;
+       gfp_t                   gfp_mask;
        struct radix_tree_node  *rnode;
 };
 
index af00b10294cde3abfc57b09a4d6c6867413575b3..001ab82df051dd21ba5697f5b258ff8a13731e8f 100644 (file)
@@ -1972,7 +1972,7 @@ extern struct address_space_operations reiserfs_address_space_operations;
 
 /* fix_nodes.c */
 #ifdef CONFIG_REISERFS_CHECK
-void *reiserfs_kmalloc(size_t size, int flags, struct super_block *s);
+void *reiserfs_kmalloc(size_t size, gfp_t flags, struct super_block *s);
 void reiserfs_kfree(const void *vp, size_t size, struct super_block *s);
 #else
 static inline void *reiserfs_kmalloc(size_t size, int flags,
index 78f634007fc6b88d09ddf1ffde90cea1e7c58f72..c85e103d5e7b664d9f0d3ad005175705f4ee3bce 100644 (file)
@@ -52,12 +52,8 @@ typedef struct sdlahw
 
 extern int sdla_setup  (sdlahw_t* hw, void* sfm, unsigned len);
 extern int sdla_down   (sdlahw_t* hw);
-extern int sdla_inten  (sdlahw_t* hw);
-extern int sdla_intde  (sdlahw_t* hw);
-extern int sdla_intack (sdlahw_t* hw);
 extern void S514_intack  (sdlahw_t* hw, u32 int_status);
 extern void read_S514_int_stat (sdlahw_t* hw, u32* int_status);
-extern int sdla_intr   (sdlahw_t* hw);
 extern int sdla_mapmem (sdlahw_t* hw, unsigned long addr);
 extern int sdla_peek   (sdlahw_t* hw, unsigned long addr, void* buf,
                         unsigned len);
index 627382e7405779464e5475040a1cf1ca084b7b49..dac956ed98f0a11c217e58512e37a8e1085ffd7a 100644 (file)
@@ -1210,7 +1210,7 @@ struct security_operations {
        int (*socket_shutdown) (struct socket * sock, int how);
        int (*socket_sock_rcv_skb) (struct sock * sk, struct sk_buff * skb);
        int (*socket_getpeersec) (struct socket *sock, char __user *optval, int __user *optlen, unsigned len);
-       int (*sk_alloc_security) (struct sock *sk, int family, int priority);
+       int (*sk_alloc_security) (struct sock *sk, int family, gfp_t priority);
        void (*sk_free_security) (struct sock *sk);
 #endif /* CONFIG_SECURITY_NETWORK */
 };
index 8f5d9e7f8734abf0307483998c27f4faca71f47c..b756935da9c8ce1b7a05fbf7d3d371a0f05c0852 100644 (file)
@@ -171,7 +171,6 @@ enum {
  *     struct sk_buff - socket buffer
  *     @next: Next buffer in list
  *     @prev: Previous buffer in list
- *     @list: List we are on
  *     @sk: Socket we are owned by
  *     @tstamp: Time we arrived
  *     @dev: Device we arrived on/are leaving by
@@ -190,6 +189,7 @@ enum {
  *     @cloned: Head may be cloned (check refcnt to be sure)
  *     @nohdr: Payload reference only, must not modify header
  *     @pkt_type: Packet class
+ *     @fclone: skbuff clone status
  *     @ip_summed: Driver fed us an IP checksum
  *     @priority: Packet queueing priority
  *     @users: User count - see {datagram,tcp}.c
@@ -202,6 +202,7 @@ enum {
  *     @destructor: Destruct function
  *     @nfmark: Can be used for communication between hooks
  *     @nfct: Associated connection, if any
+ *     @ipvs_property: skbuff is owned by ipvs
  *     @nfctinfo: Relationship of this skb to the connection
  *     @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
  *     @tc_index: Traffic control index
index 5fc04a16ecb09adbce015ca6a7bb5632dae19b29..09b9aa60063dda9d56d062f9e44e2042df72f744 100644 (file)
@@ -121,7 +121,7 @@ extern unsigned int ksize(const void *);
 extern void *kmem_cache_alloc_node(kmem_cache_t *, gfp_t flags, int node);
 extern void *kmalloc_node(size_t size, gfp_t flags, int node);
 #else
-static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int node)
+static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int node)
 {
        return kmem_cache_alloc(cachep, flags);
 }
index 04ebc24db34879ffa70797e30f26e31b98ae8e0b..b68c11a2d6dd912274b146df30d17bc4045800a9 100644 (file)
@@ -66,7 +66,12 @@ struct rpc_cred_cache {
 
 struct rpc_auth {
        unsigned int            au_cslack;      /* call cred size estimate */
-       unsigned int            au_rslack;      /* reply verf size guess */
+                               /* guess at number of u32's auth adds before
+                                * reply data; normally the verifier size: */
+       unsigned int            au_rslack;
+                               /* for gss, used to calculate au_rslack: */
+       unsigned int            au_verfsize;
+
        unsigned int            au_flags;       /* various flags */
        struct rpc_authops *    au_ops;         /* operations */
        rpc_authflavor_t        au_flavor;      /* pseudoflavor (note may
index eadb31e3c1983b850ad3b824ac6a3d0b35617d73..1a42d902bc11e0bb9976177765a8ebcc0263648d 100644 (file)
@@ -32,6 +32,7 @@
 #define RPCDBG_AUTH            0x0010
 #define RPCDBG_PMAP            0x0020
 #define RPCDBG_SCHED           0x0040
+#define RPCDBG_TRANS           0x0080
 #define RPCDBG_SVCSOCK         0x0100
 #define RPCDBG_SVCDSP          0x0200
 #define RPCDBG_MISC            0x0400
@@ -94,6 +95,8 @@ enum {
        CTL_NLMDEBUG,
        CTL_SLOTTABLE_UDP,
        CTL_SLOTTABLE_TCP,
+       CTL_MIN_RESVPORT,
+       CTL_MAX_RESVPORT,
 };
 
 #endif /* _LINUX_SUNRPC_DEBUG_H_ */
index 689262f6305987183568e3f919121eea01bf7208..9b8bcf125c18e79332d9c13762dacb85d8eec8b7 100644 (file)
@@ -40,14 +40,21 @@ int gss_import_sec_context(
                struct gss_ctx          **ctx_id);
 u32 gss_get_mic(
                struct gss_ctx          *ctx_id,
-               u32                     qop,
                struct xdr_buf          *message,
                struct xdr_netobj       *mic_token);
 u32 gss_verify_mic(
                struct gss_ctx          *ctx_id,
                struct xdr_buf          *message,
-               struct xdr_netobj       *mic_token,
-               u32                     *qstate);
+               struct xdr_netobj       *mic_token);
+u32 gss_wrap(
+               struct gss_ctx          *ctx_id,
+               int                     offset,
+               struct xdr_buf          *outbuf,
+               struct page             **inpages);
+u32 gss_unwrap(
+               struct gss_ctx          *ctx_id,
+               int                     offset,
+               struct xdr_buf          *inbuf);
 u32 gss_delete_sec_context(
                struct gss_ctx          **ctx_id);
 
@@ -56,7 +63,6 @@ char *gss_service_to_auth_domain_name(struct gss_api_mech *, u32 service);
 
 struct pf_desc {
        u32     pseudoflavor;
-       u32     qop;
        u32     service;
        char    *name;
        char    *auth_domain_name;
@@ -85,14 +91,21 @@ struct gss_api_ops {
                        struct gss_ctx          *ctx_id);
        u32 (*gss_get_mic)(
                        struct gss_ctx          *ctx_id,
-                       u32                     qop, 
                        struct xdr_buf          *message,
                        struct xdr_netobj       *mic_token);
        u32 (*gss_verify_mic)(
                        struct gss_ctx          *ctx_id,
                        struct xdr_buf          *message,
-                       struct xdr_netobj       *mic_token,
-                       u32                     *qstate);
+                       struct xdr_netobj       *mic_token);
+       u32 (*gss_wrap)(
+                       struct gss_ctx          *ctx_id,
+                       int                     offset,
+                       struct xdr_buf          *outbuf,
+                       struct page             **inpages);
+       u32 (*gss_unwrap)(
+                       struct gss_ctx          *ctx_id,
+                       int                     offset,
+                       struct xdr_buf          *buf);
        void (*gss_delete_sec_context)(
                        void                    *internal_ctx_id);
 };
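
A hedged sketch of a caller of the new per-message privacy entry points; the context, buffer, and page array are assumed to be set up elsewhere, and the round-trip is for illustration only:

static u32 example_priv(struct gss_ctx *ctx, struct xdr_buf *snd_buf,
			struct page **enc_pages)
{
	int offset = 0;			/* assumed: protect the whole buffer */
	u32 status;

	status = gss_wrap(ctx, offset, snd_buf, enc_pages);
	if (status)
		return status;
	return gss_unwrap(ctx, offset, snd_buf);
}
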
index 92608a2e574c88b8f548d537f6343f64c52b4766..a6807867bd2105e93af78841e4bb5e8796c2d198 100644 (file)
@@ -65,16 +65,6 @@ typedef unsigned int OM_uint32;
 #define GSS_C_MECH_CODE 2
 
 
-/*
- * Define the default Quality of Protection for per-message services.  Note
- * that an implementation that offers multiple levels of QOP may either reserve
- * a value (for example zero, as assumed here) to mean "default protection", or
- * alternatively may simply equate GSS_C_QOP_DEFAULT to a specific explicit
- * QOP value.  However a value of 0 should always be interpreted by a GSSAPI
- * implementation as a request for the default protection level.
- */
-#define GSS_C_QOP_DEFAULT 0
-
 /*
  * Expiration time of 2^32-1 seconds means infinite lifetime for a
  * credential or security context
index ffe31d2eb9ec5e49a45f012dfa9c33f3baf55041..2c3601d3104503422c970c4aa58ed63dee68e5a8 100644 (file)
@@ -116,18 +116,22 @@ enum seal_alg {
 
 s32
 make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
-                  struct xdr_netobj *cksum);
+                  int body_offset, struct xdr_netobj *cksum);
+
+u32 gss_get_mic_kerberos(struct gss_ctx *, struct xdr_buf *,
+               struct xdr_netobj *);
+
+u32 gss_verify_mic_kerberos(struct gss_ctx *, struct xdr_buf *,
+               struct xdr_netobj *);
 
 u32
-krb5_make_token(struct krb5_ctx *context_handle, int qop_req,
-       struct xdr_buf *input_message_buffer,
-       struct xdr_netobj *output_message_buffer, int toktype);
+gss_wrap_kerberos(struct gss_ctx *ctx_id, int offset,
+               struct xdr_buf *outbuf, struct page **pages);
 
 u32
-krb5_read_token(struct krb5_ctx *context_handle,
-         struct xdr_netobj *input_token_buffer,
-         struct xdr_buf *message_buffer,
-         int *qop_state, int toktype);
+gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset,
+               struct xdr_buf *buf);
+
 
 u32
 krb5_encrypt(struct crypto_tfm * key,
@@ -137,6 +141,13 @@ u32
 krb5_decrypt(struct crypto_tfm * key,
             void *iv, void *in, void *out, int length); 
 
+int
+gss_encrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *outbuf, int offset,
+               struct page **pages);
+
+int
+gss_decrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *inbuf, int offset);
+
 s32
 krb5_make_seq_num(struct crypto_tfm * key,
                int direction,
index b5c9968c3c171b55223f3806fc03dbaf49a7fb64..0beb2cf00a8401b55bd6bad5430cb85960815cb1 100644 (file)
@@ -41,9 +41,9 @@ struct spkm3_ctx {
 #define SPKM_WRAP_TOK  5
 #define SPKM_DEL_TOK   6
 
-u32 spkm3_make_token(struct spkm3_ctx *ctx, int qop_req, struct xdr_buf * text, struct xdr_netobj * token, int toktype);
+u32 spkm3_make_token(struct spkm3_ctx *ctx, struct xdr_buf * text, struct xdr_netobj * token, int toktype);
 
-u32 spkm3_read_token(struct spkm3_ctx *ctx, struct xdr_netobj *read_token, struct xdr_buf *message_buffer, int *qop_state, int toktype);
+u32 spkm3_read_token(struct spkm3_ctx *ctx, struct xdr_netobj *read_token, struct xdr_buf *message_buffer, int toktype);
 
 #define CKSUMTYPE_RSA_MD5            0x0007
 
index 15f115332389af86f9d56ba6a8ec74da6d7fdf88..f43f237360ae62099997c803b440e2efece721b5 100644 (file)
@@ -76,5 +76,30 @@ enum rpc_auth_stat {
 
 #define RPC_MAXNETNAMELEN      256
 
+/*
+ * From RFC 1831:
+ *
+ * "A record is composed of one or more record fragments.  A record
+ *  fragment is a four-byte header followed by 0 to (2**31) - 1 bytes of
+ *  fragment data.  The bytes encode an unsigned binary number; as with
+ *  XDR integers, the byte order is from highest to lowest.  The number
+ *  encodes two values -- a boolean which indicates whether the fragment
+ *  is the last fragment of the record (bit value 1 implies the fragment
+ *  is the last fragment) and a 31-bit unsigned binary value which is the
+ *  length in bytes of the fragment's data.  The boolean value is the
+ *  highest-order bit of the header; the length is the 31 low-order bits.
+ *  (Note that this record specification is NOT in XDR standard form!)"
+ *
+ * The Linux RPC client always sends its requests in a single record
+ * fragment, limiting the maximum payload size for stream transports to
+ * 2GB.
+ */
+
+typedef u32    rpc_fraghdr;
+
+#define        RPC_LAST_STREAM_FRAGMENT        (1U << 31)
+#define        RPC_FRAGMENT_SIZE_MASK          (~RPC_LAST_STREAM_FRAGMENT)
+#define        RPC_MAX_FRAGMENT_SIZE           ((1U << 31) - 1)
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SUNRPC_MSGPROT_H_ */
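
An illustration (not from the patch) of composing the four-byte record marker described above:

static rpc_fraghdr example_marker(u32 payload_len, int is_last)
{
	u32 hdr = payload_len & RPC_FRAGMENT_SIZE_MASK;

	if (is_last)
		hdr |= RPC_LAST_STREAM_FRAGMENT;
	return htonl(hdr);	/* wire order is highest byte first */
}
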
index 23448d0fb5bc522324f176859170874c6f2d0a1a..5da968729cf820c9028a9d6202325002c44933aa 100644 (file)
@@ -161,14 +161,10 @@ typedef struct {
 
 typedef size_t (*skb_read_actor_t)(skb_reader_t *desc, void *to, size_t len);
 
+extern int csum_partial_copy_to_xdr(struct xdr_buf *, struct sk_buff *);
 extern ssize_t xdr_partial_copy_from_skb(struct xdr_buf *, unsigned int,
                skb_reader_t *, skb_read_actor_t);
 
-struct socket;
-struct sockaddr;
-extern int xdr_sendpages(struct socket *, struct sockaddr *, int,
-               struct xdr_buf *, unsigned int, int);
-
 extern int xdr_encode_word(struct xdr_buf *, int, u32);
 extern int xdr_decode_word(struct xdr_buf *, int, u32 *);
 
index e618c164981403ad8c2fb2e7aef6c7f134a11599..3b8b6e823c70eab0083330be5dd8b25727b037c0 100644 (file)
@@ -1,5 +1,5 @@
 /*
- *  linux/include/linux/sunrpc/clnt_xprt.h
+ *  linux/include/linux/sunrpc/xprt.h
  *
  *  Declarations for the RPC transport interface.
  *
 #include <linux/sunrpc/sched.h>
 #include <linux/sunrpc/xdr.h>
 
-/*
- * The transport code maintains an estimate on the maximum number of out-
- * standing RPC requests, using a smoothed version of the congestion
- * avoidance implemented in 44BSD. This is basically the Van Jacobson
- * congestion algorithm: If a retransmit occurs, the congestion window is
- * halved; otherwise, it is incremented by 1/cwnd when
- *
- *     -       a reply is received and
- *     -       a full number of requests are outstanding and
- *     -       the congestion window hasn't been updated recently.
- *
- * Upper procedures may check whether a request would block waiting for
- * a free RPC slot by using the RPC_CONGESTED() macro.
- */
 extern unsigned int xprt_udp_slot_table_entries;
 extern unsigned int xprt_tcp_slot_table_entries;
 
@@ -36,34 +22,23 @@ extern unsigned int xprt_tcp_slot_table_entries;
 #define RPC_DEF_SLOT_TABLE     (16U)
 #define RPC_MAX_SLOT_TABLE     (128U)
 
-#define RPC_CWNDSHIFT          (8U)
-#define RPC_CWNDSCALE          (1U << RPC_CWNDSHIFT)
-#define RPC_INITCWND           RPC_CWNDSCALE
-#define RPC_MAXCWND(xprt)      ((xprt)->max_reqs << RPC_CWNDSHIFT)
-#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)
-
-/* Default timeout values */
-#define RPC_MAX_UDP_TIMEOUT    (60*HZ)
-#define RPC_MAX_TCP_TIMEOUT    (600*HZ)
-
 /*
- * Wait duration for an RPC TCP connection to be established.  Solaris
- * NFS over TCP uses 60 seconds, for example, which is in line with how
- * long a server takes to reboot.
+ * RPC call and reply header size as number of 32bit words (verifier
+ * size computed separately)
  */
-#define RPC_CONNECT_TIMEOUT    (60*HZ)
+#define RPC_CALLHDRSIZE                6
+#define RPC_REPHDRSIZE         4
 
 /*
- * Delay an arbitrary number of seconds before attempting to reconnect
- * after an error.
+ * Parameters for choosing a free port
  */
-#define RPC_REESTABLISH_TIMEOUT        (15*HZ)
+extern unsigned int xprt_min_resvport;
+extern unsigned int xprt_max_resvport;
 
-/* RPC call and reply header size as number of 32bit words (verifier
- * size computed separately)
- */
-#define RPC_CALLHDRSIZE                6
-#define RPC_REPHDRSIZE         4
+#define RPC_MIN_RESVPORT       (1U)
+#define RPC_MAX_RESVPORT       (65535U)
+#define RPC_DEF_MIN_RESVPORT   (650U)
+#define RPC_DEF_MAX_RESVPORT   (1023U)
 
 /*
  * This describes a timeout strategy
@@ -76,6 +51,9 @@ struct rpc_timeout {
        unsigned char           to_exponential;
 };
 
+struct rpc_task;
+struct rpc_xprt;
+
 /*
  * This describes a complete RPC request
  */
@@ -95,7 +73,10 @@ struct rpc_rqst {
        int                     rq_cong;        /* has incremented xprt->cong */
        int                     rq_received;    /* receive completed */
        u32                     rq_seqno;       /* gss seq no. used on req. */
-
+       int                     rq_enc_pages_num;
+       struct page             **rq_enc_pages; /* scratch pages for use by
+                                                  gss privacy code */
+       void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */
        struct list_head        rq_list;
 
        struct xdr_buf          rq_private_buf;         /* The receive buffer
@@ -121,12 +102,21 @@ struct rpc_rqst {
 #define rq_svec                        rq_snd_buf.head
 #define rq_slen                        rq_snd_buf.len
 
-#define XPRT_LAST_FRAG         (1 << 0)
-#define XPRT_COPY_RECM         (1 << 1)
-#define XPRT_COPY_XID          (1 << 2)
-#define XPRT_COPY_DATA         (1 << 3)
+struct rpc_xprt_ops {
+       void            (*set_buffer_size)(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize);
+       int             (*reserve_xprt)(struct rpc_task *task);
+       void            (*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
+       void            (*connect)(struct rpc_task *task);
+       int             (*send_request)(struct rpc_task *task);
+       void            (*set_retrans_timeout)(struct rpc_task *task);
+       void            (*timer)(struct rpc_task *task);
+       void            (*release_request)(struct rpc_task *task);
+       void            (*close)(struct rpc_xprt *xprt);
+       void            (*destroy)(struct rpc_xprt *xprt);
+};
 
 struct rpc_xprt {
+       struct rpc_xprt_ops *   ops;            /* transport methods */
        struct socket *         sock;           /* BSD socket layer */
        struct sock *           inet;           /* INET layer */
 
@@ -137,11 +127,13 @@ struct rpc_xprt {
        unsigned long           cong;           /* current congestion */
        unsigned long           cwnd;           /* congestion window */
 
-       unsigned int            rcvsize,        /* socket receive buffer size */
-                               sndsize;        /* socket send buffer size */
+       size_t                  rcvsize,        /* transport rcv buffer size */
+                               sndsize;        /* transport send buffer size */
 
        size_t                  max_payload;    /* largest RPC payload size,
                                                   in bytes */
+       unsigned int            tsh_size;       /* size of transport specific
+                                                  header */
 
        struct rpc_wait_queue   sending;        /* requests waiting to send */
        struct rpc_wait_queue   resend;         /* requests waiting to resend */
@@ -150,11 +142,9 @@ struct rpc_xprt {
        struct list_head        free;           /* free slots */
        struct rpc_rqst *       slot;           /* slot table storage */
        unsigned int            max_reqs;       /* total slots */
-       unsigned long           sockstate;      /* Socket state */
+       unsigned long           state;          /* transport state */
        unsigned char           shutdown   : 1, /* being shut down */
-                               nocong     : 1, /* no congestion control */
-                               resvport   : 1, /* use a reserved port */
-                               stream     : 1; /* TCP */
+                               resvport   : 1; /* use a reserved port */
 
        /*
         * XID
@@ -171,22 +161,27 @@ struct rpc_xprt {
        unsigned long           tcp_copied,     /* copied to request */
                                tcp_flags;
        /*
-        * Connection of sockets
+        * Connection of transports
         */
-       struct work_struct      sock_connect;
+       unsigned long           connect_timeout,
+                               bind_timeout,
+                               reestablish_timeout;
+       struct work_struct      connect_worker;
        unsigned short          port;
+
        /*
-        * Disconnection of idle sockets
+        * Disconnection of idle transports
         */
        struct work_struct      task_cleanup;
        struct timer_list       timer;
-       unsigned long           last_used;
+       unsigned long           last_used,
+                               idle_timeout;
 
        /*
         * Send stuff
         */
-       spinlock_t              sock_lock;      /* lock socket info */
-       spinlock_t              xprt_lock;      /* lock xprt info */
+       spinlock_t              transport_lock; /* lock transport info */
+       spinlock_t              reserve_lock;   /* lock slot table */
        struct rpc_task *       snd_task;       /* Task blocked in send */
 
        struct list_head        recv;
@@ -195,37 +190,111 @@ struct rpc_xprt {
        void                    (*old_data_ready)(struct sock *, int);
        void                    (*old_state_change)(struct sock *);
        void                    (*old_write_space)(struct sock *);
-
-       wait_queue_head_t       cong_wait;
 };
 
+#define XPRT_LAST_FRAG         (1 << 0)
+#define XPRT_COPY_RECM         (1 << 1)
+#define XPRT_COPY_XID          (1 << 2)
+#define XPRT_COPY_DATA         (1 << 3)
+
 #ifdef __KERNEL__
 
-struct rpc_xprt *      xprt_create_proto(int proto, struct sockaddr_in *addr,
-                                       struct rpc_timeout *toparms);
-int                    xprt_destroy(struct rpc_xprt *);
-void                   xprt_set_timeout(struct rpc_timeout *, unsigned int,
-                                       unsigned long);
+/*
+ * Transport operations used by ULPs
+ */
+struct rpc_xprt *      xprt_create_proto(int proto, struct sockaddr_in *addr, struct rpc_timeout *to);
+void                   xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr);
 
-void                   xprt_reserve(struct rpc_task *);
-int                    xprt_prepare_transmit(struct rpc_task *);
-void                   xprt_transmit(struct rpc_task *);
-void                   xprt_receive(struct rpc_task *);
+/*
+ * Generic internal transport functions
+ */
+void                   xprt_connect(struct rpc_task *task);
+void                   xprt_reserve(struct rpc_task *task);
+int                    xprt_reserve_xprt(struct rpc_task *task);
+int                    xprt_reserve_xprt_cong(struct rpc_task *task);
+int                    xprt_prepare_transmit(struct rpc_task *task);
+void                   xprt_transmit(struct rpc_task *task);
+void                   xprt_abort_transmit(struct rpc_task *task);
 int                    xprt_adjust_timeout(struct rpc_rqst *req);
-void                   xprt_release(struct rpc_task *);
-void                   xprt_connect(struct rpc_task *);
-void                   xprt_sock_setbufsize(struct rpc_xprt *);
-
-#define XPRT_LOCKED    0
-#define XPRT_CONNECT   1
-#define XPRT_CONNECTING        2
-
-#define xprt_connected(xp)             (test_bit(XPRT_CONNECT, &(xp)->sockstate))
-#define xprt_set_connected(xp)         (set_bit(XPRT_CONNECT, &(xp)->sockstate))
-#define xprt_test_and_set_connected(xp)        (test_and_set_bit(XPRT_CONNECT, &(xp)->sockstate))
-#define xprt_test_and_clear_connected(xp) \
-                                       (test_and_clear_bit(XPRT_CONNECT, &(xp)->sockstate))
-#define xprt_clear_connected(xp)       (clear_bit(XPRT_CONNECT, &(xp)->sockstate))
+void                   xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
+void                   xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
+void                   xprt_release(struct rpc_task *task);
+int                    xprt_destroy(struct rpc_xprt *xprt);
+
+static inline u32 *xprt_skip_transport_header(struct rpc_xprt *xprt, u32 *p)
+{
+       return p + xprt->tsh_size;
+}
+
+/*
+ * Transport switch helper functions
+ */
+void                   xprt_set_retrans_timeout_def(struct rpc_task *task);
+void                   xprt_set_retrans_timeout_rtt(struct rpc_task *task);
+void                   xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status);
+void                   xprt_wait_for_buffer_space(struct rpc_task *task);
+void                   xprt_write_space(struct rpc_xprt *xprt);
+void                   xprt_update_rtt(struct rpc_task *task);
+void                   xprt_adjust_cwnd(struct rpc_task *task, int result);
+struct rpc_rqst *      xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid);
+void                   xprt_complete_rqst(struct rpc_task *task, int copied);
+void                   xprt_release_rqst_cong(struct rpc_task *task);
+void                   xprt_disconnect(struct rpc_xprt *xprt);
+
+/*
+ * Socket transport setup operations
+ */
+int                    xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to);
+int                    xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to);
+
+/*
+ * Reserved bit positions in xprt->state
+ */
+#define XPRT_LOCKED            (0)
+#define XPRT_CONNECTED         (1)
+#define XPRT_CONNECTING                (2)
+
+static inline void xprt_set_connected(struct rpc_xprt *xprt)
+{
+       set_bit(XPRT_CONNECTED, &xprt->state);
+}
+
+static inline void xprt_clear_connected(struct rpc_xprt *xprt)
+{
+       clear_bit(XPRT_CONNECTED, &xprt->state);
+}
+
+static inline int xprt_connected(struct rpc_xprt *xprt)
+{
+       return test_bit(XPRT_CONNECTED, &xprt->state);
+}
+
+static inline int xprt_test_and_set_connected(struct rpc_xprt *xprt)
+{
+       return test_and_set_bit(XPRT_CONNECTED, &xprt->state);
+}
+
+static inline int xprt_test_and_clear_connected(struct rpc_xprt *xprt)
+{
+       return test_and_clear_bit(XPRT_CONNECTED, &xprt->state);
+}
+
+static inline void xprt_clear_connecting(struct rpc_xprt *xprt)
+{
+       smp_mb__before_clear_bit();
+       clear_bit(XPRT_CONNECTING, &xprt->state);
+       smp_mb__after_clear_bit();
+}
+
+static inline int xprt_connecting(struct rpc_xprt *xprt)
+{
+       return test_bit(XPRT_CONNECTING, &xprt->state);
+}
+
+static inline int xprt_test_and_set_connecting(struct rpc_xprt *xprt)
+{
+       return test_and_set_bit(XPRT_CONNECTING, &xprt->state);
+}
 
 #endif /* __KERNEL__*/
 
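
A sketch of the connect-worker shape the new bit helpers support (the worker body is assumed): the connecting bit serializes a single connect attempt per transport.

static void example_connect_worker(struct rpc_xprt *xprt)
{
	if (xprt_test_and_set_connecting(xprt))
		return;				/* a connect is already in flight */

	/* ... establish the underlying socket ... */

	xprt_set_connected(xprt);
	xprt_clear_connecting(xprt);		/* clears the bit with memory barriers */
}
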
index ad15a54806d8134342d195e7b120e11fb584f7ac..ba448c760168a8e48cbb2ab59005095ebd1fb6b2 100644 (file)
@@ -71,7 +71,7 @@ void restore_processor_state(void);
 struct saved_context;
 void __save_processor_state(struct saved_context *ctxt);
 void __restore_processor_state(struct saved_context *ctxt);
-extern unsigned long get_usable_page(unsigned gfp_mask);
+extern unsigned long get_usable_page(gfp_t gfp_mask);
 extern void free_eaten_memory(void);
 
 #endif /* _LINUX_SWSUSP_H */
index a7bf1a3b149624398413fa64a31bb886c4ec35b4..20c975642cab4c7f17e1cd9c8b2a6b9a2938aa03 100644 (file)
@@ -171,8 +171,8 @@ extern int rotate_reclaimable_page(struct page *page);
 extern void swap_setup(void);
 
 /* linux/mm/vmscan.c */
-extern int try_to_free_pages(struct zone **, unsigned int);
-extern int zone_reclaim(struct zone *, unsigned int, unsigned int);
+extern int try_to_free_pages(struct zone **, gfp_t);
+extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
 extern int shrink_all_memory(int);
 extern int vm_swappiness;
 
index 515046d1b2f42825efaef6959abf91893fcf0bb1..fc5bb4e91a5846303ecb9be8a8058768d000d7e1 100644 (file)
@@ -40,7 +40,7 @@ struct ts_state
 struct ts_ops
 {
        const char              *name;
-       struct ts_config *      (*init)(const void *, unsigned int, int);
+       struct ts_config *      (*init)(const void *, unsigned int, gfp_t);
        unsigned int            (*find)(struct ts_config *,
                                        struct ts_state *);
        void                    (*destroy)(struct ts_config *);
@@ -148,7 +148,7 @@ static inline unsigned int textsearch_get_pattern_len(struct ts_config *conf)
 extern int textsearch_register(struct ts_ops *);
 extern int textsearch_unregister(struct ts_ops *);
 extern struct ts_config *textsearch_prepare(const char *, const void *,
-                                           unsigned int, int, int);
+                                           unsigned int, gfp_t, int);
 extern void textsearch_destroy(struct ts_config *conf);
 extern unsigned int textsearch_find_continuous(struct ts_config *,
                                               struct ts_state *,
index 0aee34f9da9f8bf7f267d957d5be7ba9faf281f5..21b9ce80364429abd04e9ef718834d0a6427eb15 100644 (file)
@@ -151,7 +151,12 @@ typedef unsigned long sector_t;
  */
 
 #ifdef __CHECKER__
-#define __bitwise __attribute__((bitwise))
+#define __bitwise__ __attribute__((bitwise))
+#else
+#define __bitwise__
+#endif
+#ifdef __CHECK_ENDIAN__
+#define __bitwise __bitwise__
 #else
 #define __bitwise
 #endif
@@ -166,7 +171,7 @@ typedef __u64 __bitwise __be64;
 #endif
 
 #ifdef __KERNEL__
-typedef unsigned __nocast gfp_t;
+typedef unsigned __bitwise__ gfp_t;
 #endif
 
 struct ustat {
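With this change gfp_t becomes a sparse "bitwise" (restricted) type: it is checked whenever the code is run through sparse (make C=1), while the endianness annotations built on __bitwise remain opt-in behind __CHECK_ENDIAN__. A small illustration of what the checker now catches; example_alloc() is a made-up function, and the sketch assumes the GFP_* flag macros carry the gfp_t type, as elsewhere in this series:

/* Sketch only: sparse flags the second call below ("incorrect type in
 * argument"), because 0x10 is a plain integer constant, not a gfp_t. */
static void *example_alloc(gfp_t flags, size_t len)
{
	return kmalloc(len, flags);
}

static void example(void)
{
	example_alloc(GFP_KERNEL, 64);	/* ok: GFP_KERNEL is typed as gfp_t */
	example_alloc(0x10, 64);	/* sparse warning under make C=1 */
}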
index 4dbe580f9335833eb5fe78be9e241db64e5ef0fb..8f731e8f28215cd938e29fe54fca4ae47477c85c 100644 (file)
@@ -933,17 +933,17 @@ static inline void usb_fill_int_urb (struct urb *urb,
 }
 
 extern void usb_init_urb(struct urb *urb);
-extern struct urb *usb_alloc_urb(int iso_packets, unsigned mem_flags);
+extern struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags);
 extern void usb_free_urb(struct urb *urb);
 #define usb_put_urb usb_free_urb
 extern struct urb *usb_get_urb(struct urb *urb);
-extern int usb_submit_urb(struct urb *urb, unsigned mem_flags);
+extern int usb_submit_urb(struct urb *urb, gfp_t mem_flags);
 extern int usb_unlink_urb(struct urb *urb);
 extern void usb_kill_urb(struct urb *urb);
 
 #define HAVE_USB_BUFFERS
 void *usb_buffer_alloc (struct usb_device *dev, size_t size,
-       unsigned mem_flags, dma_addr_t *dma);
+       gfp_t mem_flags, dma_addr_t *dma);
 void usb_buffer_free (struct usb_device *dev, size_t size,
        void *addr, dma_addr_t dma);
 
@@ -1050,7 +1050,7 @@ int usb_sg_init (
        struct scatterlist      *sg,
        int                     nents,
        size_t                  length,
-       unsigned                mem_flags
+       gfp_t                   mem_flags
 );
 void usb_sg_cancel (struct usb_sg_request *io);
 void usb_sg_wait (struct usb_sg_request *io);
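The mem_flags arguments above now carry gfp_t; the convention itself is unchanged: GFP_KERNEL from process context, GFP_ATOMIC from interrupt or completion context. A hedged sketch of the usual allocate-and-submit pattern; the device, pipe, buffer and completion handler are assumptions, not part of this patch:

/* Sketch only: allocate and submit one bulk URB from process context. */
static int example_submit(struct usb_device *udev, unsigned int pipe,
			  void *buf, int len, usb_complete_t done, void *ctx)
{
	struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);	/* no isochronous packets */

	if (!urb)
		return -ENOMEM;
	usb_fill_bulk_urb(urb, udev, pipe, buf, len, done, ctx);
	return usb_submit_urb(urb, GFP_KERNEL);	/* GFP_ATOMIC when resubmitting
						 * from the completion handler */
}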
index 71e60860732467955f9d078ef62d684ab5579f84..ff81117eb733f772aea4fd33ca8e2e2267f88503 100644 (file)
@@ -107,18 +107,18 @@ struct usb_ep_ops {
        int (*disable) (struct usb_ep *ep);
 
        struct usb_request *(*alloc_request) (struct usb_ep *ep,
-               unsigned gfp_flags);
+               gfp_t gfp_flags);
        void (*free_request) (struct usb_ep *ep, struct usb_request *req);
 
        void *(*alloc_buffer) (struct usb_ep *ep, unsigned bytes,
-               dma_addr_t *dma, unsigned gfp_flags);
+               dma_addr_t *dma, gfp_t gfp_flags);
        void (*free_buffer) (struct usb_ep *ep, void *buf, dma_addr_t dma,
                unsigned bytes);
        // NOTE:  on 2.6, drivers may also use dma_map() and
        // dma_sync_single_*() to directly manage dma overhead. 
 
        int (*queue) (struct usb_ep *ep, struct usb_request *req,
-               unsigned gfp_flags);
+               gfp_t gfp_flags);
        int (*dequeue) (struct usb_ep *ep, struct usb_request *req);
 
        int (*set_halt) (struct usb_ep *ep, int value);
@@ -214,7 +214,7 @@ usb_ep_disable (struct usb_ep *ep)
  * Returns the request, or null if one could not be allocated.
  */
 static inline struct usb_request *
-usb_ep_alloc_request (struct usb_ep *ep, unsigned gfp_flags)
+usb_ep_alloc_request (struct usb_ep *ep, gfp_t gfp_flags)
 {
        return ep->ops->alloc_request (ep, gfp_flags);
 }
@@ -254,7 +254,7 @@ usb_ep_free_request (struct usb_ep *ep, struct usb_request *req)
  */
 static inline void *
 usb_ep_alloc_buffer (struct usb_ep *ep, unsigned len, dma_addr_t *dma,
-       unsigned gfp_flags)
+       gfp_t gfp_flags)
 {
        return ep->ops->alloc_buffer (ep, len, dma, gfp_flags);
 }
@@ -330,7 +330,7 @@ usb_ep_free_buffer (struct usb_ep *ep, void *buf, dma_addr_t dma, unsigned len)
  * reported when the usb peripheral is disconnected.
  */
 static inline int
-usb_ep_queue (struct usb_ep *ep, struct usb_request *req, unsigned gfp_flags)
+usb_ep_queue (struct usb_ep *ep, struct usb_request *req, gfp_t gfp_flags)
 {
        return ep->ops->queue (ep, req, gfp_flags);
 }
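The gadget-side endpoint operations take the same gfp_t flags; a sketch of queueing a request on an already-enabled endpoint (the buffer and completion callback are illustrative, not from this patch):

/* Sketch only: allocate a request and queue it on an enabled endpoint. */
static int example_ep_start(struct usb_ep *ep, void *buf, unsigned len,
			    void (*done)(struct usb_ep *, struct usb_request *))
{
	struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);

	if (!req)
		return -ENOMEM;
	req->buf = buf;
	req->length = len;
	req->complete = done;
	return usb_ep_queue(ep, req, GFP_ATOMIC);
}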
index 167d956c492b12d484697dca5376b4855b37de86..dae9860091ddc8c99d472979b54b68577ac321ba 100644 (file)
@@ -265,15 +265,6 @@ typedef struct {
 #include <linux/tty_driver.h>
 #include <linux/tty_flip.h>
 
-
-#define        is_digit(ch) (((ch)>=(unsigned)'0'&&(ch)<=(unsigned)'9')?1:0)
-#define        is_alpha(ch) ((((ch)>=(unsigned)'a'&&(ch)<=(unsigned)'z')||\
-                 ((ch)>=(unsigned)'A'&&(ch)<=(unsigned)'Z'))?1:0)
-#define        is_hex_digit(ch) ((((ch)>=(unsigned)'0'&&(ch)<=(unsigned)'9')||\
-                 ((ch)>=(unsigned)'a'&&(ch)<=(unsigned)'f')||\
-                 ((ch)>=(unsigned)'A'&&(ch)<=(unsigned)'F'))?1:0)
-
-
 /****** Data Structures *****************************************************/
 
 /* Adapter Data Space.
index 4a056a68243540fa610a9363bef13c6a0b59c7ef..6c196a5baf24bda85e3789163ebe4a6cdf74eb31 100644 (file)
@@ -94,7 +94,6 @@ struct dst_ops
        struct dst_entry *      (*negative_advice)(struct dst_entry *);
        void                    (*link_failure)(struct sk_buff *);
        void                    (*update_pmtu)(struct dst_entry *dst, u32 mtu);
-       int                     (*get_mss)(struct dst_entry *dst, u32 mtu);
        int                     entry_size;
 
        atomic_t                entries;
index dc36b1be6745ac7c8b2e8e28810b9b99bc49de66..5e38dca1d08204542340d7706e852cca39006192 100644 (file)
  *
  * Adaption to a generic IEEE 802.11 stack by James Ketrenos
  * <jketreno@linux.intel.com>
- * Copyright (c) 2004, Intel Corporation
+ * Copyright (c) 2004-2005, Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation. See README and COPYING for
  * more details.
+ *
+ * API Version History
+ * 1.0.x -- Initial version
+ * 1.1.x -- Added radiotap, QoS, TIM, ieee80211_geo APIs,
+ *          various structure changes, and crypto API init method
  */
 #ifndef IEEE80211_H
 #define IEEE80211_H
-#include <linux/if_ether.h> /* ETH_ALEN */
-#include <linux/kernel.h>   /* ARRAY_SIZE */
+#include <linux/if_ether.h>    /* ETH_ALEN */
+#include <linux/kernel.h>      /* ARRAY_SIZE */
 #include <linux/wireless.h>
 
+#define IEEE80211_VERSION "git-1.1.6"
+
 #define IEEE80211_DATA_LEN             2304
 /* Maximum size for the MA-UNITDATA primitive, 802.11 standard section
    6.2.1.1.2.
    represents the 2304 bytes of real data, plus a possible 8 bytes of
    WEP IV and ICV. (this interpretation suggested by Ramiro Barreiro) */
 
-
-#define IEEE80211_HLEN                 30
-#define IEEE80211_FRAME_LEN            (IEEE80211_DATA_LEN + IEEE80211_HLEN)
-
-struct ieee80211_hdr {
-       __le16 frame_ctl;
-       __le16 duration_id;
-       u8 addr1[ETH_ALEN];
-       u8 addr2[ETH_ALEN];
-       u8 addr3[ETH_ALEN];
-       __le16 seq_ctl;
-       u8 addr4[ETH_ALEN];
-} __attribute__ ((packed));
-
-struct ieee80211_hdr_3addr {
-       __le16 frame_ctl;
-       __le16 duration_id;
-       u8 addr1[ETH_ALEN];
-       u8 addr2[ETH_ALEN];
-       u8 addr3[ETH_ALEN];
-       __le16 seq_ctl;
-} __attribute__ ((packed));
-
 #define IEEE80211_1ADDR_LEN 10
 #define IEEE80211_2ADDR_LEN 16
 #define IEEE80211_3ADDR_LEN 24
 #define IEEE80211_4ADDR_LEN 30
 #define IEEE80211_FCS_LEN    4
+#define IEEE80211_HLEN                 (IEEE80211_4ADDR_LEN)
+#define IEEE80211_FRAME_LEN            (IEEE80211_DATA_LEN + IEEE80211_HLEN)
 
 #define MIN_FRAG_THRESHOLD     256U
 #define        MAX_FRAG_THRESHOLD     2346U
@@ -113,11 +99,11 @@ struct ieee80211_hdr_3addr {
 #define IEEE80211_STYPE_CFACK          0x0050
 #define IEEE80211_STYPE_CFPOLL         0x0060
 #define IEEE80211_STYPE_CFACKPOLL      0x0070
+#define IEEE80211_STYPE_QOS_DATA        0x0080
 
 #define IEEE80211_SCTL_FRAG            0x000F
 #define IEEE80211_SCTL_SEQ             0xFFF0
 
-
 /* debug macros */
 
 #ifdef CONFIG_IEEE80211_DEBUG
@@ -128,8 +114,7 @@ do { if (ieee80211_debug_level & (level)) \
          in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0)
 #else
 #define IEEE80211_DEBUG(level, fmt, args...) do {} while (0)
-#endif /* CONFIG_IEEE80211_DEBUG */
-
+#endif                         /* CONFIG_IEEE80211_DEBUG */
 
 /* debug macros not dependent on CONFIG_IEEE80211_DEBUG */
 
@@ -140,7 +125,6 @@ do { if (ieee80211_debug_level & (level)) \
  * messages. It should never be used for passing essid to user space. */
 const char *escape_essid(const char *essid, u8 essid_len);
 
-
 /*
  * To use the debug system:
  *
@@ -177,6 +161,7 @@ const char *escape_essid(const char *essid, u8 essid_len);
 
 #define IEEE80211_DL_TX            (1<<8)
 #define IEEE80211_DL_RX            (1<<9)
+#define IEEE80211_DL_QOS           (1<<31)
 
 #define IEEE80211_ERROR(f, a...) printk(KERN_ERR "ieee80211: " f, ## a)
 #define IEEE80211_WARNING(f, a...) printk(KERN_WARNING "ieee80211: " f, ## a)
@@ -190,9 +175,10 @@ const char *escape_essid(const char *essid, u8 essid_len);
 #define IEEE80211_DEBUG_DROP(f, a...)  IEEE80211_DEBUG(IEEE80211_DL_DROP, f, ## a)
 #define IEEE80211_DEBUG_TX(f, a...)  IEEE80211_DEBUG(IEEE80211_DL_TX, f, ## a)
 #define IEEE80211_DEBUG_RX(f, a...)  IEEE80211_DEBUG(IEEE80211_DL_RX, f, ## a)
+#define IEEE80211_DEBUG_QOS(f, a...)  IEEE80211_DEBUG(IEEE80211_DL_QOS, f, ## a)
 #include <linux/netdevice.h>
 #include <linux/wireless.h>
-#include <linux/if_arp.h> /* ARPHRD_ETHER */
+#include <linux/if_arp.h>      /* ARPHRD_ETHER */
 
 #ifndef WIRELESS_SPY
 #define WIRELESS_SPY           /* enable iwspy support */
@@ -200,10 +186,10 @@ const char *escape_essid(const char *essid, u8 essid_len);
 #include <net/iw_handler.h>    /* new driver API */
 
 #ifndef ETH_P_PAE
-#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */
-#endif /* ETH_P_PAE */
+#define ETH_P_PAE 0x888E       /* Port Access Entity (IEEE 802.1X) */
+#endif                         /* ETH_P_PAE */
 
-#define ETH_P_PREAUTH 0x88C7 /* IEEE 802.11i pre-authentication */
+#define ETH_P_PREAUTH 0x88C7   /* IEEE 802.11i pre-authentication */
 
 #ifndef ETH_P_80211_RAW
 #define ETH_P_80211_RAW (ETH_P_ECONET + 1)
@@ -215,10 +201,10 @@ const char *escape_essid(const char *essid, u8 essid_len);
 
 struct ieee80211_snap_hdr {
 
-        u8    dsap;   /* always 0xAA */
-        u8    ssap;   /* always 0xAA */
-        u8    ctrl;   /* always 0x03 */
-        u8    oui[P80211_OUI_LEN];    /* organizational universal id */
+       u8 dsap;                /* always 0xAA */
+       u8 ssap;                /* always 0xAA */
+       u8 ctrl;                /* always 0x03 */
+       u8 oui[P80211_OUI_LEN]; /* organizational universal id */
 
 } __attribute__ ((packed));
 
@@ -246,8 +232,9 @@ struct ieee80211_snap_hdr {
 #define WLAN_CAPABILITY_PBCC (1<<6)
 #define WLAN_CAPABILITY_CHANNEL_AGILITY (1<<7)
 #define WLAN_CAPABILITY_SPECTRUM_MGMT (1<<8)
+#define WLAN_CAPABILITY_QOS (1<<9)
 #define WLAN_CAPABILITY_SHORT_SLOT_TIME (1<<10)
-#define WLAN_CAPABILITY_OSSS_OFDM (1<<13)
+#define WLAN_CAPABILITY_DSSS_OFDM (1<<13)
 
 /* Status codes */
 enum ieee80211_statuscode {
@@ -312,14 +299,12 @@ enum ieee80211_reasoncode {
        WLAN_REASON_CIPHER_SUITE_REJECTED = 24,
 };
 
-
 #define IEEE80211_STATMASK_SIGNAL (1<<0)
 #define IEEE80211_STATMASK_RSSI (1<<1)
 #define IEEE80211_STATMASK_NOISE (1<<2)
 #define IEEE80211_STATMASK_RATE (1<<3)
 #define IEEE80211_STATMASK_WEMASK 0x7
 
-
 #define IEEE80211_CCK_MODULATION    (1<<0)
 #define IEEE80211_OFDM_MODULATION   (1<<1)
 
@@ -377,9 +362,6 @@ enum ieee80211_reasoncode {
 #define IEEE80211_NUM_CCK_RATES                    4
 #define IEEE80211_OFDM_SHIFT_MASK_A         4
 
-
-
-
 /* NOTE: This data is for statistical purposes; not all hardware provides this
  *       information for frames received.  Not setting these will not cause
 *       any adverse effects. */
@@ -388,7 +370,7 @@ struct ieee80211_rx_stats {
        s8 rssi;
        u8 signal;
        u8 noise;
-       u16 rate; /* in 100 kbps */
+       u16 rate;               /* in 100 kbps */
        u8 received_channel;
        u8 control;
        u8 mask;
@@ -439,38 +421,44 @@ struct ieee80211_device;
 
 #include "ieee80211_crypt.h"
 
-#define SEC_KEY_1         (1<<0)
-#define SEC_KEY_2         (1<<1)
-#define SEC_KEY_3         (1<<2)
-#define SEC_KEY_4         (1<<3)
-#define SEC_ACTIVE_KEY    (1<<4)
-#define SEC_AUTH_MODE     (1<<5)
-#define SEC_UNICAST_GROUP (1<<6)
-#define SEC_LEVEL         (1<<7)
-#define SEC_ENABLED       (1<<8)
-
-#define SEC_LEVEL_0      0 /* None */
-#define SEC_LEVEL_1      1 /* WEP 40 and 104 bit */
-#define SEC_LEVEL_2      2 /* Level 1 + TKIP */
-#define SEC_LEVEL_2_CKIP 3 /* Level 1 + CKIP */
-#define SEC_LEVEL_3      4 /* Level 2 + CCMP */
-
-#define WEP_KEYS 4
-#define WEP_KEY_LEN 13
+#define SEC_KEY_1              (1<<0)
+#define SEC_KEY_2              (1<<1)
+#define SEC_KEY_3              (1<<2)
+#define SEC_KEY_4              (1<<3)
+#define SEC_ACTIVE_KEY         (1<<4)
+#define SEC_AUTH_MODE          (1<<5)
+#define SEC_UNICAST_GROUP      (1<<6)
+#define SEC_LEVEL              (1<<7)
+#define SEC_ENABLED            (1<<8)
+#define SEC_ENCRYPT            (1<<9)
+
+#define SEC_LEVEL_0            0       /* None */
+#define SEC_LEVEL_1            1       /* WEP 40 and 104 bit */
+#define SEC_LEVEL_2            2       /* Level 1 + TKIP */
+#define SEC_LEVEL_2_CKIP       3       /* Level 1 + CKIP */
+#define SEC_LEVEL_3            4       /* Level 2 + CCMP */
+
+#define SEC_ALG_NONE           0
+#define SEC_ALG_WEP            1
+#define SEC_ALG_TKIP           2
+#define SEC_ALG_CCMP           3
+
+#define WEP_KEYS               4
+#define WEP_KEY_LEN            13
+#define SCM_KEY_LEN            32
+#define SCM_TEMPORAL_KEY_LENGTH        16
 
 struct ieee80211_security {
        u16 active_key:2,
-            enabled:1,
-           auth_mode:2,
-            auth_algo:4,
-            unicast_uses_group:1;
+           enabled:1,
+           auth_mode:2, auth_algo:4, unicast_uses_group:1, encrypt:1;
+       u8 encode_alg[WEP_KEYS];
        u8 key_sizes[WEP_KEYS];
-       u8 keys[WEP_KEYS][WEP_KEY_LEN];
+       u8 keys[WEP_KEYS][SCM_KEY_LEN];
        u8 level;
        u16 flags;
 } __attribute__ ((packed));
 
-
 /*
 
  802.11 data frame from AP
@@ -494,7 +482,7 @@ enum ieee80211_mfie {
        MFIE_TYPE_RATES = 1,
        MFIE_TYPE_FH_SET = 2,
        MFIE_TYPE_DS_SET = 3,
-       MFIE_TYPE_CF_SET =  4,
+       MFIE_TYPE_CF_SET = 4,
        MFIE_TYPE_TIM = 5,
        MFIE_TYPE_IBSS_SET = 6,
        MFIE_TYPE_COUNTRY = 7,
@@ -516,11 +504,75 @@ enum ieee80211_mfie {
        MFIE_TYPE_RSN = 48,
        MFIE_TYPE_RATES_EX = 50,
        MFIE_TYPE_GENERIC = 221,
+       MFIE_TYPE_QOS_PARAMETER = 222,
 };
 
-struct ieee80211_info_element_hdr {
-       u8 id;
-       u8 len;
+/* Minimal header; can be used for passing 802.11 frames with sufficient
+ * information to determine what type of underlying data type is actually
+ * stored in the data. */
+struct ieee80211_hdr {
+       __le16 frame_ctl;
+       __le16 duration_id;
+       u8 payload[0];
+} __attribute__ ((packed));
+
+struct ieee80211_hdr_1addr {
+       __le16 frame_ctl;
+       __le16 duration_id;
+       u8 addr1[ETH_ALEN];
+       u8 payload[0];
+} __attribute__ ((packed));
+
+struct ieee80211_hdr_2addr {
+       __le16 frame_ctl;
+       __le16 duration_id;
+       u8 addr1[ETH_ALEN];
+       u8 addr2[ETH_ALEN];
+       u8 payload[0];
+} __attribute__ ((packed));
+
+struct ieee80211_hdr_3addr {
+       __le16 frame_ctl;
+       __le16 duration_id;
+       u8 addr1[ETH_ALEN];
+       u8 addr2[ETH_ALEN];
+       u8 addr3[ETH_ALEN];
+       __le16 seq_ctl;
+       u8 payload[0];
+} __attribute__ ((packed));
+
+struct ieee80211_hdr_4addr {
+       __le16 frame_ctl;
+       __le16 duration_id;
+       u8 addr1[ETH_ALEN];
+       u8 addr2[ETH_ALEN];
+       u8 addr3[ETH_ALEN];
+       __le16 seq_ctl;
+       u8 addr4[ETH_ALEN];
+       u8 payload[0];
+} __attribute__ ((packed));
+
+struct ieee80211_hdr_3addrqos {
+       __le16 frame_ctl;
+       __le16 duration_id;
+       u8 addr1[ETH_ALEN];
+       u8 addr2[ETH_ALEN];
+       u8 addr3[ETH_ALEN];
+       __le16 seq_ctl;
+       u8 payload[0];
+       __le16 qos_ctl;
+} __attribute__ ((packed));
+
+struct ieee80211_hdr_4addrqos {
+       __le16 frame_ctl;
+       __le16 duration_id;
+       u8 addr1[ETH_ALEN];
+       u8 addr2[ETH_ALEN];
+       u8 addr3[ETH_ALEN];
+       __le16 seq_ctl;
+       u8 addr4[ETH_ALEN];
+       u8 payload[0];
+       __le16 qos_ctl;
 } __attribute__ ((packed));
 
 struct ieee80211_info_element {
@@ -546,49 +598,77 @@ struct ieee80211_info_element {
        u16 status;
 */
 
-struct ieee80211_authentication {
+struct ieee80211_auth {
        struct ieee80211_hdr_3addr header;
        __le16 algorithm;
        __le16 transaction;
        __le16 status;
-       struct ieee80211_info_element info_element;
+       /* challenge */
+       struct ieee80211_info_element info_element[0];
 } __attribute__ ((packed));
 
+struct ieee80211_disassoc {
+       struct ieee80211_hdr_3addr header;
+       __le16 reason;
+} __attribute__ ((packed));
+
+/* Alias deauth for disassoc */
+#define ieee80211_deauth ieee80211_disassoc
+
+struct ieee80211_probe_request {
+       struct ieee80211_hdr_3addr header;
+       /* SSID, supported rates */
+       struct ieee80211_info_element info_element[0];
+} __attribute__ ((packed));
 
 struct ieee80211_probe_response {
        struct ieee80211_hdr_3addr header;
        u32 time_stamp[2];
        __le16 beacon_interval;
        __le16 capability;
-       struct ieee80211_info_element info_element;
+       /* SSID, supported rates, FH params, DS params,
+        * CF params, IBSS params, TIM (if beacon), RSN */
+       struct ieee80211_info_element info_element[0];
 } __attribute__ ((packed));
 
-struct ieee80211_assoc_request_frame {
+/* Alias beacon for probe_response */
+#define ieee80211_beacon ieee80211_probe_response
+
+struct ieee80211_assoc_request {
+       struct ieee80211_hdr_3addr header;
+       __le16 capability;
+       __le16 listen_interval;
+       /* SSID, supported rates, RSN */
+       struct ieee80211_info_element info_element[0];
+} __attribute__ ((packed));
+
+struct ieee80211_reassoc_request {
+       struct ieee80211_hdr_3addr header;
        __le16 capability;
        __le16 listen_interval;
        u8 current_ap[ETH_ALEN];
-       struct ieee80211_info_element info_element;
+       struct ieee80211_info_element info_element[0];
 } __attribute__ ((packed));
 
-struct ieee80211_assoc_response_frame {
+struct ieee80211_assoc_response {
        struct ieee80211_hdr_3addr header;
        __le16 capability;
        __le16 status;
        __le16 aid;
-       struct ieee80211_info_element info_element; /* supported rates */
+       /* supported rates */
+       struct ieee80211_info_element info_element[0];
 } __attribute__ ((packed));
 
-
 struct ieee80211_txb {
        u8 nr_frags;
        u8 encrypted;
-       u16 reserved;
-       u16 frag_size;
-       u16 payload_size;
+       u8 rts_included;
+       u8 reserved;
+       __le16 frag_size;
+       __le16 payload_size;
        struct sk_buff *fragments[0];
 };
 
-
 /* SWEEP TABLE ENTRIES NUMBER */
 #define MAX_SWEEP_TAB_ENTRIES            42
 #define MAX_SWEEP_TAB_ENTRIES_PER_PACKET  7
@@ -604,9 +684,68 @@ struct ieee80211_txb {
 
 #define MAX_WPA_IE_LEN 64
 
-#define NETWORK_EMPTY_ESSID (1<<0)
-#define NETWORK_HAS_OFDM    (1<<1)
-#define NETWORK_HAS_CCK     (1<<2)
+#define NETWORK_EMPTY_ESSID    (1<<0)
+#define NETWORK_HAS_OFDM       (1<<1)
+#define NETWORK_HAS_CCK        (1<<2)
+
+/* QoS structure */
+#define NETWORK_HAS_QOS_PARAMETERS      (1<<3)
+#define NETWORK_HAS_QOS_INFORMATION     (1<<4)
+#define NETWORK_HAS_QOS_MASK            (NETWORK_HAS_QOS_PARAMETERS | NETWORK_HAS_QOS_INFORMATION)
+
+#define QOS_QUEUE_NUM                   4
+#define QOS_OUI_LEN                     3
+#define QOS_OUI_TYPE                    2
+#define QOS_ELEMENT_ID                  221
+#define QOS_OUI_INFO_SUB_TYPE           0
+#define QOS_OUI_PARAM_SUB_TYPE          1
+#define QOS_VERSION_1                   1
+#define QOS_AIFSN_MIN_VALUE             2
+
+struct ieee80211_qos_information_element {
+       u8 elementID;
+       u8 length;
+       u8 qui[QOS_OUI_LEN];
+       u8 qui_type;
+       u8 qui_subtype;
+       u8 version;
+       u8 ac_info;
+} __attribute__ ((packed));
+
+struct ieee80211_qos_ac_parameter {
+       u8 aci_aifsn;
+       u8 ecw_min_max;
+       __le16 tx_op_limit;
+} __attribute__ ((packed));
+
+struct ieee80211_qos_parameter_info {
+       struct ieee80211_qos_information_element info_element;
+       u8 reserved;
+       struct ieee80211_qos_ac_parameter ac_params_record[QOS_QUEUE_NUM];
+} __attribute__ ((packed));
+
+struct ieee80211_qos_parameters {
+       __le16 cw_min[QOS_QUEUE_NUM];
+       __le16 cw_max[QOS_QUEUE_NUM];
+       u8 aifs[QOS_QUEUE_NUM];
+       u8 flag[QOS_QUEUE_NUM];
+       __le16 tx_op_limit[QOS_QUEUE_NUM];
+} __attribute__ ((packed));
+
+struct ieee80211_qos_data {
+       struct ieee80211_qos_parameters parameters;
+       int active;
+       int supported;
+       u8 param_count;
+       u8 old_param_count;
+};
+
+struct ieee80211_tim_parameters {
+       u8 tim_count;
+       u8 tim_period;
+} __attribute__ ((packed));
+
+/*******************************************************/
 
 struct ieee80211_network {
        /* These entries are used to identify a unique network */
@@ -616,6 +755,8 @@ struct ieee80211_network {
        u8 ssid[IW_ESSID_MAX_SIZE + 1];
        u8 ssid_len;
 
+       struct ieee80211_qos_data qos_data;
+
        /* These are network statistics */
        struct ieee80211_rx_stats stats;
        u16 capability;
@@ -631,10 +772,12 @@ struct ieee80211_network {
        u16 beacon_interval;
        u16 listen_interval;
        u16 atim_window;
+       u8 erp_value;
        u8 wpa_ie[MAX_WPA_IE_LEN];
        size_t wpa_ie_len;
        u8 rsn_ie[MAX_WPA_IE_LEN];
        size_t rsn_ie_len;
+       struct ieee80211_tim_parameters tim;
        struct list_head list;
 };
 
@@ -651,17 +794,52 @@ enum ieee80211_state {
 #define DEFAULT_MAX_SCAN_AGE (15 * HZ)
 #define DEFAULT_FTS 2346
 
-
 #define CFG_IEEE80211_RESERVE_FCS (1<<0)
 #define CFG_IEEE80211_COMPUTE_FCS (1<<1)
+#define CFG_IEEE80211_RTS (1<<2)
+
+#define IEEE80211_24GHZ_MIN_CHANNEL 1
+#define IEEE80211_24GHZ_MAX_CHANNEL 14
+#define IEEE80211_24GHZ_CHANNELS    14
+
+#define IEEE80211_52GHZ_MIN_CHANNEL 36
+#define IEEE80211_52GHZ_MAX_CHANNEL 165
+#define IEEE80211_52GHZ_CHANNELS    32
+
+enum {
+       IEEE80211_CH_PASSIVE_ONLY = (1 << 0),
+       IEEE80211_CH_B_ONLY = (1 << 2),
+       IEEE80211_CH_NO_IBSS = (1 << 3),
+       IEEE80211_CH_UNIFORM_SPREADING = (1 << 4),
+       IEEE80211_CH_RADAR_DETECT = (1 << 5),
+       IEEE80211_CH_INVALID = (1 << 6),
+};
+
+struct ieee80211_channel {
+       u32 freq;
+       u8 channel;
+       u8 flags;
+       u8 max_power;
+};
+
+struct ieee80211_geo {
+       u8 name[4];
+       u8 bg_channels;
+       u8 a_channels;
+       struct ieee80211_channel bg[IEEE80211_24GHZ_CHANNELS];
+       struct ieee80211_channel a[IEEE80211_52GHZ_CHANNELS];
+};
 
 struct ieee80211_device {
        struct net_device *dev;
+       struct ieee80211_security sec;
 
        /* Bookkeeping structures */
        struct net_device_stats stats;
        struct ieee80211_stats ieee_stats;
 
+       struct ieee80211_geo geo;
+
        /* Probe / Beacon management */
        struct list_head network_free_list;
        struct list_head network_list;
@@ -669,62 +847,102 @@ struct ieee80211_device {
        int scans;
        int scan_age;
 
-       int iw_mode; /* operating mode (IW_MODE_*) */
+       int iw_mode;            /* operating mode (IW_MODE_*) */
+       struct iw_spy_data spy_data;    /* iwspy support */
 
        spinlock_t lock;
 
-       int tx_headroom; /* Set to size of any additional room needed at front
-                         * of allocated Tx SKBs */
+       int tx_headroom;        /* Set to size of any additional room needed at front
+                                * of allocated Tx SKBs */
        u32 config;
 
        /* WEP and other encryption related settings at the device level */
-       int open_wep; /* Set to 1 to allow unencrypted frames */
+       int open_wep;           /* Set to 1 to allow unencrypted frames */
 
-       int reset_on_keychange; /* Set to 1 if the HW needs to be reset on
+       int reset_on_keychange; /* Set to 1 if the HW needs to be reset on
                                 * WEP key changes */
 
        /* If the host performs {en,de}cryption, then set to 1 */
        int host_encrypt;
+       int host_encrypt_msdu;
        int host_decrypt;
-       int ieee802_1x; /* is IEEE 802.1X used */
+       /* host performs multicast decryption */
+       int host_mc_decrypt;
+
+       int host_open_frag;
+       int host_build_iv;
+       int ieee802_1x;         /* is IEEE 802.1X used */
 
        /* WPA data */
        int wpa_enabled;
        int drop_unencrypted;
-       int tkip_countermeasures;
        int privacy_invoked;
        size_t wpa_ie_len;
        u8 *wpa_ie;
 
        struct list_head crypt_deinit_list;
        struct ieee80211_crypt_data *crypt[WEP_KEYS];
-       int tx_keyidx; /* default TX key index (crypt[tx_keyidx]) */
+       int tx_keyidx;          /* default TX key index (crypt[tx_keyidx]) */
        struct timer_list crypt_deinit_timer;
+       int crypt_quiesced;
 
-       int bcrx_sta_key; /* use individual keys to override default keys even
-                          * with RX of broad/multicast frames */
+       int bcrx_sta_key;       /* use individual keys to override default keys even
+                                * with RX of broad/multicast frames */
 
        /* Fragmentation structures */
        struct ieee80211_frag_entry frag_cache[IEEE80211_FRAG_CACHE_LEN];
        unsigned int frag_next_idx;
-       u16 fts; /* Fragmentation Threshold */
+       u16 fts;                /* Fragmentation Threshold */
+       u16 rts;                /* RTS threshold */
 
        /* Association info */
        u8 bssid[ETH_ALEN];
 
        enum ieee80211_state state;
 
-       int mode;       /* A, B, G */
-       int modulation; /* CCK, OFDM */
-       int freq_band;  /* 2.4Ghz, 5.2Ghz, Mixed */
-       int abg_ture;   /* ABG flag              */
+       int mode;               /* A, B, G */
+       int modulation;         /* CCK, OFDM */
+       int freq_band;          /* 2.4Ghz, 5.2Ghz, Mixed */
+       int abg_true;           /* ABG flag              */
+
+       int perfect_rssi;
+       int worst_rssi;
 
        /* Callback functions */
-       void (*set_security)(struct net_device *dev,
-                            struct ieee80211_security *sec);
-       int (*hard_start_xmit)(struct ieee80211_txb *txb,
-                              struct net_device *dev);
-       int (*reset_port)(struct net_device *dev);
+       void (*set_security) (struct net_device * dev,
+                             struct ieee80211_security * sec);
+       int (*hard_start_xmit) (struct ieee80211_txb * txb,
+                               struct net_device * dev, int pri);
+       int (*reset_port) (struct net_device * dev);
+       int (*is_queue_full) (struct net_device * dev, int pri);
+
+       int (*handle_management) (struct net_device * dev,
+                                 struct ieee80211_network * network, u16 type);
+
+       /* Typical STA methods */
+       int (*handle_auth) (struct net_device * dev,
+                           struct ieee80211_auth * auth);
+       int (*handle_deauth) (struct net_device * dev,
+                             struct ieee80211_auth * auth);
+       int (*handle_disassoc) (struct net_device * dev,
+                               struct ieee80211_disassoc * assoc);
+       int (*handle_beacon) (struct net_device * dev,
+                             struct ieee80211_beacon * beacon,
+                             struct ieee80211_network * network);
+       int (*handle_probe_response) (struct net_device * dev,
+                                     struct ieee80211_probe_response * resp,
+                                     struct ieee80211_network * network);
+       int (*handle_probe_request) (struct net_device * dev,
+                                    struct ieee80211_probe_request * req,
+                                    struct ieee80211_rx_stats * stats);
+       int (*handle_assoc_response) (struct net_device * dev,
+                                     struct ieee80211_assoc_response * resp,
+                                     struct ieee80211_network * network);
+
+       /* Typical AP methods */
+       int (*handle_assoc_request) (struct net_device * dev);
+       int (*handle_reassoc_request) (struct net_device * dev,
+                                      struct ieee80211_reassoc_request * req);
 
        /* This must be the last item so that it points to the data
         * allocated beyond this structure by alloc_ieee80211 */
@@ -736,12 +954,12 @@ struct ieee80211_device {
 #define IEEE_G            (1<<2)
 #define IEEE_MODE_MASK    (IEEE_A|IEEE_B|IEEE_G)
 
-extern inline void *ieee80211_priv(struct net_device *dev)
+static inline void *ieee80211_priv(struct net_device *dev)
 {
        return ((struct ieee80211_device *)netdev_priv(dev))->priv;
 }
 
-extern inline int ieee80211_is_empty_essid(const char *essid, int essid_len)
+static inline int ieee80211_is_empty_essid(const char *essid, int essid_len)
 {
        /* Single white space is for Linksys APs */
        if (essid_len == 1 && essid[0] == ' ')
@@ -757,7 +975,8 @@ extern inline int ieee80211_is_empty_essid(const char *essid, int essid_len)
        return 1;
 }
 
-extern inline int ieee80211_is_valid_mode(struct ieee80211_device *ieee, int mode)
+static inline int ieee80211_is_valid_mode(struct ieee80211_device *ieee,
+                                         int mode)
 {
        /*
         * It is possible for both access points and our device to support
@@ -783,14 +1002,17 @@ extern inline int ieee80211_is_valid_mode(struct ieee80211_device *ieee, int mod
        return 0;
 }
 
-extern inline int ieee80211_get_hdrlen(u16 fc)
+static inline int ieee80211_get_hdrlen(u16 fc)
 {
        int hdrlen = IEEE80211_3ADDR_LEN;
+       u16 stype = WLAN_FC_GET_STYPE(fc);
 
        switch (WLAN_FC_GET_TYPE(fc)) {
        case IEEE80211_FTYPE_DATA:
                if ((fc & IEEE80211_FCTL_FROMDS) && (fc & IEEE80211_FCTL_TODS))
                        hdrlen = IEEE80211_4ADDR_LEN;
+               if (stype & IEEE80211_STYPE_QOS_DATA)
+                       hdrlen += 2;
                break;
        case IEEE80211_FTYPE_CTL:
                switch (WLAN_FC_GET_STYPE(fc)) {
@@ -808,7 +1030,48 @@ extern inline int ieee80211_get_hdrlen(u16 fc)
        return hdrlen;
 }
 
+static inline u8 *ieee80211_get_payload(struct ieee80211_hdr *hdr)
+{
+       switch (ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl))) {
+       case IEEE80211_1ADDR_LEN:
+               return ((struct ieee80211_hdr_1addr *)hdr)->payload;
+       case IEEE80211_2ADDR_LEN:
+               return ((struct ieee80211_hdr_2addr *)hdr)->payload;
+       case IEEE80211_3ADDR_LEN:
+               return ((struct ieee80211_hdr_3addr *)hdr)->payload;
+       case IEEE80211_4ADDR_LEN:
+               return ((struct ieee80211_hdr_4addr *)hdr)->payload;
+       }
+       return NULL;
+}
+
+static inline int ieee80211_is_ofdm_rate(u8 rate)
+{
+       switch (rate & ~IEEE80211_BASIC_RATE_MASK) {
+       case IEEE80211_OFDM_RATE_6MB:
+       case IEEE80211_OFDM_RATE_9MB:
+       case IEEE80211_OFDM_RATE_12MB:
+       case IEEE80211_OFDM_RATE_18MB:
+       case IEEE80211_OFDM_RATE_24MB:
+       case IEEE80211_OFDM_RATE_36MB:
+       case IEEE80211_OFDM_RATE_48MB:
+       case IEEE80211_OFDM_RATE_54MB:
+               return 1;
+       }
+       return 0;
+}
 
+static inline int ieee80211_is_cck_rate(u8 rate)
+{
+       switch (rate & ~IEEE80211_BASIC_RATE_MASK) {
+       case IEEE80211_CCK_RATE_1MB:
+       case IEEE80211_CCK_RATE_2MB:
+       case IEEE80211_CCK_RATE_5MB:
+       case IEEE80211_CCK_RATE_11MB:
+               return 1;
+       }
+       return 0;
+}
 
 /* ieee80211.c */
 extern void free_ieee80211(struct net_device *dev);
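The ieee80211_is_ofdm_rate()/ieee80211_is_cck_rate() helpers classify a raw rate byte after masking off the basic-rate bit. A sketch of how scan handling might use them to derive the NETWORK_HAS_OFDM/NETWORK_HAS_CCK flags; the rates[], rates_len and flags fields of struct ieee80211_network are assumed here and are not visible in this hunk:

/* Sketch only: record which modulations a scanned network advertises. */
static void example_classify_rates(struct ieee80211_network *net)
{
	int i;

	for (i = 0; i < net->rates_len; i++) {
		if (ieee80211_is_ofdm_rate(net->rates[i]))
			net->flags |= NETWORK_HAS_OFDM;
		else if (ieee80211_is_cck_rate(net->rates[i]))
			net->flags |= NETWORK_HAS_CCK;
	}
}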
@@ -817,18 +1080,30 @@ extern struct net_device *alloc_ieee80211(int sizeof_priv);
 extern int ieee80211_set_encryption(struct ieee80211_device *ieee);
 
 /* ieee80211_tx.c */
-extern int ieee80211_xmit(struct sk_buff *skb,
-                         struct net_device *dev);
+extern int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev);
 extern void ieee80211_txb_free(struct ieee80211_txb *);
-
+extern int ieee80211_tx_frame(struct ieee80211_device *ieee,
+                             struct ieee80211_hdr *frame, int len);
 
 /* ieee80211_rx.c */
 extern int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
                        struct ieee80211_rx_stats *rx_stats);
 extern void ieee80211_rx_mgt(struct ieee80211_device *ieee,
-                            struct ieee80211_hdr *header,
+                            struct ieee80211_hdr_4addr *header,
                             struct ieee80211_rx_stats *stats);
 
+/* ieee80211_geo.c */
+extern const struct ieee80211_geo *ieee80211_get_geo(struct ieee80211_device
+                                                    *ieee);
+extern int ieee80211_set_geo(struct ieee80211_device *ieee,
+                            const struct ieee80211_geo *geo);
+
+extern int ieee80211_is_valid_channel(struct ieee80211_device *ieee,
+                                     u8 channel);
+extern int ieee80211_channel_to_index(struct ieee80211_device *ieee,
+                                     u8 channel);
+extern u8 ieee80211_freq_to_channel(struct ieee80211_device *ieee, u32 freq);
+
 /* ieee80211_wx.c */
 extern int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
                                 struct iw_request_info *info,
@@ -839,17 +1114,21 @@ extern int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
 extern int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
                                   struct iw_request_info *info,
                                   union iwreq_data *wrqu, char *key);
-
-
-extern inline void ieee80211_increment_scans(struct ieee80211_device *ieee)
+extern int ieee80211_wx_set_encodeext(struct ieee80211_device *ieee,
+                                     struct iw_request_info *info,
+                                     union iwreq_data *wrqu, char *extra);
+extern int ieee80211_wx_get_encodeext(struct ieee80211_device *ieee,
+                                     struct iw_request_info *info,
+                                     union iwreq_data *wrqu, char *extra);
+
+static inline void ieee80211_increment_scans(struct ieee80211_device *ieee)
 {
        ieee->scans++;
 }
 
-extern inline int ieee80211_get_scans(struct ieee80211_device *ieee)
+static inline int ieee80211_get_scans(struct ieee80211_device *ieee)
 {
        return ieee->scans;
 }
 
-
-#endif /* IEEE80211_H */
+#endif                         /* IEEE80211_H */
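The new geo interface lets a driver publish its channel map once via ieee80211_set_geo() and have the stack answer channel-validity and index queries afterwards. A minimal sketch of registering a three-channel 2.4 GHz table; the channel list and country string are purely illustrative:

/* Sketch only: publish a small 2.4 GHz-only geography. */
static void example_init_geo(struct ieee80211_device *ieee)
{
	static const u8 channels[] = { 1, 6, 11 };
	struct ieee80211_geo geo;
	int i;

	memset(&geo, 0, sizeof(geo));
	memcpy(geo.name, "US", 3);			/* 2 chars + NUL fits u8 name[4] */
	geo.bg_channels = ARRAY_SIZE(channels);
	for (i = 0; i < geo.bg_channels; i++) {
		geo.bg[i].channel = channels[i];
		geo.bg[i].freq = 2407 + channels[i] * 5;	/* MHz, valid for ch 1-13 */
	}
	ieee80211_set_geo(ieee, &geo);
}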
index b58a3bcc0dc0972c882f5992ad02264510769bf5..0a1c2d82ca4b9091b9f9dce9d575e5e04199c194 100644 (file)
 
 #include <linux/skbuff.h>
 
+enum {
+       IEEE80211_CRYPTO_TKIP_COUNTERMEASURES = (1 << 0),
+};
+
 struct ieee80211_crypto_ops {
        const char *name;
 
        /* init new crypto context (e.g., allocate private data space,
         * select IV, etc.); returns NULL on failure or pointer to allocated
         * private data on success */
-       void * (*init)(int keyidx);
+       void *(*init) (int keyidx);
 
        /* deinitialize crypto context and free allocated private data */
-       void (*deinit)(void *priv);
+       void (*deinit) (void *priv);
+
+       int (*build_iv) (struct sk_buff * skb, int hdr_len, void *priv);
 
        /* encrypt/decrypt return < 0 on error or >= 0 on success. The return
         * value from decrypt_mpdu is passed as the keyidx value for
@@ -42,34 +48,39 @@ struct ieee80211_crypto_ops {
         * encryption; if not, error will be returned; these functions are
         * called for all MPDUs (i.e., fragments).
         */
-       int (*encrypt_mpdu)(struct sk_buff *skb, int hdr_len, void *priv);
-       int (*decrypt_mpdu)(struct sk_buff *skb, int hdr_len, void *priv);
+       int (*encrypt_mpdu) (struct sk_buff * skb, int hdr_len, void *priv);
+       int (*decrypt_mpdu) (struct sk_buff * skb, int hdr_len, void *priv);
 
        /* These functions are called for full MSDUs, i.e. full frames.
         * These can be NULL if full MSDU operations are not needed. */
-       int (*encrypt_msdu)(struct sk_buff *skb, int hdr_len, void *priv);
-       int (*decrypt_msdu)(struct sk_buff *skb, int keyidx, int hdr_len,
-                           void *priv);
+       int (*encrypt_msdu) (struct sk_buff * skb, int hdr_len, void *priv);
+       int (*decrypt_msdu) (struct sk_buff * skb, int keyidx, int hdr_len,
+                            void *priv);
 
-       int (*set_key)(void *key, int len, u8 *seq, void *priv);
-       int (*get_key)(void *key, int len, u8 *seq, void *priv);
+       int (*set_key) (void *key, int len, u8 * seq, void *priv);
+       int (*get_key) (void *key, int len, u8 * seq, void *priv);
 
        /* procfs handler for printing out key information and possible
         * statistics */
-       char * (*print_stats)(char *p, void *priv);
+       char *(*print_stats) (char *p, void *priv);
+
+       /* Crypto specific flag get/set for configuration settings */
+       unsigned long (*get_flags) (void *priv);
+       unsigned long (*set_flags) (unsigned long flags, void *priv);
 
        /* maximum number of bytes added by encryption; encrypt buf is
         * allocated with extra_prefix_len bytes, copy of in_buf, and
         * extra_postfix_len; encrypt need not use all this space, but
         * the result must start at the beginning of the buffer and correct
         * length must be returned */
-       int extra_prefix_len, extra_postfix_len;
+       int extra_mpdu_prefix_len, extra_mpdu_postfix_len;
+       int extra_msdu_prefix_len, extra_msdu_postfix_len;
 
        struct module *owner;
 };
 
 struct ieee80211_crypt_data {
-       struct list_head list; /* delayed deletion list */
+       struct list_head list;  /* delayed deletion list */
        struct ieee80211_crypto_ops *ops;
        void *priv;
        atomic_t refcnt;
@@ -77,10 +88,11 @@ struct ieee80211_crypt_data {
 
 int ieee80211_register_crypto_ops(struct ieee80211_crypto_ops *ops);
 int ieee80211_unregister_crypto_ops(struct ieee80211_crypto_ops *ops);
-struct ieee80211_crypto_ops * ieee80211_get_crypto_ops(const char *name);
+struct ieee80211_crypto_ops *ieee80211_get_crypto_ops(const char *name);
 void ieee80211_crypt_deinit_entries(struct ieee80211_device *, int);
 void ieee80211_crypt_deinit_handler(unsigned long);
 void ieee80211_crypt_delayed_deinit(struct ieee80211_device *ieee,
                                    struct ieee80211_crypt_data **crypt);
+void ieee80211_crypt_quiescing(struct ieee80211_device *ieee);
 
 #endif
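The new get_flags/set_flags hooks give callers a way to toggle per-algorithm behaviour such as TKIP countermeasures without reaching into the cipher's private data. A sketch of flipping the countermeasures flag on the current transmit key; the caller and its locking are assumptions, not part of this header:

/* Sketch only: enable/disable TKIP countermeasures on the active crypt context. */
static void example_set_countermeasures(struct ieee80211_device *ieee, int on)
{
	struct ieee80211_crypt_data *crypt = ieee->crypt[ieee->tx_keyidx];
	unsigned long flags;

	if (!crypt || !crypt->ops || !crypt->ops->get_flags || !crypt->ops->set_flags)
		return;

	flags = crypt->ops->get_flags(crypt->priv);
	if (on)
		flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
	else
		flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
	crypt->ops->set_flags(flags, crypt->priv);
}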
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
new file mode 100644 (file)
index 0000000..429b738
--- /dev/null
@@ -0,0 +1,231 @@
+/* $FreeBSD: src/sys/net80211/ieee80211_radiotap.h,v 1.5 2005/01/22 20:12:05 sam Exp $ */
+/* $NetBSD: ieee80211_radiotap.h,v 1.11 2005/06/22 06:16:02 dyoung Exp $ */
+
+/*-
+ * Copyright (c) 2003, 2004 David Young.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of David Young may not be used to endorse or promote
+ *    products derived from this software without specific prior
+ *    written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DAVID YOUNG ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL DAVID
+ * YOUNG BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ */
+
+/*
+ * Modifications to fit into the linux IEEE 802.11 stack,
+ * Mike Kershaw (dragorn@kismetwireless.net)
+ */
+
+#ifndef IEEE80211RADIOTAP_H
+#define IEEE80211RADIOTAP_H
+
+#include <linux/if_ether.h>
+#include <linux/kernel.h>
+
+/* Radiotap header version (from official NetBSD feed) */
+#define IEEE80211RADIOTAP_VERSION      "1.5"
+/* Base version of the radiotap packet header data */
+#define PKTHDR_RADIOTAP_VERSION                0
+
+/* A generic radio capture format is desirable. There is one for
+ * Linux, but it is neither rigidly defined (there were not even
+ * units given for some fields) nor easily extensible.
+ *
+ * I suggest the following extensible radio capture format. It is
+ * based on a bitmap indicating which fields are present.
+ *
+ * I am trying to describe precisely what the application programmer
+ * should expect in the following, and for that reason I tell the
+ * units and origin of each measurement (where it applies), or else I
+ * use sufficiently weaselly language ("is a monotonically nondecreasing
+ * function of...") that I cannot set false expectations for lawyerly
+ * readers.
+ */
+
+/* XXX tcpdump/libpcap do not tolerate variable-length headers,
+ * yet, so we pad every radiotap header to 64 bytes. Ugh.
+ */
+#define IEEE80211_RADIOTAP_HDRLEN      64
+
+/* The radio capture header precedes the 802.11 header. */
+struct ieee80211_radiotap_header {
+       u8 it_version;          /* Version 0. Only increases
+                                * for drastic changes,
+                                * introduction of compatible
+                                * new fields does not count.
+                                */
+       u8 it_pad;
+       u16 it_len;             /* length of the whole
+                                * header in bytes, including
+                                * it_version, it_pad,
+                                * it_len, and data fields.
+                                */
+       u32 it_present;         /* A bitmap telling which
+                                * fields are present. Set bit 31
+                                * (0x80000000) to extend the
+                                * bitmap by another 32 bits.
+                                * Additional extensions are made
+                                * by setting bit 31.
+                                */
+};
+
+/* Name                                 Data type       Units
+ * ----                                 ---------       -----
+ *
+ * IEEE80211_RADIOTAP_TSFT              u64       microseconds
+ *
+ *      Value in microseconds of the MAC's 64-bit 802.11 Time
+ *      Synchronization Function timer when the first bit of the
+ *      MPDU arrived at the MAC. For received frames, only.
+ *
+ * IEEE80211_RADIOTAP_CHANNEL           2 x u16   MHz, bitmap
+ *
+ *      Tx/Rx frequency in MHz, followed by flags (see below).
+ *
+ * IEEE80211_RADIOTAP_FHSS              u16       see below
+ *
+ *      For frequency-hopping radios, the hop set (first byte)
+ *      and pattern (second byte).
+ *
+ * IEEE80211_RADIOTAP_RATE              u8        500kb/s
+ *
+ *      Tx/Rx data rate
+ *
+ * IEEE80211_RADIOTAP_DBM_ANTSIGNAL     int8_t          decibels from
+ *                                                      one milliwatt (dBm)
+ *
+ *      RF signal power at the antenna, decibel difference from
+ *      one milliwatt.
+ *
+ * IEEE80211_RADIOTAP_DBM_ANTNOISE      int8_t          decibels from
+ *                                                      one milliwatt (dBm)
+ *
+ *      RF noise power at the antenna, decibel difference from one
+ *      milliwatt.
+ *
+ * IEEE80211_RADIOTAP_DB_ANTSIGNAL      u8        decibel (dB)
+ *
+ *      RF signal power at the antenna, decibel difference from an
+ *      arbitrary, fixed reference.
+ *
+ * IEEE80211_RADIOTAP_DB_ANTNOISE       u8        decibel (dB)
+ *
+ *      RF noise power at the antenna, decibel difference from an
+ *      arbitrary, fixed reference point.
+ *
+ * IEEE80211_RADIOTAP_LOCK_QUALITY      u16       unitless
+ *
+ *      Quality of Barker code lock. Unitless. Monotonically
+ *      nondecreasing with "better" lock strength. Called "Signal
+ *      Quality" in datasheets.  (Is there a standard way to measure
+ *      this?)
+ *
+ * IEEE80211_RADIOTAP_TX_ATTENUATION    u16       unitless
+ *
+ *      Transmit power expressed as unitless distance from max
+ *      power set at factory calibration.  0 is max power.
+ *      Monotonically nondecreasing with lower power levels.
+ *
+ * IEEE80211_RADIOTAP_DB_TX_ATTENUATION u16       decibels (dB)
+ *
+ *      Transmit power expressed as decibel distance from max power
+ *      set at factory calibration.  0 is max power.  Monotonically
+ *      nondecreasing with lower power levels.
+ *
+ * IEEE80211_RADIOTAP_DBM_TX_POWER      int8_t          decibels from
+ *                                                      one milliwatt (dBm)
+ *
+ *      Transmit power expressed as dBm (decibels from a 1 milliwatt
+ *      reference). This is the absolute power level measured at
+ *      the antenna port.
+ *
+ * IEEE80211_RADIOTAP_FLAGS             u8        bitmap
+ *
+ *      Properties of transmitted and received frames. See flags
+ *      defined below.
+ *
+ * IEEE80211_RADIOTAP_ANTENNA           u8        antenna index
+ *
+ *      Unitless indication of the Rx/Tx antenna for this packet.
+ *      The first antenna is antenna 0.
+ *
+ * IEEE80211_RADIOTAP_FCS              u32       data
+ *
+ *     FCS from frame in network byte order.
+ */
+enum ieee80211_radiotap_type {
+       IEEE80211_RADIOTAP_TSFT = 0,
+       IEEE80211_RADIOTAP_FLAGS = 1,
+       IEEE80211_RADIOTAP_RATE = 2,
+       IEEE80211_RADIOTAP_CHANNEL = 3,
+       IEEE80211_RADIOTAP_FHSS = 4,
+       IEEE80211_RADIOTAP_DBM_ANTSIGNAL = 5,
+       IEEE80211_RADIOTAP_DBM_ANTNOISE = 6,
+       IEEE80211_RADIOTAP_LOCK_QUALITY = 7,
+       IEEE80211_RADIOTAP_TX_ATTENUATION = 8,
+       IEEE80211_RADIOTAP_DB_TX_ATTENUATION = 9,
+       IEEE80211_RADIOTAP_DBM_TX_POWER = 10,
+       IEEE80211_RADIOTAP_ANTENNA = 11,
+       IEEE80211_RADIOTAP_DB_ANTSIGNAL = 12,
+       IEEE80211_RADIOTAP_DB_ANTNOISE = 13,
+       IEEE80211_RADIOTAP_EXT = 31,
+};
+
+/* Channel flags. */
+#define        IEEE80211_CHAN_TURBO    0x0010  /* Turbo channel */
+#define        IEEE80211_CHAN_CCK      0x0020  /* CCK channel */
+#define        IEEE80211_CHAN_OFDM     0x0040  /* OFDM channel */
+#define        IEEE80211_CHAN_2GHZ     0x0080  /* 2 GHz spectrum channel. */
+#define        IEEE80211_CHAN_5GHZ     0x0100  /* 5 GHz spectrum channel */
+#define        IEEE80211_CHAN_PASSIVE  0x0200  /* Only passive scan allowed */
+#define        IEEE80211_CHAN_DYN      0x0400  /* Dynamic CCK-OFDM channel */
+#define        IEEE80211_CHAN_GFSK     0x0800  /* GFSK channel (FHSS PHY) */
+
+/* For IEEE80211_RADIOTAP_FLAGS */
+#define        IEEE80211_RADIOTAP_F_CFP        0x01    /* sent/received
+                                                * during CFP
+                                                */
+#define        IEEE80211_RADIOTAP_F_SHORTPRE   0x02    /* sent/received
+                                                * with short
+                                                * preamble
+                                                */
+#define        IEEE80211_RADIOTAP_F_WEP        0x04    /* sent/received
+                                                * with WEP encryption
+                                                */
+#define        IEEE80211_RADIOTAP_F_FRAG       0x08    /* sent/received
+                                                * with fragmentation
+                                                */
+#define        IEEE80211_RADIOTAP_F_FCS        0x10    /* frame includes FCS */
+#define        IEEE80211_RADIOTAP_F_DATAPAD    0x20    /* frame has padding between
+                                                * 802.11 header and payload
+                                                * (to 32-bit boundary)
+                                                */
+
+/* Ugly macro to convert literal channel numbers into their MHz equivalents
+ * There are certainly some conditions that will break this (like feeding it '30')

+ * but they shouldn't arise since nothing talks on channel 30. */
+#define ieee80211chan2mhz(x) \
+       (((x) <= 14) ? \
+       (((x) == 14) ? 2484 : ((x) * 5) + 2407) : \
+       ((x) + 1000) * 5)
+
+#endif                         /* IEEE80211_RADIOTAP_H */
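A monitor-mode driver prepends this header to every captured frame before handing it up. A minimal sketch that reports only the data rate; note that radiotap fields are little-endian on the wire, so the plain assignments below assume a little-endian host for brevity (real code would use cpu_to_le16/cpu_to_le32):

/* Sketch only: a radiotap header carrying just the RATE field. */
struct example_rtap {
	struct ieee80211_radiotap_header hdr;
	u8 rate;			/* IEEE80211_RADIOTAP_RATE, units of 500 kb/s */
} __attribute__ ((packed));

static void example_fill_rtap(struct example_rtap *r, u8 rate_500kbps)
{
	r->hdr.it_version = PKTHDR_RADIOTAP_VERSION;
	r->hdr.it_pad = 0;
	r->hdr.it_len = sizeof(*r);
	r->hdr.it_present = 1 << IEEE80211_RADIOTAP_RATE;
	r->rate = rate_500kbps;		/* e.g. 0x6c for 54 Mb/s */
}

The ieee80211chan2mhz() helper above performs the channel-to-frequency conversion used elsewhere in the stack, e.g. ieee80211chan2mhz(6) == 2437 and ieee80211chan2mhz(14) == 2484.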
index ecb75526cba09d817bff41df4413f748a0e19662..e0498bd360042121d99fa0aea7ec77b8ebb2a03b 100644 (file)
@@ -207,7 +207,7 @@ struct sock {
        struct sk_buff_head     sk_write_queue;
        int                     sk_wmem_queued;
        int                     sk_forward_alloc;
-       unsigned int            sk_allocation;
+       gfp_t                   sk_allocation;
        int                     sk_sndbuf;
        int                     sk_route_caps;
        unsigned long           sk_flags;
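sk_allocation records the allocation mode a socket's memory should be requested with (for example GFP_ATOMIC for sockets driven from softirq context); typing it as gfp_t makes call sites like the following sketch check cleanly under sparse (the helper name is made up):

/* Sketch only: allocate an skb using the socket's own allocation mode. */
static struct sk_buff *example_alloc_for_sock(struct sock *sk, unsigned int size)
{
	return alloc_skb(size, sk->sk_allocation);
}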
index 614cb6ba564e41c476381c0201c9c0e2a7076f5c..877efa434700bec7d7a997b8492568be0316af5a 100644 (file)
@@ -86,7 +86,6 @@ static inline struct sppp *sppp_of(struct net_device *dev)
 
 void sppp_attach (struct ppp_device *pd);
 void sppp_detach (struct net_device *dev);
-void sppp_input (struct net_device *dev, struct sk_buff *m);
 int sppp_do_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd);
 struct sk_buff *sppp_dequeue (struct net_device *dev);
 int sppp_isempty (struct net_device *dev);
index bed4b7c9be99342b51203e43586dd50580825250..e6b61fab66ddf69a66adc5f81f6647fe2ad11da9 100644 (file)
@@ -146,7 +146,7 @@ struct scsi_cmnd {
 #define SCSI_STATE_MLQUEUE         0x100b
 
 
-extern struct scsi_cmnd *scsi_get_command(struct scsi_device *, int);
+extern struct scsi_cmnd *scsi_get_command(struct scsi_device *, gfp_t);
 extern void scsi_put_command(struct scsi_cmnd *);
 extern void scsi_io_completion(struct scsi_cmnd *, unsigned int, unsigned int);
 extern void scsi_finish_command(struct scsi_cmnd *cmd);
index 6a140020d7cb0538ce69578488ba4b6eadb08adb..2539debb7993cf91c687ebd9f6656e5a790eee07 100644 (file)
@@ -45,7 +45,7 @@ struct scsi_request {
                                           level driver) of this request */
 };
 
-extern struct scsi_request *scsi_allocate_request(struct scsi_device *, int);
+extern struct scsi_request *scsi_allocate_request(struct scsi_device *, gfp_t);
 extern void scsi_release_request(struct scsi_request *);
 extern void scsi_wait_req(struct scsi_request *, const void *cmnd,
                          void *buffer, unsigned bufflen,
index 3a2fd2cc9f193096c1426a1050c77b1c353839ca..83489c3abbaf5f048293c4aea3041da63143a879 100644 (file)
@@ -111,7 +111,7 @@ size_t snd_dma_get_reserved_buf(struct snd_dma_buffer *dmab, unsigned int id);
 int snd_dma_reserve_buf(struct snd_dma_buffer *dmab, unsigned int id);
 
 /* basic memory allocation functions */
-void *snd_malloc_pages(size_t size, unsigned int gfp_flags);
+void *snd_malloc_pages(size_t size, gfp_t gfp_flags);
 void snd_free_pages(void *ptr, size_t size);
 
 #endif /* __SOUND_MEMALLOC_H */
index aefa73a8a586c50383ab22f6432186d21f5d2f08..0c56320d38dc38eac05fb4eb04b5d0e626352019 100644 (file)
@@ -133,7 +133,7 @@ struct audit_buffer {
        struct list_head     list;
        struct sk_buff       *skb;      /* formatted skb ready to send */
        struct audit_context *ctx;      /* NULL or associated context */
-       int                  gfp_mask;
+       gfp_t                gfp_mask;
 };
 
 static void audit_set_pid(struct audit_buffer *ab, pid_t pid)
@@ -647,7 +647,7 @@ static inline void audit_get_stamp(struct audit_context *ctx,
  * will be written at syscall exit.  If there is no associated task, tsk
  * should be NULL. */
 
-struct audit_buffer *audit_log_start(struct audit_context *ctx, int gfp_mask,
+struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
                                     int type)
 {
        struct audit_buffer     *ab     = NULL;
@@ -879,7 +879,7 @@ void audit_log_end(struct audit_buffer *ab)
 /* Log an audit record.  This is a convenience function that calls
  * audit_log_start, audit_log_vformat, and audit_log_end.  It may be
  * called in any context. */
-void audit_log(struct audit_context *ctx, int gfp_mask, int type, 
+void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type, 
               const char *fmt, ...)
 {
        struct audit_buffer *ab;
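Callers of audit_log() now pass a genuine gfp_t. A one-function sketch of emitting a standalone record from process context; AUDIT_KERNEL is the asynchronous record type used for messages not tied to a syscall:

/* Sketch only: log a one-off kernel message; NULL context = no syscall,
 * GFP_KERNEL because this caller may sleep. */
static void example_audit_note(void)
{
	audit_log(NULL, GFP_KERNEL, AUDIT_KERNEL, "example subsystem initialized");
}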
index 88696f639aab890140657aba7fcd2956ddfe6623..d8a68509e7299df13233c98134c9431e994faa41 100644 (file)
@@ -803,7 +803,7 @@ static void audit_log_task_info(struct audit_buffer *ab)
        up_read(&mm->mmap_sem);
 }
 
-static void audit_log_exit(struct audit_context *context, unsigned int gfp_mask)
+static void audit_log_exit(struct audit_context *context, gfp_t gfp_mask)
 {
        int i;
        struct audit_buffer *ab;
index cdd4dcd8fb630be76f35246cb3196f327fef252b..36c5d9cd4cc14b8b0f342600c69b7a653132f1f1 100644 (file)
@@ -90,7 +90,7 @@ int kexec_should_crash(struct task_struct *p)
 static int kimage_is_destination_range(struct kimage *image,
                                       unsigned long start, unsigned long end);
 static struct page *kimage_alloc_page(struct kimage *image,
-                                      unsigned int gfp_mask,
+                                      gfp_t gfp_mask,
                                       unsigned long dest);
 
 static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
@@ -326,8 +326,7 @@ static int kimage_is_destination_range(struct kimage *image,
        return 0;
 }
 
-static struct page *kimage_alloc_pages(unsigned int gfp_mask,
-                                       unsigned int order)
+static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
 {
        struct page *pages;
 
@@ -654,7 +653,7 @@ static kimage_entry_t *kimage_dst_used(struct kimage *image,
 }
 
 static struct page *kimage_alloc_page(struct kimage *image,
-                                       unsigned int gfp_mask,
+                                       gfp_t gfp_mask,
                                        unsigned long destination)
 {
        /*
index 2d5c4567644279d3f73c6d0563b0b72ee4b40dc2..10bc5ec496d72a25e70b42785cfaee781312dddb 100644 (file)
@@ -1095,7 +1095,7 @@ static inline void eat_page(void *page)
        *eaten_memory = c;
 }
 
-unsigned long get_usable_page(unsigned gfp_mask)
+unsigned long get_usable_page(gfp_t gfp_mask)
 {
        unsigned long m;
 
index d4df21debc4ddb77948206f6091fb27c569e9909..6414b2fb482d48255e0e54e23dcead7ee849658c 100644 (file)
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -72,7 +72,7 @@ static void free_layer(struct idr *idp, struct idr_layer *p)
  * If the system is REALLY out of memory this function returns 0,
  * otherwise 1.
  */
-int idr_pre_get(struct idr *idp, unsigned gfp_mask)
+int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 {
        while (idp->id_free_cnt < IDR_FREE_MAX) {
                struct idr_layer *new;
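idr_pre_get() preallocates layers with the given mask so the subsequent insertion cannot fail for lack of memory; the usual two-step calling pattern looks roughly like the sketch below (idr_get_new() is the existing insertion helper, not something this patch touches):

/* Sketch only: allocate an id for `ptr' with the two-step idr API. */
static int example_get_id(struct idr *idr, void *ptr, int *id)
{
	int ret;

	do {
		if (!idr_pre_get(idr, GFP_KERNEL))
			return -ENOMEM;
		ret = idr_get_new(idr, ptr, id);
	} while (ret == -EAGAIN);

	return ret;
}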
index dd0917dd9fa9df9274b24e6acbe3abddaa3b17ed..253d3004ace90edae789bb23f16c2919a4e6830c 100644 (file)
@@ -100,7 +100,7 @@ static void fill_kobj_path(struct kobject *kobj, char *path, int length)
  * @kobj:      kobject in question, with which to build the path
  * @gfp_mask:  the allocation type used to allocate the path
  */
-char *kobject_get_path(struct kobject *kobj, int gfp_mask)
+char *kobject_get_path(struct kobject *kobj, gfp_t gfp_mask)
 {
        char *path;
        int len;
index 04ca4429ddfaf4a077f48c45bb2b718d647b4e42..7ef6f6a17aa65a667a01417a0fcdc2265021482b 100644 (file)
@@ -62,7 +62,7 @@ static struct sock *uevent_sock;
  * @gfp_mask:
  */
 static int send_uevent(const char *signal, const char *obj,
-                      char **envp, int gfp_mask)
+                      char **envp, gfp_t gfp_mask)
 {
        struct sk_buff *skb;
        char *pos;
@@ -98,7 +98,7 @@ static int send_uevent(const char *signal, const char *obj,
 }
 
 static int do_kobject_uevent(struct kobject *kobj, enum kobject_action action, 
-                            struct attribute *attr, int gfp_mask)
+                            struct attribute *attr, gfp_t gfp_mask)
 {
        char *path;
        char *attrpath;
index 1e934c196f0f1f8dbd7e69a2701f0ecc7ac1a1c7..6f3093efbd7b0f084edc5b3a85e6b71bbb357783 100644 (file)
@@ -254,7 +254,7 @@ unsigned int textsearch_find_continuous(struct ts_config *conf,
  *         parameters or a ERR_PTR().
  */
 struct ts_config *textsearch_prepare(const char *algo, const void *pattern,
-                                    unsigned int len, int gfp_mask, int flags)
+                                    unsigned int len, gfp_t gfp_mask, int flags)
 {
        int err = -ENOENT;
        struct ts_config *conf;
index b5346576e58d252ea63224606bd2564cb2f088fa..1c31b2fd2ca5afdfb5e7f6bc79cc5f5eb91498ac 100644 (file)
@@ -377,7 +377,7 @@ int filemap_write_and_wait_range(struct address_space *mapping,
  * This function does not add the page to the LRU.  The caller must do that.
  */
 int add_to_page_cache(struct page *page, struct address_space *mapping,
-               pgoff_t offset, int gfp_mask)
+               pgoff_t offset, gfp_t gfp_mask)
 {
        int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
 
@@ -401,7 +401,7 @@ int add_to_page_cache(struct page *page, struct address_space *mapping,
 EXPORT_SYMBOL(add_to_page_cache);
 
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
-                               pgoff_t offset, int gfp_mask)
+                               pgoff_t offset, gfp_t gfp_mask)
 {
        int ret = add_to_page_cache(page, mapping, offset, gfp_mask);
        if (ret == 0)
@@ -591,7 +591,7 @@ EXPORT_SYMBOL(find_lock_page);
  * memory exhaustion.
  */
 struct page *find_or_create_page(struct address_space *mapping,
-               unsigned long index, unsigned int gfp_mask)
+               unsigned long index, gfp_t gfp_mask)
 {
        struct page *page, *cached_page = NULL;
        int err;
@@ -683,7 +683,7 @@ struct page *
 grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
 {
        struct page *page = find_get_page(mapping, index);
-       unsigned int gfp_mask;
+       gfp_t gfp_mask;
 
        if (page) {
                if (!TestSetPageLocked(page))
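
Note that add_to_page_cache() strips __GFP_HIGHMEM before radix_tree_preload(): the page being inserted may well live in highmem, but the radix-tree nodes that index it come from the slab allocator and therefore cannot. A hypothetical caller passing a highmem-capable mask stays correct for exactly that reason:

        /* Hypothetical caller; mapping and index are assumed to exist. */
        struct page *page;

        page = find_or_create_page(mapping, index, GFP_HIGHUSER);
        if (!page)
                return -ENOMEM;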
index 90e1861e2da0defa1a9b8514a9f781b8529a6197..ce2e7e8bbfa7102a78473d1a47c24a41f7788b83 100644 (file)
 
 static mempool_t *page_pool, *isa_page_pool;
 
-static void *page_pool_alloc(gfp_t gfp_mask, void *data)
+static void *page_pool_alloc_isa(gfp_t gfp_mask, void *data)
 {
-       unsigned int gfp = gfp_mask | (unsigned int) (long) data;
-
-       return alloc_page(gfp);
+       return alloc_page(gfp_mask | GFP_DMA);
 }
 
 static void page_pool_free(void *page, void *data)
@@ -51,6 +49,12 @@ static void page_pool_free(void *page, void *data)
  *  n means that there are (n-1) current users of it.
  */
 #ifdef CONFIG_HIGHMEM
+
+static void *page_pool_alloc(gfp_t gfp_mask, void *data)
+{
+       return alloc_page(gfp_mask);
+}
+
 static int pkmap_count[LAST_PKMAP];
 static unsigned int last_pkmap_nr;
 static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);
@@ -267,7 +271,7 @@ int init_emergency_isa_pool(void)
        if (isa_page_pool)
                return 0;
 
-       isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc, page_pool_free, (void *) __GFP_DMA);
+       isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc_isa, page_pool_free, NULL);
        if (!isa_page_pool)
                BUG();
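
With the GFP_DMA bit folded into page_pool_alloc_isa() itself, the pool no longer has to smuggle flags through the opaque pool_data argument. Callers are unaffected: a hypothetical user of the ISA pool still just passes its own gfp_t through mempool_alloc():

        struct page *p;

        p = mempool_alloc(isa_page_pool, GFP_NOIO);   /* gfp_t reaches page_pool_alloc_isa() */
        if (p) {
                /* ... use the ISA-DMA-capable page ... */
                mempool_free(p, isa_page_pool);
        }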
 
index 37af443eb0944e5e81b4094bf5b7312540ce170d..1d5c64df1653bf6f1c4a232477099ae4fd1112e3 100644 (file)
@@ -700,7 +700,7 @@ static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
        case MPOL_BIND:
                /* Lower zones don't get a policy applied */
                /* Careful: current->mems_allowed might have moved */
-               if ((gfp & GFP_ZONEMASK) >= policy_zone)
+               if (gfp_zone(gfp) >= policy_zone)
                        if (cpuset_zonelist_valid_mems_allowed(policy->v.zonelist))
                                return policy->v.zonelist;
                /*FALL THROUGH*/
@@ -712,7 +712,7 @@ static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
                nd = 0;
                BUG();
        }
-       return NODE_DATA(nd)->node_zonelists + (gfp & GFP_ZONEMASK);
+       return NODE_DATA(nd)->node_zonelists + gfp_zone(gfp);
 }
 
 /* Do dynamic interleaving for a process */
@@ -757,7 +757,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, unsigned ni
        struct page *page;
 
        BUG_ON(!node_online(nid));
-       zl = NODE_DATA(nid)->node_zonelists + (gfp & GFP_ZONEMASK);
+       zl = NODE_DATA(nid)->node_zonelists + gfp_zone(gfp);
        page = __alloc_pages(gfp, order, zl);
        if (page && page_zone(page) == zl->zones[0]) {
                zone_pcp(zl->zones[0],get_cpu())->interleave_hit++;
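
gfp_zone() replaces the open-coded (gfp & GFP_ZONEMASK) arithmetic; conceptually it just extracts the zone index encoded in the low bits of the mask. A rough sketch (the real helper lives in <linux/gfp.h> and may carry additional sanity checks):

        static inline int gfp_zone(gfp_t gfp)         /* conceptual sketch only */
        {
                return ((__force int) gfp) & GFP_ZONEMASK;
        }

        /* The hunk above then reads as: pick the node's zonelist that matches
         * the zone bits of the incoming gfp mask, with the __force cast kept
         * in one audited place instead of at every call site. */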
index 9e377ea700b2bd84c05a6661a53058444bbe7ee9..1a99b80480d3da562117c14db409149abdfd7e61 100644 (file)
@@ -205,7 +205,7 @@ void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
        void *element;
        unsigned long flags;
        wait_queue_t wait;
-       unsigned int gfp_temp;
+       gfp_t gfp_temp;
 
        might_sleep_if(gfp_mask & __GFP_WAIT);
 
index e1d3d77f4aeef89259d9e3b09e500042a42d7a87..94c864eac9c48e26577d46f5e68cb5b9b3bcba61 100644 (file)
@@ -734,7 +734,7 @@ buffered_rmqueue(struct zone *zone, int order, gfp_t gfp_flags)
  * of the allocation.
  */
 int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
-                     int classzone_idx, int can_try_harder, int gfp_high)
+                     int classzone_idx, int can_try_harder, gfp_t gfp_high)
 {
        /* free_pages my go negative - that's OK */
        long min = mark, free_pages = z->free_pages - (1 << order) + 1;
@@ -777,7 +777,7 @@ struct page * fastcall
 __alloc_pages(gfp_t gfp_mask, unsigned int order,
                struct zonelist *zonelist)
 {
-       const int wait = gfp_mask & __GFP_WAIT;
+       const gfp_t wait = gfp_mask & __GFP_WAIT;
        struct zone **zones, *z;
        struct page *page;
        struct reclaim_state reclaim_state;
@@ -996,7 +996,7 @@ fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
         * get_zeroed_page() returns a 32-bit address, which cannot represent
         * a highmem page
         */
-       BUG_ON(gfp_mask & __GFP_HIGHMEM);
+       BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
 
        page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
        if (page)
@@ -1089,7 +1089,7 @@ static unsigned int nr_free_zone_pages(int offset)
  */
 unsigned int nr_free_buffer_pages(void)
 {
-       return nr_free_zone_pages(GFP_USER & GFP_ZONEMASK);
+       return nr_free_zone_pages(gfp_zone(GFP_USER));
 }
 
 /*
@@ -1097,7 +1097,7 @@ unsigned int nr_free_buffer_pages(void)
  */
 unsigned int nr_free_pagecache_pages(void)
 {
-       return nr_free_zone_pages(GFP_HIGHUSER & GFP_ZONEMASK);
+       return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER));
 }
 
 #ifdef CONFIG_HIGHMEM
@@ -1428,6 +1428,16 @@ static int __init build_zonelists_node(pg_data_t *pgdat, struct zonelist *zoneli
        return j;
 }
 
+static inline int highest_zone(int zone_bits)
+{
+       int res = ZONE_NORMAL;
+       if (zone_bits & (__force int)__GFP_HIGHMEM)
+               res = ZONE_HIGHMEM;
+       if (zone_bits & (__force int)__GFP_DMA)
+               res = ZONE_DMA;
+       return res;
+}
+
 #ifdef CONFIG_NUMA
 #define MAX_NODE_LOAD (num_online_nodes())
 static int __initdata node_load[MAX_NUMNODES];
@@ -1524,11 +1534,7 @@ static void __init build_zonelists(pg_data_t *pgdat)
                        zonelist = pgdat->node_zonelists + i;
                        for (j = 0; zonelist->zones[j] != NULL; j++);
 
-                       k = ZONE_NORMAL;
-                       if (i & __GFP_HIGHMEM)
-                               k = ZONE_HIGHMEM;
-                       if (i & __GFP_DMA)
-                               k = ZONE_DMA;
+                       k = highest_zone(i);
 
                        j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
                        zonelist->zones[j] = NULL;
@@ -1549,12 +1555,7 @@ static void __init build_zonelists(pg_data_t *pgdat)
                zonelist = pgdat->node_zonelists + i;
 
                j = 0;
-               k = ZONE_NORMAL;
-               if (i & __GFP_HIGHMEM)
-                       k = ZONE_HIGHMEM;
-               if (i & __GFP_DMA)
-                       k = ZONE_DMA;
-
+               k = highest_zone(i);
                j = build_zonelists_node(pgdat, zonelist, j, k);
                /*
                 * Now we build the zonelist so that it contains the zones
index ea064d89cda9510d16755ab95127eae62badf94b..55e04a0734c19869d12fd07e5df398c207de121b 100644 (file)
@@ -85,7 +85,7 @@ enum sgp_type {
 static int shmem_getpage(struct inode *inode, unsigned long idx,
                         struct page **pagep, enum sgp_type sgp, int *type);
 
-static inline struct page *shmem_dir_alloc(unsigned int gfp_mask)
+static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
 {
        /*
         * The above definition of ENTRIES_PER_PAGE, and the use of
@@ -898,7 +898,7 @@ struct page *shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry,
 }
 
 static struct page *
-shmem_alloc_page(unsigned long gfp, struct shmem_inode_info *info,
+shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
                 unsigned long idx)
 {
        struct vm_area_struct pvma;
index d05c678bceb3de1f23ef3ae91170299e2c1372e5..d30423f167a2c0a5c4c4d13b0678495fffa86189 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -386,7 +386,7 @@ struct kmem_cache_s {
        unsigned int            gfporder;
 
        /* force GFP flags, e.g. GFP_DMA */
-       unsigned int            gfpflags;
+       gfp_t                   gfpflags;
 
        size_t                  colour;         /* cache colouring range */
        unsigned int            colour_off;     /* colour offset */
@@ -2117,7 +2117,7 @@ static void cache_init_objs(kmem_cache_t *cachep,
        slabp->free = 0;
 }
 
-static void kmem_flagcheck(kmem_cache_t *cachep, unsigned int flags)
+static void kmem_flagcheck(kmem_cache_t *cachep, gfp_t flags)
 {
        if (flags & SLAB_DMA) {
                if (!(cachep->gfpflags & GFP_DMA))
@@ -2152,7 +2152,7 @@ static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid)
        struct slab     *slabp;
        void            *objp;
        size_t           offset;
-       unsigned int     local_flags;
+       gfp_t            local_flags;
        unsigned long    ctor_flags;
        struct kmem_list3 *l3;
 
@@ -2546,7 +2546,7 @@ static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
 /*
  * A interface to enable slab creation on nodeid
  */
-static void *__cache_alloc_node(kmem_cache_t *cachep, int flags, int nodeid)
+static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 {
        struct list_head *entry;
        struct slab *slabp;
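
kmem_flagcheck() enforces the pairing of GFP_DMA allocation requests with caches that were created DMA-capable. A hypothetical driver honouring that pairing:

        kmem_cache_t *dma_cache;                      /* illustrative names */
        void *obj;

        dma_cache = kmem_cache_create("mydrv_dma", 256, 0,
                                      SLAB_CACHE_DMA, NULL, NULL);
        obj = kmem_cache_alloc(dma_cache, GFP_KERNEL | GFP_DMA);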
index 64f9570cff568103c8031706322c0e78b8f93029..843c87d1e61f5f63e68aa53a903567a3802958f8 100644 (file)
@@ -70,7 +70,7 @@ struct scan_control {
        unsigned int priority;
 
        /* This context's GFP mask */
-       unsigned int gfp_mask;
+       gfp_t gfp_mask;
 
        int may_writepage;
 
@@ -186,7 +186,7 @@ EXPORT_SYMBOL(remove_shrinker);
  *
  * Returns the number of slab objects which we shrunk.
  */
-static int shrink_slab(unsigned long scanned, unsigned int gfp_mask,
+static int shrink_slab(unsigned long scanned, gfp_t gfp_mask,
                        unsigned long lru_pages)
 {
        struct shrinker *shrinker;
@@ -926,7 +926,7 @@ shrink_caches(struct zone **zones, struct scan_control *sc)
  * holds filesystem locks which prevent writeout this might not work, and the
  * allocation attempt will fail.
  */
-int try_to_free_pages(struct zone **zones, unsigned int gfp_mask)
+int try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
 {
        int priority;
        int ret = 0;
@@ -1338,7 +1338,7 @@ module_init(kswapd_init)
 /*
  * Try to free up some pages from this zone through reclaim.
  */
-int zone_reclaim(struct zone *zone, unsigned int gfp_mask, unsigned int order)
+int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 {
        struct scan_control sc;
        int nr_pages = 1 << order;
index 1dcf7fa1f0fead6dc2469d0a478025284921e804..e68700f950a55d3cff784a54d655001a7da30dec 100644 (file)
@@ -1625,12 +1625,9 @@ static int neightbl_fill_info(struct neigh_table *tbl, struct sk_buff *skb,
 
                memset(&ndst, 0, sizeof(ndst));
 
-               for (cpu = 0; cpu < NR_CPUS; cpu++) {
+               for_each_cpu(cpu) {
                        struct neigh_statistics *st;
 
-                       if (!cpu_possible(cpu))
-                               continue;
-
                        st = per_cpu_ptr(tbl->stats, cpu);
                        ndst.ndts_allocs                += st->allocs;
                        ndst.ndts_destroys              += st->destroys;
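
for_each_cpu() walks only the CPUs that are possible on this system, so the explicit cpu_possible() test can go. The per-CPU summation pattern, sketched with a single hypothetical counter (tbl and the field names are as in the hunk above):

        unsigned long total_allocs = 0;
        int cpu;

        for_each_cpu(cpu) {
                struct neigh_statistics *st = per_cpu_ptr(tbl->stats, cpu);
                total_allocs += st->allocs;
        }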
index 5f043d346694dc012ba3403b82d875cd6a0133f1..7fc3e9e28c34c3353b3e48082916d4cb737ad5e8 100644 (file)
@@ -75,7 +75,7 @@
  * By design there should only be *one* "controlling" process. In practice 
  * multiple write accesses gives unpredictable result. Understood by "write" 
  * to /proc gives result code thats should be read be the "writer".
- * For pratical use this should be no problem.
+ * For practical use this should be no problem.
  *
  * Note when adding devices to a specific CPU there good idea to also assign 
  * /proc/irq/XX/smp_affinity so TX-interrupts gets bound to the same CPU. 
@@ -96,7 +96,7 @@
  * New xmit() return, do_div and misc clean up by Stephen Hemminger 
  * <shemminger@osdl.org> 040923
  *
- * Rany Dunlap fixed u64 printk compiler waring 
+ * Randy Dunlap fixed u64 printk compiler warning
  *
  * Remove FCS from BW calculation.  Lennert Buytenhek <buytenh@wantstofly.org>
  * New time handling. Lennert Buytenhek <buytenh@wantstofly.org> 041213
 #include <linux/ipv6.h>
 #include <linux/udp.h>
 #include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 #include <linux/wait.h>
 #include <net/checksum.h>
 #include <net/ipv6.h>
 #include <asm/timex.h>
 
 
-#define VERSION  "pktgen v2.62: Packet Generator for packet performance testing.\n"
+#define VERSION  "pktgen v2.63: Packet Generator for packet performance testing.\n"
 
 /* #define PG_DEBUG(a) a */
 #define PG_DEBUG(a) 
 #define T_REMDEV      (1<<3)  /* Remove all devs */
 
 /* Locks */
-#define   thread_lock()        spin_lock(&_thread_lock)
-#define   thread_unlock()      spin_unlock(&_thread_lock)
+#define   thread_lock()        down(&pktgen_sem)
+#define   thread_unlock()      up(&pktgen_sem)
 
 /* If lock -- can be removed after some work */
 #define   if_lock(t)           spin_lock(&(t->if_lock));
 
 /* Used to help with determining the pkts on receive */
 #define PKTGEN_MAGIC 0xbe9be955
-#define PG_PROC_DIR "net/pktgen"
+#define PG_PROC_DIR "pktgen"
+#define PGCTRL     "pgctrl"
+static struct proc_dir_entry *pg_proc_dir = NULL;
 
 #define MAX_CFLOWS  65536
 
@@ -202,11 +205,8 @@ struct pktgen_dev {
         * Try to keep frequent/infrequent used vars. separated.
         */
 
-        char ifname[32];
-        struct proc_dir_entry *proc_ent;
+        char ifname[IFNAMSIZ];
         char result[512];
-        /* proc file names */
-        char fname[80];
 
         struct pktgen_thread* pg_thread; /* the owner */
         struct pktgen_dev *next; /* Used for chaining in the thread's run-queue */
@@ -244,7 +244,7 @@ struct pktgen_dev {
         __u32 seq_num;
         
         int clone_skb; /* Use multiple SKBs during packet gen.  If this number
-                          * is greater than 1, then that many coppies of the same
+                          * is greater than 1, then that many copies of the same
                           * packet will be sent before a new packet is allocated.
                           * For instance, if you want to send 1024 identical packets
                           * before creating a new packet, set clone_skb to 1024.
@@ -330,8 +330,6 @@ struct pktgen_thread {
         struct pktgen_dev *if_list;           /* All device here */
         struct pktgen_thread* next;
         char name[32];
-        char fname[128]; /* name of proc file */
-        struct proc_dir_entry *proc_ent;
         char result[512];
         u32 max_before_softirq; /* We'll call do_softirq to prevent starvation. */
         
@@ -396,7 +394,7 @@ static inline s64 divremdi3(s64 x, s64 y, int type)
 
 /* End of hacks to deal with 64-bit math on x86 */
 
-/** Convert to miliseconds */
+/** Convert to milliseconds */
 static inline __u64 tv_to_ms(const struct timeval* tv) 
 {
         __u64 ms = tv->tv_usec / 1000;
@@ -425,7 +423,7 @@ static inline __u64 pg_div64(__u64 n, __u64 base)
 {
         __u64 tmp = n;
 /*
- * How do we know if the architectrure we are running on
+ * How do we know if the architecture we are running on
  * supports division with 64 bit base?
  * 
  */
@@ -473,16 +471,6 @@ static inline __u64 tv_diff(const struct timeval* a, const struct timeval* b)
 
 static char version[] __initdata = VERSION;
 
-static ssize_t proc_pgctrl_read(struct file* file, char __user * buf, size_t count, loff_t *ppos);
-static ssize_t proc_pgctrl_write(struct file* file, const char __user * buf, size_t count, loff_t *ppos);
-static int proc_if_read(char *buf , char **start, off_t offset, int len, int *eof, void *data);
-
-static int proc_thread_read(char *buf , char **start, off_t offset, int len, int *eof, void *data);
-static int proc_if_write(struct file *file, const char __user *user_buffer, unsigned long count, void *data);
-static int proc_thread_write(struct file *file, const char __user *user_buffer, unsigned long count, void *data);
-static int create_proc_dir(void);
-static int remove_proc_dir(void);
-
 static int pktgen_remove_device(struct pktgen_thread* t, struct pktgen_dev *i);
 static int pktgen_add_device(struct pktgen_thread* t, const char* ifname);
 static struct pktgen_thread* pktgen_find_thread(const char* name);
@@ -503,83 +491,41 @@ static int pg_delay_d = 0;
 static int pg_clone_skb_d = 0;
 static int debug = 0;
 
-static DEFINE_SPINLOCK(_thread_lock);
+static DECLARE_MUTEX(pktgen_sem);
 static struct pktgen_thread *pktgen_threads = NULL;
 
-static char module_fname[128];
-static struct proc_dir_entry *module_proc_ent = NULL;
-
 static struct notifier_block pktgen_notifier_block = {
        .notifier_call = pktgen_device_event,
 };
 
-static struct file_operations pktgen_fops = {
-        .read     = proc_pgctrl_read,
-        .write    = proc_pgctrl_write,
-       /*  .ioctl    = pktgen_ioctl, later maybe */
-};
-
 /*
  * /proc handling functions 
  *
  */
 
-static struct proc_dir_entry *pg_proc_dir = NULL;
-static int proc_pgctrl_read_eof=0;
-
-static ssize_t proc_pgctrl_read(struct file* file, char __user * buf,
-                                 size_t count, loff_t *ppos)
+static int pgctrl_show(struct seq_file *seq, void *v)
 { 
-       char data[200];
-       int len = 0;
-
-       if(proc_pgctrl_read_eof) {
-               proc_pgctrl_read_eof=0;
-               len = 0;
-               goto out;
-       }
-
-       sprintf(data, "%s", VERSION); 
-
-       len = strlen(data);
-
-       if(len > count) {
-               len =-EFAULT;
-               goto out;
-       }       
-
-       if (copy_to_user(buf, data, len)) {
-               len =-EFAULT;
-               goto out;
-       }  
-
-       *ppos += len;
-       proc_pgctrl_read_eof=1; /* EOF next call */
-
- out:
-       return len;
+       seq_puts(seq, VERSION);
+       return 0;
 }
 
-static ssize_t proc_pgctrl_write(struct file* file,const char __user * buf,
-                                size_t count, loff_t *ppos)
+static ssize_t pgctrl_write(struct file* file,const char __user * buf,
+                           size_t count, loff_t *ppos)
 {
-       char *data = NULL;
        int err = 0;
+       char data[128];
 
         if (!capable(CAP_NET_ADMIN)){
                 err = -EPERM;
                goto out;
         }
 
-       data = (void*)vmalloc ((unsigned int)count);
+       if (count > sizeof(data))
+               count = sizeof(data);
 
-       if(!data) {
-               err = -ENOMEM;
-               goto out;
-       }
        if (copy_from_user(data, buf, count)) {
-               err =-EFAULT;
-               goto out_free;
+               err = -EFAULT;
+               goto out;
        }  
        data[count-1] = 0; /* Make string */
 
@@ -594,31 +540,40 @@ static ssize_t proc_pgctrl_write(struct file* file,const char __user * buf,
 
        err = count;
 
- out_free:
-       vfree (data);
  out:
         return err;
 }
 
-static int proc_if_read(char *buf , char **start, off_t offset,
-                           int len, int *eof, void *data)
+static int pgctrl_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, pgctrl_show, PDE(inode)->data);
+}
+
+static struct file_operations pktgen_fops = {
+       .owner    = THIS_MODULE,
+       .open     = pgctrl_open,
+        .read     = seq_read,
+       .llseek   = seq_lseek,
+        .write    = pgctrl_write,
+       .release  = single_release,
+};
+
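
The read side now rides on the seq_file single_open() helpers: the show callback prints into a struct seq_file, the object pointer stashed in the proc entry's data field arrives via PDE(inode)->data, and the read/llseek/release boilerplate comes from the seq_file library. The generic wiring pattern, with placeholder names rather than pktgen symbols:

        static int my_show(struct seq_file *seq, void *v)
        {
                seq_printf(seq, "state: %d\n", 42);
                return 0;
        }

        static int my_open(struct inode *inode, struct file *file)
        {
                return single_open(file, my_show, PDE(inode)->data);
        }

        static struct file_operations my_fops = {
                .owner   = THIS_MODULE,
                .open    = my_open,
                .read    = seq_read,
                .llseek  = seq_lseek,
                .release = single_release,
        };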
+static int pktgen_if_show(struct seq_file *seq, void *v)
 {
-       char *p;
        int i;
-        struct pktgen_dev *pkt_dev = (struct pktgen_dev*)(data);
+        struct pktgen_dev *pkt_dev = seq->private;
         __u64 sa;
         __u64 stopped;
         __u64 now = getCurUs();
         
-       p = buf;
-       p += sprintf(p, "Params: count %llu  min_pkt_size: %u  max_pkt_size: %u\n",
-                    (unsigned long long) pkt_dev->count,
-                    pkt_dev->min_pkt_size, pkt_dev->max_pkt_size);
+       seq_printf(seq, "Params: count %llu  min_pkt_size: %u  max_pkt_size: %u\n",
+                  (unsigned long long) pkt_dev->count,
+                  pkt_dev->min_pkt_size, pkt_dev->max_pkt_size);
 
-       p += sprintf(p, "     frags: %d  delay: %u  clone_skb: %d  ifname: %s\n",
-                     pkt_dev->nfrags, 1000*pkt_dev->delay_us+pkt_dev->delay_ns, pkt_dev->clone_skb, pkt_dev->ifname);
+       seq_printf(seq, "     frags: %d  delay: %u  clone_skb: %d  ifname: %s\n",
+                  pkt_dev->nfrags, 1000*pkt_dev->delay_us+pkt_dev->delay_ns, pkt_dev->clone_skb, pkt_dev->ifname);
 
-       p += sprintf(p, "     flows: %u flowlen: %u\n", pkt_dev->cflows, pkt_dev->lflow);
+       seq_printf(seq, "     flows: %u flowlen: %u\n", pkt_dev->cflows, pkt_dev->lflow);
 
 
        if(pkt_dev->flags & F_IPV6) {
@@ -626,19 +581,19 @@ static int proc_if_read(char *buf , char **start, off_t offset,
                fmt_ip6(b1,  pkt_dev->in6_saddr.s6_addr);
                fmt_ip6(b2,  pkt_dev->min_in6_saddr.s6_addr);
                fmt_ip6(b3,  pkt_dev->max_in6_saddr.s6_addr);
-               p += sprintf(p, "     saddr: %s  min_saddr: %s  max_saddr: %s\n", b1, b2, b3);
+               seq_printf(seq, "     saddr: %s  min_saddr: %s  max_saddr: %s\n", b1, b2, b3);
 
                fmt_ip6(b1,  pkt_dev->in6_daddr.s6_addr);
                fmt_ip6(b2,  pkt_dev->min_in6_daddr.s6_addr);
                fmt_ip6(b3,  pkt_dev->max_in6_daddr.s6_addr);
-               p += sprintf(p, "     daddr: %s  min_daddr: %s  max_daddr: %s\n", b1, b2, b3);
+               seq_printf(seq, "     daddr: %s  min_daddr: %s  max_daddr: %s\n", b1, b2, b3);
 
        } 
        else 
-               p += sprintf(p, "     dst_min: %s  dst_max: %s\n     src_min: %s  src_max: %s\n",
-                     pkt_dev->dst_min, pkt_dev->dst_max, pkt_dev->src_min, pkt_dev->src_max);
+               seq_printf(seq,"     dst_min: %s  dst_max: %s\n     src_min: %s  src_max: %s\n",
+                          pkt_dev->dst_min, pkt_dev->dst_max, pkt_dev->src_min, pkt_dev->src_max);
 
-        p += sprintf(p, "     src_mac: ");
+       seq_puts(seq, "     src_mac: ");
 
        if ((pkt_dev->src_mac[0] == 0) && 
            (pkt_dev->src_mac[1] == 0) && 
@@ -648,89 +603,89 @@ static int proc_if_read(char *buf , char **start, off_t offset,
            (pkt_dev->src_mac[5] == 0)) 
 
                for (i = 0; i < 6; i++) 
-                       p += sprintf(p, "%02X%s", pkt_dev->odev->dev_addr[i], i == 5 ? "  " : ":");
+                       seq_printf(seq,  "%02X%s", pkt_dev->odev->dev_addr[i], i == 5 ? "  " : ":");
 
        else 
                for (i = 0; i < 6; i++) 
-                       p += sprintf(p, "%02X%s", pkt_dev->src_mac[i], i == 5 ? "  " : ":");
+                       seq_printf(seq,  "%02X%s", pkt_dev->src_mac[i], i == 5 ? "  " : ":");
 
-        p += sprintf(p, "dst_mac: ");
+        seq_printf(seq,  "dst_mac: ");
        for (i = 0; i < 6; i++) 
-               p += sprintf(p, "%02X%s", pkt_dev->dst_mac[i], i == 5 ? "\n" : ":");
+               seq_printf(seq,  "%02X%s", pkt_dev->dst_mac[i], i == 5 ? "\n" : ":");
 
-        p += sprintf(p, "     udp_src_min: %d  udp_src_max: %d  udp_dst_min: %d  udp_dst_max: %d\n",
-                     pkt_dev->udp_src_min, pkt_dev->udp_src_max, pkt_dev->udp_dst_min,
-                     pkt_dev->udp_dst_max);
+        seq_printf(seq,  "     udp_src_min: %d  udp_src_max: %d  udp_dst_min: %d  udp_dst_max: %d\n",
+                  pkt_dev->udp_src_min, pkt_dev->udp_src_max, pkt_dev->udp_dst_min,
+                  pkt_dev->udp_dst_max);
 
-        p += sprintf(p, "     src_mac_count: %d  dst_mac_count: %d \n     Flags: ",
-                     pkt_dev->src_mac_count, pkt_dev->dst_mac_count);
+        seq_printf(seq,  "     src_mac_count: %d  dst_mac_count: %d \n     Flags: ",
+                  pkt_dev->src_mac_count, pkt_dev->dst_mac_count);
 
 
         if (pkt_dev->flags &  F_IPV6) 
-                p += sprintf(p, "IPV6  ");
+                seq_printf(seq,  "IPV6  ");
 
         if (pkt_dev->flags &  F_IPSRC_RND) 
-                p += sprintf(p, "IPSRC_RND  ");
+                seq_printf(seq,  "IPSRC_RND  ");
 
         if (pkt_dev->flags & F_IPDST_RND) 
-                p += sprintf(p, "IPDST_RND  ");
+                seq_printf(seq,  "IPDST_RND  ");
         
         if (pkt_dev->flags & F_TXSIZE_RND) 
-                p += sprintf(p, "TXSIZE_RND  ");
+                seq_printf(seq,  "TXSIZE_RND  ");
         
         if (pkt_dev->flags & F_UDPSRC_RND) 
-                p += sprintf(p, "UDPSRC_RND  ");
+                seq_printf(seq,  "UDPSRC_RND  ");
         
         if (pkt_dev->flags & F_UDPDST_RND) 
-                p += sprintf(p, "UDPDST_RND  ");
+                seq_printf(seq,  "UDPDST_RND  ");
         
         if (pkt_dev->flags & F_MACSRC_RND) 
-                p += sprintf(p, "MACSRC_RND  ");
+                seq_printf(seq,  "MACSRC_RND  ");
         
         if (pkt_dev->flags & F_MACDST_RND) 
-                p += sprintf(p, "MACDST_RND  ");
+                seq_printf(seq,  "MACDST_RND  ");
 
         
-        p += sprintf(p, "\n");
+        seq_puts(seq,  "\n");
         
         sa = pkt_dev->started_at;
         stopped = pkt_dev->stopped_at;
         if (pkt_dev->running) 
                 stopped = now; /* not really stopped, more like last-running-at */
         
-        p += sprintf(p, "Current:\n     pkts-sofar: %llu  errors: %llu\n     started: %lluus  stopped: %lluus idle: %lluus\n",
-                    (unsigned long long) pkt_dev->sofar,
-                    (unsigned long long) pkt_dev->errors,
-                    (unsigned long long) sa,
-                    (unsigned long long) stopped, 
-                    (unsigned long long) pkt_dev->idle_acc);
+        seq_printf(seq,  "Current:\n     pkts-sofar: %llu  errors: %llu\n     started: %lluus  stopped: %lluus idle: %lluus\n",
+                  (unsigned long long) pkt_dev->sofar,
+                  (unsigned long long) pkt_dev->errors,
+                  (unsigned long long) sa,
+                  (unsigned long long) stopped,
+                  (unsigned long long) pkt_dev->idle_acc);
 
-        p += sprintf(p, "     seq_num: %d  cur_dst_mac_offset: %d  cur_src_mac_offset: %d\n",
-                     pkt_dev->seq_num, pkt_dev->cur_dst_mac_offset, pkt_dev->cur_src_mac_offset);
+        seq_printf(seq,  "     seq_num: %d  cur_dst_mac_offset: %d  cur_src_mac_offset: %d\n",
+                  pkt_dev->seq_num, pkt_dev->cur_dst_mac_offset,
+                  pkt_dev->cur_src_mac_offset);
 
        if(pkt_dev->flags & F_IPV6) {
                char b1[128], b2[128];
                fmt_ip6(b1,  pkt_dev->cur_in6_daddr.s6_addr);
                fmt_ip6(b2,  pkt_dev->cur_in6_saddr.s6_addr);
-               p += sprintf(p, "     cur_saddr: %s  cur_daddr: %s\n", b2, b1);
+               seq_printf(seq,  "     cur_saddr: %s  cur_daddr: %s\n", b2, b1);
        } 
        else 
-               p += sprintf(p, "     cur_saddr: 0x%x  cur_daddr: 0x%x\n",
-                     pkt_dev->cur_saddr, pkt_dev->cur_daddr);
+               seq_printf(seq,  "     cur_saddr: 0x%x  cur_daddr: 0x%x\n",
+                          pkt_dev->cur_saddr, pkt_dev->cur_daddr);
 
 
-       p += sprintf(p, "     cur_udp_dst: %d  cur_udp_src: %d\n",
-                     pkt_dev->cur_udp_dst, pkt_dev->cur_udp_src);
+       seq_printf(seq,  "     cur_udp_dst: %d  cur_udp_src: %d\n",
+                  pkt_dev->cur_udp_dst, pkt_dev->cur_udp_src);
 
-       p += sprintf(p, "     flows: %u\n", pkt_dev->nflows);
+       seq_printf(seq,  "     flows: %u\n", pkt_dev->nflows);
 
        if (pkt_dev->result[0])
-               p += sprintf(p, "Result: %s\n", pkt_dev->result);
+               seq_printf(seq,  "Result: %s\n", pkt_dev->result);
        else
-               p += sprintf(p, "Result: Idle\n");
-       *eof = 1;
+               seq_printf(seq,  "Result: Idle\n");
 
-       return p - buf;
+       return 0;
 }
 
 
@@ -802,13 +757,14 @@ done_str:
        return i;
 }
 
-static int proc_if_write(struct file *file, const char __user *user_buffer,
-                            unsigned long count, void *data)
+static ssize_t pktgen_if_write(struct file *file, const char __user *user_buffer,
+                              size_t count, loff_t *offset)
 {
+       struct seq_file *seq = (struct seq_file *) file->private_data;
+        struct pktgen_dev *pkt_dev = seq->private;
        int i = 0, max, len;
        char name[16], valstr[32];
        unsigned long value = 0;
-        struct pktgen_dev *pkt_dev = (struct pktgen_dev*)(data);
         char* pg_result = NULL;
         int tmp = 0;
        char buf[128];
@@ -849,7 +805,8 @@ static int proc_if_write(struct file *file, const char __user *user_buffer,
                 if (copy_from_user(tb, user_buffer, count))
                        return -EFAULT;
                 tb[count] = 0;
-               printk("pktgen: %s,%lu  buffer -:%s:-\n", name, count, tb);
+               printk("pktgen: %s,%lu  buffer -:%s:-\n", name,
+                      (unsigned long) count, tb);
         }
 
        if (!strcmp(name, "min_pkt_size")) {
@@ -1335,92 +1292,98 @@ static int proc_if_write(struct file *file, const char __user *user_buffer,
        return -EINVAL;
 }
 
-static int proc_thread_read(char *buf , char **start, off_t offset,
-                               int len, int *eof, void *data)
+static int pktgen_if_open(struct inode *inode, struct file *file)
 {
-       char *p;
-        struct pktgen_thread *t = (struct pktgen_thread*)(data);
-        struct pktgen_dev *pkt_dev = NULL;
+       return single_open(file, pktgen_if_show, PDE(inode)->data);
+}
 
+static struct file_operations pktgen_if_fops = {
+       .owner    = THIS_MODULE,
+       .open     = pktgen_if_open,
+        .read     = seq_read,
+       .llseek   = seq_lseek,
+        .write    = pktgen_if_write,
+       .release  = single_release,
+};
 
-        if (!t) {
-                printk("pktgen: ERROR: could not find thread in proc_thread_read\n");
-                return -EINVAL;
-        }
+static int pktgen_thread_show(struct seq_file *seq, void *v)
+{
+        struct pktgen_thread *t = seq->private;
+        struct pktgen_dev *pkt_dev = NULL;
+
+       BUG_ON(!t);
 
-       p = buf;
-       p += sprintf(p, "Name: %s  max_before_softirq: %d\n",
+       seq_printf(seq, "Name: %s  max_before_softirq: %d\n",
                      t->name, t->max_before_softirq);
 
-        p += sprintf(p, "Running: ");
+        seq_printf(seq, "Running: ");
         
         if_lock(t);
         for(pkt_dev = t->if_list;pkt_dev; pkt_dev = pkt_dev->next) 
                if(pkt_dev->running)
-                       p += sprintf(p, "%s ", pkt_dev->ifname);
+                       seq_printf(seq, "%s ", pkt_dev->ifname);
         
-        p += sprintf(p, "\nStopped: ");
+        seq_printf(seq, "\nStopped: ");
 
         for(pkt_dev = t->if_list;pkt_dev; pkt_dev = pkt_dev->next) 
                if(!pkt_dev->running)
-                       p += sprintf(p, "%s ", pkt_dev->ifname);
+                       seq_printf(seq, "%s ", pkt_dev->ifname);
 
        if (t->result[0])
-               p += sprintf(p, "\nResult: %s\n", t->result);
+               seq_printf(seq, "\nResult: %s\n", t->result);
        else
-               p += sprintf(p, "\nResult: NA\n");
-
-       *eof = 1;
+               seq_printf(seq, "\nResult: NA\n");
 
         if_unlock(t);
 
-       return p - buf;
+       return 0;
 }
 
-static int proc_thread_write(struct file *file, const char __user *user_buffer,
-                                unsigned long count, void *data)
+static ssize_t pktgen_thread_write(struct file *file,
+                                  const char __user *user_buffer,
+                                  size_t count, loff_t *offset)
 {
+       struct seq_file *seq = (struct seq_file *) file->private_data;
+        struct pktgen_thread *t = seq->private;
        int i = 0, max, len, ret;
        char name[40];
-        struct pktgen_thread *t;
         char *pg_result;
         unsigned long value = 0;
-        
+
        if (count < 1) {
                //      sprintf(pg_result, "Wrong command format");
                return -EINVAL;
        }
-  
+
        max = count - i;
         len = count_trail_chars(&user_buffer[i], max);
-        if (len < 0) 
-               return len; 
-     
+        if (len < 0)
+               return len;
+
        i += len;
-  
+
        /* Read variable name */
 
        len = strn_len(&user_buffer[i], sizeof(name) - 1);
-        if (len < 0)  
-               return len; 
+        if (len < 0)
+               return len;
        
        memset(name, 0, sizeof(name));
        if (copy_from_user(name, &user_buffer[i], len))
                return -EFAULT;
        i += len;
-  
+
        max = count -i;
        len = count_trail_chars(&user_buffer[i], max);
-        if (len < 0)  
-               return len; 
-       
+        if (len < 0)
+               return len;
+
        i += len;
 
-       if (debug) 
-               printk("pktgen: t=%s, count=%lu\n", name, count);
-        
+       if (debug)
+               printk("pktgen: t=%s, count=%lu\n", name,
+                      (unsigned long) count);
 
-        t = (struct pktgen_thread*)(data);
        if(!t) {
                printk("pktgen: ERROR: No thread\n");
                ret = -EINVAL;
@@ -1474,21 +1437,19 @@ static int proc_thread_write(struct file *file, const char __user *user_buffer,
        return ret;
 }
 
-static int create_proc_dir(void)
+static int pktgen_thread_open(struct inode *inode, struct file *file)
 {
-       pg_proc_dir = proc_mkdir(PG_PROC_DIR, NULL);
-        
-        if (!pg_proc_dir) 
-                return -ENODEV;
-        
-        return 0;
+       return single_open(file, pktgen_thread_show, PDE(inode)->data);
 }
 
-static int remove_proc_dir(void)
-{
-        remove_proc_entry(PG_PROC_DIR, NULL);
-        return 0;
-}
+static struct file_operations pktgen_thread_fops = {
+       .owner    = THIS_MODULE,
+       .open     = pktgen_thread_open,
+        .read     = seq_read,
+       .llseek   = seq_lseek,
+        .write    = pktgen_thread_write,
+       .release  = single_release,
+};
 
 /* Think find or remove for NN */
 static struct pktgen_dev *__pktgen_NN_threads(const char* ifname, int remove) 
@@ -1702,7 +1663,7 @@ static void spin(struct pktgen_dev *pkt_dev, __u64 spin_until_us)
        start = now = getCurUs();
        printk(KERN_INFO "sleeping for %d\n", (int)(spin_until_us - now));
        while (now < spin_until_us) {
-               /* TODO: optimise sleeping behavior */
+               /* TODO: optimize sleeping behavior */
                if (spin_until_us - now > jiffies_to_usecs(1)+1)
                        schedule_timeout_interruptible(1);
                else if (spin_until_us - now > 100) {
@@ -2361,7 +2322,7 @@ static void pktgen_stop_all_threads_ifs(void)
                pktgen_stop(t);
                t = t->next;
        }
-       thread_unlock();
+       thread_unlock();
 }
 
 static int thread_is_running(struct pktgen_thread *t )
@@ -2552,10 +2513,9 @@ static void pktgen_rem_thread(struct pktgen_thread *t)
 
        struct pktgen_thread *tmp = pktgen_threads;
 
-        if (strlen(t->fname))
-                remove_proc_entry(t->fname, NULL);
+       remove_proc_entry(t->name, pg_proc_dir);
 
-       thread_lock();
+       thread_lock();
 
        if (tmp == t)
                pktgen_threads = tmp->next;
@@ -2825,7 +2785,7 @@ static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t, const char* i
         if_lock(t);
 
         for(pkt_dev=t->if_list; pkt_dev; pkt_dev = pkt_dev->next ) {
-                if (strcmp(pkt_dev->ifname, ifname) == 0) {
+                if (strncmp(pkt_dev->ifname, ifname, IFNAMSIZ) == 0) {
                         break;
                 }
         }
@@ -2864,74 +2824,70 @@ static int add_dev_to_thread(struct pktgen_thread *t, struct pktgen_dev *pkt_dev
 static int pktgen_add_device(struct pktgen_thread *t, const char* ifname) 
 {
         struct pktgen_dev *pkt_dev;
+       struct proc_dir_entry *pe;
        
        /* We don't allow a device to be on several threads */
 
-       if( (pkt_dev = __pktgen_NN_threads(ifname, FIND)) == NULL) {
-                                                  
-               pkt_dev = kmalloc(sizeof(struct pktgen_dev), GFP_KERNEL);
-                if (!pkt_dev) 
-                        return -ENOMEM;
+       pkt_dev = __pktgen_NN_threads(ifname, FIND);
+       if (pkt_dev) {
+                printk("pktgen: ERROR: interface already used.\n");
+                return -EBUSY;
+        }
 
-                memset(pkt_dev, 0, sizeof(struct pktgen_dev));
+       pkt_dev = kzalloc(sizeof(struct pktgen_dev), GFP_KERNEL);
+       if (!pkt_dev)
+               return -ENOMEM;
 
-               pkt_dev->flows = vmalloc(MAX_CFLOWS*sizeof(struct flow_state));
-               if (pkt_dev->flows == NULL) {
-                       kfree(pkt_dev);
-                       return -ENOMEM;
-               }
-               memset(pkt_dev->flows, 0, MAX_CFLOWS*sizeof(struct flow_state));
-
-               pkt_dev->min_pkt_size = ETH_ZLEN;
-                pkt_dev->max_pkt_size = ETH_ZLEN;
-                pkt_dev->nfrags = 0;
-                pkt_dev->clone_skb = pg_clone_skb_d;
-                pkt_dev->delay_us = pg_delay_d / 1000;
-                pkt_dev->delay_ns = pg_delay_d % 1000;
-                pkt_dev->count = pg_count_d;
-                pkt_dev->sofar = 0;
-                pkt_dev->udp_src_min = 9; /* sink port */
-                pkt_dev->udp_src_max = 9;
-                pkt_dev->udp_dst_min = 9;
-                pkt_dev->udp_dst_max = 9;
-
-                strncpy(pkt_dev->ifname, ifname, 31);
-                sprintf(pkt_dev->fname, "%s/%s", PG_PROC_DIR, ifname);
-
-                if (! pktgen_setup_dev(pkt_dev)) {
-                        printk("pktgen: ERROR: pktgen_setup_dev failed.\n");
-                       if (pkt_dev->flows)
-                               vfree(pkt_dev->flows);
-                        kfree(pkt_dev);
-                        return -ENODEV;
-                }
+       pkt_dev->flows = vmalloc(MAX_CFLOWS*sizeof(struct flow_state));
+       if (pkt_dev->flows == NULL) {
+               kfree(pkt_dev);
+               return -ENOMEM;
+       }
+       memset(pkt_dev->flows, 0, MAX_CFLOWS*sizeof(struct flow_state));
 
-                pkt_dev->proc_ent = create_proc_entry(pkt_dev->fname, 0600, NULL);
-                if (!pkt_dev->proc_ent) {
-                        printk("pktgen: cannot create %s procfs entry.\n", pkt_dev->fname);
-                       if (pkt_dev->flows)
-                               vfree(pkt_dev->flows);
-                        kfree(pkt_dev);
-                        return -EINVAL;
-                }
-                pkt_dev->proc_ent->read_proc = proc_if_read;
-                pkt_dev->proc_ent->write_proc = proc_if_write;
-                pkt_dev->proc_ent->data = (void*)(pkt_dev);
-               pkt_dev->proc_ent->owner = THIS_MODULE;
+       pkt_dev->min_pkt_size = ETH_ZLEN;
+       pkt_dev->max_pkt_size = ETH_ZLEN;
+       pkt_dev->nfrags = 0;
+       pkt_dev->clone_skb = pg_clone_skb_d;
+       pkt_dev->delay_us = pg_delay_d / 1000;
+       pkt_dev->delay_ns = pg_delay_d % 1000;
+       pkt_dev->count = pg_count_d;
+       pkt_dev->sofar = 0;
+       pkt_dev->udp_src_min = 9; /* sink port */
+       pkt_dev->udp_src_max = 9;
+       pkt_dev->udp_dst_min = 9;
+       pkt_dev->udp_dst_max = 9;
+
+       strncpy(pkt_dev->ifname, ifname, IFNAMSIZ);
+
+       if (! pktgen_setup_dev(pkt_dev)) {
+               printk("pktgen: ERROR: pktgen_setup_dev failed.\n");
+               if (pkt_dev->flows)
+                       vfree(pkt_dev->flows);
+               kfree(pkt_dev);
+               return -ENODEV;
+       }
+
+       pe = create_proc_entry(ifname, 0600, pg_proc_dir);
+       if (!pe) {
+               printk("pktgen: cannot create %s/%s procfs entry.\n",
+                      PG_PROC_DIR, ifname);
+               if (pkt_dev->flows)
+                       vfree(pkt_dev->flows);
+               kfree(pkt_dev);
+               return -EINVAL;
+       }
+       pe->proc_fops = &pktgen_if_fops;
+       pe->data = pkt_dev;
 
-                return add_dev_to_thread(t, pkt_dev);
-        }
-        else {
-                printk("pktgen: ERROR: interface already used.\n");
-                return -EBUSY;
-        }
+       return add_dev_to_thread(t, pkt_dev);
 }
 
 static struct pktgen_thread *pktgen_find_thread(const char* name) 
 {
         struct pktgen_thread *t = NULL;
 
-       thread_lock();
+       thread_lock();
 
         t = pktgen_threads;
         while (t) {
@@ -2947,6 +2903,7 @@ static struct pktgen_thread *pktgen_find_thread(const char* name)
 static int pktgen_create_thread(const char* name, int cpu) 
 {
         struct pktgen_thread *t = NULL;
+       struct proc_dir_entry *pe;
 
         if (strlen(name) > 31) {
                 printk("pktgen: ERROR:  Thread name cannot be more than 31 characters.\n");
@@ -2958,28 +2915,26 @@ static int pktgen_create_thread(const char* name, int cpu)
                 return -EINVAL;
         }
 
-        t = (struct pktgen_thread*)(kmalloc(sizeof(struct pktgen_thread), GFP_KERNEL));
+        t = kzalloc(sizeof(struct pktgen_thread), GFP_KERNEL);
         if (!t) {
                 printk("pktgen: ERROR: out of memory, can't create new thread.\n");
                 return -ENOMEM;
         }
 
-        memset(t, 0, sizeof(struct pktgen_thread));
         strcpy(t->name, name);
         spin_lock_init(&t->if_lock);
        t->cpu = cpu;
         
-        sprintf(t->fname, "%s/%s", PG_PROC_DIR, t->name);
-        t->proc_ent = create_proc_entry(t->fname, 0600, NULL);
-        if (!t->proc_ent) {
-                printk("pktgen: cannot create %s procfs entry.\n", t->fname);
+        pe = create_proc_entry(t->name, 0600, pg_proc_dir);
+        if (!pe) {
+                printk("pktgen: cannot create %s/%s procfs entry.\n",
+                      PG_PROC_DIR, t->name);
                 kfree(t);
                 return -EINVAL;
         }
-        t->proc_ent->read_proc = proc_thread_read;
-        t->proc_ent->write_proc = proc_thread_write;
-        t->proc_ent->data = (void*)(t);
-        t->proc_ent->owner = THIS_MODULE;
+
+       pe->proc_fops = &pktgen_thread_fops;
+       pe->data = t;
 
         t->next = pktgen_threads;
         pktgen_threads = t;
@@ -3034,8 +2989,7 @@ static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *pkt_
 
         /* Clean up proc file system */
 
-        if (strlen(pkt_dev->fname)) 
-                remove_proc_entry(pkt_dev->fname, NULL);
+       remove_proc_entry(pkt_dev->ifname, pg_proc_dir);
 
        if (pkt_dev->flows)
                vfree(pkt_dev->flows);
@@ -3046,31 +3000,31 @@ static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *pkt_
 static int __init pg_init(void) 
 {
        int cpu;
-       printk(version);
+       struct proc_dir_entry *pe;
 
-        module_fname[0] = 0;
+       printk(version);
 
-       create_proc_dir();
+       pg_proc_dir = proc_mkdir(PG_PROC_DIR, proc_net);
+       if (!pg_proc_dir)
+               return -ENODEV;
+       pg_proc_dir->owner = THIS_MODULE;
 
-        sprintf(module_fname, "%s/pgctrl", PG_PROC_DIR);
-        module_proc_ent = create_proc_entry(module_fname, 0600, NULL);
-        if (!module_proc_ent) {
-                printk("pktgen: ERROR: cannot create %s procfs entry.\n", module_fname);
+       pe = create_proc_entry(PGCTRL, 0600, pg_proc_dir);
+        if (pe == NULL) {
+               printk("pktgen: ERROR: cannot create %s procfs entry.\n", PGCTRL);
+               proc_net_remove(PG_PROC_DIR);
                 return -EINVAL;
         }
 
-        module_proc_ent->proc_fops =  &pktgen_fops;
-        module_proc_ent->data = NULL;
+        pe->proc_fops = &pktgen_fops;
+        pe->data      = NULL;
 
        /* Register us to receive netdevice events */
        register_netdevice_notifier(&pktgen_notifier_block);
         
-       for (cpu = 0; cpu < NR_CPUS ; cpu++) {
+       for_each_online_cpu(cpu) {
                char buf[30];
 
-               if (!cpu_online(cpu))
-                       continue;
-
                 sprintf(buf, "kpktgend_%i", cpu);
                 pktgen_create_thread(buf, cpu);
         }
@@ -3095,10 +3049,8 @@ static void __exit pg_cleanup(void)
        unregister_netdevice_notifier(&pktgen_notifier_block);
 
         /* Clean up proc file system */
-
-        remove_proc_entry(module_fname, NULL);
-        
-       remove_proc_dir();
+       remove_proc_entry(PGCTRL, pg_proc_dir);
+       proc_net_remove(PG_PROC_DIR);
 }
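
All pktgen control files now hang off a single /proc/net/pktgen directory created in pg_init() and removed in pg_cleanup(), instead of each object tracking its own pathname. Roughly, on a two-CPU box with one device added (the device name is illustrative):

        /*
         *   /proc/net/pktgen/pgctrl       ->  pktgen_fops         (global start/stop)
         *   /proc/net/pktgen/kpktgend_0   ->  pktgen_thread_fops  (per-CPU thread)
         *   /proc/net/pktgen/kpktgend_1   ->  pktgen_thread_fops
         *   /proc/net/pktgen/eth0         ->  pktgen_if_fops      (per-device config)
         */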
 
 
index 02cd4cde2112a6456e1da188873b708af9bc1338..ef9d46b91eb9200fa3eaf9fbc428808fcf1663c4 100644 (file)
@@ -122,6 +122,8 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
  *     __alloc_skb     -       allocate a network buffer
  *     @size: size to allocate
  *     @gfp_mask: allocation mask
+ *     @fclone: allocate from fclone cache instead of head cache
+ *             and allocate a cloned (child) skb
  *
  *     Allocate a new &sk_buff. The returned buffer has no headroom and a
  *     tail room of size bytes. The object has a reference count of one.
index 1c52fe809edad0a79038748f589c915ec96f6a6f..9602ceb3bac9a2d33eec5ce1bc0cb8a55340203b 100644 (file)
@@ -940,7 +940,7 @@ static struct sk_buff *sock_alloc_send_pskb(struct sock *sk,
                                            int noblock, int *errcode)
 {
        struct sk_buff *skb;
-       unsigned int gfp_mask;
+       gfp_t gfp_mask;
        long timeo;
        int err;
 
index 29250749f16f86044d1f5fec4a592271fa12f856..d59f86f7ceabbdd21a66a3910fd1f2ac16e52a35 100644 (file)
@@ -495,7 +495,7 @@ void dccp_send_close(struct sock *sk, const int active)
 {
        struct dccp_sock *dp = dccp_sk(sk);
        struct sk_buff *skb;
-       const unsigned int prio = active ? GFP_KERNEL : GFP_ATOMIC;
+       const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC;
 
        skb = alloc_skb(sk->sk_prot->max_header, prio);
        if (skb == NULL)
index 1186dc44cdff2ad8d459167e16e6103cec017a24..3f25cadccddd4f048cdbf36d2fa5e92a8a176116 100644 (file)
@@ -719,22 +719,9 @@ static int dn_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
        if (saddr->sdn_flags & ~SDF_WILD)
                return -EINVAL;
 
-#if 1
        if (!capable(CAP_NET_BIND_SERVICE) && (saddr->sdn_objnum ||
            (saddr->sdn_flags & SDF_WILD)))
                return -EACCES;
-#else
-       /*
-        * Maybe put the default actions in the default security ops for
-        * dn_prot_sock ? Would be nice if the capable call would go there
-        * too.
-        */
-       if (security_dn_prot_sock(saddr) &&
-           !capable(CAP_NET_BIND_SERVICE) || 
-           saddr->sdn_objnum || (saddr->sdn_flags & SDF_WILD))
-               return -EACCES;
-#endif
-
 
        if (!(saddr->sdn_flags & SDF_WILD)) {
                if (dn_ntohs(saddr->sdn_nodeaddrl)) {
index a6ccac5baea8863b77b967d2fe61358ea77cb158..f988417121da76646ad995ce3a556c09349b0338 100644 (file)
@@ -7,5 +7,6 @@ ieee80211-objs := \
        ieee80211_module.o \
        ieee80211_tx.o \
        ieee80211_rx.o \
-       ieee80211_wx.o
+       ieee80211_wx.o \
+       ieee80211_geo.o
 
index 61a9d92e455b67e5dc9651470780a32120e57d7a..f3b6aa3be63878a08708cf5542f654117882527e 100644 (file)
@@ -41,6 +41,12 @@ void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee, int force)
 {
        struct list_head *ptr, *n;
        struct ieee80211_crypt_data *entry;
+       unsigned long flags;
+
+       spin_lock_irqsave(&ieee->lock, flags);
+
+       if (list_empty(&ieee->crypt_deinit_list))
+               goto unlock;
 
        for (ptr = ieee->crypt_deinit_list.next, n = ptr->next;
             ptr != &ieee->crypt_deinit_list; ptr = n, n = ptr->next) {
@@ -57,6 +63,18 @@ void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee, int force)
                }
                kfree(entry);
        }
+      unlock:
+       spin_unlock_irqrestore(&ieee->lock, flags);
+}
+
+/* After this, crypt_deinit_list won't accept new members */
+void ieee80211_crypt_quiescing(struct ieee80211_device *ieee)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&ieee->lock, flags);
+       ieee->crypt_quiesced = 1;
+       spin_unlock_irqrestore(&ieee->lock, flags);
 }
 
 void ieee80211_crypt_deinit_handler(unsigned long data)
@@ -64,16 +82,16 @@ void ieee80211_crypt_deinit_handler(unsigned long data)
        struct ieee80211_device *ieee = (struct ieee80211_device *)data;
        unsigned long flags;
 
-       spin_lock_irqsave(&ieee->lock, flags);
        ieee80211_crypt_deinit_entries(ieee, 0);
-       if (!list_empty(&ieee->crypt_deinit_list)) {
+
+       spin_lock_irqsave(&ieee->lock, flags);
+       if (!list_empty(&ieee->crypt_deinit_list) && !ieee->crypt_quiesced) {
                printk(KERN_DEBUG "%s: entries remaining in delayed crypt "
                       "deletion list\n", ieee->dev->name);
                ieee->crypt_deinit_timer.expires = jiffies + HZ;
                add_timer(&ieee->crypt_deinit_timer);
        }
        spin_unlock_irqrestore(&ieee->lock, flags);
-
 }
 
 void ieee80211_crypt_delayed_deinit(struct ieee80211_device *ieee,
@@ -93,10 +111,12 @@ void ieee80211_crypt_delayed_deinit(struct ieee80211_device *ieee,
         * locking. */
 
        spin_lock_irqsave(&ieee->lock, flags);
-       list_add(&tmp->list, &ieee->crypt_deinit_list);
-       if (!timer_pending(&ieee->crypt_deinit_timer)) {
-               ieee->crypt_deinit_timer.expires = jiffies + HZ;
-               add_timer(&ieee->crypt_deinit_timer);
+       if (!ieee->crypt_quiesced) {
+               list_add(&tmp->list, &ieee->crypt_deinit_list);
+               if (!timer_pending(&ieee->crypt_deinit_timer)) {
+                       ieee->crypt_deinit_timer.expires = jiffies + HZ;
+                       add_timer(&ieee->crypt_deinit_timer);
+               }
        }
        spin_unlock_irqrestore(&ieee->lock, flags);
 }
@@ -191,18 +211,18 @@ static void ieee80211_crypt_null_deinit(void *priv)
 }
 
 static struct ieee80211_crypto_ops ieee80211_crypt_null = {
-       .name                   = "NULL",
-       .init                   = ieee80211_crypt_null_init,
-       .deinit                 = ieee80211_crypt_null_deinit,
-       .encrypt_mpdu           = NULL,
-       .decrypt_mpdu           = NULL,
-       .encrypt_msdu           = NULL,
-       .decrypt_msdu           = NULL,
-       .set_key                = NULL,
-       .get_key                = NULL,
-       .extra_prefix_len       = 0,
-       .extra_postfix_len      = 0,
-       .owner                  = THIS_MODULE,
+       .name = "NULL",
+       .init = ieee80211_crypt_null_init,
+       .deinit = ieee80211_crypt_null_deinit,
+       .encrypt_mpdu = NULL,
+       .decrypt_mpdu = NULL,
+       .encrypt_msdu = NULL,
+       .decrypt_msdu = NULL,
+       .set_key = NULL,
+       .get_key = NULL,
+       .extra_mpdu_prefix_len = 0,
+       .extra_mpdu_postfix_len = 0,
+       .owner = THIS_MODULE,
 };
 
 static int __init ieee80211_crypto_init(void)
@@ -249,6 +269,7 @@ static void __exit ieee80211_crypto_deinit(void)
 EXPORT_SYMBOL(ieee80211_crypt_deinit_entries);
 EXPORT_SYMBOL(ieee80211_crypt_deinit_handler);
 EXPORT_SYMBOL(ieee80211_crypt_delayed_deinit);
+EXPORT_SYMBOL(ieee80211_crypt_quiescing);
 
 EXPORT_SYMBOL(ieee80211_register_crypto_ops);
 EXPORT_SYMBOL(ieee80211_unregister_crypto_ops);
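
ieee80211_crypt_quiescing() marks the device so the delayed-deinit list stops accepting new entries and the cleanup timer stops re-arming itself, which matters once module teardown has begun. A hypothetical driver shutdown path, using only the helpers visible above (the exact ordering is an assumption, not taken from an in-tree caller):

        ieee80211_crypt_quiescing(ieee);               /* no new delayed-deinit entries */
        del_timer_sync(&ieee->crypt_deinit_timer);     /* stop the re-arming timer */
        ieee80211_crypt_deinit_entries(ieee, 1);       /* force-free anything still queued */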
index 8fc13f45971e3b895ffa508f4750319a5a6e641c..05a853c13012836fe518cf965049e84ce1f59b0a 100644 (file)
@@ -119,7 +119,7 @@ static inline void xor_block(u8 * b, u8 * a, size_t len)
 }
 
 static void ccmp_init_blocks(struct crypto_tfm *tfm,
-                            struct ieee80211_hdr *hdr,
+                            struct ieee80211_hdr_4addr *hdr,
                             u8 * pn, size_t dlen, u8 * b0, u8 * auth, u8 * s0)
 {
        u8 *pos, qc = 0;
@@ -191,26 +191,18 @@ static void ccmp_init_blocks(struct crypto_tfm *tfm,
        ieee80211_ccmp_aes_encrypt(tfm, b0, s0);
 }
 
-static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
+static int ieee80211_ccmp_hdr(struct sk_buff *skb, int hdr_len, void *priv)
 {
        struct ieee80211_ccmp_data *key = priv;
-       int data_len, i, blocks, last, len;
-       u8 *pos, *mic;
-       struct ieee80211_hdr *hdr;
-       u8 *b0 = key->tx_b0;
-       u8 *b = key->tx_b;
-       u8 *e = key->tx_e;
-       u8 *s0 = key->tx_s0;
+       int i;
+       u8 *pos;
 
-       if (skb_headroom(skb) < CCMP_HDR_LEN ||
-           skb_tailroom(skb) < CCMP_MIC_LEN || skb->len < hdr_len)
+       if (skb_headroom(skb) < CCMP_HDR_LEN || skb->len < hdr_len)
                return -1;
 
-       data_len = skb->len - hdr_len;
        pos = skb_push(skb, CCMP_HDR_LEN);
        memmove(pos, pos + CCMP_HDR_LEN, hdr_len);
        pos += hdr_len;
-       mic = skb_put(skb, CCMP_MIC_LEN);
 
        i = CCMP_PN_LEN - 1;
        while (i >= 0) {
@@ -229,7 +221,31 @@ static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
        *pos++ = key->tx_pn[1];
        *pos++ = key->tx_pn[0];
 
-       hdr = (struct ieee80211_hdr *)skb->data;
+       return CCMP_HDR_LEN;
+}
+
+static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
+{
+       struct ieee80211_ccmp_data *key = priv;
+       int data_len, i, blocks, last, len;
+       u8 *pos, *mic;
+       struct ieee80211_hdr_4addr *hdr;
+       u8 *b0 = key->tx_b0;
+       u8 *b = key->tx_b;
+       u8 *e = key->tx_e;
+       u8 *s0 = key->tx_s0;
+
+       if (skb_tailroom(skb) < CCMP_MIC_LEN || skb->len < hdr_len)
+               return -1;
+
+       data_len = skb->len - hdr_len;
+       len = ieee80211_ccmp_hdr(skb, hdr_len, priv);
+       if (len < 0)
+               return -1;
+
+       pos = skb->data + hdr_len + CCMP_HDR_LEN;
+       mic = skb_put(skb, CCMP_MIC_LEN);
+       hdr = (struct ieee80211_hdr_4addr *)skb->data;
        ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len, b0, b, s0);
 
        blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN;
@@ -258,7 +274,7 @@ static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
 {
        struct ieee80211_ccmp_data *key = priv;
        u8 keyidx, *pos;
-       struct ieee80211_hdr *hdr;
+       struct ieee80211_hdr_4addr *hdr;
        u8 *b0 = key->rx_b0;
        u8 *b = key->rx_b;
        u8 *a = key->rx_a;
@@ -272,7 +288,7 @@ static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
                return -1;
        }
 
-       hdr = (struct ieee80211_hdr *)skb->data;
+       hdr = (struct ieee80211_hdr_4addr *)skb->data;
        pos = skb->data + hdr_len;
        keyidx = pos[3];
        if (!(keyidx & (1 << 5))) {
@@ -426,19 +442,20 @@ static char *ieee80211_ccmp_print_stats(char *p, void *priv)
 }
 
 static struct ieee80211_crypto_ops ieee80211_crypt_ccmp = {
-       .name                   = "CCMP",
-       .init                   = ieee80211_ccmp_init,
-       .deinit                 = ieee80211_ccmp_deinit,
-       .encrypt_mpdu           = ieee80211_ccmp_encrypt,
-       .decrypt_mpdu           = ieee80211_ccmp_decrypt,
-       .encrypt_msdu           = NULL,
-       .decrypt_msdu           = NULL,
-       .set_key                = ieee80211_ccmp_set_key,
-       .get_key                = ieee80211_ccmp_get_key,
-       .print_stats            = ieee80211_ccmp_print_stats,
-       .extra_prefix_len       = CCMP_HDR_LEN,
-       .extra_postfix_len      = CCMP_MIC_LEN,
-       .owner                  = THIS_MODULE,
+       .name = "CCMP",
+       .init = ieee80211_ccmp_init,
+       .deinit = ieee80211_ccmp_deinit,
+       .build_iv = ieee80211_ccmp_hdr,
+       .encrypt_mpdu = ieee80211_ccmp_encrypt,
+       .decrypt_mpdu = ieee80211_ccmp_decrypt,
+       .encrypt_msdu = NULL,
+       .decrypt_msdu = NULL,
+       .set_key = ieee80211_ccmp_set_key,
+       .get_key = ieee80211_ccmp_get_key,
+       .print_stats = ieee80211_ccmp_print_stats,
+       .extra_mpdu_prefix_len = CCMP_HDR_LEN,
+       .extra_mpdu_postfix_len = CCMP_MIC_LEN,
+       .owner = THIS_MODULE,
 };
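Wiring ieee80211_ccmp_hdr() in as .build_iv lets a driver that offloads the AES-CCM pass to hardware still have the stack build the per-frame header. A hypothetical driver-side TX fragment (not part of this patch), assuming the per-device key array ieee->crypt[] and ieee->tx_keyidx as declared in <net/ieee80211.h>, with error handling trimmed:

/* Hypothetical hardware-crypto TX sketch built against <net/ieee80211.h>. */
static int hw_crypto_prepare_tx(struct ieee80211_device *ieee,
				struct sk_buff *skb, int hdr_len)
{
	struct ieee80211_crypt_data *crypt = ieee->crypt[ieee->tx_keyidx];

	if (!crypt || !crypt->ops || !crypt->ops->build_iv)
		return -1;

	/* Reserve room for the IV/CCMP header the stack will prepend. */
	if (skb_headroom(skb) < crypt->ops->extra_mpdu_prefix_len)
		return -1;

	/* Stack fills in PN/ExtIV; the device does the cipher and MIC. */
	if (crypt->ops->build_iv(skb, hdr_len, crypt->priv) < 0)
		return -1;

	return 0;	/* hand skb to the device with HW encryption enabled */
}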
 
 static int __init ieee80211_crypto_ccmp_init(void)
index d4f9164be1a1848f75f49fd0ee631ce70aa6fdc6..2e34f29b7956f46c4f81fef31953af4953fd2752 100644 (file)
@@ -59,8 +59,24 @@ struct ieee80211_tkip_data {
 
        /* scratch buffers for virt_to_page() (crypto API) */
        u8 rx_hdr[16], tx_hdr[16];
+
+       unsigned long flags;
 };
 
+static unsigned long ieee80211_tkip_set_flags(unsigned long flags, void *priv)
+{
+       struct ieee80211_tkip_data *_priv = priv;
+       unsigned long old_flags = _priv->flags;
+       _priv->flags = flags;
+       return old_flags;
+}
+
+static unsigned long ieee80211_tkip_get_flags(void *priv)
+{
+       struct ieee80211_tkip_data *_priv = priv;
+       return _priv->flags;
+}
+
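The two accessors above expose the new per-key flags word through the .get_flags/.set_flags ops added at the end of this file; this replaces the global ieee->tkip_countermeasures field that is deleted further down. A hedged sketch of how a caller holding the crypt data could toggle countermeasures (helper name hypothetical):

/* Hypothetical caller: switch TKIP countermeasures on or off for one key. */
static void tkip_set_countermeasures(struct ieee80211_crypt_data *crypt,
				     int enable)
{
	unsigned long flags;

	if (!crypt || !crypt->ops || !crypt->ops->get_flags ||
	    !crypt->ops->set_flags)
		return;

	flags = crypt->ops->get_flags(crypt->priv);
	if (enable)
		flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
	else
		flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
	crypt->ops->set_flags(flags, crypt->priv);
}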
 static void *ieee80211_tkip_init(int key_idx)
 {
        struct ieee80211_tkip_data *priv;
@@ -69,6 +85,7 @@ static void *ieee80211_tkip_init(int key_idx)
        if (priv == NULL)
                goto fail;
        memset(priv, 0, sizeof(*priv));
+
        priv->key_idx = key_idx;
 
        priv->tfm_arc4 = crypto_alloc_tfm("arc4", 0);
@@ -255,25 +272,27 @@ static void tkip_mixing_phase2(u8 * WEPSeed, const u8 * TK, const u16 * TTAK,
 #endif
 }
 
-static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
+static u8 *ieee80211_tkip_hdr(struct sk_buff *skb, int hdr_len, void *priv)
 {
        struct ieee80211_tkip_data *tkey = priv;
        int len;
-       u8 rc4key[16], *pos, *icv;
-       struct ieee80211_hdr *hdr;
+       u8 *rc4key, *pos, *icv;
+       struct ieee80211_hdr_4addr *hdr;
        u32 crc;
-       struct scatterlist sg;
 
-       if (skb_headroom(skb) < 8 || skb_tailroom(skb) < 4 ||
-           skb->len < hdr_len)
-               return -1;
+       hdr = (struct ieee80211_hdr_4addr *)skb->data;
+
+       if (skb_headroom(skb) < 8 || skb->len < hdr_len)
+               return NULL;
 
-       hdr = (struct ieee80211_hdr *)skb->data;
        if (!tkey->tx_phase1_done) {
                tkip_mixing_phase1(tkey->tx_ttak, tkey->key, hdr->addr2,
                                   tkey->tx_iv32);
                tkey->tx_phase1_done = 1;
        }
+       rc4key = kmalloc(16, GFP_ATOMIC);
+       if (!rc4key)
+               return NULL;
        tkip_mixing_phase2(rc4key, tkey->key, tkey->tx_ttak, tkey->tx_iv16);
 
        len = skb->len - hdr_len;
@@ -282,9 +301,9 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
        pos += hdr_len;
        icv = skb_put(skb, 4);
 
-       *pos++ = rc4key[0];
-       *pos++ = rc4key[1];
-       *pos++ = rc4key[2];
+       *pos++ = *rc4key;
+       *pos++ = *(rc4key + 1);
+       *pos++ = *(rc4key + 2);
        *pos++ = (tkey->key_idx << 6) | (1 << 5) /* Ext IV included */ ;
        *pos++ = tkey->tx_iv32 & 0xff;
        *pos++ = (tkey->tx_iv32 >> 8) & 0xff;
@@ -297,6 +316,38 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
        icv[2] = crc >> 16;
        icv[3] = crc >> 24;
 
+       return rc4key;
+}
+
+static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
+{
+       struct ieee80211_tkip_data *tkey = priv;
+       int len;
+       const u8 *rc4key;
+       u8 *pos;
+       struct scatterlist sg;
+
+       if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) {
+               if (net_ratelimit()) {
+                       struct ieee80211_hdr_4addr *hdr =
+                           (struct ieee80211_hdr_4addr *)skb->data;
+                       printk(KERN_DEBUG "TKIP countermeasures: dropped "
+                              "TX packet to " MAC_FMT "\n",
+                              MAC_ARG(hdr->addr1));
+               }
+               return -1;
+       }
+
+       if (skb_tailroom(skb) < 4 || skb->len < hdr_len)
+               return -1;
+
+       len = skb->len - hdr_len;
+       pos = skb->data + hdr_len;
+
+       rc4key = ieee80211_tkip_hdr(skb, hdr_len, priv);
+       if (!rc4key)
+               return -1;
+
        crypto_cipher_setkey(tkey->tfm_arc4, rc4key, 16);
        sg.page = virt_to_page(pos);
        sg.offset = offset_in_page(pos);
@@ -319,16 +370,26 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
        u8 keyidx, *pos;
        u32 iv32;
        u16 iv16;
-       struct ieee80211_hdr *hdr;
+       struct ieee80211_hdr_4addr *hdr;
        u8 icv[4];
        u32 crc;
        struct scatterlist sg;
        int plen;
 
+       hdr = (struct ieee80211_hdr_4addr *)skb->data;
+
+       if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) {
+               if (net_ratelimit()) {
+                       printk(KERN_DEBUG "TKIP countermeasures: dropped "
+                              "received packet from " MAC_FMT "\n",
+                              MAC_ARG(hdr->addr2));
+               }
+               return -1;
+       }
+
        if (skb->len < hdr_len + 8 + 4)
                return -1;
 
-       hdr = (struct ieee80211_hdr *)skb->data;
        pos = skb->data + hdr_len;
        keyidx = pos[3];
        if (!(keyidx & (1 << 5))) {
@@ -441,9 +502,9 @@ static int michael_mic(struct ieee80211_tkip_data *tkey, u8 * key, u8 * hdr,
 
 static void michael_mic_hdr(struct sk_buff *skb, u8 * hdr)
 {
-       struct ieee80211_hdr *hdr11;
+       struct ieee80211_hdr_4addr *hdr11;
 
-       hdr11 = (struct ieee80211_hdr *)skb->data;
+       hdr11 = (struct ieee80211_hdr_4addr *)skb->data;
        switch (le16_to_cpu(hdr11->frame_ctl) &
                (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
        case IEEE80211_FCTL_TODS:
@@ -490,9 +551,9 @@ static int ieee80211_michael_mic_add(struct sk_buff *skb, int hdr_len,
        return 0;
 }
 
-#if WIRELESS_EXT >= 18
 static void ieee80211_michael_mic_failure(struct net_device *dev,
-                                         struct ieee80211_hdr *hdr, int keyidx)
+                                         struct ieee80211_hdr_4addr *hdr,
+                                         int keyidx)
 {
        union iwreq_data wrqu;
        struct iw_michaelmicfailure ev;
@@ -510,28 +571,6 @@ static void ieee80211_michael_mic_failure(struct net_device *dev,
        wrqu.data.length = sizeof(ev);
        wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu, (char *)&ev);
 }
-#elif WIRELESS_EXT >= 15
-static void ieee80211_michael_mic_failure(struct net_device *dev,
-                                         struct ieee80211_hdr *hdr, int keyidx)
-{
-       union iwreq_data wrqu;
-       char buf[128];
-
-       /* TODO: needed parameters: count, keyid, key type, TSC */
-       sprintf(buf, "MLME-MICHAELMICFAILURE.indication(keyid=%d %scast addr="
-               MAC_FMT ")", keyidx, hdr->addr1[0] & 0x01 ? "broad" : "uni",
-               MAC_ARG(hdr->addr2));
-       memset(&wrqu, 0, sizeof(wrqu));
-       wrqu.data.length = strlen(buf);
-       wireless_send_event(dev, IWEVCUSTOM, &wrqu, buf);
-}
-#else                          /* WIRELESS_EXT >= 15 */
-static inline void ieee80211_michael_mic_failure(struct net_device *dev,
-                                                struct ieee80211_hdr *hdr,
-                                                int keyidx)
-{
-}
-#endif                         /* WIRELESS_EXT >= 15 */
 
 static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx,
                                        int hdr_len, void *priv)
@@ -547,8 +586,8 @@ static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx,
                        skb->data + hdr_len, skb->len - 8 - hdr_len, mic))
                return -1;
        if (memcmp(mic, skb->data + skb->len - 8, 8) != 0) {
-               struct ieee80211_hdr *hdr;
-               hdr = (struct ieee80211_hdr *)skb->data;
+               struct ieee80211_hdr_4addr *hdr;
+               hdr = (struct ieee80211_hdr_4addr *)skb->data;
                printk(KERN_DEBUG "%s: Michael MIC verification failed for "
                       "MSDU from " MAC_FMT " keyidx=%d\n",
                       skb->dev ? skb->dev->name : "N/A", MAC_ARG(hdr->addr2),
@@ -654,19 +693,22 @@ static char *ieee80211_tkip_print_stats(char *p, void *priv)
 }
 
 static struct ieee80211_crypto_ops ieee80211_crypt_tkip = {
-       .name                   = "TKIP",
-       .init                   = ieee80211_tkip_init,
-       .deinit                 = ieee80211_tkip_deinit,
-       .encrypt_mpdu           = ieee80211_tkip_encrypt,
-       .decrypt_mpdu           = ieee80211_tkip_decrypt,
-       .encrypt_msdu           = ieee80211_michael_mic_add,
-       .decrypt_msdu           = ieee80211_michael_mic_verify,
-       .set_key                = ieee80211_tkip_set_key,
-       .get_key                = ieee80211_tkip_get_key,
-       .print_stats            = ieee80211_tkip_print_stats,
-       .extra_prefix_len       = 4 + 4,        /* IV + ExtIV */
-       .extra_postfix_len      = 8 + 4,        /* MIC + ICV */
-       .owner                  = THIS_MODULE,
+       .name = "TKIP",
+       .init = ieee80211_tkip_init,
+       .deinit = ieee80211_tkip_deinit,
+       .encrypt_mpdu = ieee80211_tkip_encrypt,
+       .decrypt_mpdu = ieee80211_tkip_decrypt,
+       .encrypt_msdu = ieee80211_michael_mic_add,
+       .decrypt_msdu = ieee80211_michael_mic_verify,
+       .set_key = ieee80211_tkip_set_key,
+       .get_key = ieee80211_tkip_get_key,
+       .print_stats = ieee80211_tkip_print_stats,
+       .extra_mpdu_prefix_len = 4 + 4, /* IV + ExtIV */
+       .extra_mpdu_postfix_len = 4,    /* ICV */
+       .extra_msdu_postfix_len = 8,    /* MIC */
+       .get_flags = ieee80211_tkip_get_flags,
+       .set_flags = ieee80211_tkip_set_flags,
+       .owner = THIS_MODULE,
 };
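With the old extra_prefix_len/extra_postfix_len pair split into MPDU- and MSDU-level values, callers can size skb headroom and tailroom separately for the ICV (per MPDU) and the Michael MIC (per MSDU). A small sketch of that arithmetic using only the fields visible in this patch; for the TKIP values above it yields 8 bytes of headroom and 12 bytes of tailroom:

/* Hypothetical helper: derive skb reserve sizes from a crypto_ops. */
static void crypt_skb_reserve(const struct ieee80211_crypto_ops *ops,
			      int *headroom, int *tailroom)
{
	*headroom = ops->extra_mpdu_prefix_len;		/* IV + ExtIV */
	*tailroom = ops->extra_mpdu_postfix_len +	/* ICV */
		    ops->extra_msdu_postfix_len;	/* MIC */
}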
 
 static int __init ieee80211_crypto_tkip_init(void)
index b4d2514a090270be5b5822e671678ded227231df..7c08ed2f2628f4dd439465f0abdd90358a8e39a9 100644 (file)
@@ -229,19 +229,19 @@ static char *prism2_wep_print_stats(char *p, void *priv)
 }
 
 static struct ieee80211_crypto_ops ieee80211_crypt_wep = {
-       .name                   = "WEP",
-       .init                   = prism2_wep_init,
-       .deinit                 = prism2_wep_deinit,
-       .encrypt_mpdu           = prism2_wep_encrypt,
-       .decrypt_mpdu           = prism2_wep_decrypt,
-       .encrypt_msdu           = NULL,
-       .decrypt_msdu           = NULL,
-       .set_key                = prism2_wep_set_key,
-       .get_key                = prism2_wep_get_key,
-       .print_stats            = prism2_wep_print_stats,
-       .extra_prefix_len       = 4,    /* IV */
-       .extra_postfix_len      = 4,    /* ICV */
-       .owner                  = THIS_MODULE,
+       .name = "WEP",
+       .init = prism2_wep_init,
+       .deinit = prism2_wep_deinit,
+       .encrypt_mpdu = prism2_wep_encrypt,
+       .decrypt_mpdu = prism2_wep_decrypt,
+       .encrypt_msdu = NULL,
+       .decrypt_msdu = NULL,
+       .set_key = prism2_wep_set_key,
+       .get_key = prism2_wep_get_key,
+       .print_stats = prism2_wep_print_stats,
+       .extra_mpdu_prefix_len = 4,     /* IV */
+       .extra_mpdu_postfix_len = 4,    /* ICV */
+       .owner = THIS_MODULE,
 };
 
 static int __init ieee80211_crypto_wep_init(void)
diff --git a/net/ieee80211/ieee80211_geo.c b/net/ieee80211/ieee80211_geo.c
new file mode 100644 (file)
index 0000000..c4b54ef
--- /dev/null
@@ -0,0 +1,141 @@
+/******************************************************************************
+
+  Copyright(c) 2005 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59
+  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+
+  The full GNU General Public License is included in this distribution in the
+  file called LICENSE.
+
+  Contact Information:
+  James P. Ketrenos <ipw2100-admin@linux.intel.com>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+******************************************************************************/
+#include <linux/compiler.h>
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/in6.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/proc_fs.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/tcp.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/wireless.h>
+#include <linux/etherdevice.h>
+#include <asm/uaccess.h>
+
+#include <net/ieee80211.h>
+
+int ieee80211_is_valid_channel(struct ieee80211_device *ieee, u8 channel)
+{
+       int i;
+
+       /* Driver needs to initialize the geography map before using
+        * these helper functions */
+       BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0);
+
+       if (ieee->freq_band & IEEE80211_24GHZ_BAND)
+               for (i = 0; i < ieee->geo.bg_channels; i++)
+                       /* NOTE: If G mode is currently supported but
+                        * this is a B only channel, we don't see it
+                        * as valid. */
+                       if ((ieee->geo.bg[i].channel == channel) &&
+                           (!(ieee->mode & IEEE_G) ||
+                            !(ieee->geo.bg[i].flags & IEEE80211_CH_B_ONLY)))
+                               return IEEE80211_24GHZ_BAND;
+
+       if (ieee->freq_band & IEEE80211_52GHZ_BAND)
+               for (i = 0; i < ieee->geo.a_channels; i++)
+                       if (ieee->geo.a[i].channel == channel)
+                               return IEEE80211_52GHZ_BAND;
+
+       return 0;
+}
+
+int ieee80211_channel_to_index(struct ieee80211_device *ieee, u8 channel)
+{
+       int i;
+
+       /* Driver needs to initialize the geography map before using
+        * these helper functions */
+       BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0);
+
+       if (ieee->freq_band & IEEE80211_24GHZ_BAND)
+               for (i = 0; i < ieee->geo.bg_channels; i++)
+                       if (ieee->geo.bg[i].channel == channel)
+                               return i;
+
+       if (ieee->freq_band & IEEE80211_52GHZ_BAND)
+               for (i = 0; i < ieee->geo.a_channels; i++)
+                       if (ieee->geo.a[i].channel == channel)
+                               return i;
+
+       return -1;
+}
+
+u8 ieee80211_freq_to_channel(struct ieee80211_device * ieee, u32 freq)
+{
+       int i;
+
+       /* Driver needs to initialize the geography map before using
+        * these helper functions */
+       BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0);
+
+       freq /= 100000;
+
+       if (ieee->freq_band & IEEE80211_24GHZ_BAND)
+               for (i = 0; i < ieee->geo.bg_channels; i++)
+                       if (ieee->geo.bg[i].freq == freq)
+                               return ieee->geo.bg[i].channel;
+
+       if (ieee->freq_band & IEEE80211_52GHZ_BAND)
+               for (i = 0; i < ieee->geo.a_channels; i++)
+                       if (ieee->geo.a[i].freq == freq)
+                               return ieee->geo.a[i].channel;
+
+       return 0;
+}
+
+int ieee80211_set_geo(struct ieee80211_device *ieee,
+                     const struct ieee80211_geo *geo)
+{
+       memcpy(ieee->geo.name, geo->name, 3);
+       ieee->geo.name[3] = '\0';
+       ieee->geo.bg_channels = geo->bg_channels;
+       ieee->geo.a_channels = geo->a_channels;
+       memcpy(ieee->geo.bg, geo->bg, geo->bg_channels *
+              sizeof(struct ieee80211_channel));
+       memcpy(ieee->geo.a, geo->a, ieee->geo.a_channels *
+              sizeof(struct ieee80211_channel));
+       return 0;
+}
+
+const struct ieee80211_geo *ieee80211_get_geo(struct ieee80211_device *ieee)
+{
+       return &ieee->geo;
+}
+
+EXPORT_SYMBOL(ieee80211_is_valid_channel);
+EXPORT_SYMBOL(ieee80211_freq_to_channel);
+EXPORT_SYMBOL(ieee80211_channel_to_index);
+EXPORT_SYMBOL(ieee80211_set_geo);
+EXPORT_SYMBOL(ieee80211_get_geo);
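ieee80211_geo.c is new: a driver hands the stack its regulatory channel map once and then uses the lookup helpers. A hedged init-time sketch (driver function name and channel list are made up; it assumes struct ieee80211_channel exposes the channel, freq and flags fields referenced above and that the driver has already set ieee->freq_band):

/* Hypothetical driver init fragment: register a tiny 2.4 GHz-only map. */
static void example_driver_set_geo(struct ieee80211_device *ieee)
{
	static const struct ieee80211_channel bg[] = {
		{ .channel = 1,  .freq = 2412 },
		{ .channel = 6,  .freq = 2437 },
		{ .channel = 11, .freq = 2462 },
	};
	struct ieee80211_geo geo;

	memset(&geo, 0, sizeof(geo));
	memcpy(geo.name, "US", 3);	/* two letters plus '\0' */
	geo.bg_channels = ARRAY_SIZE(bg);
	memcpy(geo.bg, bg, sizeof(bg));
	ieee80211_set_geo(ieee, &geo);

	/* Channels missing from the map are now rejected. */
	if (!ieee80211_is_valid_channel(ieee, 13))
		printk(KERN_DEBUG "channel 13 not valid for this geography\n");
}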
index 6059e9e37123b711554c6e50a18daf8a5872c793..f66d792cd204b068976f7655e625cb4a21d99978 100644 (file)
@@ -1,6 +1,6 @@
 /*******************************************************************************
 
-  Copyright(c) 2004 Intel Corporation. All rights reserved.
+  Copyright(c) 2004-2005 Intel Corporation. All rights reserved.
 
   Portions of this file are based on the WEP enablement code provided by the
   Host AP project hostap-drivers v0.1.3
 
 #include <net/ieee80211.h>
 
-MODULE_DESCRIPTION("802.11 data/management/control stack");
-MODULE_AUTHOR
-    ("Copyright (C) 2004 Intel Corporation <jketreno@linux.intel.com>");
-MODULE_LICENSE("GPL");
+#define DRV_DESCRIPTION "802.11 data/management/control stack"
+#define DRV_NAME        "ieee80211"
+#define DRV_VERSION    IEEE80211_VERSION
+#define DRV_COPYRIGHT   "Copyright (C) 2004-2005 Intel Corporation <jketreno@linux.intel.com>"
 
-#define DRV_NAME "ieee80211"
+MODULE_VERSION(DRV_VERSION);
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_AUTHOR(DRV_COPYRIGHT);
+MODULE_LICENSE("GPL");
 
 static inline int ieee80211_networks_allocate(struct ieee80211_device *ieee)
 {
@@ -126,26 +129,34 @@ struct net_device *alloc_ieee80211(int sizeof_priv)
 
        /* Default fragmentation threshold is maximum payload size */
        ieee->fts = DEFAULT_FTS;
+       ieee->rts = DEFAULT_FTS;
        ieee->scan_age = DEFAULT_MAX_SCAN_AGE;
        ieee->open_wep = 1;
 
        /* Default to enabling full open WEP with host based encrypt/decrypt */
        ieee->host_encrypt = 1;
        ieee->host_decrypt = 1;
+       ieee->host_mc_decrypt = 1;
+
+       /* Host fragmentation in Open mode. Default is enabled.
+        * Note: host fragmentation is always enabled if host encryption
+        * is enabled. Cards that can do hardware encryption must do
+        * hardware fragmentation as well, so we don't need a variable
+        * like host_enc_frag. */
+       ieee->host_open_frag = 1;
        ieee->ieee802_1x = 1;   /* Default to supporting 802.1x */
 
        INIT_LIST_HEAD(&ieee->crypt_deinit_list);
        init_timer(&ieee->crypt_deinit_timer);
        ieee->crypt_deinit_timer.data = (unsigned long)ieee;
        ieee->crypt_deinit_timer.function = ieee80211_crypt_deinit_handler;
+       ieee->crypt_quiesced = 0;
 
        spin_lock_init(&ieee->lock);
 
        ieee->wpa_enabled = 0;
-       ieee->tkip_countermeasures = 0;
        ieee->drop_unencrypted = 0;
        ieee->privacy_invoked = 0;
-       ieee->ieee802_1x = 1;
 
        return dev;
 
@@ -161,6 +172,7 @@ void free_ieee80211(struct net_device *dev)
 
        int i;
 
+       ieee80211_crypt_quiescing(ieee);
        del_timer_sync(&ieee->crypt_deinit_timer);
        ieee80211_crypt_deinit_entries(ieee, 1);
 
@@ -195,38 +207,26 @@ static int show_debug_level(char *page, char **start, off_t offset,
 static int store_debug_level(struct file *file, const char __user * buffer,
                             unsigned long count, void *data)
 {
-       char buf[] = "0x00000000";
-       char *p = (char *)buf;
+       char buf[] = "0x00000000\n";
+       unsigned long len = min((unsigned long)sizeof(buf) - 1, count);
        unsigned long val;
 
-       if (count > sizeof(buf) - 1)
-               count = sizeof(buf) - 1;
-
-       if (copy_from_user(buf, buffer, count))
+       if (copy_from_user(buf, buffer, len))
                return count;
-       buf[count] = 0;
-       /*
-        * what a FPOS...  What, sscanf(buf, "%i", &val) would be too
-        * scary?
-        */
-       if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
-               p++;
-               if (p[0] == 'x' || p[0] == 'X')
-                       p++;
-               val = simple_strtoul(p, &p, 16);
-       } else
-               val = simple_strtoul(p, &p, 10);
-       if (p == buf)
+       buf[len] = 0;
+       if (sscanf(buf, "%li", &val) != 1)
                printk(KERN_INFO DRV_NAME
                       ": %s is not in hex or decimal form.\n", buf);
        else
                ieee80211_debug_level = val;
 
-       return strlen(buf);
+       return strnlen(buf, len);
 }
+#endif                         /* CONFIG_IEEE80211_DEBUG */
 
 static int __init ieee80211_init(void)
 {
+#ifdef CONFIG_IEEE80211_DEBUG
        struct proc_dir_entry *e;
 
        ieee80211_debug_level = debug;
@@ -246,26 +246,33 @@ static int __init ieee80211_init(void)
        e->read_proc = show_debug_level;
        e->write_proc = store_debug_level;
        e->data = NULL;
+#endif                         /* CONFIG_IEEE80211_DEBUG */
+
+       printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
+       printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
 
        return 0;
 }
 
 static void __exit ieee80211_exit(void)
 {
+#ifdef CONFIG_IEEE80211_DEBUG
        if (ieee80211_proc) {
                remove_proc_entry("debug_level", ieee80211_proc);
                remove_proc_entry(DRV_NAME, proc_net);
                ieee80211_proc = NULL;
        }
+#endif                         /* CONFIG_IEEE80211_DEBUG */
 }
 
+#ifdef CONFIG_IEEE80211_DEBUG
 #include <linux/moduleparam.h>
 module_param(debug, int, 0444);
 MODULE_PARM_DESC(debug, "debug output mask");
+#endif                         /* CONFIG_IEEE80211_DEBUG */
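Since store_debug_level() above now parses with sscanf("%li"), the mask can be written in hex or decimal. A small userspace sketch, assuming the proc entry created in ieee80211_init() ends up at /proc/net/ieee80211/debug_level on a default proc mount:

#include <stdio.h>

/* Hypothetical helper: set the ieee80211 debug mask from userspace. */
int main(void)
{
	FILE *f = fopen("/proc/net/ieee80211/debug_level", "w");

	if (!f)
		return 1;
	fprintf(f, "0x00000043\n");	/* decimal "67" would work as well */
	fclose(f);
	return 0;
}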
 
 module_exit(ieee80211_exit);
 module_init(ieee80211_init);
-#endif
 
 const char *escape_essid(const char *essid, u8 essid_len)
 {
index f7dcd854139e011c960643a76e2a40ae77566341..ce694cf5c1604a945c491446241debf8d3ee9666 100644 (file)
@@ -5,7 +5,7 @@
  * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
  * <jkmaline@cc.hut.fi>
  * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
- * Copyright (c) 2004, Intel Corporation
+ * Copyright (c) 2004-2005, Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -87,7 +87,7 @@ static struct ieee80211_frag_entry *ieee80211_frag_cache_find(struct
 
 /* Called only as a tasklet (software IRQ) */
 static struct sk_buff *ieee80211_frag_cache_get(struct ieee80211_device *ieee,
-                                               struct ieee80211_hdr *hdr)
+                                               struct ieee80211_hdr_4addr *hdr)
 {
        struct sk_buff *skb = NULL;
        u16 sc;
@@ -101,7 +101,7 @@ static struct sk_buff *ieee80211_frag_cache_get(struct ieee80211_device *ieee,
        if (frag == 0) {
                /* Reserve enough space to fit maximum frame length */
                skb = dev_alloc_skb(ieee->dev->mtu +
-                                   sizeof(struct ieee80211_hdr) +
+                                   sizeof(struct ieee80211_hdr_4addr) +
                                    8 /* LLC */  +
                                    2 /* alignment */  +
                                    8 /* WEP */  + ETH_ALEN /* WDS */ );
@@ -138,7 +138,7 @@ static struct sk_buff *ieee80211_frag_cache_get(struct ieee80211_device *ieee,
 
 /* Called only as a tasklet (software IRQ) */
 static int ieee80211_frag_cache_invalidate(struct ieee80211_device *ieee,
-                                          struct ieee80211_hdr *hdr)
+                                          struct ieee80211_hdr_4addr *hdr)
 {
        u16 sc;
        unsigned int seq;
@@ -176,7 +176,7 @@ ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb,
                       ieee->dev->name);
                return 0;
 /*
-  hostap_update_sta_ps(ieee, (struct hostap_ieee80211_hdr *)
+  hostap_update_sta_ps(ieee, (struct hostap_ieee80211_hdr_4addr *)
   skb->data);*/
        }
 
@@ -232,13 +232,13 @@ static int ieee80211_is_eapol_frame(struct ieee80211_device *ieee,
 {
        struct net_device *dev = ieee->dev;
        u16 fc, ethertype;
-       struct ieee80211_hdr *hdr;
+       struct ieee80211_hdr_3addr *hdr;
        u8 *pos;
 
        if (skb->len < 24)
                return 0;
 
-       hdr = (struct ieee80211_hdr *)skb->data;
+       hdr = (struct ieee80211_hdr_3addr *)skb->data;
        fc = le16_to_cpu(hdr->frame_ctl);
 
        /* check that the frame is unicast frame to us */
@@ -271,26 +271,15 @@ static inline int
 ieee80211_rx_frame_decrypt(struct ieee80211_device *ieee, struct sk_buff *skb,
                           struct ieee80211_crypt_data *crypt)
 {
-       struct ieee80211_hdr *hdr;
+       struct ieee80211_hdr_3addr *hdr;
        int res, hdrlen;
 
        if (crypt == NULL || crypt->ops->decrypt_mpdu == NULL)
                return 0;
 
-       hdr = (struct ieee80211_hdr *)skb->data;
+       hdr = (struct ieee80211_hdr_3addr *)skb->data;
        hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
 
-#ifdef CONFIG_IEEE80211_CRYPT_TKIP
-       if (ieee->tkip_countermeasures && strcmp(crypt->ops->name, "TKIP") == 0) {
-               if (net_ratelimit()) {
-                       printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
-                              "received packet from " MAC_FMT "\n",
-                              ieee->dev->name, MAC_ARG(hdr->addr2));
-               }
-               return -1;
-       }
-#endif
-
        atomic_inc(&crypt->refcnt);
        res = crypt->ops->decrypt_mpdu(skb, hdrlen, crypt->priv);
        atomic_dec(&crypt->refcnt);
@@ -314,13 +303,13 @@ ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device *ieee,
                                struct sk_buff *skb, int keyidx,
                                struct ieee80211_crypt_data *crypt)
 {
-       struct ieee80211_hdr *hdr;
+       struct ieee80211_hdr_3addr *hdr;
        int res, hdrlen;
 
        if (crypt == NULL || crypt->ops->decrypt_msdu == NULL)
                return 0;
 
-       hdr = (struct ieee80211_hdr *)skb->data;
+       hdr = (struct ieee80211_hdr_3addr *)skb->data;
        hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
 
        atomic_inc(&crypt->refcnt);
@@ -343,7 +332,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
                 struct ieee80211_rx_stats *rx_stats)
 {
        struct net_device *dev = ieee->dev;
-       struct ieee80211_hdr *hdr;
+       struct ieee80211_hdr_4addr *hdr;
        size_t hdrlen;
        u16 fc, type, stype, sc;
        struct net_device_stats *stats;
@@ -363,7 +352,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
        struct ieee80211_crypt_data *crypt = NULL;
        int keyidx = 0;
 
-       hdr = (struct ieee80211_hdr *)skb->data;
+       hdr = (struct ieee80211_hdr_4addr *)skb->data;
        stats = &ieee->stats;
 
        if (skb->len < 10) {
@@ -378,35 +367,51 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
        frag = WLAN_GET_SEQ_FRAG(sc);
        hdrlen = ieee80211_get_hdrlen(fc);
 
-#ifdef NOT_YET
-#if WIRELESS_EXT > 15
        /* Put this code here so that we avoid duplicating it in all
         * Rx paths. - Jean II */
 #ifdef IW_WIRELESS_SPY         /* defined in iw_handler.h */
        /* If spy monitoring on */
-       if (iface->spy_data.spy_number > 0) {
+       if (ieee->spy_data.spy_number > 0) {
                struct iw_quality wstats;
-               wstats.level = rx_stats->signal;
-               wstats.noise = rx_stats->noise;
-               wstats.updated = 6;     /* No qual value */
+
+               wstats.updated = 0;
+               if (rx_stats->mask & IEEE80211_STATMASK_RSSI) {
+                       wstats.level = rx_stats->rssi;
+                       wstats.updated |= IW_QUAL_LEVEL_UPDATED;
+               } else
+                       wstats.updated |= IW_QUAL_LEVEL_INVALID;
+
+               if (rx_stats->mask & IEEE80211_STATMASK_NOISE) {
+                       wstats.noise = rx_stats->noise;
+                       wstats.updated |= IW_QUAL_NOISE_UPDATED;
+               } else
+                       wstats.updated |= IW_QUAL_NOISE_INVALID;
+
+               if (rx_stats->mask & IEEE80211_STATMASK_SIGNAL) {
+                       wstats.qual = rx_stats->signal;
+                       wstats.updated |= IW_QUAL_QUAL_UPDATED;
+               } else
+                       wstats.updated |= IW_QUAL_QUAL_INVALID;
+
                /* Update spy records */
-               wireless_spy_update(dev, hdr->addr2, &wstats);
+               wireless_spy_update(ieee->dev, hdr->addr2, &wstats);
        }
 #endif                         /* IW_WIRELESS_SPY */
-#endif                         /* WIRELESS_EXT > 15 */
+
+#ifdef NOT_YET
        hostap_update_rx_stats(local->ap, hdr, rx_stats);
 #endif
 
-#if WIRELESS_EXT > 15
        if (ieee->iw_mode == IW_MODE_MONITOR) {
                ieee80211_monitor_rx(ieee, skb, rx_stats);
                stats->rx_packets++;
                stats->rx_bytes += skb->len;
                return 1;
        }
-#endif
 
-       if (ieee->host_decrypt) {
+       if ((is_multicast_ether_addr(hdr->addr1) ||
+            is_broadcast_ether_addr(hdr->addr2)) ? ieee->host_mc_decrypt :
+           ieee->host_decrypt) {
                int idx = 0;
                if (skb->len >= hdrlen + 3)
                        idx = skb->data[hdrlen + 3] >> 6;
@@ -531,6 +536,9 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
 
        /* Nullfunc frames may have PS-bit set, so they must be passed to
         * hostap_handle_sta_rx() before being dropped here. */
+
+       stype &= ~IEEE80211_STYPE_QOS_DATA;
+
        if (stype != IEEE80211_STYPE_DATA &&
            stype != IEEE80211_STYPE_DATA_CFACK &&
            stype != IEEE80211_STYPE_DATA_CFPOLL &&
@@ -549,7 +557,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
            (keyidx = ieee80211_rx_frame_decrypt(ieee, skb, crypt)) < 0)
                goto rx_dropped;
 
-       hdr = (struct ieee80211_hdr *)skb->data;
+       hdr = (struct ieee80211_hdr_4addr *)skb->data;
 
        /* skb: hdr + (possibly fragmented) plaintext payload */
        // PR: FIXME: hostap has additional conditions in the "if" below:
@@ -603,7 +611,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
                /* this was the last fragment and the frame will be
                 * delivered, so remove skb from fragment cache */
                skb = frag_skb;
-               hdr = (struct ieee80211_hdr *)skb->data;
+               hdr = (struct ieee80211_hdr_4addr *)skb->data;
                ieee80211_frag_cache_invalidate(ieee, hdr);
        }
 
@@ -613,7 +621,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
            ieee80211_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt))
                goto rx_dropped;
 
-       hdr = (struct ieee80211_hdr *)skb->data;
+       hdr = (struct ieee80211_hdr_4addr *)skb->data;
        if (crypt && !(fc & IEEE80211_FCTL_PROTECTED) && !ieee->open_wep) {
                if (            /*ieee->ieee802_1x && */
                           ieee80211_is_eapol_frame(ieee, skb)) {
@@ -755,69 +763,179 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
 
 #define MGMT_FRAME_FIXED_PART_LENGTH           0x24
 
-static inline int ieee80211_is_ofdm_rate(u8 rate)
+static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
+
+/*
+* Make sure the structure we read from the beacon packet
+* has the right values
+*/
+static int ieee80211_verify_qos_info(struct ieee80211_qos_information_element
+                                    *info_element, int sub_type)
 {
-       switch (rate & ~IEEE80211_BASIC_RATE_MASK) {
-       case IEEE80211_OFDM_RATE_6MB:
-       case IEEE80211_OFDM_RATE_9MB:
-       case IEEE80211_OFDM_RATE_12MB:
-       case IEEE80211_OFDM_RATE_18MB:
-       case IEEE80211_OFDM_RATE_24MB:
-       case IEEE80211_OFDM_RATE_36MB:
-       case IEEE80211_OFDM_RATE_48MB:
-       case IEEE80211_OFDM_RATE_54MB:
-               return 1;
-       }
+
+       if (info_element->qui_subtype != sub_type)
+               return -1;
+       if (memcmp(info_element->qui, qos_oui, QOS_OUI_LEN))
+               return -1;
+       if (info_element->qui_type != QOS_OUI_TYPE)
+               return -1;
+       if (info_element->version != QOS_VERSION_1)
+               return -1;
+
        return 0;
 }
 
-static inline int ieee80211_network_init(struct ieee80211_device *ieee,
-                                        struct ieee80211_probe_response
-                                        *beacon,
-                                        struct ieee80211_network *network,
-                                        struct ieee80211_rx_stats *stats)
+/*
+ * Parse a QoS parameter element
+ */
+static int ieee80211_read_qos_param_element(struct ieee80211_qos_parameter_info
+                                           *element_param, struct ieee80211_info_element
+                                           *info_element)
 {
-#ifdef CONFIG_IEEE80211_DEBUG
-       char rates_str[64];
-       char *p;
-#endif
-       struct ieee80211_info_element *info_element;
-       u16 left;
-       u8 i;
+       int ret = 0;
+       u16 size = sizeof(struct ieee80211_qos_parameter_info) - 2;
 
-       /* Pull out fixed field data */
-       memcpy(network->bssid, beacon->header.addr3, ETH_ALEN);
-       network->capability = beacon->capability;
-       network->last_scanned = jiffies;
-       network->time_stamp[0] = beacon->time_stamp[0];
-       network->time_stamp[1] = beacon->time_stamp[1];
-       network->beacon_interval = beacon->beacon_interval;
-       /* Where to pull this? beacon->listen_interval; */
-       network->listen_interval = 0x0A;
-       network->rates_len = network->rates_ex_len = 0;
-       network->last_associate = 0;
-       network->ssid_len = 0;
-       network->flags = 0;
-       network->atim_window = 0;
+       if ((info_element == NULL) || (element_param == NULL))
+               return -1;
 
-       if (stats->freq == IEEE80211_52GHZ_BAND) {
-               /* for A band (No DS info) */
-               network->channel = stats->received_channel;
+       if (info_element->id == QOS_ELEMENT_ID && info_element->len == size) {
+               memcpy(element_param->info_element.qui, info_element->data,
+                      info_element->len);
+               element_param->info_element.elementID = info_element->id;
+               element_param->info_element.length = info_element->len;
        } else
-               network->flags |= NETWORK_HAS_CCK;
+               ret = -1;
+       if (ret == 0)
+               ret = ieee80211_verify_qos_info(&element_param->info_element,
+                                               QOS_OUI_PARAM_SUB_TYPE);
+       return ret;
+}
 
-       network->wpa_ie_len = 0;
-       network->rsn_ie_len = 0;
+/*
+ * Parse a QoS information element
+ */
+static int ieee80211_read_qos_info_element(struct
+                                          ieee80211_qos_information_element
+                                          *element_info, struct ieee80211_info_element
+                                          *info_element)
+{
+       int ret = 0;
+       u16 size = sizeof(struct ieee80211_qos_information_element) - 2;
+
+       if (element_info == NULL)
+               return -1;
+       if (info_element == NULL)
+               return -1;
+
+       if ((info_element->id == QOS_ELEMENT_ID) && (info_element->len == size)) {
+               memcpy(element_info->qui, info_element->data,
+                      info_element->len);
+               element_info->elementID = info_element->id;
+               element_info->length = info_element->len;
+       } else
+               ret = -1;
+
+       if (ret == 0)
+               ret = ieee80211_verify_qos_info(element_info,
+                                               QOS_OUI_INFO_SUB_TYPE);
+       return ret;
+}
+
+/*
+ * Write QoS parameters from the ac parameters.
+ */
+static int ieee80211_qos_convert_ac_to_parameters(struct
+                                                 ieee80211_qos_parameter_info
+                                                 *param_elm, struct
+                                                 ieee80211_qos_parameters
+                                                 *qos_param)
+{
+       int rc = 0;
+       int i;
+       struct ieee80211_qos_ac_parameter *ac_params;
+       u32 txop;
+       u8 cw_min;
+       u8 cw_max;
+
+       for (i = 0; i < QOS_QUEUE_NUM; i++) {
+               ac_params = &(param_elm->ac_params_record[i]);
+
+               qos_param->aifs[i] = (ac_params->aci_aifsn) & 0x0F;
+               qos_param->aifs[i] -= (qos_param->aifs[i] < 2) ? 0 : 2;
+
+               cw_min = ac_params->ecw_min_max & 0x0F;
+               qos_param->cw_min[i] = (u16) ((1 << cw_min) - 1);
+
+               cw_max = (ac_params->ecw_min_max & 0xF0) >> 4;
+               qos_param->cw_max[i] = (u16) ((1 << cw_max) - 1);
+
+               qos_param->flag[i] =
+                   (ac_params->aci_aifsn & 0x10) ? 0x01 : 0x00;
+
+               txop = le16_to_cpu(ac_params->tx_op_limit) * 32;
+               qos_param->tx_op_limit[i] = (u16) txop;
+       }
+       return rc;
+}
+
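ieee80211_qos_convert_ac_to_parameters() above expands the packed EDCA fields: the contention windows are carried as exponents (CW = 2^ECW - 1) and the TXOP limit is in units of 32 microseconds. A quick standalone check of the same arithmetic on made-up input bytes:

#include <stdio.h>

/* Same unpacking as above, on illustrative AC parameter record bytes. */
int main(void)
{
	unsigned char aci_aifsn = 0x03;		/* AIFSN = 3, ACM bit clear */
	unsigned char ecw_min_max = 0xa4;	/* ECWmin = 4, ECWmax = 10 */
	unsigned short tx_op_limit = 94;	/* in 32 us units */

	printf("aifs   = %d\n", (aci_aifsn & 0x0F) - 2);		/* 1 */
	printf("cw_min = %d\n", (1 << (ecw_min_max & 0x0F)) - 1);	/* 15 */
	printf("cw_max = %d\n", (1 << (ecw_min_max >> 4)) - 1);		/* 1023 */
	printf("txop   = %d us\n", tx_op_limit * 32);			/* 3008 */
	return 0;
}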
+/*
+ * We have a generic data element which may contain either a QoS
+ * information element or a QoS parameters element. Check the information
+ * element length to decide which type to read.
+ */
+static int ieee80211_parse_qos_info_param_IE(struct ieee80211_info_element
+                                            *info_element,
+                                            struct ieee80211_network *network)
+{
+       int rc = 0;
+       struct ieee80211_qos_parameters *qos_param = NULL;
+       struct ieee80211_qos_information_element qos_info_element;
+
+       rc = ieee80211_read_qos_info_element(&qos_info_element, info_element);
+
+       if (rc == 0) {
+               network->qos_data.param_count = qos_info_element.ac_info & 0x0F;
+               network->flags |= NETWORK_HAS_QOS_INFORMATION;
+       } else {
+               struct ieee80211_qos_parameter_info param_element;
+
+               rc = ieee80211_read_qos_param_element(&param_element,
+                                                     info_element);
+               if (rc == 0) {
+                       qos_param = &(network->qos_data.parameters);
+                       ieee80211_qos_convert_ac_to_parameters(&param_element,
+                                                              qos_param);
+                       network->flags |= NETWORK_HAS_QOS_PARAMETERS;
+                       network->qos_data.param_count =
+                           param_element.info_element.ac_info & 0x0F;
+               }
+       }
+
+       if (rc == 0) {
+               IEEE80211_DEBUG_QOS("QoS is supported\n");
+               network->qos_data.supported = 1;
+       }
+       return rc;
+}
+
+static int ieee80211_parse_info_param(struct ieee80211_info_element
+                                     *info_element, u16 length,
+                                     struct ieee80211_network *network)
+{
+       u8 i;
+#ifdef CONFIG_IEEE80211_DEBUG
+       char rates_str[64];
+       char *p;
+#endif
 
-       info_element = &beacon->info_element;
-       left = stats->len - ((void *)info_element - (void *)beacon);
-       while (left >= sizeof(struct ieee80211_info_element_hdr)) {
-               if (sizeof(struct ieee80211_info_element_hdr) +
-                   info_element->len > left) {
-                       IEEE80211_DEBUG_SCAN
-                           ("SCAN: parse failed: info_element->len + 2 > left : info_element->len+2=%Zd left=%d.\n",
-                            info_element->len +
-                            sizeof(struct ieee80211_info_element), left);
+       while (length >= sizeof(*info_element)) {
+               if (sizeof(*info_element) + info_element->len > length) {
+                       IEEE80211_DEBUG_MGMT("Info elem: parse failed: "
+                                            "info_element->len + 2 > left : "
+                                            "info_element->len+2=%zd left=%d, id=%d.\n",
+                                            info_element->len +
+                                            sizeof(*info_element),
+                                            length, info_element->id);
                        return 1;
                }
 
@@ -837,7 +955,7 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee,
                                memset(network->ssid + network->ssid_len, 0,
                                       IW_ESSID_MAX_SIZE - network->ssid_len);
 
-                       IEEE80211_DEBUG_SCAN("MFIE_TYPE_SSID: '%s' len=%d.\n",
+                       IEEE80211_DEBUG_MGMT("MFIE_TYPE_SSID: '%s' len=%d.\n",
                                             network->ssid, network->ssid_len);
                        break;
 
@@ -845,15 +963,14 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee,
 #ifdef CONFIG_IEEE80211_DEBUG
                        p = rates_str;
 #endif
-                       network->rates_len =
-                           min(info_element->len, MAX_RATES_LENGTH);
+                       network->rates_len = min(info_element->len,
+                                                MAX_RATES_LENGTH);
                        for (i = 0; i < network->rates_len; i++) {
                                network->rates[i] = info_element->data[i];
 #ifdef CONFIG_IEEE80211_DEBUG
-                               p += snprintf(p,
-                                             sizeof(rates_str) - (p -
-                                                                  rates_str),
-                                             "%02X ", network->rates[i]);
+                               p += snprintf(p, sizeof(rates_str) -
+                                             (p - rates_str), "%02X ",
+                                             network->rates[i]);
 #endif
                                if (ieee80211_is_ofdm_rate
                                    (info_element->data[i])) {
@@ -865,7 +982,7 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee,
                                }
                        }
 
-                       IEEE80211_DEBUG_SCAN("MFIE_TYPE_RATES: '%s' (%d)\n",
+                       IEEE80211_DEBUG_MGMT("MFIE_TYPE_RATES: '%s' (%d)\n",
                                             rates_str, network->rates_len);
                        break;
 
@@ -873,15 +990,14 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee,
 #ifdef CONFIG_IEEE80211_DEBUG
                        p = rates_str;
 #endif
-                       network->rates_ex_len =
-                           min(info_element->len, MAX_RATES_EX_LENGTH);
+                       network->rates_ex_len = min(info_element->len,
+                                                   MAX_RATES_EX_LENGTH);
                        for (i = 0; i < network->rates_ex_len; i++) {
                                network->rates_ex[i] = info_element->data[i];
 #ifdef CONFIG_IEEE80211_DEBUG
-                               p += snprintf(p,
-                                             sizeof(rates_str) - (p -
-                                                                  rates_str),
-                                             "%02X ", network->rates[i]);
+                               p += snprintf(p, sizeof(rates_str) -
+                                             (p - rates_str), "%02X ",
+                                             network->rates[i]);
 #endif
                                if (ieee80211_is_ofdm_rate
                                    (info_element->data[i])) {
@@ -893,40 +1009,51 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee,
                                }
                        }
 
-                       IEEE80211_DEBUG_SCAN("MFIE_TYPE_RATES_EX: '%s' (%d)\n",
+                       IEEE80211_DEBUG_MGMT("MFIE_TYPE_RATES_EX: '%s' (%d)\n",
                                             rates_str, network->rates_ex_len);
                        break;
 
                case MFIE_TYPE_DS_SET:
-                       IEEE80211_DEBUG_SCAN("MFIE_TYPE_DS_SET: %d\n",
+                       IEEE80211_DEBUG_MGMT("MFIE_TYPE_DS_SET: %d\n",
                                             info_element->data[0]);
-                       if (stats->freq == IEEE80211_24GHZ_BAND)
-                               network->channel = info_element->data[0];
+                       network->channel = info_element->data[0];
                        break;
 
                case MFIE_TYPE_FH_SET:
-                       IEEE80211_DEBUG_SCAN("MFIE_TYPE_FH_SET: ignored\n");
+                       IEEE80211_DEBUG_MGMT("MFIE_TYPE_FH_SET: ignored\n");
                        break;
 
                case MFIE_TYPE_CF_SET:
-                       IEEE80211_DEBUG_SCAN("MFIE_TYPE_CF_SET: ignored\n");
+                       IEEE80211_DEBUG_MGMT("MFIE_TYPE_CF_SET: ignored\n");
                        break;
 
                case MFIE_TYPE_TIM:
-                       IEEE80211_DEBUG_SCAN("MFIE_TYPE_TIM: ignored\n");
+                       IEEE80211_DEBUG_MGMT("MFIE_TYPE_TIM: ignored\n");
+                       break;
+
+               case MFIE_TYPE_ERP_INFO:
+                       network->erp_value = info_element->data[0];
+                       IEEE80211_DEBUG_MGMT("MFIE_TYPE_ERP_SET: %d\n",
+                                            network->erp_value);
                        break;
 
                case MFIE_TYPE_IBSS_SET:
-                       IEEE80211_DEBUG_SCAN("MFIE_TYPE_IBSS_SET: ignored\n");
+                       network->atim_window = info_element->data[0];
+                       IEEE80211_DEBUG_MGMT("MFIE_TYPE_IBSS_SET: %d\n",
+                                            network->atim_window);
                        break;
 
                case MFIE_TYPE_CHALLENGE:
-                       IEEE80211_DEBUG_SCAN("MFIE_TYPE_CHALLENGE: ignored\n");
+                       IEEE80211_DEBUG_MGMT("MFIE_TYPE_CHALLENGE: ignored\n");
                        break;
 
                case MFIE_TYPE_GENERIC:
-                       IEEE80211_DEBUG_SCAN("MFIE_TYPE_GENERIC: %d bytes\n",
+                       IEEE80211_DEBUG_MGMT("MFIE_TYPE_GENERIC: %d bytes\n",
                                             info_element->len);
+                       if (!ieee80211_parse_qos_info_param_IE(info_element,
+                                                              network))
+                               break;
+
                        if (info_element->len >= 4 &&
                            info_element->data[0] == 0x00 &&
                            info_element->data[1] == 0x50 &&
@@ -940,7 +1067,7 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee,
                        break;
 
                case MFIE_TYPE_RSN:
-                       IEEE80211_DEBUG_SCAN("MFIE_TYPE_RSN: %d bytes\n",
+                       IEEE80211_DEBUG_MGMT("MFIE_TYPE_RSN: %d bytes\n",
                                             info_element->len);
                        network->rsn_ie_len = min(info_element->len + 2,
                                                  MAX_WPA_IE_LEN);
@@ -948,18 +1075,127 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee,
                               network->rsn_ie_len);
                        break;
 
+               case MFIE_TYPE_QOS_PARAMETER:
+                       printk(KERN_ERR
+                              "QoS Error need to parse QOS_PARAMETER IE\n");
+                       break;
+
                default:
-                       IEEE80211_DEBUG_SCAN("unsupported IE %d\n",
+                       IEEE80211_DEBUG_MGMT("unsupported IE %d\n",
                                             info_element->id);
                        break;
                }
 
-               left -= sizeof(struct ieee80211_info_element_hdr) +
-                   info_element->len;
-               info_element = (struct ieee80211_info_element *)
-                   &info_element->data[info_element->len];
+               length -= sizeof(*info_element) + info_element->len;
+               info_element =
+                   (struct ieee80211_info_element *)&info_element->
+                   data[info_element->len];
+       }
+
+       return 0;
+}
+
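ieee80211_parse_info_param() above is a standard 802.11 information-element walk: each element is a 1-byte id, a 1-byte length, then that many bytes of data, and the running length check stops on a truncated element. A minimal standalone sketch of the same loop shape:

#include <stddef.h>
#include <stdint.h>

/* Sketch of the IE walk over a raw buffer laid out as id, len, data[len], ... */
static void walk_info_elements(const uint8_t *buf, size_t length,
			       void (*cb)(uint8_t id, const uint8_t *data,
					  uint8_t len))
{
	while (length >= 2) {
		uint8_t id = buf[0];
		uint8_t len = buf[1];

		if ((size_t)len + 2 > length)
			break;		/* truncated element: stop, as above */
		cb(id, buf + 2, len);
		buf += 2 + len;
		length -= 2 + len;
	}
}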
+static int ieee80211_handle_assoc_resp(struct ieee80211_device *ieee, struct ieee80211_assoc_response
+                                      *frame, struct ieee80211_rx_stats *stats)
+{
+       struct ieee80211_network network_resp;
+       struct ieee80211_network *network = &network_resp;
+       struct net_device *dev = ieee->dev;
+
+       network->flags = 0;
+       network->qos_data.active = 0;
+       network->qos_data.supported = 0;
+       network->qos_data.param_count = 0;
+       network->qos_data.old_param_count = 0;
+
+       //network->atim_window = le16_to_cpu(frame->aid) & (0x3FFF);
+       network->atim_window = le16_to_cpu(frame->aid);
+       network->listen_interval = le16_to_cpu(frame->status);
+       memcpy(network->bssid, frame->header.addr3, ETH_ALEN);
+       network->capability = le16_to_cpu(frame->capability);
+       network->last_scanned = jiffies;
+       network->rates_len = network->rates_ex_len = 0;
+       network->last_associate = 0;
+       network->ssid_len = 0;
+       network->erp_value =
+           (network->capability & WLAN_CAPABILITY_IBSS) ? 0x3 : 0x0;
+
+       if (stats->freq == IEEE80211_52GHZ_BAND) {
+               /* for A band (No DS info) */
+               network->channel = stats->received_channel;
+       } else
+               network->flags |= NETWORK_HAS_CCK;
+
+       network->wpa_ie_len = 0;
+       network->rsn_ie_len = 0;
+
+       if (ieee80211_parse_info_param
+           (frame->info_element, stats->len - sizeof(*frame), network))
+               return 1;
+
+       network->mode = 0;
+       if (stats->freq == IEEE80211_52GHZ_BAND)
+               network->mode = IEEE_A;
+       else {
+               if (network->flags & NETWORK_HAS_OFDM)
+                       network->mode |= IEEE_G;
+               if (network->flags & NETWORK_HAS_CCK)
+                       network->mode |= IEEE_B;
        }
 
+       if (ieee80211_is_empty_essid(network->ssid, network->ssid_len))
+               network->flags |= NETWORK_EMPTY_ESSID;
+
+       memcpy(&network->stats, stats, sizeof(network->stats));
+
+       if (ieee->handle_assoc_response != NULL)
+               ieee->handle_assoc_response(dev, frame, network);
+
+       return 0;
+}
+
+/***************************************************/
+
+static inline int ieee80211_network_init(struct ieee80211_device *ieee, struct ieee80211_probe_response
+                                        *beacon,
+                                        struct ieee80211_network *network,
+                                        struct ieee80211_rx_stats *stats)
+{
+       network->qos_data.active = 0;
+       network->qos_data.supported = 0;
+       network->qos_data.param_count = 0;
+       network->qos_data.old_param_count = 0;
+
+       /* Pull out fixed field data */
+       memcpy(network->bssid, beacon->header.addr3, ETH_ALEN);
+       network->capability = le16_to_cpu(beacon->capability);
+       network->last_scanned = jiffies;
+       network->time_stamp[0] = le32_to_cpu(beacon->time_stamp[0]);
+       network->time_stamp[1] = le32_to_cpu(beacon->time_stamp[1]);
+       network->beacon_interval = le16_to_cpu(beacon->beacon_interval);
+       /* Where to pull this? beacon->listen_interval; */
+       network->listen_interval = 0x0A;
+       network->rates_len = network->rates_ex_len = 0;
+       network->last_associate = 0;
+       network->ssid_len = 0;
+       network->flags = 0;
+       network->atim_window = 0;
+       network->erp_value = (network->capability & WLAN_CAPABILITY_IBSS) ?
+           0x3 : 0x0;
+
+       if (stats->freq == IEEE80211_52GHZ_BAND) {
+               /* for A band (No DS info) */
+               network->channel = stats->received_channel;
+       } else
+               network->flags |= NETWORK_HAS_CCK;
+
+       network->wpa_ie_len = 0;
+       network->rsn_ie_len = 0;
+
+       if (ieee80211_parse_info_param
+           (beacon->info_element, stats->len - sizeof(*beacon), network))
+               return 1;
+
        network->mode = 0;
        if (stats->freq == IEEE80211_52GHZ_BAND)
                network->mode = IEEE_A;
@@ -1002,6 +1238,9 @@ static inline int is_same_network(struct ieee80211_network *src,
 static inline void update_network(struct ieee80211_network *dst,
                                  struct ieee80211_network *src)
 {
+       int qos_active;
+       u8 old_param;
+
        memcpy(&dst->stats, &src->stats, sizeof(struct ieee80211_rx_stats));
        dst->capability = src->capability;
        memcpy(dst->rates, src->rates, src->rates_len);
@@ -1017,6 +1256,7 @@ static inline void update_network(struct ieee80211_network *dst,
        dst->beacon_interval = src->beacon_interval;
        dst->listen_interval = src->listen_interval;
        dst->atim_window = src->atim_window;
+       dst->erp_value = src->erp_value;
 
        memcpy(dst->wpa_ie, src->wpa_ie, src->wpa_ie_len);
        dst->wpa_ie_len = src->wpa_ie_len;
@@ -1024,22 +1264,48 @@ static inline void update_network(struct ieee80211_network *dst,
        dst->rsn_ie_len = src->rsn_ie_len;
 
        dst->last_scanned = jiffies;
+       qos_active = src->qos_data.active;
+       old_param = dst->qos_data.old_param_count;
+       if (dst->flags & NETWORK_HAS_QOS_MASK)
+               memcpy(&dst->qos_data, &src->qos_data,
+                      sizeof(struct ieee80211_qos_data));
+       else {
+               dst->qos_data.supported = src->qos_data.supported;
+               dst->qos_data.param_count = src->qos_data.param_count;
+       }
+
+       if (dst->qos_data.supported == 1) {
+               if (dst->ssid_len)
+                       IEEE80211_DEBUG_QOS
+                           ("QoS the network %s is QoS supported\n",
+                            dst->ssid);
+               else
+                       IEEE80211_DEBUG_QOS
+                           ("QoS the network is QoS supported\n");
+       }
+       dst->qos_data.active = qos_active;
+       dst->qos_data.old_param_count = old_param;
+
        /* dst->last_associate is not overwritten */
 }
 
+static inline int is_beacon(int fc)
+{
+       return (WLAN_FC_GET_STYPE(le16_to_cpu(fc)) == IEEE80211_STYPE_BEACON);
+}
+
 static inline void ieee80211_process_probe_response(struct ieee80211_device
-                                                   *ieee,
-                                                   struct
+                                                   *ieee, struct
                                                    ieee80211_probe_response
-                                                   *beacon,
-                                                   struct ieee80211_rx_stats
+                                                   *beacon, struct ieee80211_rx_stats
                                                    *stats)
 {
+       struct net_device *dev = ieee->dev;
        struct ieee80211_network network;
        struct ieee80211_network *target;
        struct ieee80211_network *oldest = NULL;
 #ifdef CONFIG_IEEE80211_DEBUG
-       struct ieee80211_info_element *info_element = &beacon->info_element;
+       struct ieee80211_info_element *info_element = beacon->info_element;
 #endif
        unsigned long flags;
 
@@ -1070,10 +1336,10 @@ static inline void ieee80211_process_probe_response(struct ieee80211_device
                                     escape_essid(info_element->data,
                                                  info_element->len),
                                     MAC_ARG(beacon->header.addr3),
-                                    WLAN_FC_GET_STYPE(beacon->header.
-                                                      frame_ctl) ==
-                                    IEEE80211_STYPE_PROBE_RESP ?
-                                    "PROBE RESPONSE" : "BEACON");
+                                    is_beacon(le16_to_cpu
+                                              (beacon->header.
+                                               frame_ctl)) ?
+                                    "BEACON" : "PROBE RESPONSE");
                return;
        }
 
@@ -1122,10 +1388,10 @@ static inline void ieee80211_process_probe_response(struct ieee80211_device
                                     escape_essid(network.ssid,
                                                  network.ssid_len),
                                     MAC_ARG(network.bssid),
-                                    WLAN_FC_GET_STYPE(beacon->header.
-                                                      frame_ctl) ==
-                                    IEEE80211_STYPE_PROBE_RESP ?
-                                    "PROBE RESPONSE" : "BEACON");
+                                    is_beacon(le16_to_cpu
+                                              (beacon->header.
+                                               frame_ctl)) ?
+                                    "BEACON" : "PROBE RESPONSE");
 #endif
                memcpy(target, &network, sizeof(*target));
                list_add_tail(&target->list, &ieee->network_list);
@@ -1134,34 +1400,60 @@ static inline void ieee80211_process_probe_response(struct ieee80211_device
                                     escape_essid(target->ssid,
                                                  target->ssid_len),
                                     MAC_ARG(target->bssid),
-                                    WLAN_FC_GET_STYPE(beacon->header.
-                                                      frame_ctl) ==
-                                    IEEE80211_STYPE_PROBE_RESP ?
-                                    "PROBE RESPONSE" : "BEACON");
+                                    is_beacon(le16_to_cpu
+                                              (beacon->header.
+                                               frame_ctl)) ?
+                                    "BEACON" : "PROBE RESPONSE");
                update_network(target, &network);
        }
 
        spin_unlock_irqrestore(&ieee->lock, flags);
+
+       if (is_beacon(le16_to_cpu(beacon->header.frame_ctl))) {
+               if (ieee->handle_beacon != NULL)
+                       ieee->handle_beacon(dev, beacon, &network);
+       } else {
+               if (ieee->handle_probe_response != NULL)
+                       ieee->handle_probe_response(dev, beacon, &network);
+       }
 }
 
 void ieee80211_rx_mgt(struct ieee80211_device *ieee,
-                     struct ieee80211_hdr *header,
+                     struct ieee80211_hdr_4addr *header,
                      struct ieee80211_rx_stats *stats)
 {
-       switch (WLAN_FC_GET_STYPE(header->frame_ctl)) {
+       switch (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl))) {
        case IEEE80211_STYPE_ASSOC_RESP:
                IEEE80211_DEBUG_MGMT("received ASSOCIATION RESPONSE (%d)\n",
-                                    WLAN_FC_GET_STYPE(header->frame_ctl));
+                                    WLAN_FC_GET_STYPE(le16_to_cpu
+                                                      (header->frame_ctl)));
+               ieee80211_handle_assoc_resp(ieee,
+                                           (struct ieee80211_assoc_response *)
+                                           header, stats);
                break;
 
        case IEEE80211_STYPE_REASSOC_RESP:
                IEEE80211_DEBUG_MGMT("received REASSOCIATION RESPONSE (%d)\n",
-                                    WLAN_FC_GET_STYPE(header->frame_ctl));
+                                    WLAN_FC_GET_STYPE(le16_to_cpu
+                                                      (header->frame_ctl)));
+               break;
+
+       case IEEE80211_STYPE_PROBE_REQ:
+               IEEE80211_DEBUG_MGMT("received probe request (%d)\n",
+                                    WLAN_FC_GET_STYPE(le16_to_cpu
+                                                      (header->frame_ctl)));
+
+               if (ieee->handle_probe_request != NULL)
+                       ieee->handle_probe_request(ieee->dev,
+                                                  (struct
+                                                   ieee80211_probe_request *)
+                                                  header, stats);
                break;
 
        case IEEE80211_STYPE_PROBE_RESP:
                IEEE80211_DEBUG_MGMT("received PROBE RESPONSE (%d)\n",
-                                    WLAN_FC_GET_STYPE(header->frame_ctl));
+                                    WLAN_FC_GET_STYPE(le16_to_cpu
+                                                      (header->frame_ctl)));
                IEEE80211_DEBUG_SCAN("Probe response\n");
                ieee80211_process_probe_response(ieee,
                                                 (struct
@@ -1171,20 +1463,46 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee,
 
        case IEEE80211_STYPE_BEACON:
                IEEE80211_DEBUG_MGMT("received BEACON (%d)\n",
-                                    WLAN_FC_GET_STYPE(header->frame_ctl));
+                                    WLAN_FC_GET_STYPE(le16_to_cpu
+                                                      (header->frame_ctl)));
                IEEE80211_DEBUG_SCAN("Beacon\n");
                ieee80211_process_probe_response(ieee,
                                                 (struct
                                                  ieee80211_probe_response *)
                                                 header, stats);
                break;
+       case IEEE80211_STYPE_AUTH:
 
+               IEEE80211_DEBUG_MGMT("received auth (%d)\n",
+                                    WLAN_FC_GET_STYPE(le16_to_cpu
+                                                      (header->frame_ctl)));
+
+               if (ieee->handle_auth != NULL)
+                       ieee->handle_auth(ieee->dev,
+                                         (struct ieee80211_auth *)header);
+               break;
+
+       case IEEE80211_STYPE_DISASSOC:
+               if (ieee->handle_disassoc != NULL)
+                       ieee->handle_disassoc(ieee->dev,
+                                             (struct ieee80211_disassoc *)
+                                             header);
+               break;
+
+       case IEEE80211_STYPE_DEAUTH:
+               printk(KERN_DEBUG "DEAUTH from AP\n");
+               if (ieee->handle_deauth != NULL)
+                       ieee->handle_deauth(ieee->dev, (struct ieee80211_auth *)
+                                           header);
+               break;
        default:
                IEEE80211_DEBUG_MGMT("received UNKNOWN (%d)\n",
-                                    WLAN_FC_GET_STYPE(header->frame_ctl));
+                                    WLAN_FC_GET_STYPE(le16_to_cpu
+                                                      (header->frame_ctl)));
                IEEE80211_WARNING("%s: Unknown management packet: %d\n",
                                  ieee->dev->name,
-                                 WLAN_FC_GET_STYPE(header->frame_ctl));
+                                 WLAN_FC_GET_STYPE(le16_to_cpu
+                                                   (header->frame_ctl)));
                break;
        }
 }
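
The dispatch in ieee80211_rx_mgt() above only calls into the driver when the corresponding callback is registered (handle_beacon, handle_probe_response, handle_probe_request, handle_auth, handle_disassoc, handle_deauth). A minimal sketch of how a driver might wire these up, assuming a hypothetical "mydrv_" driver; only the callback signatures are taken from the calls above:

/* Hypothetical driver glue -- a sketch, not part of this patch. */
static void mydrv_handle_beacon(struct net_device *dev,
                                struct ieee80211_probe_response *beacon,
                                struct ieee80211_network *network)
{
        /* e.g. record the ERP and QoS parameters of the current BSS */
}

static void mydrv_handle_deauth(struct net_device *dev,
                                struct ieee80211_auth *deauth)
{
        /* e.g. mark the association as lost and schedule a rescan */
}

static void mydrv_attach_callbacks(struct ieee80211_device *ieee)
{
        ieee->handle_beacon = mydrv_handle_beacon;
        ieee->handle_deauth = mydrv_handle_deauth;
        /* handle_probe_response, handle_probe_request, handle_auth and
         * handle_disassoc are hooked the same way; callbacks left NULL
         * are simply skipped by ieee80211_rx_mgt(). */
}
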
index eed07bbbe6b6282972d47815bf1f43cd7e69e018..95ccbadbf55b49cd2809ccd6b4d11ed510c7f8e9 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
 
-  Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.
+  Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms of version 2 of the GNU General Public License as
@@ -128,7 +128,7 @@ payload of each frame is reduced to 492 bytes.
 static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
 static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
 
-static inline int ieee80211_put_snap(u8 * data, u16 h_proto)
+static inline int ieee80211_copy_snap(u8 * data, u16 h_proto)
 {
        struct ieee80211_snap_hdr *snap;
        u8 *oui;
@@ -157,31 +157,14 @@ static inline int ieee80211_encrypt_fragment(struct ieee80211_device *ieee,
        struct ieee80211_crypt_data *crypt = ieee->crypt[ieee->tx_keyidx];
        int res;
 
-#ifdef CONFIG_IEEE80211_CRYPT_TKIP
-       struct ieee80211_hdr *header;
-
-       if (ieee->tkip_countermeasures &&
-           crypt && crypt->ops && strcmp(crypt->ops->name, "TKIP") == 0) {
-               header = (struct ieee80211_hdr *)frag->data;
-               if (net_ratelimit()) {
-                       printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
-                              "TX packet to " MAC_FMT "\n",
-                              ieee->dev->name, MAC_ARG(header->addr1));
-               }
+       if (crypt == NULL)
                return -1;
-       }
-#endif
+
        /* To encrypt, frame format is:
         * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes) */
-
-       // PR: FIXME: Copied from hostap. Check fragmentation/MSDU/MPDU encryption.
-       /* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so
-        * call both MSDU and MPDU encryption functions from here. */
        atomic_inc(&crypt->refcnt);
        res = 0;
-       if (crypt->ops->encrypt_msdu)
-               res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv);
-       if (res == 0 && crypt->ops->encrypt_mpdu)
+       if (crypt->ops && crypt->ops->encrypt_mpdu)
                res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);
 
        atomic_dec(&crypt->refcnt);
@@ -207,7 +190,7 @@ void ieee80211_txb_free(struct ieee80211_txb *txb)
 }
 
 static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
-                                                gfp_t gfp_mask)
+                                                int headroom, gfp_t gfp_mask)
 {
        struct ieee80211_txb *txb;
        int i;
@@ -221,11 +204,13 @@ static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
        txb->frag_size = txb_size;
 
        for (i = 0; i < nr_frags; i++) {
-               txb->fragments[i] = dev_alloc_skb(txb_size);
+               txb->fragments[i] = __dev_alloc_skb(txb_size + headroom,
+                                                   gfp_mask);
                if (unlikely(!txb->fragments[i])) {
                        i--;
                        break;
                }
+               skb_reserve(txb->fragments[i], headroom);
        }
        if (unlikely(i != nr_frags)) {
                while (i >= 0)
@@ -236,25 +221,31 @@ static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
        return txb;
 }
 
-/* SKBs are added to the ieee->tx_queue. */
+/* Incoming skb is converted to a txb which consists of
+ * a block of 802.11 fragment packets (stored as skbs) */
 int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct ieee80211_device *ieee = netdev_priv(dev);
        struct ieee80211_txb *txb = NULL;
-       struct ieee80211_hdr *frag_hdr;
-       int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
+       struct ieee80211_hdr_3addr *frag_hdr;
+       int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size,
+           rts_required;
        unsigned long flags;
        struct net_device_stats *stats = &ieee->stats;
-       int ether_type, encrypt;
+       int ether_type, encrypt, host_encrypt, host_encrypt_msdu, host_build_iv;
        int bytes, fc, hdr_len;
        struct sk_buff *skb_frag;
-       struct ieee80211_hdr header = { /* Ensure zero initialized */
+       struct ieee80211_hdr_3addr header = {   /* Ensure zero initialized */
                .duration_id = 0,
                .seq_ctl = 0
        };
        u8 dest[ETH_ALEN], src[ETH_ALEN];
-
        struct ieee80211_crypt_data *crypt;
+       int priority = skb->priority;
+       int snapped = 0;
+
+       if (ieee->is_queue_full && (*ieee->is_queue_full) (dev, priority))
+               return NETDEV_TX_BUSY;
 
        spin_lock_irqsave(&ieee->lock, flags);
 
@@ -276,7 +267,11 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
        crypt = ieee->crypt[ieee->tx_keyidx];
 
        encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
-           ieee->host_encrypt && crypt && crypt->ops;
+           ieee->sec.encrypt;
+
+       host_encrypt = ieee->host_encrypt && encrypt && crypt;
+       host_encrypt_msdu = ieee->host_encrypt_msdu && encrypt && crypt;
+       host_build_iv = ieee->host_build_iv && encrypt && crypt;
 
        if (!encrypt && ieee->ieee802_1x &&
            ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
@@ -285,8 +280,8 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        /* Save source and destination addresses */
-       memcpy(&dest, skb->data, ETH_ALEN);
-       memcpy(&src, skb->data + ETH_ALEN, ETH_ALEN);
+       memcpy(dest, skb->data, ETH_ALEN);
+       memcpy(src, skb->data + ETH_ALEN, ETH_ALEN);
 
        /* Advance the SKB to the start of the payload */
        skb_pull(skb, sizeof(struct ethhdr));
@@ -294,7 +289,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
        /* Determine total amount of storage required for TXB packets */
        bytes = skb->len + SNAP_SIZE + sizeof(u16);
 
-       if (encrypt)
+       if (host_encrypt)
                fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
                    IEEE80211_FCTL_PROTECTED;
        else
@@ -302,70 +297,144 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
 
        if (ieee->iw_mode == IW_MODE_INFRA) {
                fc |= IEEE80211_FCTL_TODS;
-               /* To DS: Addr1 = BSSID, Addr2 = SA,
-                  Addr3 = DA */
-               memcpy(&header.addr1, ieee->bssid, ETH_ALEN);
-               memcpy(&header.addr2, &src, ETH_ALEN);
-               memcpy(&header.addr3, &dest, ETH_ALEN);
+               /* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */
+               memcpy(header.addr1, ieee->bssid, ETH_ALEN);
+               memcpy(header.addr2, src, ETH_ALEN);
+               memcpy(header.addr3, dest, ETH_ALEN);
        } else if (ieee->iw_mode == IW_MODE_ADHOC) {
-               /* not From/To DS: Addr1 = DA, Addr2 = SA,
-                  Addr3 = BSSID */
-               memcpy(&header.addr1, dest, ETH_ALEN);
-               memcpy(&header.addr2, src, ETH_ALEN);
-               memcpy(&header.addr3, ieee->bssid, ETH_ALEN);
+               /* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */
+               memcpy(header.addr1, dest, ETH_ALEN);
+               memcpy(header.addr2, src, ETH_ALEN);
+               memcpy(header.addr3, ieee->bssid, ETH_ALEN);
        }
        header.frame_ctl = cpu_to_le16(fc);
        hdr_len = IEEE80211_3ADDR_LEN;
 
-       /* Determine fragmentation size based on destination (multicast
-        * and broadcast are not fragmented) */
-       if (is_multicast_ether_addr(dest) || is_broadcast_ether_addr(dest))
-               frag_size = MAX_FRAG_THRESHOLD;
-       else
-               frag_size = ieee->fts;
+       /* Encrypt msdu first on the whole data packet. */
+       if ((host_encrypt || host_encrypt_msdu) &&
+           crypt && crypt->ops && crypt->ops->encrypt_msdu) {
+               int res = 0;
+               int len = bytes + hdr_len + crypt->ops->extra_msdu_prefix_len +
+                   crypt->ops->extra_msdu_postfix_len;
+               struct sk_buff *skb_new = dev_alloc_skb(len);
+
+               if (unlikely(!skb_new))
+                       goto failed;
+
+               skb_reserve(skb_new, crypt->ops->extra_msdu_prefix_len);
+               memcpy(skb_put(skb_new, hdr_len), &header, hdr_len);
+               snapped = 1;
+               ieee80211_copy_snap(skb_put(skb_new, SNAP_SIZE + sizeof(u16)),
+                                   ether_type);
+               memcpy(skb_put(skb_new, skb->len), skb->data, skb->len);
+               res = crypt->ops->encrypt_msdu(skb_new, hdr_len, crypt->priv);
+               if (res < 0) {
+                       IEEE80211_ERROR("msdu encryption failed\n");
+                       dev_kfree_skb_any(skb_new);
+                       goto failed;
+               }
+               dev_kfree_skb_any(skb);
+               skb = skb_new;
+               bytes += crypt->ops->extra_msdu_prefix_len +
+                   crypt->ops->extra_msdu_postfix_len;
+               skb_pull(skb, hdr_len);
+       }
 
-       /* Determine amount of payload per fragment.  Regardless of if
-        * this stack is providing the full 802.11 header, one will
-        * eventually be affixed to this fragment -- so we must account for
-        * it when determining the amount of payload space. */
-       bytes_per_frag = frag_size - IEEE80211_3ADDR_LEN;
-       if (ieee->config &
-           (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
-               bytes_per_frag -= IEEE80211_FCS_LEN;
-
-       /* Each fragment may need to have room for encryptiong pre/postfix */
-       if (encrypt)
-               bytes_per_frag -= crypt->ops->extra_prefix_len +
-                   crypt->ops->extra_postfix_len;
-
-       /* Number of fragments is the total bytes_per_frag /
-        * payload_per_fragment */
-       nr_frags = bytes / bytes_per_frag;
-       bytes_last_frag = bytes % bytes_per_frag;
-       if (bytes_last_frag)
+       if (host_encrypt || ieee->host_open_frag) {
+               /* Determine fragmentation size based on destination (multicast
+                * and broadcast are not fragmented) */
+               if (is_multicast_ether_addr(dest) ||
+                   is_broadcast_ether_addr(dest))
+                       frag_size = MAX_FRAG_THRESHOLD;
+               else
+                       frag_size = ieee->fts;
+
+               /* Determine amount of payload per fragment.  Regardless of if
+                * this stack is providing the full 802.11 header, one will
+                * eventually be affixed to this fragment -- so we must account
+                * for it when determining the amount of payload space. */
+               bytes_per_frag = frag_size - IEEE80211_3ADDR_LEN;
+               if (ieee->config &
+                   (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
+                       bytes_per_frag -= IEEE80211_FCS_LEN;
+
+               /* Each fragment may need to have room for encryption
+                * pre/postfix */
+               if (host_encrypt)
+                       bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
+                           crypt->ops->extra_mpdu_postfix_len;
+
+               /* Number of fragments is the total
+                * bytes_per_frag / payload_per_fragment */
+               nr_frags = bytes / bytes_per_frag;
+               bytes_last_frag = bytes % bytes_per_frag;
+               if (bytes_last_frag)
+                       nr_frags++;
+               else
+                       bytes_last_frag = bytes_per_frag;
+       } else {
+               nr_frags = 1;
+               bytes_per_frag = bytes_last_frag = bytes;
+               frag_size = bytes + IEEE80211_3ADDR_LEN;
+       }
+
+       rts_required = (frag_size > ieee->rts
+                       && ieee->config & CFG_IEEE80211_RTS);
+       if (rts_required)
                nr_frags++;
-       else
-               bytes_last_frag = bytes_per_frag;
 
        /* When we allocate the TXB we allocate enough space for the reserve
         * and full fragment bytes (bytes_per_frag doesn't include prefix,
         * postfix, header, FCS, etc.) */
-       txb = ieee80211_alloc_txb(nr_frags, frag_size, GFP_ATOMIC);
+       txb = ieee80211_alloc_txb(nr_frags, frag_size,
+                                 ieee->tx_headroom, GFP_ATOMIC);
        if (unlikely(!txb)) {
                printk(KERN_WARNING "%s: Could not allocate TXB\n",
                       ieee->dev->name);
                goto failed;
        }
        txb->encrypted = encrypt;
-       txb->payload_size = bytes;
+       if (host_encrypt)
+               txb->payload_size = frag_size * (nr_frags - 1) +
+                   bytes_last_frag;
+       else
+               txb->payload_size = bytes;
+
+       if (rts_required) {
+               skb_frag = txb->fragments[0];
+               frag_hdr =
+                   (struct ieee80211_hdr_3addr *)skb_put(skb_frag, hdr_len);
+
+               /*
+                * Set header frame_ctl to the RTS.
+                */
+               header.frame_ctl =
+                   cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);
+               memcpy(frag_hdr, &header, hdr_len);
 
-       for (i = 0; i < nr_frags; i++) {
+               /*
+                * Restore header frame_ctl to the original data setting.
+                */
+               header.frame_ctl = cpu_to_le16(fc);
+
+               if (ieee->config &
+                   (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
+                       skb_put(skb_frag, 4);
+
+               txb->rts_included = 1;
+               i = 1;
+       } else
+               i = 0;
+
+       for (; i < nr_frags; i++) {
                skb_frag = txb->fragments[i];
 
-               if (encrypt)
-                       skb_reserve(skb_frag, crypt->ops->extra_prefix_len);
+               if (host_encrypt || host_build_iv)
+                       skb_reserve(skb_frag,
+                                   crypt->ops->extra_mpdu_prefix_len);
 
-               frag_hdr = (struct ieee80211_hdr *)skb_put(skb_frag, hdr_len);
+               frag_hdr =
+                   (struct ieee80211_hdr_3addr *)skb_put(skb_frag, hdr_len);
                memcpy(frag_hdr, &header, hdr_len);
 
                /* If this is not the last fragment, then add the MOREFRAGS
@@ -379,11 +448,10 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
                        bytes = bytes_last_frag;
                }
 
-               /* Put a SNAP header on the first fragment */
-               if (i == 0) {
-                       ieee80211_put_snap(skb_put
-                                          (skb_frag, SNAP_SIZE + sizeof(u16)),
-                                          ether_type);
+               if (i == 0 && !snapped) {
+                       ieee80211_copy_snap(skb_put
+                                           (skb_frag, SNAP_SIZE + sizeof(u16)),
+                                           ether_type);
                        bytes -= SNAP_SIZE + sizeof(u16);
                }
 
@@ -394,8 +462,19 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
 
                /* Encryption routine will move the header forward in order
                 * to insert the IV between the header and the payload */
-               if (encrypt)
+               if (host_encrypt)
                        ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len);
+               else if (host_build_iv) {
+                       struct ieee80211_crypt_data *crypt;
+
+                       crypt = ieee->crypt[ieee->tx_keyidx];
+                       atomic_inc(&crypt->refcnt);
+                       if (crypt->ops->build_iv)
+                               crypt->ops->build_iv(skb_frag, hdr_len,
+                                                    crypt->priv);
+                       atomic_dec(&crypt->refcnt);
+               }
+
                if (ieee->config &
                    (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
                        skb_put(skb_frag, 4);
@@ -407,11 +486,20 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
        dev_kfree_skb_any(skb);
 
        if (txb) {
-               if ((*ieee->hard_start_xmit) (txb, dev) == 0) {
+               int ret = (*ieee->hard_start_xmit) (txb, dev, priority);
+               if (ret == 0) {
                        stats->tx_packets++;
                        stats->tx_bytes += txb->payload_size;
                        return 0;
                }
+
+               if (ret == NETDEV_TX_BUSY) {
+                       printk(KERN_ERR "%s: NETDEV_TX_BUSY returned; "
+                              "driver should report queue full via "
+                              "ieee_device->is_queue_full.\n",
+                              ieee->dev->name);
+               }
+
                ieee80211_txb_free(txb);
        }
 
@@ -422,7 +510,72 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
        netif_stop_queue(dev);
        stats->tx_errors++;
        return 1;
+}
+
+/* Incoming 802.11 structure is converted to a TXB which consists of
+ * a block of 802.11 fragment packets (stored as skbs) */
+int ieee80211_tx_frame(struct ieee80211_device *ieee,
+                      struct ieee80211_hdr *frame, int len)
+{
+       struct ieee80211_txb *txb = NULL;
+       unsigned long flags;
+       struct net_device_stats *stats = &ieee->stats;
+       struct sk_buff *skb_frag;
+       int priority = -1;
+
+       spin_lock_irqsave(&ieee->lock, flags);
 
+       /* If there is no driver handler to take the TXB, don't bother
+        * creating it... */
+       if (!ieee->hard_start_xmit) {
+               printk(KERN_WARNING "%s: No xmit handler.\n", ieee->dev->name);
+               goto success;
+       }
+
+       if (unlikely(len < 24)) {
+               printk(KERN_WARNING "%s: skb too small (%d).\n",
+                      ieee->dev->name, len);
+               goto success;
+       }
+
+       /* When we allocate the TXB we allocate enough space for the reserve
+        * and full fragment bytes (bytes_per_frag doesn't include prefix,
+        * postfix, header, FCS, etc.) */
+       txb = ieee80211_alloc_txb(1, len, ieee->tx_headroom, GFP_ATOMIC);
+       if (unlikely(!txb)) {
+               printk(KERN_WARNING "%s: Could not allocate TXB\n",
+                      ieee->dev->name);
+               goto failed;
+       }
+       txb->encrypted = 0;
+       txb->payload_size = len;
+
+       skb_frag = txb->fragments[0];
+
+       memcpy(skb_put(skb_frag, len), frame, len);
+
+       if (ieee->config &
+           (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
+               skb_put(skb_frag, 4);
+
+      success:
+       spin_unlock_irqrestore(&ieee->lock, flags);
+
+       if (txb) {
+               if ((*ieee->hard_start_xmit) (txb, ieee->dev, priority) == 0) {
+                       stats->tx_packets++;
+                       stats->tx_bytes += txb->payload_size;
+                       return 0;
+               }
+               ieee80211_txb_free(txb);
+       }
+       return 0;
+
+      failed:
+       spin_unlock_irqrestore(&ieee->lock, flags);
+       stats->tx_errors++;
+       return 1;
 }
 
+EXPORT_SYMBOL(ieee80211_tx_frame);
 EXPORT_SYMBOL(ieee80211_txb_free);
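
In the reworked ieee80211_xmit() above, the payload available per fragment is the fragmentation threshold minus the 3-address header, minus the FCS when the driver reserves it, and minus the per-MPDU crypto prefix/postfix when host encryption is active; the fragment count is then the payload size divided by that figure, rounded up. A standalone sketch of the same arithmetic with made-up sizes (none of the numbers below come from this patch):

/* Illustration of the fragment accounting in ieee80211_xmit(). */
#include <stdio.h>

int main(void)
{
        int bytes = 1500 + 8;       /* Ethernet payload + SNAP, for example */
        int frag_size = 256;        /* hypothetical ieee->fts setting       */
        int hdr_len = 24;           /* IEEE80211_3ADDR_LEN                  */
        int mpdu_overhead = 4 + 4;  /* e.g. WEP IV prefix + ICV postfix     */

        int bytes_per_frag = frag_size - hdr_len - mpdu_overhead;
        int nr_frags = bytes / bytes_per_frag;
        int bytes_last_frag = bytes % bytes_per_frag;

        if (bytes_last_frag)
                nr_frags++;
        else
                bytes_last_frag = bytes_per_frag;

        printf("%d fragments, %d bytes in the last one\n",
               nr_frags, bytes_last_frag);
        return 0;
}
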
index 94882f39b0728f9ec92c11f7545f9755bdd10c90..1ce7af9bec35a167b8e17512ec7ab716a2013549 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
 
-  Copyright(c) 2004 Intel Corporation. All rights reserved.
+  Copyright(c) 2004-2005 Intel Corporation. All rights reserved.
 
   Portions of this file are based on the WEP enablement code provided by the
   Host AP project hostap-drivers v0.1.3
@@ -32,6 +32,7 @@
 
 #include <linux/kmod.h>
 #include <linux/module.h>
+#include <linux/jiffies.h>
 
 #include <net/ieee80211.h>
 #include <linux/wireless.h>
@@ -140,18 +141,41 @@ static inline char *ipw2100_translate_scan(struct ieee80211_device *ieee,
                start = iwe_stream_add_point(start, stop, &iwe, custom);
 
        /* Add quality statistics */
-       /* TODO: Fix these values... */
        iwe.cmd = IWEVQUAL;
-       iwe.u.qual.qual = network->stats.signal;
-       iwe.u.qual.level = network->stats.rssi;
-       iwe.u.qual.noise = network->stats.noise;
-       iwe.u.qual.updated = network->stats.mask & IEEE80211_STATMASK_WEMASK;
-       if (!(network->stats.mask & IEEE80211_STATMASK_RSSI))
-               iwe.u.qual.updated |= IW_QUAL_LEVEL_INVALID;
-       if (!(network->stats.mask & IEEE80211_STATMASK_NOISE))
+       iwe.u.qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
+           IW_QUAL_NOISE_UPDATED;
+
+       if (!(network->stats.mask & IEEE80211_STATMASK_RSSI)) {
+               iwe.u.qual.updated |= IW_QUAL_QUAL_INVALID |
+                   IW_QUAL_LEVEL_INVALID;
+               iwe.u.qual.qual = 0;
+               iwe.u.qual.level = 0;
+       } else {
+               iwe.u.qual.level = network->stats.rssi;
+               if (ieee->perfect_rssi == ieee->worst_rssi)
+                       iwe.u.qual.qual = 100;
+               else
+                       iwe.u.qual.qual =
+                           (100 *
+                            (ieee->perfect_rssi - ieee->worst_rssi) *
+                            (ieee->perfect_rssi - ieee->worst_rssi) -
+                            (ieee->perfect_rssi - network->stats.rssi) *
+                            (15 * (ieee->perfect_rssi - ieee->worst_rssi) +
+                             62 * (ieee->perfect_rssi - network->stats.rssi))) /
+                           ((ieee->perfect_rssi - ieee->worst_rssi) *
+                            (ieee->perfect_rssi - ieee->worst_rssi));
+               if (iwe.u.qual.qual > 100)
+                       iwe.u.qual.qual = 100;
+               else if (iwe.u.qual.qual < 1)
+                       iwe.u.qual.qual = 0;
+       }
+
+       if (!(network->stats.mask & IEEE80211_STATMASK_NOISE)) {
                iwe.u.qual.updated |= IW_QUAL_NOISE_INVALID;
-       if (!(network->stats.mask & IEEE80211_STATMASK_SIGNAL))
-               iwe.u.qual.updated |= IW_QUAL_QUAL_INVALID;
+               iwe.u.qual.noise = 0;
+       } else {
+               iwe.u.qual.noise = network->stats.noise;
+       }
 
        start = iwe_stream_add_event(start, stop, &iwe, IW_EV_QUAL_LEN);
 
@@ -162,7 +186,7 @@ static inline char *ipw2100_translate_scan(struct ieee80211_device *ieee,
        if (iwe.u.data.length)
                start = iwe_stream_add_point(start, stop, &iwe, custom);
 
-       if (ieee->wpa_enabled && network->wpa_ie_len) {
+       if (network->wpa_ie_len) {
                char buf[MAX_WPA_IE_LEN * 2 + 30];
 
                u8 *p = buf;
@@ -177,7 +201,7 @@ static inline char *ipw2100_translate_scan(struct ieee80211_device *ieee,
                start = iwe_stream_add_point(start, stop, &iwe, buf);
        }
 
-       if (ieee->wpa_enabled && network->rsn_ie_len) {
+       if (network->rsn_ie_len) {
                char buf[MAX_WPA_IE_LEN * 2 + 30];
 
                u8 *p = buf;
@@ -197,8 +221,8 @@ static inline char *ipw2100_translate_scan(struct ieee80211_device *ieee,
        iwe.cmd = IWEVCUSTOM;
        p = custom;
        p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
-                     " Last beacon: %lums ago",
-                     (jiffies - network->last_scanned) / (HZ / 100));
+                     " Last beacon: %dms ago",
+                     jiffies_to_msecs(jiffies - network->last_scanned));
        iwe.u.data.length = p - custom;
        if (iwe.u.data.length)
                start = iwe_stream_add_point(start, stop, &iwe, custom);
@@ -228,13 +252,13 @@ int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
                        ev = ipw2100_translate_scan(ieee, ev, stop, network);
                else
                        IEEE80211_DEBUG_SCAN("Not showing network '%s ("
-                                            MAC_FMT ")' due to age (%lums).\n",
+                                            MAC_FMT ")' due to age (%dms).\n",
                                             escape_essid(network->ssid,
                                                          network->ssid_len),
                                             MAC_ARG(network->bssid),
-                                            (jiffies -
-                                             network->last_scanned) / (HZ /
-                                                                       100));
+                                            jiffies_to_msecs(jiffies -
+                                                             network->
+                                                             last_scanned));
        }
 
        spin_unlock_irqrestore(&ieee->lock, flags);
@@ -258,6 +282,7 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
        };
        int i, key, key_provided, len;
        struct ieee80211_crypt_data **crypt;
+       int host_crypto = ieee->host_encrypt || ieee->host_decrypt;
 
        IEEE80211_DEBUG_WX("SET_ENCODE\n");
 
@@ -298,15 +323,17 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
 
                if (i == WEP_KEYS) {
                        sec.enabled = 0;
+                       sec.encrypt = 0;
                        sec.level = SEC_LEVEL_0;
-                       sec.flags |= SEC_ENABLED | SEC_LEVEL;
+                       sec.flags |= SEC_ENABLED | SEC_LEVEL | SEC_ENCRYPT;
                }
 
                goto done;
        }
 
        sec.enabled = 1;
-       sec.flags |= SEC_ENABLED;
+       sec.encrypt = 1;
+       sec.flags |= SEC_ENABLED | SEC_ENCRYPT;
 
        if (*crypt != NULL && (*crypt)->ops != NULL &&
            strcmp((*crypt)->ops->name, "WEP") != 0) {
@@ -315,7 +342,7 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
                ieee80211_crypt_delayed_deinit(ieee, crypt);
        }
 
-       if (*crypt == NULL) {
+       if (*crypt == NULL && host_crypto) {
                struct ieee80211_crypt_data *new_crypt;
 
                /* take WEP into use */
@@ -355,49 +382,56 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
                                   key, escape_essid(sec.keys[key], len),
                                   erq->length, len);
                sec.key_sizes[key] = len;
-               (*crypt)->ops->set_key(sec.keys[key], len, NULL,
-                                      (*crypt)->priv);
+               if (*crypt)
+                       (*crypt)->ops->set_key(sec.keys[key], len, NULL,
+                                              (*crypt)->priv);
                sec.flags |= (1 << key);
                /* This ensures a key will be activated if no key is
                 * explicitely set */
                if (key == sec.active_key)
                        sec.flags |= SEC_ACTIVE_KEY;
+
        } else {
-               len = (*crypt)->ops->get_key(sec.keys[key], WEP_KEY_LEN,
-                                            NULL, (*crypt)->priv);
-               if (len == 0) {
-                       /* Set a default key of all 0 */
-                       IEEE80211_DEBUG_WX("Setting key %d to all zero.\n",
-                                          key);
-                       memset(sec.keys[key], 0, 13);
-                       (*crypt)->ops->set_key(sec.keys[key], 13, NULL,
-                                              (*crypt)->priv);
-                       sec.key_sizes[key] = 13;
-                       sec.flags |= (1 << key);
+               if (host_crypto) {
+                       len = (*crypt)->ops->get_key(sec.keys[key], WEP_KEY_LEN,
+                                                    NULL, (*crypt)->priv);
+                       if (len == 0) {
+                               /* Set a default key of all 0 */
+                               IEEE80211_DEBUG_WX("Setting key %d to all "
+                                                  "zero.\n", key);
+                               memset(sec.keys[key], 0, 13);
+                               (*crypt)->ops->set_key(sec.keys[key], 13, NULL,
+                                                      (*crypt)->priv);
+                               sec.key_sizes[key] = 13;
+                               sec.flags |= (1 << key);
+                       }
                }
-
                /* No key data - just set the default TX key index */
                if (key_provided) {
-                       IEEE80211_DEBUG_WX
-                           ("Setting key %d to default Tx key.\n", key);
+                       IEEE80211_DEBUG_WX("Setting key %d to default Tx "
+                                          "key.\n", key);
                        ieee->tx_keyidx = key;
                        sec.active_key = key;
                        sec.flags |= SEC_ACTIVE_KEY;
                }
        }
-
-      done:
-       ieee->open_wep = !(erq->flags & IW_ENCODE_RESTRICTED);
-       sec.auth_mode = ieee->open_wep ? WLAN_AUTH_OPEN : WLAN_AUTH_SHARED_KEY;
-       sec.flags |= SEC_AUTH_MODE;
-       IEEE80211_DEBUG_WX("Auth: %s\n", sec.auth_mode == WLAN_AUTH_OPEN ?
-                          "OPEN" : "SHARED KEY");
+       if (erq->flags & (IW_ENCODE_OPEN | IW_ENCODE_RESTRICTED)) {
+               ieee->open_wep = !(erq->flags & IW_ENCODE_RESTRICTED);
+               sec.auth_mode = ieee->open_wep ? WLAN_AUTH_OPEN :
+                   WLAN_AUTH_SHARED_KEY;
+               sec.flags |= SEC_AUTH_MODE;
+               IEEE80211_DEBUG_WX("Auth: %s\n",
+                                  sec.auth_mode == WLAN_AUTH_OPEN ?
+                                  "OPEN" : "SHARED KEY");
+       }
 
        /* For now we just support WEP, so only set that security level...
         * TODO: When WPA is added this is one place that needs to change */
        sec.flags |= SEC_LEVEL;
        sec.level = SEC_LEVEL_1;        /* 40 and 104 bit WEP */
+       sec.encode_alg[key] = SEC_ALG_WEP;
 
+      done:
        if (ieee->set_security)
                ieee->set_security(dev, &sec);
 
@@ -422,6 +456,7 @@ int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
        struct iw_point *erq = &(wrqu->encoding);
        int len, key;
        struct ieee80211_crypt_data *crypt;
+       struct ieee80211_security *sec = &ieee->sec;
 
        IEEE80211_DEBUG_WX("GET_ENCODE\n");
 
@@ -436,23 +471,16 @@ int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
        crypt = ieee->crypt[key];
        erq->flags = key + 1;
 
-       if (crypt == NULL || crypt->ops == NULL) {
+       if (!sec->enabled) {
                erq->length = 0;
                erq->flags |= IW_ENCODE_DISABLED;
                return 0;
        }
 
-       if (strcmp(crypt->ops->name, "WEP") != 0) {
-               /* only WEP is supported with wireless extensions, so just
-                * report that encryption is used */
-               erq->length = 0;
-               erq->flags |= IW_ENCODE_ENABLED;
-               return 0;
-       }
+       len = sec->key_sizes[key];
+       memcpy(keybuf, sec->keys[key], len);
 
-       len = crypt->ops->get_key(keybuf, WEP_KEY_LEN, NULL, crypt->priv);
        erq->length = (len >= 0 ? len : 0);
-
        erq->flags |= IW_ENCODE_ENABLED;
 
        if (ieee->open_wep)
@@ -463,6 +491,240 @@ int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
        return 0;
 }
 
+int ieee80211_wx_set_encodeext(struct ieee80211_device *ieee,
+                              struct iw_request_info *info,
+                              union iwreq_data *wrqu, char *extra)
+{
+       struct net_device *dev = ieee->dev;
+       struct iw_point *encoding = &wrqu->encoding;
+       struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
+       int i, idx, ret = 0;
+       int group_key = 0;
+       const char *alg, *module;
+       struct ieee80211_crypto_ops *ops;
+       struct ieee80211_crypt_data **crypt;
+
+       struct ieee80211_security sec = {
+               .flags = 0,
+       };
+
+       idx = encoding->flags & IW_ENCODE_INDEX;
+       if (idx) {
+               if (idx < 1 || idx > WEP_KEYS)
+                       return -EINVAL;
+               idx--;
+       } else
+               idx = ieee->tx_keyidx;
+
+       if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
+               crypt = &ieee->crypt[idx];
+               group_key = 1;
+       } else {
+               if (idx != 0)
+                       return -EINVAL;
+               if (ieee->iw_mode == IW_MODE_INFRA)
+                       crypt = &ieee->crypt[idx];
+               else
+                       return -EINVAL;
+       }
+
+       sec.flags |= SEC_ENABLED | SEC_ENCRYPT;
+       if ((encoding->flags & IW_ENCODE_DISABLED) ||
+           ext->alg == IW_ENCODE_ALG_NONE) {
+               if (*crypt)
+                       ieee80211_crypt_delayed_deinit(ieee, crypt);
+
+               for (i = 0; i < WEP_KEYS; i++)
+                       if (ieee->crypt[i] != NULL)
+                               break;
+
+               if (i == WEP_KEYS) {
+                       sec.enabled = 0;
+                       sec.encrypt = 0;
+                       sec.level = SEC_LEVEL_0;
+                       sec.flags |= SEC_LEVEL;
+               }
+               goto done;
+       }
+
+       sec.enabled = 1;
+       sec.encrypt = 1;
+
+       if (group_key ? !ieee->host_mc_decrypt :
+           !(ieee->host_encrypt || ieee->host_decrypt ||
+             ieee->host_encrypt_msdu))
+               goto skip_host_crypt;
+
+       switch (ext->alg) {
+       case IW_ENCODE_ALG_WEP:
+               alg = "WEP";
+               module = "ieee80211_crypt_wep";
+               break;
+       case IW_ENCODE_ALG_TKIP:
+               alg = "TKIP";
+               module = "ieee80211_crypt_tkip";
+               break;
+       case IW_ENCODE_ALG_CCMP:
+               alg = "CCMP";
+               module = "ieee80211_crypt_ccmp";
+               break;
+       default:
+               IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n",
+                                  dev->name, ext->alg);
+               ret = -EINVAL;
+               goto done;
+       }
+
+       ops = ieee80211_get_crypto_ops(alg);
+       if (ops == NULL) {
+               request_module(module);
+               ops = ieee80211_get_crypto_ops(alg);
+       }
+       if (ops == NULL) {
+               IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n",
+                                  dev->name, ext->alg);
+               ret = -EINVAL;
+               goto done;
+       }
+
+       if (*crypt == NULL || (*crypt)->ops != ops) {
+               struct ieee80211_crypt_data *new_crypt;
+
+               ieee80211_crypt_delayed_deinit(ieee, crypt);
+
+               new_crypt = (struct ieee80211_crypt_data *)
+                   kmalloc(sizeof(*new_crypt), GFP_KERNEL);
+               if (new_crypt == NULL) {
+                       ret = -ENOMEM;
+                       goto done;
+               }
+               memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
+               new_crypt->ops = ops;
+               if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
+                       new_crypt->priv = new_crypt->ops->init(idx);
+               if (new_crypt->priv == NULL) {
+                       kfree(new_crypt);
+                       ret = -EINVAL;
+                       goto done;
+               }
+               *crypt = new_crypt;
+       }
+
+       if (ext->key_len > 0 && (*crypt)->ops->set_key &&
+           (*crypt)->ops->set_key(ext->key, ext->key_len, ext->rx_seq,
+                                  (*crypt)->priv) < 0) {
+               IEEE80211_DEBUG_WX("%s: key setting failed\n", dev->name);
+               ret = -EINVAL;
+               goto done;
+       }
+
+      skip_host_crypt:
+       if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
+               ieee->tx_keyidx = idx;
+               sec.active_key = idx;
+               sec.flags |= SEC_ACTIVE_KEY;
+       }
+
+       if (ext->alg != IW_ENCODE_ALG_NONE) {
+               memcpy(sec.keys[idx], ext->key, ext->key_len);
+               sec.key_sizes[idx] = ext->key_len;
+               sec.flags |= (1 << idx);
+               if (ext->alg == IW_ENCODE_ALG_WEP) {
+                       sec.encode_alg[idx] = SEC_ALG_WEP;
+                       sec.flags |= SEC_LEVEL;
+                       sec.level = SEC_LEVEL_1;
+               } else if (ext->alg == IW_ENCODE_ALG_TKIP) {
+                       sec.encode_alg[idx] = SEC_ALG_TKIP;
+                       sec.flags |= SEC_LEVEL;
+                       sec.level = SEC_LEVEL_2;
+               } else if (ext->alg == IW_ENCODE_ALG_CCMP) {
+                       sec.encode_alg[idx] = SEC_ALG_CCMP;
+                       sec.flags |= SEC_LEVEL;
+                       sec.level = SEC_LEVEL_3;
+               }
+               /* Don't set sec level for group keys. */
+               if (group_key)
+                       sec.flags &= ~SEC_LEVEL;
+       }
+      done:
+       if (ieee->set_security)
+               ieee->set_security(ieee->dev, &sec);
+
+       /*
+        * Do not reset port if card is in Managed mode since resetting will
+        * generate new IEEE 802.11 authentication which may end up in looping
+        * with IEEE 802.1X. If your hardware requires a reset after WEP
+        * configuration (for example... Prism2), implement the reset_port in
+        * the callbacks structures used to initialize the 802.11 stack.
+        */
+       if (ieee->reset_on_keychange &&
+           ieee->iw_mode != IW_MODE_INFRA &&
+           ieee->reset_port && ieee->reset_port(dev)) {
+               IEEE80211_DEBUG_WX("%s: reset_port failed\n", dev->name);
+               return -EINVAL;
+       }
+
+       return ret;
+}
+
+int ieee80211_wx_get_encodeext(struct ieee80211_device *ieee,
+                              struct iw_request_info *info,
+                              union iwreq_data *wrqu, char *extra)
+{
+       struct iw_point *encoding = &wrqu->encoding;
+       struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
+       struct ieee80211_security *sec = &ieee->sec;
+       int idx, max_key_len;
+
+       max_key_len = encoding->length - sizeof(*ext);
+       if (max_key_len < 0)
+               return -EINVAL;
+
+       idx = encoding->flags & IW_ENCODE_INDEX;
+       if (idx) {
+               if (idx < 1 || idx > WEP_KEYS)
+                       return -EINVAL;
+               idx--;
+       } else
+               idx = ieee->tx_keyidx;
+
+       if (!(ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY))
+               if (idx != 0 || ieee->iw_mode != IW_MODE_INFRA)
+                       return -EINVAL;
+
+       encoding->flags = idx + 1;
+       memset(ext, 0, sizeof(*ext));
+
+       if (!sec->enabled) {
+               ext->alg = IW_ENCODE_ALG_NONE;
+               ext->key_len = 0;
+               encoding->flags |= IW_ENCODE_DISABLED;
+       } else {
+               if (sec->encode_alg[idx] == SEC_ALG_WEP)
+                       ext->alg = IW_ENCODE_ALG_WEP;
+               else if (sec->encode_alg[idx] == SEC_ALG_TKIP)
+                       ext->alg = IW_ENCODE_ALG_TKIP;
+               else if (sec->encode_alg[idx] == SEC_ALG_CCMP)
+                       ext->alg = IW_ENCODE_ALG_CCMP;
+               else
+                       return -EINVAL;
+
+               ext->key_len = sec->key_sizes[idx];
+               memcpy(ext->key, sec->keys[idx], ext->key_len);
+               encoding->flags |= IW_ENCODE_ENABLED;
+               if (ext->key_len &&
+                   (ext->alg == IW_ENCODE_ALG_TKIP ||
+                    ext->alg == IW_ENCODE_ALG_CCMP))
+                       ext->ext_flags |= IW_ENCODE_EXT_TX_SEQ_VALID;
+
+       }
+
+       return 0;
+}
+
+EXPORT_SYMBOL(ieee80211_wx_set_encodeext);
+EXPORT_SYMBOL(ieee80211_wx_get_encodeext);
+
 EXPORT_SYMBOL(ieee80211_wx_get_scan);
 EXPORT_SYMBOL(ieee80211_wx_set_encode);
 EXPORT_SYMBOL(ieee80211_wx_get_encode);
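
The new encodeext entry points are intended to sit behind a driver's wireless-extensions handler table for SIOCSIWENCODEEXT/SIOCGIWENCODEEXT. A minimal sketch of that glue, assuming a hypothetical driver whose netdev_priv() is the ieee80211_device; the wrapper names and table layout are illustrative only:

/* Hypothetical wext glue -- a sketch, not part of this patch. */
static int mydrv_wx_set_encodeext(struct net_device *dev,
                                  struct iw_request_info *info,
                                  union iwreq_data *wrqu, char *extra)
{
        struct ieee80211_device *ieee = netdev_priv(dev);

        return ieee80211_wx_set_encodeext(ieee, info, wrqu, extra);
}

static int mydrv_wx_get_encodeext(struct net_device *dev,
                                  struct iw_request_info *info,
                                  union iwreq_data *wrqu, char *extra)
{
        struct ieee80211_device *ieee = netdev_priv(dev);

        return ieee80211_wx_get_encodeext(ieee, info, wrqu, extra);
}

static iw_handler mydrv_wx_handlers[] = {
        /* ... other handlers elided ... */
        [SIOCSIWENCODEEXT - SIOCIWFIRST] = mydrv_wx_set_encodeext,
        [SIOCGIWENCODEEXT - SIOCIWFIRST] = mydrv_wx_get_encodeext,
};
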
index 74f2207e131ae06b165f6203eb55b7d7e9b9456f..4ec4b2ca6ab15440eabc8f9e22b80a592d0067d1 100644 (file)
@@ -715,6 +715,7 @@ int devinet_ioctl(unsigned int cmd, void __user *arg)
                        break;
                ret = 0;
                if (ifa->ifa_mask != sin->sin_addr.s_addr) {
+                       u32 old_mask = ifa->ifa_mask;
                        inet_del_ifa(in_dev, ifap, 0);
                        ifa->ifa_mask = sin->sin_addr.s_addr;
                        ifa->ifa_prefixlen = inet_mask_len(ifa->ifa_mask);
@@ -728,7 +729,7 @@ int devinet_ioctl(unsigned int cmd, void __user *arg)
                        if ((dev->flags & IFF_BROADCAST) &&
                            (ifa->ifa_prefixlen < 31) &&
                            (ifa->ifa_broadcast ==
-                            (ifa->ifa_local|~ifa->ifa_mask))) {
+                            (ifa->ifa_local|~old_mask))) {
                                ifa->ifa_broadcast = (ifa->ifa_local |
                                                      ~sin->sin_addr.s_addr);
                        }
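
The fix above saves the mask that was in effect before the new netmask was written, so the broadcast address is only regenerated when it had been auto-derived from the previous prefix; comparing against the just-updated ifa_mask, as the old code did, defeated that test. A small worked sketch with example addresses (byte order and the surrounding checks are ignored for clarity):

/* Illustration of the broadcast-recompute test above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t local     = 0xC0A80102;  /* 192.168.1.2       */
        uint32_t old_mask  = 0xFFFFFF00;  /* /24 before ioctl  */
        uint32_t new_mask  = 0xFFFF0000;  /* /16 being set     */
        uint32_t broadcast = 0xC0A801FF;  /* 192.168.1.255     */

        /* Rewrite the broadcast address only if it was derived from the
         * old mask; the patched code compares against old_mask rather
         * than the already-updated ifa_mask. */
        if (broadcast == (local | ~old_mask))
                broadcast = local | ~new_mask;

        /* prints c0a8ffff, i.e. 192.168.255.255 */
        printf("new broadcast: %08x\n", (unsigned)broadcast);
        return 0;
}
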
index 0093ea08c7f5378c8bbd80e966007c9eaf87f382..66247f38b3716193637ab4c79020e22d9f65e1dd 100644 (file)
@@ -2404,7 +2404,7 @@ static int fib_route_seq_show(struct seq_file *seq, void *v)
                prefix = htonl(l->key);
 
                list_for_each_entry_rcu(fa, &li->falh, fa_list) {
-                       const struct fib_info *fi = rcu_dereference(fa->fa_info);
+                       const struct fib_info *fi = fa->fa_info;
                        unsigned flags = fib_flag_trans(fa->fa_type, mask, fi);
 
                        if (fa->fa_type == RTN_BROADCAST
index 90dca711ac9f3061d8b247d1e9f1b50e40419e40..175e093ec5645209ab809dd52ccce0667a020e82 100644 (file)
@@ -1108,12 +1108,9 @@ void __init icmp_init(struct net_proto_family *ops)
        struct inet_sock *inet;
        int i;
 
-       for (i = 0; i < NR_CPUS; i++) {
+       for_each_cpu(i) {
                int err;
 
-               if (!cpu_possible(i))
-                       continue;
-
                err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_ICMP,
                                       &per_cpu(__icmp_socket, i));
 
index 1ad5202e556b5d1862f5c0b1c9895c45e81e4c04..87e350069abb1cf8a2654bb652c609a4a429cb96 100644 (file)
@@ -1023,10 +1023,7 @@ ssize_t  ip_append_page(struct sock *sk, struct page *page,
                        int alloclen;
 
                        skb_prev = skb;
-                       if (skb_prev)
-                               fraggap = skb_prev->len - maxfraglen;
-                       else
-                               fraggap = 0;
+                       fraggap = skb_prev->len - maxfraglen;
 
                        alloclen = fragheaderlen + hh_len + fraggap + 15;
                        skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
index 07a80b56e8dce24f421e98d4efb89589748bed3a..422ab68ee7fb62599cb410c0b8aac61bda4647d6 100644 (file)
@@ -50,7 +50,7 @@
 #include <linux/netfilter_ipv4/ip_conntrack_core.h>
 #include <linux/netfilter_ipv4/listhelp.h>
 
-#define IP_CONNTRACK_VERSION   "2.3"
+#define IP_CONNTRACK_VERSION   "2.4"
 
 #if 0
 #define DEBUGP printk
@@ -148,16 +148,20 @@ DEFINE_PER_CPU(struct ip_conntrack_stat, ip_conntrack_stat);
 static int ip_conntrack_hash_rnd_initted;
 static unsigned int ip_conntrack_hash_rnd;
 
-static u_int32_t
-hash_conntrack(const struct ip_conntrack_tuple *tuple)
+static u_int32_t __hash_conntrack(const struct ip_conntrack_tuple *tuple,
+                           unsigned int size, unsigned int rnd)
 {
-#if 0
-       dump_tuple(tuple);
-#endif
        return (jhash_3words(tuple->src.ip,
                             (tuple->dst.ip ^ tuple->dst.protonum),
                             (tuple->src.u.all | (tuple->dst.u.all << 16)),
-                            ip_conntrack_hash_rnd) % ip_conntrack_htable_size);
+                            rnd) % size);
+}
+
+static u_int32_t
+hash_conntrack(const struct ip_conntrack_tuple *tuple)
+{
+       return __hash_conntrack(tuple, ip_conntrack_htable_size,
+                               ip_conntrack_hash_rnd);
 }
 
 int
@@ -1341,14 +1345,13 @@ static int kill_all(struct ip_conntrack *i, void *data)
        return 1;
 }
 
-static void free_conntrack_hash(void)
+static void free_conntrack_hash(struct list_head *hash, int vmalloced, int size)
 {
-       if (ip_conntrack_vmalloc)
-               vfree(ip_conntrack_hash);
+       if (vmalloced)
+               vfree(hash);
        else
-               free_pages((unsigned long)ip_conntrack_hash, 
-                          get_order(sizeof(struct list_head)
-                                    * ip_conntrack_htable_size));
+               free_pages((unsigned long)hash, 
+                          get_order(sizeof(struct list_head) * size));
 }
 
 void ip_conntrack_flush()
@@ -1378,12 +1381,83 @@ void ip_conntrack_cleanup(void)
        ip_conntrack_flush();
        kmem_cache_destroy(ip_conntrack_cachep);
        kmem_cache_destroy(ip_conntrack_expect_cachep);
-       free_conntrack_hash();
+       free_conntrack_hash(ip_conntrack_hash, ip_conntrack_vmalloc,
+                           ip_conntrack_htable_size);
        nf_unregister_sockopt(&so_getorigdst);
 }
 
-static int hashsize;
-module_param(hashsize, int, 0400);
+static struct list_head *alloc_hashtable(int size, int *vmalloced)
+{
+       struct list_head *hash;
+       unsigned int i;
+
+       *vmalloced = 0; 
+       hash = (void*)__get_free_pages(GFP_KERNEL, 
+                                      get_order(sizeof(struct list_head)
+                                                * size));
+       if (!hash) { 
+               *vmalloced = 1;
+               printk(KERN_WARNING "ip_conntrack: falling back to vmalloc.\n");
+               hash = vmalloc(sizeof(struct list_head) * size);
+       }
+
+       if (hash)
+               for (i = 0; i < size; i++)
+                       INIT_LIST_HEAD(&hash[i]);
+
+       return hash;
+}
+
+int set_hashsize(const char *val, struct kernel_param *kp)
+{
+       int i, bucket, hashsize, vmalloced;
+       int old_vmalloced, old_size;
+       int rnd;
+       struct list_head *hash, *old_hash;
+       struct ip_conntrack_tuple_hash *h;
+
+       /* On boot, we can set this without any fancy locking. */
+       if (!ip_conntrack_htable_size)
+               return param_set_int(val, kp);
+
+       hashsize = simple_strtol(val, NULL, 0);
+       if (!hashsize)
+               return -EINVAL;
+
+       hash = alloc_hashtable(hashsize, &vmalloced);
+       if (!hash)
+               return -ENOMEM;
+
+       /* We have to rehash for the new table anyway, so we can also
+        * use a new random seed */
+       get_random_bytes(&rnd, 4);
+
+       write_lock_bh(&ip_conntrack_lock);
+       for (i = 0; i < ip_conntrack_htable_size; i++) {
+               while (!list_empty(&ip_conntrack_hash[i])) {
+                       h = list_entry(ip_conntrack_hash[i].next,
+                                      struct ip_conntrack_tuple_hash, list);
+                       list_del(&h->list);
+                       bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
+                       list_add_tail(&h->list, &hash[bucket]);
+               }
+       }
+       old_size = ip_conntrack_htable_size;
+       old_vmalloced = ip_conntrack_vmalloc;
+       old_hash = ip_conntrack_hash;
+
+       ip_conntrack_htable_size = hashsize;
+       ip_conntrack_vmalloc = vmalloced;
+       ip_conntrack_hash = hash;
+       ip_conntrack_hash_rnd = rnd;
+       write_unlock_bh(&ip_conntrack_lock);
+
+       free_conntrack_hash(old_hash, old_vmalloced, old_size);
+       return 0;
+}
+
+module_param_call(hashsize, set_hashsize, param_get_uint,
+                 &ip_conntrack_htable_size, 0600);
 
 int __init ip_conntrack_init(void)
 {
@@ -1392,9 +1466,7 @@ int __init ip_conntrack_init(void)
 
        /* Idea from tcp.c: use 1/16384 of memory.  On i386: 32MB
         * machine has 256 buckets.  >= 1GB machines have 8192 buckets. */
-       if (hashsize) {
-               ip_conntrack_htable_size = hashsize;
-       } else {
+       if (!ip_conntrack_htable_size) {
                ip_conntrack_htable_size
                        = (((num_physpages << PAGE_SHIFT) / 16384)
                           / sizeof(struct list_head));
@@ -1416,20 +1488,8 @@ int __init ip_conntrack_init(void)
                return ret;
        }
 
-       /* AK: the hash table is twice as big than needed because it
-          uses list_head.  it would be much nicer to caches to use a
-          single pointer list head here. */
-       ip_conntrack_vmalloc = 0; 
-       ip_conntrack_hash 
-               =(void*)__get_free_pages(GFP_KERNEL, 
-                                        get_order(sizeof(struct list_head)
-                                                  *ip_conntrack_htable_size));
-       if (!ip_conntrack_hash) { 
-               ip_conntrack_vmalloc = 1;
-               printk(KERN_WARNING "ip_conntrack: falling back to vmalloc.\n");
-               ip_conntrack_hash = vmalloc(sizeof(struct list_head)
-                                           * ip_conntrack_htable_size);
-       }
+       ip_conntrack_hash = alloc_hashtable(ip_conntrack_htable_size,
+                                           &ip_conntrack_vmalloc);
        if (!ip_conntrack_hash) {
                printk(KERN_ERR "Unable to create ip_conntrack_hash\n");
                goto err_unreg_sockopt;
@@ -1461,9 +1521,6 @@ int __init ip_conntrack_init(void)
        ip_ct_protos[IPPROTO_ICMP] = &ip_conntrack_protocol_icmp;
        write_unlock_bh(&ip_conntrack_lock);
 
-       for (i = 0; i < ip_conntrack_htable_size; i++)
-               INIT_LIST_HEAD(&ip_conntrack_hash[i]);
-
        /* For use by ipt_REJECT */
        ip_ct_attach = ip_conntrack_attach;
 
@@ -1478,7 +1535,8 @@ int __init ip_conntrack_init(void)
 err_free_conntrack_slab:
        kmem_cache_destroy(ip_conntrack_cachep);
 err_free_hash:
-       free_conntrack_hash();
+       free_conntrack_hash(ip_conntrack_hash, ip_conntrack_vmalloc,
+                           ip_conntrack_htable_size);
 err_unreg_sockopt:
        nf_unregister_sockopt(&so_getorigdst);
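
The resize path introduced above follows a build-swap-free pattern: allocate the replacement table first, rehash every entry into it under the write lock, publish the new table, size and seed together, and free the old table only after the lock is dropped. A rough userspace sketch of the same pattern (hypothetical names, a plain singly linked chain standing in for the conntrack lists):

/* Sketch (userspace approximation): resize a chained hash table the way
 * set_hashsize() does - build the new table, rehash under the writer
 * lock, swap the pointers, free the old table outside the lock. */
#include <stdlib.h>
#include <pthread.h>

struct node { struct node *next; unsigned int key; };

static struct node **table;
static unsigned int table_size;
static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;

static unsigned int hash(unsigned int key, unsigned int size)
{
        return key % size;              /* stand-in for __hash_conntrack() */
}

static int resize(unsigned int new_size)
{
        struct node **new_table, **old_table, *n;
        unsigned int i, b;

        new_table = calloc(new_size, sizeof(*new_table));
        if (!new_table)
                return -1;              /* like the -ENOMEM case above */

        pthread_rwlock_wrlock(&table_lock);
        for (i = 0; i < table_size; i++) {
                while ((n = table[i]) != NULL) {        /* unlink ... */
                        table[i] = n->next;
                        b = hash(n->key, new_size);     /* ... rehash ... */
                        n->next = new_table[b];         /* ... relink */
                        new_table[b] = n;
                }
        }
        old_table = table;
        table = new_table;
        table_size = new_size;
        pthread_rwlock_unlock(&table_lock);

        free(old_table);                /* old table freed after unlocking */
        return 0;
}

int main(void)
{
        return resize(16);
}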
 
index f7943ba1f43c42d3d8c6546730c283ae134f53e6..a65e508fbd40e4681bb9b5649c36d5ed8959b913 100644 (file)
@@ -90,9 +90,7 @@ fold_field(void *mib[], int offt)
        unsigned long res = 0;
        int i;
 
-       for (i = 0; i < NR_CPUS; i++) {
-               if (!cpu_possible(i))
-                       continue;
+       for_each_cpu(i) {
                res += *(((unsigned long *) per_cpu_ptr(mib[0], i)) + offt);
                res += *(((unsigned long *) per_cpu_ptr(mib[1], i)) + offt);
        }
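
This and the following hunks replace the open-coded NR_CPUS loop plus cpu_possible() check with the for_each_cpu() iterator, which walks only the possible CPUs. A userspace approximation of the same refactor, with a made-up possible-CPU mask and per-CPU counters:

/* Sketch (hypothetical data): sum per-CPU counters by walking only the
 * CPUs marked possible, instead of looping 0..NR_CPUS-1 and skipping
 * the holes by hand. */
#include <stdio.h>

#define NR_CPUS 8

static const int cpu_possible[NR_CPUS] = { 1, 1, 1, 1, 0, 0, 0, 0 };
static const unsigned long per_cpu_count[NR_CPUS] = { 3, 1, 4, 1, 0, 0, 0, 0 };

/* crude stand-in for the kernel's for_each_cpu() iterator */
#define for_each_possible(i) \
        for ((i) = 0; (i) < NR_CPUS; (i)++) if (cpu_possible[(i)])

int main(void)
{
        unsigned long res = 0;
        int i;

        for_each_possible(i)
                res += per_cpu_count[i];        /* like fold_field() above */

        printf("%lu\n", res);                   /* prints 9 */
        return 0;
}
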
index b7185fb3377ce08105d46e34741cea01b85610a7..23e540365a143fde637aa72bd8697113cab444d6 100644 (file)
@@ -700,10 +700,7 @@ int __init icmpv6_init(struct net_proto_family *ops)
        struct sock *sk;
        int err, i, j;
 
-       for (i = 0; i < NR_CPUS; i++) {
-               if (!cpu_possible(i))
-                       continue;
-
+       for_each_cpu(i) {
                err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6,
                                       &per_cpu(__icmpv6_socket, i));
                if (err < 0) {
@@ -749,9 +746,7 @@ void icmpv6_cleanup(void)
 {
        int i;
 
-       for (i = 0; i < NR_CPUS; i++) {
-               if (!cpu_possible(i))
-                       continue;
+       for_each_cpu(i) {
                sock_release(per_cpu(__icmpv6_socket, i));
        }
        inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
index 334a5967831e061c7dca3ce1532d7f33e742a17d..50a13e75d70ec5fecd20269c8c4e90bc523164cc 100644 (file)
@@ -140,9 +140,7 @@ fold_field(void *mib[], int offt)
         unsigned long res = 0;
         int i;
  
-        for (i = 0; i < NR_CPUS; i++) {
-                if (!cpu_possible(i))
-                        continue;
+        for_each_cpu(i) {
                 res += *(((unsigned long *)per_cpu_ptr(mib[0], i)) + offt);
                 res += *(((unsigned long *)per_cpu_ptr(mib[1], i)) + offt);
         }
index 678c3f2c0d0bdd9abad85d3eeb16acf38c8a01d3..5ca283537bc66e9344c2e0295b6139d0628454a9 100644 (file)
@@ -740,11 +740,8 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, long t
 
 int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol)
 {
-       struct netlink_sock *nlk;
        int len = skb->len;
 
-       nlk = nlk_sk(sk);
-
        skb_queue_tail(&sk->sk_receive_queue, skb);
        sk->sk_data_ready(sk, len);
        sock_put(sk);
@@ -827,7 +824,7 @@ struct netlink_broadcast_data {
        int failure;
        int congested;
        int delivered;
-       unsigned int allocation;
+       gfp_t allocation;
        struct sk_buff *skb, *skb2;
 };
 
index e556d92c0bc4d2eb5883acda48b051a608a10d9f..b18fe504301944d8514816df8d6eb90f715b048d 100644 (file)
@@ -727,7 +727,7 @@ int rose_rt_ioctl(unsigned int cmd, void __user *arg)
                }
                if (rose_route.mask > 10) /* Mask can't be more than 10 digits */
                        return -EINVAL;
-               if (rose_route.ndigis > 8) /* No more than 8 digipeats */
+               if (rose_route.ndigis > AX25_MAX_DIGIS)
                        return -EINVAL;
                err = rose_add_node(&rose_route, dev);
                dev_put(dev);
index b74f7772b576b131b34b25c33fa544d4d6b84c8d..6e4dc28874d7690884753d32542fb039ed5e0b6d 100644 (file)
@@ -69,9 +69,7 @@ fold_field(void *mib[], int nr)
        unsigned long res = 0;
        int i;
 
-       for (i = 0; i < NR_CPUS; i++) {
-               if (!cpu_possible(i))
-                       continue;
+       for_each_cpu(i) {
                res +=
                    *((unsigned long *) (((void *) per_cpu_ptr(mib[0], i)) +
                                         sizeof (unsigned long) * nr));
index 46a2ce00a29b8af7ca0ac4a67fd082eda541bf00..cdcab9ca4c60fed14b460da6a4c93794b18bf938 100644 (file)
@@ -6,7 +6,7 @@
 obj-$(CONFIG_SUNRPC) += sunrpc.o
 obj-$(CONFIG_SUNRPC_GSS) += auth_gss/
 
-sunrpc-y := clnt.o xprt.o sched.o \
+sunrpc-y := clnt.o xprt.o socklib.o xprtsock.o sched.o \
            auth.o auth_null.o auth_unix.o \
            svc.o svcsock.o svcauth.o svcauth_unix.o \
            pmap_clnt.o timer.o xdr.o \
index 505e2d4b3d6259e3363c8a1c428da3535533132a..a415d99c394dee1cc4f008c85a60b28a9bae354f 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/errno.h>
-#include <linux/socket.h>
 #include <linux/sunrpc/clnt.h>
 #include <linux/spinlock.h>
 
index fe1b874084bc6d9df9631eb497764ca63fa44aca..f3431a7e33da6603af6076116dfcfa7b5e5b6760 100644 (file)
@@ -10,7 +10,7 @@ auth_rpcgss-objs := auth_gss.o gss_generic_token.o \
 obj-$(CONFIG_RPCSEC_GSS_KRB5) += rpcsec_gss_krb5.o
 
 rpcsec_gss_krb5-objs := gss_krb5_mech.o gss_krb5_seal.o gss_krb5_unseal.o \
-       gss_krb5_seqnum.o
+       gss_krb5_seqnum.o gss_krb5_wrap.o
 
 obj-$(CONFIG_RPCSEC_GSS_SPKM3) += rpcsec_gss_spkm3.o
 
index 2f7b867161d254cc6251dbe994fa90017a02e7a8..f44f46f1d8e053c0d330b442c2f3788cf4073dd4 100644 (file)
@@ -42,9 +42,8 @@
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/slab.h>
-#include <linux/socket.h>
-#include <linux/in.h>
 #include <linux/sched.h>
+#include <linux/pagemap.h>
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/auth.h>
 #include <linux/sunrpc/auth_gss.h>
@@ -846,10 +845,8 @@ gss_marshal(struct rpc_task *task, u32 *p)
 
        /* We compute the checksum for the verifier over the xdr-encoded bytes
         * starting with the xid and ending at the end of the credential: */
-       iov.iov_base = req->rq_snd_buf.head[0].iov_base;
-       if (task->tk_client->cl_xprt->stream)
-               /* See clnt.c:call_header() */
-               iov.iov_base += 4;
+       iov.iov_base = xprt_skip_transport_header(task->tk_xprt,
+                                       req->rq_snd_buf.head[0].iov_base);
        iov.iov_len = (u8 *)p - (u8 *)iov.iov_base;
        xdr_buf_from_iov(&iov, &verf_buf);
 
@@ -857,9 +854,7 @@ gss_marshal(struct rpc_task *task, u32 *p)
        *p++ = htonl(RPC_AUTH_GSS);
 
        mic.data = (u8 *)(p + 1);
-       maj_stat = gss_get_mic(ctx->gc_gss_ctx,
-                              GSS_C_QOP_DEFAULT, 
-                              &verf_buf, &mic);
+       maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
        if (maj_stat == GSS_S_CONTEXT_EXPIRED) {
                cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE;
        } else if (maj_stat != 0) {
@@ -890,10 +885,8 @@ static u32 *
 gss_validate(struct rpc_task *task, u32 *p)
 {
        struct rpc_cred *cred = task->tk_msg.rpc_cred;
-       struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
-                                               gc_base);
        struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
-       u32             seq, qop_state;
+       u32             seq;
        struct kvec     iov;
        struct xdr_buf  verf_buf;
        struct xdr_netobj mic;
@@ -914,23 +907,14 @@ gss_validate(struct rpc_task *task, u32 *p)
        mic.data = (u8 *)p;
        mic.len = len;
 
-       maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic, &qop_state);
+       maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
        if (maj_stat == GSS_S_CONTEXT_EXPIRED)
                cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE;
        if (maj_stat)
                goto out_bad;
-       switch (gss_cred->gc_service) {
-       case RPC_GSS_SVC_NONE:
-              /* verifier data, flavor, length: */
-              task->tk_auth->au_rslack = XDR_QUADLEN(len) + 2;
-              break;
-       case RPC_GSS_SVC_INTEGRITY:
-              /* verifier data, flavor, length, length, sequence number: */
-              task->tk_auth->au_rslack = XDR_QUADLEN(len) + 4;
-              break;
-       case RPC_GSS_SVC_PRIVACY:
-              goto out_bad;
-       }
+       /* We leave it to unwrap to calculate au_rslack. For now we just
+        * calculate the length of the verifier: */
+       task->tk_auth->au_verfsize = XDR_QUADLEN(len) + 2;
        gss_put_ctx(ctx);
        dprintk("RPC: %4u GSS gss_validate: gss_verify_mic succeeded.\n",
                        task->tk_pid);
@@ -975,8 +959,7 @@ gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
        p = iov->iov_base + iov->iov_len;
        mic.data = (u8 *)(p + 1);
 
-       maj_stat = gss_get_mic(ctx->gc_gss_ctx,
-                       GSS_C_QOP_DEFAULT, &integ_buf, &mic);
+       maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
        status = -EIO; /* XXX? */
        if (maj_stat == GSS_S_CONTEXT_EXPIRED)
                cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE;
@@ -990,6 +973,113 @@ gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
        return 0;
 }
 
+static void
+priv_release_snd_buf(struct rpc_rqst *rqstp)
+{
+       int i;
+
+       for (i=0; i < rqstp->rq_enc_pages_num; i++)
+               __free_page(rqstp->rq_enc_pages[i]);
+       kfree(rqstp->rq_enc_pages);
+}
+
+static int
+alloc_enc_pages(struct rpc_rqst *rqstp)
+{
+       struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
+       int first, last, i;
+
+       if (snd_buf->page_len == 0) {
+               rqstp->rq_enc_pages_num = 0;
+               return 0;
+       }
+
+       first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
+       last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_CACHE_SHIFT;
+       rqstp->rq_enc_pages_num = last - first + 1 + 1;
+       rqstp->rq_enc_pages
+               = kmalloc(rqstp->rq_enc_pages_num * sizeof(struct page *),
+                               GFP_NOFS);
+       if (!rqstp->rq_enc_pages)
+               goto out;
+       for (i=0; i < rqstp->rq_enc_pages_num; i++) {
+               rqstp->rq_enc_pages[i] = alloc_page(GFP_NOFS);
+               if (rqstp->rq_enc_pages[i] == NULL)
+                       goto out_free;
+       }
+       rqstp->rq_release_snd_buf = priv_release_snd_buf;
+       return 0;
+out_free:
+       for (i--; i >= 0; i--) {
+               __free_page(rqstp->rq_enc_pages[i]);
+       }
+out:
+       return -EAGAIN;
+}
+
+static inline int
+gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
+               kxdrproc_t encode, struct rpc_rqst *rqstp, u32 *p, void *obj)
+{
+       struct xdr_buf  *snd_buf = &rqstp->rq_snd_buf;
+       u32             offset;
+       u32             maj_stat;
+       int             status;
+       u32             *opaque_len;
+       struct page     **inpages;
+       int             first;
+       int             pad;
+       struct kvec     *iov;
+       char            *tmp;
+
+       opaque_len = p++;
+       offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
+       *p++ = htonl(rqstp->rq_seqno);
+
+       status = encode(rqstp, p, obj);
+       if (status)
+               return status;
+
+       status = alloc_enc_pages(rqstp);
+       if (status)
+               return status;
+       first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
+       inpages = snd_buf->pages + first;
+       snd_buf->pages = rqstp->rq_enc_pages;
+       snd_buf->page_base -= first << PAGE_CACHE_SHIFT;
+       /* Give the tail its own page, in case we need extra space in the
+        * head when wrapping: */
+       if (snd_buf->page_len || snd_buf->tail[0].iov_len) {
+               tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]);
+               memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len);
+               snd_buf->tail[0].iov_base = tmp;
+       }
+       maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
+       /* RPC_SLACK_SPACE should prevent this ever happening: */
+       BUG_ON(snd_buf->len > snd_buf->buflen);
+        status = -EIO;
+       /* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
+        * done anyway, so it's safe to put the request on the wire: */
+       if (maj_stat == GSS_S_CONTEXT_EXPIRED)
+               cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE;
+       else if (maj_stat)
+               return status;
+
+       *opaque_len = htonl(snd_buf->len - offset);
+       /* guess whether we're in the head or the tail: */
+       if (snd_buf->page_len || snd_buf->tail[0].iov_len)
+               iov = snd_buf->tail;
+       else
+               iov = snd_buf->head;
+       p = iov->iov_base + iov->iov_len;
+       pad = 3 - ((snd_buf->len - offset - 1) & 3);
+       memset(p, 0, pad);
+       iov->iov_len += pad;
+       snd_buf->len += pad;
+
+       return 0;
+}
+
 static int
 gss_wrap_req(struct rpc_task *task,
             kxdrproc_t encode, void *rqstp, u32 *p, void *obj)
@@ -1017,6 +1107,8 @@ gss_wrap_req(struct rpc_task *task,
                                                                rqstp, p, obj);
                        break;
                        case RPC_GSS_SVC_PRIVACY:
+                       status = gss_wrap_req_priv(cred, ctx, encode,
+                                       rqstp, p, obj);
                        break;
        }
 out:
@@ -1054,8 +1146,7 @@ gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
        if (xdr_buf_read_netobj(rcv_buf, &mic, mic_offset))
                return status;
 
-       maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf,
-                       &mic, NULL);
+       maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
        if (maj_stat == GSS_S_CONTEXT_EXPIRED)
                cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE;
        if (maj_stat != GSS_S_COMPLETE)
@@ -1063,6 +1154,35 @@ gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
        return 0;
 }
 
+static inline int
+gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
+               struct rpc_rqst *rqstp, u32 **p)
+{
+       struct xdr_buf  *rcv_buf = &rqstp->rq_rcv_buf;
+       u32 offset;
+       u32 opaque_len;
+       u32 maj_stat;
+       int status = -EIO;
+
+       opaque_len = ntohl(*(*p)++);
+       offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
+       if (offset + opaque_len > rcv_buf->len)
+               return status;
+       /* remove padding: */
+       rcv_buf->len = offset + opaque_len;
+
+       maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf);
+       if (maj_stat == GSS_S_CONTEXT_EXPIRED)
+               cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE;
+       if (maj_stat != GSS_S_COMPLETE)
+               return status;
+       if (ntohl(*(*p)++) != rqstp->rq_seqno)
+               return status;
+
+       return 0;
+}
+
+
 static int
 gss_unwrap_resp(struct rpc_task *task,
                kxdrproc_t decode, void *rqstp, u32 *p, void *obj)
@@ -1071,6 +1191,9 @@ gss_unwrap_resp(struct rpc_task *task,
        struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
                        gc_base);
        struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
+       u32             *savedp = p;
+       struct kvec     *head = ((struct rpc_rqst *)rqstp)->rq_rcv_buf.head;
+       int             savedlen = head->iov_len;
        int             status = -EIO;
 
        if (ctx->gc_proc != RPC_GSS_PROC_DATA)
@@ -1084,8 +1207,14 @@ gss_unwrap_resp(struct rpc_task *task,
                                goto out;
                        break;
                        case RPC_GSS_SVC_PRIVACY:
+                       status = gss_unwrap_resp_priv(cred, ctx, rqstp, &p);
+                       if (status)
+                               goto out;
                        break;
        }
+       /* take into account extra slack for integrity and privacy cases: */
+       task->tk_auth->au_rslack = task->tk_auth->au_verfsize + (p - savedp)
+                                               + (savedlen - head->iov_len);
 out_decode:
        status = decode(rqstp, p, obj);
 out:
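
The privacy path pads the wrapped opaque body out to the usual 4-byte XDR boundary; the expression pad = 3 - ((snd_buf->len - offset - 1) & 3) in gss_wrap_req_priv() is that round-up written without a division. A tiny standalone check of the arithmetic (names are illustrative only):

/* Sketch: the pad computed at the end of gss_wrap_req_priv() is the XDR
 * round-up of an opaque body to a 4-byte boundary.  The two expressions
 * below agree for any positive length. */
#include <assert.h>

static unsigned int xdr_pad(unsigned int len)
{
        return 3 - ((len - 1) & 3);     /* form used in the patch */
}

int main(void)
{
        unsigned int len;

        for (len = 1; len <= 64; len++)
                assert(xdr_pad(len) == (4 - len % 4) % 4);
        return 0;
}
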
index ee6ae74cd1b226b2bb7d39655f2a1862e4507c21..3f3d5437f02d38309bbad8cfc9d343720b6f3d04 100644 (file)
@@ -139,17 +139,91 @@ buf_to_sg(struct scatterlist *sg, char *ptr, int len) {
        sg->length = len;
 }
 
+static int
+process_xdr_buf(struct xdr_buf *buf, int offset, int len,
+               int (*actor)(struct scatterlist *, void *), void *data)
+{
+       int i, page_len, thislen, page_offset, ret = 0;
+       struct scatterlist      sg[1];
+
+       if (offset >= buf->head[0].iov_len) {
+               offset -= buf->head[0].iov_len;
+       } else {
+               thislen = buf->head[0].iov_len - offset;
+               if (thislen > len)
+                       thislen = len;
+               buf_to_sg(sg, buf->head[0].iov_base + offset, thislen);
+               ret = actor(sg, data);
+               if (ret)
+                       goto out;
+               offset = 0;
+               len -= thislen;
+       }
+       if (len == 0)
+               goto out;
+
+       if (offset >= buf->page_len) {
+               offset -= buf->page_len;
+       } else {
+               page_len = buf->page_len - offset;
+               if (page_len > len)
+                       page_len = len;
+               len -= page_len;
+               page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
+               i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
+               thislen = PAGE_CACHE_SIZE - page_offset;
+               do {
+                       if (thislen > page_len)
+                               thislen = page_len;
+                       sg->page = buf->pages[i];
+                       sg->offset = page_offset;
+                       sg->length = thislen;
+                       ret = actor(sg, data);
+                       if (ret)
+                               goto out;
+                       page_len -= thislen;
+                       i++;
+                       page_offset = 0;
+                       thislen = PAGE_CACHE_SIZE;
+               } while (page_len != 0);
+               offset = 0;
+       }
+       if (len == 0)
+               goto out;
+
+       if (offset < buf->tail[0].iov_len) {
+               thislen = buf->tail[0].iov_len - offset;
+               if (thislen > len)
+                       thislen = len;
+               buf_to_sg(sg, buf->tail[0].iov_base + offset, thislen);
+               ret = actor(sg, data);
+               len -= thislen;
+       }
+       if (len != 0)
+               ret = -EINVAL;
+out:
+       return ret;
+}
+
+static int
+checksummer(struct scatterlist *sg, void *data)
+{
+       struct crypto_tfm *tfm = (struct crypto_tfm *)data;
+
+       crypto_digest_update(tfm, sg, 1);
+
+       return 0;
+}
+
 /* checksum the plaintext data and hdrlen bytes of the token header */
 s32
 make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
-                  struct xdr_netobj *cksum)
+                  int body_offset, struct xdr_netobj *cksum)
 {
        char                            *cksumname;
        struct crypto_tfm               *tfm = NULL; /* XXX add to ctx? */
        struct scatterlist              sg[1];
        u32                             code = GSS_S_FAILURE;
-       int                             len, thislen, offset;
-       int                             i;
 
        switch (cksumtype) {
                case CKSUMTYPE_RSA_MD5:
@@ -169,33 +243,8 @@ make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
        crypto_digest_init(tfm);
        buf_to_sg(sg, header, hdrlen);
        crypto_digest_update(tfm, sg, 1);
-       if (body->head[0].iov_len) {
-               buf_to_sg(sg, body->head[0].iov_base, body->head[0].iov_len);
-               crypto_digest_update(tfm, sg, 1);
-       }
-
-       len = body->page_len;
-       if (len != 0) {
-               offset = body->page_base & (PAGE_CACHE_SIZE - 1);
-               i = body->page_base >> PAGE_CACHE_SHIFT;
-               thislen = PAGE_CACHE_SIZE - offset;
-               do {
-                       if (thislen > len)
-                               thislen = len;
-                       sg->page = body->pages[i];
-                       sg->offset = offset;
-                       sg->length = thislen;
-                       crypto_digest_update(tfm, sg, 1);
-                       len -= thislen;
-                       i++;
-                       offset = 0;
-                       thislen = PAGE_CACHE_SIZE;
-               } while(len != 0);
-       }
-       if (body->tail[0].iov_len) {
-               buf_to_sg(sg, body->tail[0].iov_base, body->tail[0].iov_len);
-               crypto_digest_update(tfm, sg, 1);
-       }
+       process_xdr_buf(body, body_offset, body->len - body_offset,
+                       checksummer, tfm);
        crypto_digest_final(tfm, cksum->data);
        code = 0;
 out:
@@ -204,3 +253,154 @@ out:
 }
 
 EXPORT_SYMBOL(make_checksum);
+
+struct encryptor_desc {
+       u8 iv[8]; /* XXX hard-coded blocksize */
+       struct crypto_tfm *tfm;
+       int pos;
+       struct xdr_buf *outbuf;
+       struct page **pages;
+       struct scatterlist infrags[4];
+       struct scatterlist outfrags[4];
+       int fragno;
+       int fraglen;
+};
+
+static int
+encryptor(struct scatterlist *sg, void *data)
+{
+       struct encryptor_desc *desc = data;
+       struct xdr_buf *outbuf = desc->outbuf;
+       struct page *in_page;
+       int thislen = desc->fraglen + sg->length;
+       int fraglen, ret;
+       int page_pos;
+
+       /* Worst case is 4 fragments: head, end of page 1, start
+        * of page 2, tail.  Anything more is a bug. */
+       BUG_ON(desc->fragno > 3);
+       desc->infrags[desc->fragno] = *sg;
+       desc->outfrags[desc->fragno] = *sg;
+
+       page_pos = desc->pos - outbuf->head[0].iov_len;
+       if (page_pos >= 0 && page_pos < outbuf->page_len) {
+               /* pages are not in place: */
+               int i = (page_pos + outbuf->page_base) >> PAGE_CACHE_SHIFT;
+               in_page = desc->pages[i];
+       } else {
+               in_page = sg->page;
+       }
+       desc->infrags[desc->fragno].page = in_page;
+       desc->fragno++;
+       desc->fraglen += sg->length;
+       desc->pos += sg->length;
+
+       fraglen = thislen & 7; /* XXX hardcoded blocksize */
+       thislen -= fraglen;
+
+       if (thislen == 0)
+               return 0;
+
+       ret = crypto_cipher_encrypt_iv(desc->tfm, desc->outfrags, desc->infrags,
+                                       thislen, desc->iv);
+       if (ret)
+               return ret;
+       if (fraglen) {
+               desc->outfrags[0].page = sg->page;
+               desc->outfrags[0].offset = sg->offset + sg->length - fraglen;
+               desc->outfrags[0].length = fraglen;
+               desc->infrags[0] = desc->outfrags[0];
+               desc->infrags[0].page = in_page;
+               desc->fragno = 1;
+               desc->fraglen = fraglen;
+       } else {
+               desc->fragno = 0;
+               desc->fraglen = 0;
+       }
+       return 0;
+}
+
+int
+gss_encrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *buf, int offset,
+               struct page **pages)
+{
+       int ret;
+       struct encryptor_desc desc;
+
+       BUG_ON((buf->len - offset) % crypto_tfm_alg_blocksize(tfm) != 0);
+
+       memset(desc.iv, 0, sizeof(desc.iv));
+       desc.tfm = tfm;
+       desc.pos = offset;
+       desc.outbuf = buf;
+       desc.pages = pages;
+       desc.fragno = 0;
+       desc.fraglen = 0;
+
+       ret = process_xdr_buf(buf, offset, buf->len - offset, encryptor, &desc);
+       return ret;
+}
+
+EXPORT_SYMBOL(gss_encrypt_xdr_buf);
+
+struct decryptor_desc {
+       u8 iv[8]; /* XXX hard-coded blocksize */
+       struct crypto_tfm *tfm;
+       struct scatterlist frags[4];
+       int fragno;
+       int fraglen;
+};
+
+static int
+decryptor(struct scatterlist *sg, void *data)
+{
+       struct decryptor_desc *desc = data;
+       int thislen = desc->fraglen + sg->length;
+       int fraglen, ret;
+
+       /* Worst case is 4 fragments: head, end of page 1, start
+        * of page 2, tail.  Anything more is a bug. */
+       BUG_ON(desc->fragno > 3);
+       desc->frags[desc->fragno] = *sg;
+       desc->fragno++;
+       desc->fraglen += sg->length;
+
+       fraglen = thislen & 7; /* XXX hardcoded blocksize */
+       thislen -= fraglen;
+
+       if (thislen == 0)
+               return 0;
+
+       ret = crypto_cipher_decrypt_iv(desc->tfm, desc->frags, desc->frags,
+                                       thislen, desc->iv);
+       if (ret)
+               return ret;
+       if (fraglen) {
+               desc->frags[0].page = sg->page;
+               desc->frags[0].offset = sg->offset + sg->length - fraglen;
+               desc->frags[0].length = fraglen;
+               desc->fragno = 1;
+               desc->fraglen = fraglen;
+       } else {
+               desc->fragno = 0;
+               desc->fraglen = 0;
+       }
+       return 0;
+}
+
+int
+gss_decrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *buf, int offset)
+{
+       struct decryptor_desc desc;
+
+       /* XXXJBF: */
+       BUG_ON((buf->len - offset) % crypto_tfm_alg_blocksize(tfm) != 0);
+
+       memset(desc.iv, 0, sizeof(desc.iv));
+       desc.tfm = tfm;
+       desc.fragno = 0;
+       desc.fraglen = 0;
+       return process_xdr_buf(buf, offset, buf->len - offset, decryptor, &desc);
+}
+
+EXPORT_SYMBOL(gss_decrypt_xdr_buf);
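
process_xdr_buf() turns the head/pages/tail walk that used to be open-coded in make_checksum() into a generic iterator that hands each contiguous piece to an actor callback, which is how checksummer(), encryptor() and decryptor() can share one traversal. A much-simplified userspace sketch of that callback shape (hypothetical types, and without the offset/length windowing the real function does):

/* Sketch: walk a segmented buffer and pass each piece to a caller-supplied
 * actor, so checksumming, encryption and decryption can reuse one loop. */
#include <stdio.h>

struct segment { const char *data; int len; };

static int walk_segments(const struct segment *segs, int nsegs,
                         int (*actor)(const char *, int, void *), void *priv)
{
        int i, ret;

        for (i = 0; i < nsegs; i++) {
                ret = actor(segs[i].data, segs[i].len, priv);
                if (ret)
                        return ret;     /* actor can abort the walk */
        }
        return 0;
}

static int count_bytes(const char *data, int len, void *priv)
{
        (void)data;
        *(int *)priv += len;
        return 0;
}

int main(void)
{
        struct segment segs[] = {
                { "head", 4 }, { "pages", 5 }, { "tail", 4 },
        };
        int total = 0;

        walk_segments(segs, 3, count_bytes, &total);
        printf("%d\n", total);          /* prints 13 */
        return 0;
}
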
index 606a8a82cafbedc84545ec73def5ff2e54054b5c..5f1f806a0b1176401f0fb744eaa9b7d7579edc90 100644 (file)
@@ -39,7 +39,6 @@
 #include <linux/types.h>
 #include <linux/slab.h>
 #include <linux/sunrpc/auth.h>
-#include <linux/in.h>
 #include <linux/sunrpc/gss_krb5.h>
 #include <linux/sunrpc/xdr.h>
 #include <linux/crypto.h>
@@ -191,43 +190,12 @@ gss_delete_sec_context_kerberos(void *internal_ctx) {
        kfree(kctx);
 }
 
-static u32
-gss_verify_mic_kerberos(struct gss_ctx         *ctx,
-                       struct xdr_buf          *message,
-                       struct xdr_netobj       *mic_token,
-                       u32                     *qstate) {
-       u32 maj_stat = 0;
-       int qop_state;
-       struct krb5_ctx *kctx = ctx->internal_ctx_id;
-
-       maj_stat = krb5_read_token(kctx, mic_token, message, &qop_state,
-                                  KG_TOK_MIC_MSG);
-       if (!maj_stat && qop_state)
-           *qstate = qop_state;
-
-       dprintk("RPC:      gss_verify_mic_kerberos returning %d\n", maj_stat);
-       return maj_stat;
-}
-
-static u32
-gss_get_mic_kerberos(struct gss_ctx    *ctx,
-                    u32                qop,
-                    struct xdr_buf     *message,
-                    struct xdr_netobj  *mic_token) {
-       u32 err = 0;
-       struct krb5_ctx *kctx = ctx->internal_ctx_id;
-
-       err = krb5_make_token(kctx, qop, message, mic_token, KG_TOK_MIC_MSG);
-
-       dprintk("RPC:      gss_get_mic_kerberos returning %d\n",err);
-
-       return err;
-}
-
 static struct gss_api_ops gss_kerberos_ops = {
        .gss_import_sec_context = gss_import_sec_context_kerberos,
        .gss_get_mic            = gss_get_mic_kerberos,
        .gss_verify_mic         = gss_verify_mic_kerberos,
+       .gss_wrap               = gss_wrap_kerberos,
+       .gss_unwrap             = gss_unwrap_kerberos,
        .gss_delete_sec_context = gss_delete_sec_context_kerberos,
 };
 
@@ -242,6 +210,11 @@ static struct pf_desc gss_kerberos_pfs[] = {
                .service = RPC_GSS_SVC_INTEGRITY,
                .name = "krb5i",
        },
+       [2] = {
+               .pseudoflavor = RPC_AUTH_GSS_KRB5P,
+               .service = RPC_GSS_SVC_PRIVACY,
+               .name = "krb5p",
+       },
 };
 
 static struct gss_api_mech gss_kerberos_mech = {
index afeeb8715a774c7e05d80a28112ced76753dac09..13f8ae9794542d436f82274bff2e38c0a7724f18 100644 (file)
 # define RPCDBG_FACILITY        RPCDBG_AUTH
 #endif
 
-static inline int
-gss_krb5_padding(int blocksize, int length) {
-       /* Most of the code is block-size independent but in practice we
-        * use only 8: */
-       BUG_ON(blocksize != 8);
-       return 8 - (length & 7);
-}
-
 u32
-krb5_make_token(struct krb5_ctx *ctx, int qop_req,
-                  struct xdr_buf *text, struct xdr_netobj *token,
-                  int toktype)
+gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
+               struct xdr_netobj *token)
 {
+       struct krb5_ctx         *ctx = gss_ctx->internal_ctx_id;
        s32                     checksum_type;
        struct xdr_netobj       md5cksum = {.len = 0, .data = NULL};
-       int                     blocksize = 0, tmsglen;
        unsigned char           *ptr, *krb5_hdr, *msg_start;
        s32                     now;
 
@@ -93,9 +84,6 @@ krb5_make_token(struct krb5_ctx *ctx, int qop_req,
 
        now = get_seconds();
 
-       if (qop_req != 0)
-               goto out_err;
-
        switch (ctx->signalg) {
                case SGN_ALG_DES_MAC_MD5:
                        checksum_type = CKSUMTYPE_RSA_MD5;
@@ -111,21 +99,13 @@ krb5_make_token(struct krb5_ctx *ctx, int qop_req,
                goto out_err;
        }
 
-       if (toktype == KG_TOK_WRAP_MSG) {
-               blocksize = crypto_tfm_alg_blocksize(ctx->enc);
-               tmsglen = blocksize + text->len
-                       + gss_krb5_padding(blocksize, blocksize + text->len);
-       } else {
-               tmsglen = 0;
-       }
-
-       token->len = g_token_size(&ctx->mech_used, 22 + tmsglen);
+       token->len = g_token_size(&ctx->mech_used, 22);
 
        ptr = token->data;
-       g_make_token_header(&ctx->mech_used, 22 + tmsglen, &ptr);
+       g_make_token_header(&ctx->mech_used, 22, &ptr);
 
-       *ptr++ = (unsigned char) ((toktype>>8)&0xff);
-       *ptr++ = (unsigned char) (toktype&0xff);
+       *ptr++ = (unsigned char) ((KG_TOK_MIC_MSG>>8)&0xff);
+       *ptr++ = (unsigned char) (KG_TOK_MIC_MSG&0xff);
 
        /* ptr now at byte 2 of header described in rfc 1964, section 1.2.1: */
        krb5_hdr = ptr - 2;
@@ -133,17 +113,9 @@ krb5_make_token(struct krb5_ctx *ctx, int qop_req,
 
        *(u16 *)(krb5_hdr + 2) = htons(ctx->signalg);
        memset(krb5_hdr + 4, 0xff, 4);
-       if (toktype == KG_TOK_WRAP_MSG)
-               *(u16 *)(krb5_hdr + 4) = htons(ctx->sealalg);
 
-       if (toktype == KG_TOK_WRAP_MSG) {
-               /* XXX removing support for now */
-               goto out_err;
-       } else { /* Sign only.  */
-               if (make_checksum(checksum_type, krb5_hdr, 8, text,
-                                      &md5cksum))
+       if (make_checksum(checksum_type, krb5_hdr, 8, text, 0, &md5cksum))
                        goto out_err;
-       }
 
        switch (ctx->signalg) {
        case SGN_ALG_DES_MAC_MD5:
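
gss_get_mic_kerberos() above fills a fixed 24-byte token header byte by byte through krb5_hdr. Read off this code (RFC 1964, section 1.2.1 is the authoritative layout), the offsets are roughly as follows; the enum names here are purely illustrative:

/* Sketch: offsets inside the krb5 token header as the code above fills it
 * in (names invented for illustration, not taken from the kernel). */
enum krb5_hdr_offset {
        KRB5_HDR_TOK_ID    = 0,   /* KG_TOK_MIC_MSG or KG_TOK_WRAP_MSG, 2 bytes */
        KRB5_HDR_SGN_ALG   = 2,   /* ctx->signalg, 2 bytes                      */
        KRB5_HDR_SEAL_ALG  = 4,   /* 0xffff for MIC, ctx->sealalg for wrap      */
        KRB5_HDR_FILLER    = 6,   /* always 0xffff                              */
        KRB5_HDR_SND_SEQ   = 8,   /* encrypted sequence number, 8 bytes         */
        KRB5_HDR_SGN_CKSUM = 16,  /* truncated checksum, 8 bytes                */
};
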
index 8767fc53183d49c105a5d366bf03ff663ef6b775..2030475d98ed26b22632a6dc6f01068ef6f92947 100644 (file)
 #endif
 
 
-/* message_buffer is an input if toktype is MIC and an output if it is WRAP:
- * If toktype is MIC: read_token is a mic token, and message_buffer is the
- *   data that the mic was supposedly taken over.
- * If toktype is WRAP: read_token is a wrap token, and message_buffer is used
- *   to return the decrypted data.
- */
+/* read_token is a mic token, and message_buffer is the data that the mic was
+ * supposedly taken over. */
 
-/* XXX will need to change prototype and/or just split into a separate function
- * when we add privacy (because read_token will be in pages too). */
 u32
-krb5_read_token(struct krb5_ctx *ctx,
-               struct xdr_netobj *read_token,
-               struct xdr_buf *message_buffer,
-               int *qop_state, int toktype)
+gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
+               struct xdr_buf *message_buffer, struct xdr_netobj *read_token)
 {
+       struct krb5_ctx         *ctx = gss_ctx->internal_ctx_id;
        int                     signalg;
        int                     sealalg;
        s32                     checksum_type;
@@ -100,16 +93,12 @@ krb5_read_token(struct krb5_ctx *ctx,
                                        read_token->len))
                goto out;
 
-       if ((*ptr++ != ((toktype>>8)&0xff)) || (*ptr++ != (toktype&0xff)))
+       if ((*ptr++ != ((KG_TOK_MIC_MSG>>8)&0xff)) ||
+           (*ptr++ != ( KG_TOK_MIC_MSG    &0xff))   )
                goto out;
 
        /* XXX sanity-check bodysize?? */
 
-       if (toktype == KG_TOK_WRAP_MSG) {
-               /* XXX gone */
-               goto out;
-       }
-
        /* get the sign and seal algorithms */
 
        signalg = ptr[0] + (ptr[1] << 8);
@@ -120,14 +109,7 @@ krb5_read_token(struct krb5_ctx *ctx,
        if ((ptr[4] != 0xff) || (ptr[5] != 0xff))
                goto out;
 
-       if (((toktype != KG_TOK_WRAP_MSG) && (sealalg != 0xffff)) ||
-           ((toktype == KG_TOK_WRAP_MSG) && (sealalg == 0xffff)))
-               goto out;
-
-       /* in the current spec, there is only one valid seal algorithm per
-          key type, so a simple comparison is ok */
-
-       if ((toktype == KG_TOK_WRAP_MSG) && !(sealalg == ctx->sealalg))
+       if (sealalg != 0xffff)
                goto out;
 
        /* there are several mappings of seal algorithms to sign algorithms,
@@ -154,7 +136,7 @@ krb5_read_token(struct krb5_ctx *ctx,
        switch (signalg) {
        case SGN_ALG_DES_MAC_MD5:
                ret = make_checksum(checksum_type, ptr - 2, 8,
-                                        message_buffer, &md5cksum);
+                                        message_buffer, 0, &md5cksum);
                if (ret)
                        goto out;
 
@@ -175,9 +157,6 @@ krb5_read_token(struct krb5_ctx *ctx,
 
        /* it got through unscathed.  Make sure the context is unexpired */
 
-       if (qop_state)
-               *qop_state = GSS_C_QOP_DEFAULT;
-
        now = get_seconds();
 
        ret = GSS_S_CONTEXT_EXPIRED;
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
new file mode 100644 (file)
index 0000000..af777cf
--- /dev/null
@@ -0,0 +1,363 @@
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/sunrpc/gss_krb5.h>
+#include <linux/random.h>
+#include <linux/pagemap.h>
+#include <asm/scatterlist.h>
+#include <linux/crypto.h>
+
+#ifdef RPC_DEBUG
+# define RPCDBG_FACILITY       RPCDBG_AUTH
+#endif
+
+static inline int
+gss_krb5_padding(int blocksize, int length)
+{
+       /* Most of the code is block-size independent but currently we
+        * use only 8: */
+       BUG_ON(blocksize != 8);
+       return 8 - (length & 7);
+}
+
+static inline void
+gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize)
+{
+       int padding = gss_krb5_padding(blocksize, buf->len - offset);
+       char *p;
+       struct kvec *iov;
+
+       if (buf->page_len || buf->tail[0].iov_len)
+               iov = &buf->tail[0];
+       else
+               iov = &buf->head[0];
+       p = iov->iov_base + iov->iov_len;
+       iov->iov_len += padding;
+       buf->len += padding;
+       memset(p, padding, padding);
+}
+
+static inline int
+gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
+{
+       u8 *ptr;
+       u8 pad;
+       int len = buf->len;
+
+       if (len <= buf->head[0].iov_len) {
+               pad = *(u8 *)(buf->head[0].iov_base + len - 1);
+               if (pad > buf->head[0].iov_len)
+                       return -EINVAL;
+               buf->head[0].iov_len -= pad;
+               goto out;
+       } else
+               len -= buf->head[0].iov_len;
+       if (len <= buf->page_len) {
+               int last = (buf->page_base + len - 1)
+                                       >>PAGE_CACHE_SHIFT;
+               int offset = (buf->page_base + len - 1)
+                                       & (PAGE_CACHE_SIZE - 1);
+               ptr = kmap_atomic(buf->pages[last], KM_SKB_SUNRPC_DATA);
+               pad = *(ptr + offset);
+               kunmap_atomic(ptr, KM_SKB_SUNRPC_DATA);
+               goto out;
+       } else
+               len -= buf->page_len;
+       BUG_ON(len > buf->tail[0].iov_len);
+       pad = *(u8 *)(buf->tail[0].iov_base + len - 1);
+out:
+       /* XXX: NOTE: we do not adjust the page lengths--they represent
+        * a range of data in the real filesystem page cache, and we need
+        * to know that range so the xdr code can properly place read data.
+        * However adjusting the head length, as we do above, is harmless.
+        * In the case of a request that fits into a single page, the server
+        * also uses length and head length together to determine the original
+        * start of the request to copy the request for deferral; so it's
+        * easier on the server if we adjust head and tail length in tandem.
+        * It's not really a problem that we don't fool with the page and
+        * tail lengths, though--at worst badly formed xdr might lead the
+        * server to attempt to parse the padding.
+        * XXX: Document all these weird requirements for gss mechanism
+        * wrap/unwrap functions. */
+       if (pad > blocksize)
+               return -EINVAL;
+       if (buf->len > pad)
+               buf->len -= pad;
+       else
+               return -EINVAL;
+       return 0;
+}
+
+static inline void
+make_confounder(char *p, int blocksize)
+{
+       static u64 i = 0;
+       u64 *q = (u64 *)p;
+
+       /* rfc1964 claims this should be "random".  But all that's really
+        * necessary is that it be unique.  And not even that is necessary in
+        * our case since our "gssapi" implementation exists only to support
+        * rpcsec_gss, so we know that the only buffers we will ever encrypt
+        * already begin with a unique sequence number.  Just to hedge my bets
+        * I'll make a half-hearted attempt at something unique, but ensuring
+        * uniqueness would mean worrying about atomicity and rollover, and I
+        * don't care enough. */
+
+       BUG_ON(blocksize != 8);
+       *q = i++;
+}
+
+/* Assumptions: the head and tail of inbuf are ours to play with.
+ * The pages, however, may be real pages in the page cache and we replace
+ * them with scratch pages from **pages before writing to them. */
+/* XXX: obviously the above should be documentation of wrap interface,
+ * and shouldn't be in this kerberos-specific file. */
+
+/* XXX factor out common code with seal/unseal. */
+
+u32
+gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
+               struct xdr_buf *buf, struct page **pages)
+{
+       struct krb5_ctx         *kctx = ctx->internal_ctx_id;
+       s32                     checksum_type;
+       struct xdr_netobj       md5cksum = {.len = 0, .data = NULL};
+       int                     blocksize = 0, plainlen;
+       unsigned char           *ptr, *krb5_hdr, *msg_start;
+       s32                     now;
+       int                     headlen;
+       struct page             **tmp_pages;
+
+       dprintk("RPC:     gss_wrap_kerberos\n");
+
+       now = get_seconds();
+
+       switch (kctx->signalg) {
+               case SGN_ALG_DES_MAC_MD5:
+                       checksum_type = CKSUMTYPE_RSA_MD5;
+                       break;
+               default:
+                       dprintk("RPC:      gss_krb5_seal: kctx->signalg %d not"
+                               " supported\n", kctx->signalg);
+                       goto out_err;
+       }
+       if (kctx->sealalg != SEAL_ALG_NONE && kctx->sealalg != SEAL_ALG_DES) {
+               dprintk("RPC:      gss_krb5_seal: kctx->sealalg %d not supported\n",
+                       kctx->sealalg);
+               goto out_err;
+       }
+
+       blocksize = crypto_tfm_alg_blocksize(kctx->enc);
+       gss_krb5_add_padding(buf, offset, blocksize);
+       BUG_ON((buf->len - offset) % blocksize);
+       plainlen = blocksize + buf->len - offset;
+
+       headlen = g_token_size(&kctx->mech_used, 22 + plainlen) -
+                                               (buf->len - offset);
+
+       ptr = buf->head[0].iov_base + offset;
+       /* shift data to make room for header. */
+       /* XXX Would be cleverer to encrypt while copying. */
+       /* XXX bounds checking, slack, etc. */
+       memmove(ptr + headlen, ptr, buf->head[0].iov_len - offset);
+       buf->head[0].iov_len += headlen;
+       buf->len += headlen;
+       BUG_ON((buf->len - offset - headlen) % blocksize);
+
+       g_make_token_header(&kctx->mech_used, 22 + plainlen, &ptr);
+
+
+       *ptr++ = (unsigned char) ((KG_TOK_WRAP_MSG>>8)&0xff);
+       *ptr++ = (unsigned char) (KG_TOK_WRAP_MSG&0xff);
+
+       /* ptr now at byte 2 of header described in rfc 1964, section 1.2.1: */
+       krb5_hdr = ptr - 2;
+       msg_start = krb5_hdr + 24;
+       /* XXXJBF: */ BUG_ON(buf->head[0].iov_base + offset + headlen != msg_start + blocksize);
+
+       *(u16 *)(krb5_hdr + 2) = htons(kctx->signalg);
+       memset(krb5_hdr + 4, 0xff, 4);
+       *(u16 *)(krb5_hdr + 4) = htons(kctx->sealalg);
+
+       make_confounder(msg_start, blocksize);
+
+       /* XXXJBF: UGH!: */
+       tmp_pages = buf->pages;
+       buf->pages = pages;
+       if (make_checksum(checksum_type, krb5_hdr, 8, buf,
+                               offset + headlen - blocksize, &md5cksum))
+               goto out_err;
+       buf->pages = tmp_pages;
+
+       switch (kctx->signalg) {
+       case SGN_ALG_DES_MAC_MD5:
+               if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
+                                 md5cksum.data, md5cksum.len))
+                       goto out_err;
+               memcpy(krb5_hdr + 16,
+                      md5cksum.data + md5cksum.len - KRB5_CKSUM_LENGTH,
+                      KRB5_CKSUM_LENGTH);
+
+               dprintk("RPC:      make_seal_token: cksum data: \n");
+               print_hexl((u32 *) (krb5_hdr + 16), KRB5_CKSUM_LENGTH, 0);
+               break;
+       default:
+               BUG();
+       }
+
+       kfree(md5cksum.data);
+
+       /* XXX would probably be more efficient to compute checksum
+        * and encrypt at the same time: */
+       if ((krb5_make_seq_num(kctx->seq, kctx->initiate ? 0 : 0xff,
+                              kctx->seq_send, krb5_hdr + 16, krb5_hdr + 8)))
+               goto out_err;
+
+       if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize,
+                                                                       pages))
+               goto out_err;
+
+       kctx->seq_send++;
+
+       return ((kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE);
+out_err:
+       if (md5cksum.data) kfree(md5cksum.data);
+       return GSS_S_FAILURE;
+}
+
+u32
+gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
+{
+       struct krb5_ctx         *kctx = ctx->internal_ctx_id;
+       int                     signalg;
+       int                     sealalg;
+       s32                     checksum_type;
+       struct xdr_netobj       md5cksum = {.len = 0, .data = NULL};
+       s32                     now;
+       int                     direction;
+       s32                     seqnum;
+       unsigned char           *ptr;
+       int                     bodysize;
+       u32                     ret = GSS_S_DEFECTIVE_TOKEN;
+       void                    *data_start, *orig_start;
+       int                     data_len;
+       int                     blocksize;
+
+       dprintk("RPC:      gss_unwrap_kerberos\n");
+
+       ptr = (u8 *)buf->head[0].iov_base + offset;
+       if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
+                                       buf->len - offset))
+               goto out;
+
+       if ((*ptr++ != ((KG_TOK_WRAP_MSG>>8)&0xff)) ||
+           (*ptr++ !=  (KG_TOK_WRAP_MSG    &0xff))   )
+               goto out;
+
+       /* XXX sanity-check bodysize?? */
+
+       /* get the sign and seal algorithms */
+
+       signalg = ptr[0] + (ptr[1] << 8);
+       sealalg = ptr[2] + (ptr[3] << 8);
+
+       /* Sanity checks */
+
+       if ((ptr[4] != 0xff) || (ptr[5] != 0xff))
+               goto out;
+
+       if (sealalg == 0xffff)
+               goto out;
+
+       /* in the current spec, there is only one valid seal algorithm per
+          key type, so a simple comparison is ok */
+
+       if (sealalg != kctx->sealalg)
+               goto out;
+
+       /* there are several mappings of seal algorithms to sign algorithms,
+          but few enough that we can try them all. */
+
+       if ((kctx->sealalg == SEAL_ALG_NONE && signalg > 1) ||
+           (kctx->sealalg == SEAL_ALG_1 && signalg != SGN_ALG_3) ||
+           (kctx->sealalg == SEAL_ALG_DES3KD &&
+            signalg != SGN_ALG_HMAC_SHA1_DES3_KD))
+               goto out;
+
+       if (gss_decrypt_xdr_buf(kctx->enc, buf,
+                       ptr + 22 - (unsigned char *)buf->head[0].iov_base))
+               goto out;
+
+       /* compute the checksum of the message */
+
+       /* initialize the cksum */
+       switch (signalg) {
+       case SGN_ALG_DES_MAC_MD5:
+               checksum_type = CKSUMTYPE_RSA_MD5;
+               break;
+       default:
+               ret = GSS_S_DEFECTIVE_TOKEN;
+               goto out;
+       }
+
+       switch (signalg) {
+       case SGN_ALG_DES_MAC_MD5:
+               ret = make_checksum(checksum_type, ptr - 2, 8, buf,
+                        ptr + 22 - (unsigned char *)buf->head[0].iov_base, &md5cksum);
+               if (ret)
+                       goto out;
+
+               ret = krb5_encrypt(kctx->seq, NULL, md5cksum.data,
+                                  md5cksum.data, md5cksum.len);
+               if (ret)
+                       goto out;
+
+               if (memcmp(md5cksum.data + 8, ptr + 14, 8)) {
+                       ret = GSS_S_BAD_SIG;
+                       goto out;
+               }
+               break;
+       default:
+               ret = GSS_S_DEFECTIVE_TOKEN;
+               goto out;
+       }
+
+       /* it got through unscathed.  Make sure the context is unexpired */
+
+       now = get_seconds();
+
+       ret = GSS_S_CONTEXT_EXPIRED;
+       if (now > kctx->endtime)
+               goto out;
+
+       /* do sequencing checks */
+
+       ret = GSS_S_BAD_SIG;
+       if ((ret = krb5_get_seq_num(kctx->seq, ptr + 14, ptr + 6, &direction,
+                                   &seqnum)))
+               goto out;
+
+       if ((kctx->initiate && direction != 0xff) ||
+           (!kctx->initiate && direction != 0))
+               goto out;
+
+       /* Copy the data back to the right position.  XXX: Would probably be
+        * better to copy and encrypt at the same time. */
+
+       blocksize = crypto_tfm_alg_blocksize(kctx->enc);
+       data_start = ptr + 22 + blocksize;
+       orig_start = buf->head[0].iov_base + offset;
+       data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
+       memmove(orig_start, data_start, data_len);
+       buf->head[0].iov_len -= (data_start - orig_start);
+       buf->len -= (data_start - orig_start);
+
+       ret = GSS_S_DEFECTIVE_TOKEN;
+       if (gss_krb5_remove_padding(buf, blocksize))
+               goto out;
+
+       ret = GSS_S_COMPLETE;
+out:
+       if (md5cksum.data) kfree(md5cksum.data);
+       return ret;
+}
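
gss_krb5_add_padding() and gss_krb5_remove_padding() above implement the 8-byte-blocksize padding applied before encryption: every pad byte carries the pad length, so the last decrypted byte says how many bytes to strip again. A small standalone check of that convention (assuming the DES blocksize of 8 used throughout this file):

/* Sketch: blocksize-8 padding where each pad byte equals the pad length,
 * matching gss_krb5_add_padding()/gss_krb5_remove_padding(). */
#include <assert.h>
#include <string.h>

static int pad_len(int len)
{
        return 8 - (len & 7);           /* same as gss_krb5_padding(8, len) */
}

int main(void)
{
        char buf[32] = "12345";         /* 5 payload bytes */
        int len = 5, pad = pad_len(len);

        memset(buf + len, pad, pad);    /* append pad bytes, each == pad   */
        len += pad;

        assert(len == 8);               /* rounded up to one DES block     */
        assert(buf[len - 1] == 3);      /* remove_padding reads this back  */
        return 0;
}
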
index 9dfb68377d694818beabf3983d3e19a891bf020e..b048bf672da2bbffbe35953b3e47a57b7e2383d3 100644 (file)
@@ -35,7 +35,6 @@
 
 #include <linux/types.h>
 #include <linux/slab.h>
-#include <linux/socket.h>
 #include <linux/module.h>
 #include <linux/sunrpc/msg_prot.h>
 #include <linux/sunrpc/gss_asn1.h>
@@ -251,13 +250,11 @@ gss_import_sec_context(const void *input_token, size_t bufsize,
 
 u32
 gss_get_mic(struct gss_ctx     *context_handle,
-           u32                 qop,
            struct xdr_buf      *message,
            struct xdr_netobj   *mic_token)
 {
         return context_handle->mech_type->gm_ops
                ->gss_get_mic(context_handle,
-                             qop,
                              message,
                              mic_token);
 }
@@ -267,16 +264,34 @@ gss_get_mic(struct gss_ctx        *context_handle,
 u32
 gss_verify_mic(struct gss_ctx          *context_handle,
               struct xdr_buf           *message,
-              struct xdr_netobj        *mic_token,
-              u32                      *qstate)
+              struct xdr_netobj        *mic_token)
 {
        return context_handle->mech_type->gm_ops
                ->gss_verify_mic(context_handle,
                                 message,
-                                mic_token,
-                                qstate);
+                                mic_token);
 }
 
+u32
+gss_wrap(struct gss_ctx        *ctx_id,
+        int            offset,
+        struct xdr_buf *buf,
+        struct page    **inpages)
+{
+       return ctx_id->mech_type->gm_ops
+               ->gss_wrap(ctx_id, offset, buf, inpages);
+}
+
+u32
+gss_unwrap(struct gss_ctx      *ctx_id,
+          int                  offset,
+          struct xdr_buf       *buf)
+{
+       return ctx_id->mech_type->gm_ops
+               ->gss_unwrap(ctx_id, offset, buf);
+}
+
+
 /* gss_delete_sec_context: free all resources associated with context_handle.
  * Note this differs from the RFC 2744-specified prototype in that we don't
  * bother returning an output token, since it would never be used anyway. */
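
gss_wrap() and gss_unwrap() are deliberately thin: like gss_get_mic() and gss_verify_mic(), they only forward through the per-mechanism gss_api_ops table, so krb5 can register .gss_wrap/.gss_unwrap while spkm3 simply leaves them unset. A generic sketch of that ops-table dispatch (all names hypothetical):

/* Sketch: the ops-table dispatch pattern used by the gss mech switch -
 * the generic entry point only forwards to whatever the selected
 * mechanism registered. */
#include <stdio.h>

struct mech_ops {
        int (*wrap)(const char *msg);
        int (*unwrap)(const char *msg);
};

struct mech_ctx {
        const struct mech_ops *ops;
};

static int demo_wrap(const char *msg)
{
        printf("wrapping: %s\n", msg);
        return 0;
}

static const struct mech_ops demo_mech = { .wrap = demo_wrap };

static int mech_wrap(struct mech_ctx *ctx, const char *msg)
{
        return ctx->ops->wrap(msg);     /* like gss_wrap() above */
}

int main(void)
{
        struct mech_ctx ctx = { .ops = &demo_mech };

        return mech_wrap(&ctx, "hello");
}
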
index 6c97d61baa9bfa3a819fa079a132eb745babd081..39b3edc146947a9c9918b3a6cbf8c0da9308dec8 100644 (file)
@@ -224,18 +224,13 @@ gss_delete_sec_context_spkm3(void *internal_ctx) {
 static u32
 gss_verify_mic_spkm3(struct gss_ctx            *ctx,
                        struct xdr_buf          *signbuf,
-                       struct xdr_netobj       *checksum,
-                       u32             *qstate) {
+                       struct xdr_netobj       *checksum)
+{
        u32 maj_stat = 0;
-       int qop_state = 0;
        struct spkm3_ctx *sctx = ctx->internal_ctx_id;
 
        dprintk("RPC: gss_verify_mic_spkm3 calling spkm3_read_token\n");
-       maj_stat = spkm3_read_token(sctx, checksum, signbuf, &qop_state,
-                                  SPKM_MIC_TOK);
-
-       if (!maj_stat && qop_state)
-           *qstate = qop_state;
+       maj_stat = spkm3_read_token(sctx, checksum, signbuf, SPKM_MIC_TOK);
 
        dprintk("RPC: gss_verify_mic_spkm3 returning %d\n", maj_stat);
        return maj_stat;
@@ -243,15 +238,15 @@ gss_verify_mic_spkm3(struct gss_ctx               *ctx,
 
 static u32
 gss_get_mic_spkm3(struct gss_ctx       *ctx,
-                    u32                qop,
                     struct xdr_buf     *message_buffer,
-                    struct xdr_netobj  *message_token) {
+                    struct xdr_netobj  *message_token)
+{
        u32 err = 0;
        struct spkm3_ctx *sctx = ctx->internal_ctx_id;
 
        dprintk("RPC: gss_get_mic_spkm3\n");
 
-       err = spkm3_make_token(sctx, qop, message_buffer,
+       err = spkm3_make_token(sctx, message_buffer,
                              message_token, SPKM_MIC_TOK);
        return err;
 }
@@ -264,8 +259,8 @@ static struct gss_api_ops gss_spkm3_ops = {
 };
 
 static struct pf_desc gss_spkm3_pfs[] = {
-       {RPC_AUTH_GSS_SPKM, 0, RPC_GSS_SVC_NONE, "spkm3"},
-       {RPC_AUTH_GSS_SPKMI, 0, RPC_GSS_SVC_INTEGRITY, "spkm3i"},
+       {RPC_AUTH_GSS_SPKM, RPC_GSS_SVC_NONE, "spkm3"},
+       {RPC_AUTH_GSS_SPKMI, RPC_GSS_SVC_INTEGRITY, "spkm3i"},
 };
 
 static struct gss_api_mech gss_spkm3_mech = {
index 25339868d4621bbccdaedc95a3647b3d9843b08d..148201e929d08de77ca44f6b6dfbe8da20ab7e04 100644 (file)
@@ -51,7 +51,7 @@
  */
 
 u32
-spkm3_make_token(struct spkm3_ctx *ctx, int qop_req,
+spkm3_make_token(struct spkm3_ctx *ctx,
                   struct xdr_buf * text, struct xdr_netobj * token,
                   int toktype)
 {
@@ -68,8 +68,6 @@ spkm3_make_token(struct spkm3_ctx *ctx, int qop_req,
        dprintk("RPC: spkm3_make_token\n");
 
        now = jiffies;
-       if (qop_req != 0)
-               goto out_err;
 
        if (ctx->ctx_id.len != 16) {
                dprintk("RPC: spkm3_make_token BAD ctx_id.len %d\n",
index 65ce81bf0bc45789d1bbda9d2d944c41ca6a95c8..c3c0d95861039958305e89f437174b8c3bc13272 100644 (file)
@@ -52,7 +52,7 @@ u32
 spkm3_read_token(struct spkm3_ctx *ctx,
                struct xdr_netobj *read_token,    /* checksum */
                struct xdr_buf *message_buffer, /* signbuf */
-               int *qop_state, int toktype)
+               int toktype)
 {
        s32                     code;
        struct xdr_netobj       wire_cksum = {.len =0, .data = NULL};
index e3308195374e635b2ea617d64fc48684c24d1241..e4ada15ed856feba1f8c26683969c56cac1958b7 100644 (file)
@@ -566,8 +566,7 @@ gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci,
 
        if (rqstp->rq_deferred) /* skip verification of revisited request */
                return SVC_OK;
-       if (gss_verify_mic(ctx_id, &rpchdr, &checksum, NULL)
-                                                       != GSS_S_COMPLETE) {
+       if (gss_verify_mic(ctx_id, &rpchdr, &checksum) != GSS_S_COMPLETE) {
                *authp = rpcsec_gsserr_credproblem;
                return SVC_DENIED;
        }
@@ -604,7 +603,7 @@ gss_write_verf(struct svc_rqst *rqstp, struct gss_ctx *ctx_id, u32 seq)
        xdr_buf_from_iov(&iov, &verf_data);
        p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len;
        mic.data = (u8 *)(p + 1);
-       maj_stat = gss_get_mic(ctx_id, 0, &verf_data, &mic);
+       maj_stat = gss_get_mic(ctx_id, &verf_data, &mic);
        if (maj_stat != GSS_S_COMPLETE)
                return -1;
        *p++ = htonl(mic.len);
@@ -710,7 +709,7 @@ unwrap_integ_data(struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
                goto out;
        if (read_bytes_from_xdr_buf(buf, integ_len + 4, mic.data, mic.len))
                goto out;
-       maj_stat = gss_verify_mic(ctx, &integ_buf, &mic, NULL);
+       maj_stat = gss_verify_mic(ctx, &integ_buf, &mic);
        if (maj_stat != GSS_S_COMPLETE)
                goto out;
        if (ntohl(svc_getu32(&buf->head[0])) != seq)
@@ -1012,7 +1011,7 @@ svcauth_gss_release(struct svc_rqst *rqstp)
                        resv = &resbuf->tail[0];
                }
                mic.data = (u8 *)resv->iov_base + resv->iov_len + 4;
-               if (gss_get_mic(gsd->rsci->mechctx, 0, &integ_buf, &mic))
+               if (gss_get_mic(gsd->rsci->mechctx, &integ_buf, &mic))
                        goto out_err;
                svc_putu32(resv, htonl(mic.len));
                memset(mic.data + mic.len, 0,
index 9b72d3abf823bfd0fa0da0654bfd2b323c549169..f56767aaa9273678ca702758316acdd1587d9eef 100644 (file)
@@ -7,9 +7,7 @@
  */
 
 #include <linux/types.h>
-#include <linux/socket.h>
 #include <linux/module.h>
-#include <linux/in.h>
 #include <linux/utsname.h>
 #include <linux/sunrpc/clnt.h>
 #include <linux/sched.h>
index 4ff297a9b15bbced0257d07071d65763e3a1ff0b..890fb5ea0dcbf29fcc9af5e56e7c8c356b09d4b4 100644 (file)
@@ -9,8 +9,6 @@
 #include <linux/types.h>
 #include <linux/sched.h>
 #include <linux/module.h>
-#include <linux/socket.h>
-#include <linux/in.h>
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/auth.h>
 
index f17e6153b688d34ae22b0df226ee74a63445c524..702ede309b067260130321a32f8e8c1779b529bf 100644 (file)
@@ -1,5 +1,5 @@
 /*
- *  linux/net/sunrpc/rpcclnt.c
+ *  linux/net/sunrpc/clnt.c
  *
  *  This file contains the high-level RPC interface.
  *  It is modeled as a finite state machine to support both synchronous
@@ -27,7 +27,6 @@
 #include <linux/types.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
-#include <linux/in.h>
 #include <linux/utsname.h>
 
 #include <linux/sunrpc/clnt.h>
@@ -53,6 +52,7 @@ static void   call_allocate(struct rpc_task *task);
 static void    call_encode(struct rpc_task *task);
 static void    call_decode(struct rpc_task *task);
 static void    call_bind(struct rpc_task *task);
+static void    call_bind_status(struct rpc_task *task);
 static void    call_transmit(struct rpc_task *task);
 static void    call_status(struct rpc_task *task);
 static void    call_refresh(struct rpc_task *task);
@@ -517,15 +517,8 @@ void
 rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
 {
        struct rpc_xprt *xprt = clnt->cl_xprt;
-
-       xprt->sndsize = 0;
-       if (sndsize)
-               xprt->sndsize = sndsize + RPC_SLACK_SPACE;
-       xprt->rcvsize = 0;
-       if (rcvsize)
-               xprt->rcvsize = rcvsize + RPC_SLACK_SPACE;
-       if (xprt_connected(xprt))
-               xprt_sock_setbufsize(xprt);
+       if (xprt->ops->set_buffer_size)
+               xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
 }
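
rpc_setbufsize() now dispatches through the transport's method table instead of resizing socket buffers directly. The sketch below shows the general ops-table pattern in plain userspace C; the field names echo methods visible in this commit (set_buffer_size, close), but the struct layout is illustrative only and not the kernel's struct rpc_xprt_ops.

/* Userspace sketch of the "transport switch" dispatch pattern: generic
 * code calls through an ops table instead of assuming a socket. */
#include <stdio.h>

struct demo_xprt;

struct demo_xprt_ops {
        void (*set_buffer_size)(struct demo_xprt *x, unsigned snd, unsigned rcv);
        void (*close)(struct demo_xprt *x);
};

struct demo_xprt {
        const struct demo_xprt_ops *ops;
        unsigned sndsize, rcvsize;
};

static void udp_set_buffer_size(struct demo_xprt *x, unsigned snd, unsigned rcv)
{
        x->sndsize = snd;       /* a socket transport would resize SO_SNDBUF here */
        x->rcvsize = rcv;
}

static void udp_close(struct demo_xprt *x)
{
        (void)x;
        printf("closing transport\n");
}

static const struct demo_xprt_ops udp_ops = {
        .set_buffer_size = udp_set_buffer_size,
        .close           = udp_close,
};

/* Generic code no longer knows whether the transport is UDP, TCP or
 * something else -- it only checks for and calls the method. */
static void demo_setbufsize(struct demo_xprt *x, unsigned snd, unsigned rcv)
{
        if (x->ops->set_buffer_size)
                x->ops->set_buffer_size(x, snd, rcv);
}

int main(void)
{
        struct demo_xprt x = { .ops = &udp_ops };

        demo_setbufsize(&x, 4096, 8192);
        printf("snd=%u rcv=%u\n", x.sndsize, x.rcvsize);
        x.ops->close(&x);
        return 0;
}
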
 
 /*
@@ -685,13 +678,11 @@ call_allocate(struct rpc_task *task)
 static void
 call_encode(struct rpc_task *task)
 {
-       struct rpc_clnt *clnt = task->tk_client;
        struct rpc_rqst *req = task->tk_rqstp;
        struct xdr_buf *sndbuf = &req->rq_snd_buf;
        struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
        unsigned int    bufsiz;
        kxdrproc_t      encode;
-       int             status;
        u32             *p;
 
        dprintk("RPC: %4d call_encode (status %d)\n", 
@@ -719,11 +710,15 @@ call_encode(struct rpc_task *task)
                rpc_exit(task, -EIO);
                return;
        }
-       if (encode && (status = rpcauth_wrap_req(task, encode, req, p,
-                                                task->tk_msg.rpc_argp)) < 0) {
-               printk(KERN_WARNING "%s: can't encode arguments: %d\n",
-                               clnt->cl_protname, -status);
-               rpc_exit(task, status);
+       if (encode == NULL)
+               return;
+
+       task->tk_status = rpcauth_wrap_req(task, encode, req, p,
+                       task->tk_msg.rpc_argp);
+       if (task->tk_status == -ENOMEM) {
+               /* XXX: Is this sane? */
+               rpc_delay(task, 3*HZ);
+               task->tk_status = -EAGAIN;
        }
 }
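
call_encode() now treats a failed rpcauth_wrap_req() as transient when it returns -ENOMEM: the task is delayed for a few seconds and retried instead of being terminated. The userspace sketch below shows the same delay-then-retry policy; sleep() merely stands in for rpc_delay(task, 3*HZ), and encode_request() is a made-up placeholder.

/* Userspace sketch of the retry-on-ENOMEM policy adopted by call_encode():
 * a transient allocation failure delays the operation instead of aborting it. */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static int encode_request(int attempt)
{
        /* Pretend the first attempt hits memory pressure. */
        return attempt == 0 ? -ENOMEM : 0;
}

int main(void)
{
        int status, attempt = 0;

        for (;;) {
                status = encode_request(attempt++);
                if (status != -ENOMEM)
                        break;
                fprintf(stderr, "encode failed with ENOMEM, retrying shortly\n");
                sleep(3);               /* kernel: rpc_delay(task, 3*HZ) */
        }
        printf("encode finished with status %d after %d attempt(s)\n", status, attempt);
        return 0;
}
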
 
@@ -734,43 +729,95 @@ static void
 call_bind(struct rpc_task *task)
 {
        struct rpc_clnt *clnt = task->tk_client;
-       struct rpc_xprt *xprt = clnt->cl_xprt;
-
-       dprintk("RPC: %4d call_bind xprt %p %s connected\n", task->tk_pid,
-                       xprt, (xprt_connected(xprt) ? "is" : "is not"));
 
-       task->tk_action = (xprt_connected(xprt)) ? call_transmit : call_connect;
+       dprintk("RPC: %4d call_bind (status %d)\n",
+                               task->tk_pid, task->tk_status);
 
+       task->tk_action = call_connect;
        if (!clnt->cl_port) {
-               task->tk_action = call_connect;
-               task->tk_timeout = RPC_CONNECT_TIMEOUT;
+               task->tk_action = call_bind_status;
+               task->tk_timeout = task->tk_xprt->bind_timeout;
                rpc_getport(task, clnt);
        }
 }
 
 /*
- * 4a. Connect to the RPC server (TCP case)
+ * 4a. Sort out bind result
+ */
+static void
+call_bind_status(struct rpc_task *task)
+{
+       int status = -EACCES;
+
+       if (task->tk_status >= 0) {
+               dprintk("RPC: %4d call_bind_status (status %d)\n",
+                                       task->tk_pid, task->tk_status);
+               task->tk_status = 0;
+               task->tk_action = call_connect;
+               return;
+       }
+
+       switch (task->tk_status) {
+       case -EACCES:
+               dprintk("RPC: %4d remote rpcbind: RPC program/version unavailable\n",
+                               task->tk_pid);
+               rpc_delay(task, 3*HZ);
+               goto retry_bind;
+       case -ETIMEDOUT:
+               dprintk("RPC: %4d rpcbind request timed out\n",
+                               task->tk_pid);
+               if (RPC_IS_SOFT(task)) {
+                       status = -EIO;
+                       break;
+               }
+               goto retry_bind;
+       case -EPFNOSUPPORT:
+               dprintk("RPC: %4d remote rpcbind service unavailable\n",
+                               task->tk_pid);
+               break;
+       case -EPROTONOSUPPORT:
+               dprintk("RPC: %4d remote rpcbind version 2 unavailable\n",
+                               task->tk_pid);
+               break;
+       default:
+               dprintk("RPC: %4d unrecognized rpcbind error (%d)\n",
+                               task->tk_pid, -task->tk_status);
+               status = -EIO;
+               break;
+       }
+
+       rpc_exit(task, status);
+       return;
+
+retry_bind:
+       task->tk_status = 0;
+       task->tk_action = call_bind;
+       return;
+}
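
call_bind_status() triages the rpcbind result into three outcomes: proceed to connect, retry the bind (optionally after a pause), or fail the request. The sketch below condenses that decision into a plain function; it mirrors the switch above but is illustrative, not the kernel code.

/* Userspace sketch of the bind-result triage performed by call_bind_status(). */
#include <errno.h>
#include <stdio.h>

enum next_step { STEP_CONNECT, STEP_REBIND, STEP_FAIL };

static enum next_step triage_bind_status(int status, int soft_mount)
{
        if (status >= 0)
                return STEP_CONNECT;            /* port obtained, go connect */

        switch (status) {
        case -EACCES:                           /* program/version not registered */
                return STEP_REBIND;             /* kernel also sleeps 3s first */
        case -ETIMEDOUT:
                return soft_mount ? STEP_FAIL : STEP_REBIND;
        case -EPFNOSUPPORT:                     /* no rpcbind service at all */
        case -EPROTONOSUPPORT:                  /* rpcbind version 2 unavailable */
        default:
                return STEP_FAIL;
        }
}

int main(void)
{
        printf("%d %d %d\n",
               triage_bind_status(0, 0),
               triage_bind_status(-ETIMEDOUT, 1),
               triage_bind_status(-EACCES, 0));
        return 0;
}
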
+
+/*
+ * 4b. Connect to the RPC server
  */
 static void
 call_connect(struct rpc_task *task)
 {
-       struct rpc_clnt *clnt = task->tk_client;
+       struct rpc_xprt *xprt = task->tk_xprt;
 
-       dprintk("RPC: %4d call_connect status %d\n",
-                               task->tk_pid, task->tk_status);
+       dprintk("RPC: %4d call_connect xprt %p %s connected\n",
+                       task->tk_pid, xprt,
+                       (xprt_connected(xprt) ? "is" : "is not"));
 
-       if (xprt_connected(clnt->cl_xprt)) {
-               task->tk_action = call_transmit;
-               return;
+       task->tk_action = call_transmit;
+       if (!xprt_connected(xprt)) {
+               task->tk_action = call_connect_status;
+               if (task->tk_status < 0)
+                       return;
+               xprt_connect(task);
        }
-       task->tk_action = call_connect_status;
-       if (task->tk_status < 0)
-               return;
-       xprt_connect(task);
 }
 
 /*
- * 4b. Sort out connect result
+ * 4c. Sort out connect result
  */
 static void
 call_connect_status(struct rpc_task *task)
@@ -778,6 +825,9 @@ call_connect_status(struct rpc_task *task)
        struct rpc_clnt *clnt = task->tk_client;
        int status = task->tk_status;
 
+       dprintk("RPC: %5u call_connect_status (status %d)\n", 
+                               task->tk_pid, task->tk_status);
+
        task->tk_status = 0;
        if (status >= 0) {
                clnt->cl_stats->netreconn++;
@@ -785,17 +835,19 @@ call_connect_status(struct rpc_task *task)
                return;
        }
 
-       /* Something failed: we may have to rebind */
+       /* Something failed: remote service port may have changed */
        if (clnt->cl_autobind)
                clnt->cl_port = 0;
+
        switch (status) {
        case -ENOTCONN:
        case -ETIMEDOUT:
        case -EAGAIN:
-               task->tk_action = (clnt->cl_port == 0) ? call_bind : call_connect;
+               task->tk_action = call_bind;
                break;
        default:
                rpc_exit(task, -EIO);
+               break;
        }
 }
 
@@ -815,10 +867,12 @@ call_transmit(struct rpc_task *task)
        if (task->tk_status != 0)
                return;
        /* Encode here so that rpcsec_gss can use correct sequence number. */
-       if (!task->tk_rqstp->rq_bytes_sent)
+       if (task->tk_rqstp->rq_bytes_sent == 0) {
                call_encode(task);
-       if (task->tk_status < 0)
-               return;
+               /* Did the encode result in an error condition? */
+               if (task->tk_status != 0)
+                       goto out_nosend;
+       }
        xprt_transmit(task);
        if (task->tk_status < 0)
                return;
@@ -826,6 +880,10 @@ call_transmit(struct rpc_task *task)
                task->tk_action = NULL;
                rpc_wake_up_task(task);
        }
+       return;
+out_nosend:
+       /* release socket write lock before attempting to handle error */
+       xprt_abort_transmit(task);
 }
 
 /*
@@ -1020,13 +1078,12 @@ static u32 *
 call_header(struct rpc_task *task)
 {
        struct rpc_clnt *clnt = task->tk_client;
-       struct rpc_xprt *xprt = clnt->cl_xprt;
        struct rpc_rqst *req = task->tk_rqstp;
        u32             *p = req->rq_svec[0].iov_base;
 
        /* FIXME: check buffer size? */
-       if (xprt->stream)
-               *p++ = 0;               /* fill in later */
+
+       p = xprt_skip_transport_header(task->tk_xprt, p);
        *p++ = req->rq_xid;             /* XID */
        *p++ = htonl(RPC_CALL);         /* CALL */
        *p++ = htonl(RPC_VERSION);      /* RPC version */
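
call_header() no longer tests xprt->stream itself; reserving room for the TCP record marker becomes the transport's business via xprt_skip_transport_header(). The sketch below shows one plausible shape for such a hook, with stream transports returning p + 1 and datagram transports returning p unchanged; this is an assumption about the helper, not a copy of it.

/* Userspace sketch: a per-transport "skip header" hook.  A stream
 * transport reserves one 32-bit word for the record marker filled in at
 * transmit time; a datagram transport reserves nothing.  Assumed shape
 * only, not the kernel source. */
#include <stdint.h>
#include <stdio.h>

struct demo_xprt {
        uint32_t *(*skip_transport_header)(uint32_t *p);
};

static uint32_t *stream_skip(uint32_t *p)
{
        *p = 0;                 /* placeholder for the record marker */
        return p + 1;
}

static uint32_t *dgram_skip(uint32_t *p)
{
        return p;               /* datagrams need no record marker */
}

static uint32_t *build_call_header(struct demo_xprt *x, uint32_t *p, uint32_t xid)
{
        p = x->skip_transport_header(p);
        *p++ = xid;             /* XID, then call direction, version, ... */
        return p;
}

int main(void)
{
        uint32_t buf[8];
        struct demo_xprt tcp = { .skip_transport_header = stream_skip };
        struct demo_xprt udp = { .skip_transport_header = dgram_skip };

        printf("tcp header words: %td\n", build_call_header(&tcp, buf, 42) - buf);
        printf("udp header words: %td\n", build_call_header(&udp, buf, 42) - buf);
        return 0;
}
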
index 4e81f27669239cbf9a4ca83d7d7af2040d30317a..a398575f94b8a1dd1a9da067c597ccdd7bb24ce3 100644 (file)
@@ -26,7 +26,7 @@
 #define PMAP_GETPORT           3
 
 static struct rpc_procinfo     pmap_procedures[];
-static struct rpc_clnt *       pmap_create(char *, struct sockaddr_in *, int);
+static struct rpc_clnt *       pmap_create(char *, struct sockaddr_in *, int, int);
 static void                    pmap_getport_done(struct rpc_task *);
 static struct rpc_program      pmap_program;
 static DEFINE_SPINLOCK(pmap_lock);
@@ -65,7 +65,7 @@ rpc_getport(struct rpc_task *task, struct rpc_clnt *clnt)
        map->pm_binding = 1;
        spin_unlock(&pmap_lock);
 
-       pmap_clnt = pmap_create(clnt->cl_server, sap, map->pm_prot);
+       pmap_clnt = pmap_create(clnt->cl_server, sap, map->pm_prot, 0);
        if (IS_ERR(pmap_clnt)) {
                task->tk_status = PTR_ERR(pmap_clnt);
                goto bailout;
@@ -112,7 +112,7 @@ rpc_getport_external(struct sockaddr_in *sin, __u32 prog, __u32 vers, int prot)
                        NIPQUAD(sin->sin_addr.s_addr), prog, vers, prot);
 
        sprintf(hostname, "%u.%u.%u.%u", NIPQUAD(sin->sin_addr.s_addr));
-       pmap_clnt = pmap_create(hostname, sin, prot);
+       pmap_clnt = pmap_create(hostname, sin, prot, 0);
        if (IS_ERR(pmap_clnt))
                return PTR_ERR(pmap_clnt);
 
@@ -171,7 +171,7 @@ rpc_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay)
 
        sin.sin_family = AF_INET;
        sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
-       pmap_clnt = pmap_create("localhost", &sin, IPPROTO_UDP);
+       pmap_clnt = pmap_create("localhost", &sin, IPPROTO_UDP, 1);
        if (IS_ERR(pmap_clnt)) {
                error = PTR_ERR(pmap_clnt);
                dprintk("RPC: couldn't create pmap client. Error = %d\n", error);
@@ -198,7 +198,7 @@ rpc_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay)
 }
 
 static struct rpc_clnt *
-pmap_create(char *hostname, struct sockaddr_in *srvaddr, int proto)
+pmap_create(char *hostname, struct sockaddr_in *srvaddr, int proto, int privileged)
 {
        struct rpc_xprt *xprt;
        struct rpc_clnt *clnt;
@@ -208,6 +208,8 @@ pmap_create(char *hostname, struct sockaddr_in *srvaddr, int proto)
        if (IS_ERR(xprt))
                return (struct rpc_clnt *)xprt;
        xprt->addr.sin_port = htons(RPC_PMAP_PORT);
+       if (!privileged)
+               xprt->resvport = 0;
 
        /* printk("pmap: create clnt\n"); */
        clnt = rpc_new_client(xprt, hostname,
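
pmap_create() gains a privileged flag: ordinary getport lookups pass 0 and get xprt->resvport cleared, while the local registration path in rpc_register() passes 1 and keeps a reserved port. A tiny illustrative userspace stand-in for that calling convention (host name and protocol number are arbitrary examples):

/* Sketch of the new pmap_create() calling convention: lookups pass
 * privileged=0 (no reserved port needed), local registration passes 1. */
#include <stdbool.h>
#include <stdio.h>

struct demo_clnt { bool resvport; };

static struct demo_clnt demo_pmap_create(const char *host, int proto, int privileged)
{
        struct demo_clnt c = { .resvport = true };

        if (!privileged)
                c.resvport = false;     /* mirrors: xprt->resvport = 0 */
        printf("pmap client for %s proto %d, resvport=%d\n", host, proto, c.resvport);
        return c;
}

int main(void)
{
        demo_pmap_create("server.example", 17, 0);      /* getport lookup */
        demo_pmap_create("localhost", 17, 1);           /* local registration */
        return 0;
}
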
index ded6c63f11ec968263890ebded1f65ea31f1f674..4f188d0a5d11c2d51dd3c6b7b65a7ccd2e21d2c2 100644 (file)
@@ -76,25 +76,35 @@ int
 rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg)
 {
        struct rpc_inode *rpci = RPC_I(inode);
-       int res = 0;
+       int res = -EPIPE;
 
        down(&inode->i_sem);
+       if (rpci->ops == NULL)
+               goto out;
        if (rpci->nreaders) {
                list_add_tail(&msg->list, &rpci->pipe);
                rpci->pipelen += msg->len;
+               res = 0;
        } else if (rpci->flags & RPC_PIPE_WAIT_FOR_OPEN) {
                if (list_empty(&rpci->pipe))
                        schedule_delayed_work(&rpci->queue_timeout,
                                        RPC_UPCALL_TIMEOUT);
                list_add_tail(&msg->list, &rpci->pipe);
                rpci->pipelen += msg->len;
-       } else
-               res = -EPIPE;
+               res = 0;
+       }
+out:
        up(&inode->i_sem);
        wake_up(&rpci->waitq);
        return res;
 }
 
+static inline void
+rpc_inode_setowner(struct inode *inode, void *private)
+{
+       RPC_I(inode)->private = private;
+}
+
 static void
 rpc_close_pipes(struct inode *inode)
 {
@@ -111,15 +121,10 @@ rpc_close_pipes(struct inode *inode)
                        rpci->ops->release_pipe(inode);
                rpci->ops = NULL;
        }
+       rpc_inode_setowner(inode, NULL);
        up(&inode->i_sem);
 }
 
-static inline void
-rpc_inode_setowner(struct inode *inode, void *private)
-{
-       RPC_I(inode)->private = private;
-}
-
 static struct inode *
 rpc_alloc_inode(struct super_block *sb)
 {
@@ -501,7 +506,6 @@ repeat:
                        dentry = dvec[--n];
                        if (dentry->d_inode) {
                                rpc_close_pipes(dentry->d_inode);
-                               rpc_inode_setowner(dentry->d_inode, NULL);
                                simple_unlink(dir, dentry);
                        }
                        dput(dentry);
@@ -576,10 +580,8 @@ __rpc_rmdir(struct inode *dir, struct dentry *dentry)
        int error;
 
        shrink_dcache_parent(dentry);
-       if (dentry->d_inode) {
+       if (dentry->d_inode)
                rpc_close_pipes(dentry->d_inode);
-               rpc_inode_setowner(dentry->d_inode, NULL);
-       }
        if ((error = simple_rmdir(dir, dentry)) != 0)
                return error;
        if (!error) {
@@ -732,7 +734,6 @@ rpc_unlink(char *path)
        d_drop(dentry);
        if (dentry->d_inode) {
                rpc_close_pipes(dentry->d_inode);
-               rpc_inode_setowner(dentry->d_inode, NULL);
                error = simple_unlink(dir, dentry);
        }
        dput(dentry);
diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c
new file mode 100644 (file)
index 0000000..8f97e90
--- /dev/null
@@ -0,0 +1,175 @@
+/*
+ * linux/net/sunrpc/socklib.c
+ *
+ * Common socket helper routines for RPC client and server
+ *
+ * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
+ */
+
+#include <linux/types.h>
+#include <linux/pagemap.h>
+#include <linux/udp.h>
+#include <linux/sunrpc/xdr.h>
+
+
+/**
+ * skb_read_bits - copy some data bits from skb to internal buffer
+ * @desc: sk_buff copy helper
+ * @to: copy destination
+ * @len: number of bytes to copy
+ *
+ * Possibly called several times to iterate over an sk_buff and copy
+ * data out of it.
+ */
+static size_t skb_read_bits(skb_reader_t *desc, void *to, size_t len)
+{
+       if (len > desc->count)
+               len = desc->count;
+       if (skb_copy_bits(desc->skb, desc->offset, to, len))
+               return 0;
+       desc->count -= len;
+       desc->offset += len;
+       return len;
+}
+
+/**
+ * skb_read_and_csum_bits - copy and checksum from skb to buffer
+ * @desc: sk_buff copy helper
+ * @to: copy destination
+ * @len: number of bytes to copy
+ *
+ * Same as skb_read_bits, but calculate a checksum at the same time.
+ */
+static size_t skb_read_and_csum_bits(skb_reader_t *desc, void *to, size_t len)
+{
+       unsigned int    csum2, pos;
+
+       if (len > desc->count)
+               len = desc->count;
+       pos = desc->offset;
+       csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len, 0);
+       desc->csum = csum_block_add(desc->csum, csum2, pos);
+       desc->count -= len;
+       desc->offset += len;
+       return len;
+}
+
+/**
+ * xdr_partial_copy_from_skb - copy data out of an skb
+ * @xdr: target XDR buffer
+ * @base: starting offset
+ * @desc: sk_buff copy helper
+ * @copy_actor: virtual method for copying data
+ *
+ */
+ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, skb_reader_t *desc, skb_read_actor_t copy_actor)
+{
+       struct page     **ppage = xdr->pages;
+       unsigned int    len, pglen = xdr->page_len;
+       ssize_t         copied = 0;
+       int             ret;
+
+       len = xdr->head[0].iov_len;
+       if (base < len) {
+               len -= base;
+               ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
+               copied += ret;
+               if (ret != len || !desc->count)
+                       goto out;
+               base = 0;
+       } else
+               base -= len;
+
+       if (unlikely(pglen == 0))
+               goto copy_tail;
+       if (unlikely(base >= pglen)) {
+               base -= pglen;
+               goto copy_tail;
+       }
+       if (base || xdr->page_base) {
+               pglen -= base;
+               base += xdr->page_base;
+               ppage += base >> PAGE_CACHE_SHIFT;
+               base &= ~PAGE_CACHE_MASK;
+       }
+       do {
+               char *kaddr;
+
+               /* ACL likes to be lazy in allocating pages - ACLs
+                * are small by default but can get huge. */
+               if (unlikely(*ppage == NULL)) {
+                       *ppage = alloc_page(GFP_ATOMIC);
+                       if (unlikely(*ppage == NULL)) {
+                               if (copied == 0)
+                                       copied = -ENOMEM;
+                               goto out;
+                       }
+               }
+
+               len = PAGE_CACHE_SIZE;
+               kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA);
+               if (base) {
+                       len -= base;
+                       if (pglen < len)
+                               len = pglen;
+                       ret = copy_actor(desc, kaddr + base, len);
+                       base = 0;
+               } else {
+                       if (pglen < len)
+                               len = pglen;
+                       ret = copy_actor(desc, kaddr, len);
+               }
+               flush_dcache_page(*ppage);
+               kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA);
+               copied += ret;
+               if (ret != len || !desc->count)
+                       goto out;
+               ppage++;
+       } while ((pglen -= len) != 0);
+copy_tail:
+       len = xdr->tail[0].iov_len;
+       if (base < len)
+               copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base);
+out:
+       return copied;
+}
+
+/**
+ * csum_partial_copy_to_xdr - checksum and copy data
+ * @xdr: target XDR buffer
+ * @skb: source skb
+ *
+ * We have set things up such that we perform the checksum of the UDP
+ * packet in parallel with the copies into the RPC client iovec.  -DaveM
+ */
+int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
+{
+       skb_reader_t    desc;
+
+       desc.skb = skb;
+       desc.offset = sizeof(struct udphdr);
+       desc.count = skb->len - desc.offset;
+
+       if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+               goto no_checksum;
+
+       desc.csum = csum_partial(skb->data, desc.offset, skb->csum);
+       if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_and_csum_bits) < 0)
+               return -1;
+       if (desc.offset != skb->len) {
+               unsigned int csum2;
+               csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0);
+               desc.csum = csum_block_add(desc.csum, csum2, desc.offset);
+       }
+       if (desc.count)
+               return -1;
+       if ((unsigned short)csum_fold(desc.csum))
+               return -1;
+       return 0;
+no_checksum:
+       if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_bits) < 0)
+               return -1;
+       if (desc.count)
+               return -1;
+       return 0;
+}
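
The new socklib.c parameterises the skb traversal with a "copy actor": xdr_partial_copy_from_skb() walks head, pages and tail once, and the caller chooses skb_read_bits() (plain copy) or skb_read_and_csum_bits() (copy plus checksum). A self-contained userspace sketch of that callback pattern follows, with hypothetical names and a toy checksum in place of the real IP checksum helpers.

/* Userspace sketch of the "copy actor" pattern: one traversal routine,
 * two behaviours selected by a function pointer. */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct reader {
        const char *src;
        size_t      offset, count;
        unsigned    csum;
};

typedef size_t (*read_actor_t)(struct reader *r, void *to, size_t len);

static size_t read_bits(struct reader *r, void *to, size_t len)
{
        if (len > r->count)
                len = r->count;
        memcpy(to, r->src + r->offset, len);
        r->offset += len;
        r->count  -= len;
        return len;
}

static size_t read_and_csum_bits(struct reader *r, void *to, size_t len)
{
        size_t copied = read_bits(r, to, len), i;

        for (i = 0; i < copied; i++)            /* toy checksum, not an IP csum */
                r->csum += ((const unsigned char *)to)[i];
        return copied;
}

/* The traversal only knows it has an actor; callers pick the behaviour. */
static size_t copy_out(struct reader *r, char *dst, size_t want, read_actor_t actor)
{
        size_t done = 0;

        while (done < want && r->count)
                done += actor(r, dst + done, want - done);
        return done;
}

int main(void)
{
        char out[16];
        struct reader r = { .src = "rpc payload", .count = 11 };
        size_t n = copy_out(&r, out, sizeof(out) - 1, read_and_csum_bits);

        out[n] = '\0';
        printf("copied %zu bytes (\"%s\"), csum=%u\n", n, out, r.csum);
        return 0;
}
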
index ed48ff022d3529055c4a508ca86db9379052107d..2387e7b823ff8b49da0c1ddeb14db6c780e775d0 100644 (file)
@@ -10,7 +10,6 @@
 #include <linux/module.h>
 
 #include <linux/types.h>
-#include <linux/socket.h>
 #include <linux/sched.h>
 #include <linux/uio.h>
 #include <linux/unistd.h>
index 691dea4a58e757ed60e7ff739b0acac3b6bb79e7..f16e7cdd6150a811f9523ca311de6b46c47f63ef 100644 (file)
@@ -548,9 +548,6 @@ svc_write_space(struct sock *sk)
 /*
  * Receive a datagram from a UDP socket.
  */
-extern int
-csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb);
-
 static int
 svc_udp_recvfrom(struct svc_rqst *rqstp)
 {
index 1b9616a12e245405427606c494941327664e52c6..d0c9f460e411e04c2b4400c6e9e0fb33ff46f051 100644 (file)
@@ -119,8 +119,18 @@ done:
        return 0;
 }
 
+unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
+unsigned int xprt_tcp_slot_table_entries = RPC_DEF_SLOT_TABLE;
+unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
+EXPORT_SYMBOL(xprt_min_resvport);
+unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;
+EXPORT_SYMBOL(xprt_max_resvport);
+
+
 static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
 static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
+static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
+static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;
 
 static ctl_table debug_table[] = {
        {
@@ -177,6 +187,28 @@ static ctl_table debug_table[] = {
                .extra1         = &min_slot_table_size,
                .extra2         = &max_slot_table_size
        },
+       {
+               .ctl_name       = CTL_MIN_RESVPORT,
+               .procname       = "min_resvport",
+               .data           = &xprt_min_resvport,
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
+               .proc_handler   = &proc_dointvec_minmax,
+               .strategy       = &sysctl_intvec,
+               .extra1         = &xprt_min_resvport_limit,
+               .extra2         = &xprt_max_resvport_limit
+       },
+       {
+               .ctl_name       = CTL_MAX_RESVPORT,
+               .procname       = "max_resvport",
+               .data           = &xprt_max_resvport,
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
+               .proc_handler   = &proc_dointvec_minmax,
+               .strategy       = &sysctl_intvec,
+               .extra1         = &xprt_min_resvport_limit,
+               .extra2         = &xprt_max_resvport_limit
+       },
        { .ctl_name = 0 }
 };
 
index fde16f40a581dfb64e725e694afb4df5090693ed..32df43372ee97cc277ed54d058f77667c8985160 100644 (file)
@@ -6,15 +6,12 @@
  * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
  */
 
+#include <linux/module.h>
 #include <linux/types.h>
-#include <linux/socket.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
 #include <linux/pagemap.h>
 #include <linux/errno.h>
-#include <linux/in.h>
-#include <linux/net.h>
-#include <net/sock.h>
 #include <linux/sunrpc/xdr.h>
 #include <linux/sunrpc/msg_prot.h>
 
@@ -176,178 +173,6 @@ xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
        xdr->buflen += len;
 }
 
-ssize_t
-xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base,
-                         skb_reader_t *desc,
-                         skb_read_actor_t copy_actor)
-{
-       struct page     **ppage = xdr->pages;
-       unsigned int    len, pglen = xdr->page_len;
-       ssize_t         copied = 0;
-       int             ret;
-
-       len = xdr->head[0].iov_len;
-       if (base < len) {
-               len -= base;
-               ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
-               copied += ret;
-               if (ret != len || !desc->count)
-                       goto out;
-               base = 0;
-       } else
-               base -= len;
-
-       if (pglen == 0)
-               goto copy_tail;
-       if (base >= pglen) {
-               base -= pglen;
-               goto copy_tail;
-       }
-       if (base || xdr->page_base) {
-               pglen -= base;
-               base  += xdr->page_base;
-               ppage += base >> PAGE_CACHE_SHIFT;
-               base &= ~PAGE_CACHE_MASK;
-       }
-       do {
-               char *kaddr;
-
-               /* ACL likes to be lazy in allocating pages - ACLs
-                * are small by default but can get huge. */
-               if (unlikely(*ppage == NULL)) {
-                       *ppage = alloc_page(GFP_ATOMIC);
-                       if (unlikely(*ppage == NULL)) {
-                               if (copied == 0)
-                                       copied = -ENOMEM;
-                               goto out;
-                       }
-               }
-
-               len = PAGE_CACHE_SIZE;
-               kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA);
-               if (base) {
-                       len -= base;
-                       if (pglen < len)
-                               len = pglen;
-                       ret = copy_actor(desc, kaddr + base, len);
-                       base = 0;
-               } else {
-                       if (pglen < len)
-                               len = pglen;
-                       ret = copy_actor(desc, kaddr, len);
-               }
-               flush_dcache_page(*ppage);
-               kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA);
-               copied += ret;
-               if (ret != len || !desc->count)
-                       goto out;
-               ppage++;
-       } while ((pglen -= len) != 0);
-copy_tail:
-       len = xdr->tail[0].iov_len;
-       if (base < len)
-               copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base);
-out:
-       return copied;
-}
-
-
-int
-xdr_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen,
-               struct xdr_buf *xdr, unsigned int base, int msgflags)
-{
-       struct page **ppage = xdr->pages;
-       unsigned int len, pglen = xdr->page_len;
-       int err, ret = 0;
-       ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
-
-       len = xdr->head[0].iov_len;
-       if (base < len || (addr != NULL && base == 0)) {
-               struct kvec iov = {
-                       .iov_base = xdr->head[0].iov_base + base,
-                       .iov_len  = len - base,
-               };
-               struct msghdr msg = {
-                       .msg_name    = addr,
-                       .msg_namelen = addrlen,
-                       .msg_flags   = msgflags,
-               };
-               if (xdr->len > len)
-                       msg.msg_flags |= MSG_MORE;
-
-               if (iov.iov_len != 0)
-                       err = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
-               else
-                       err = kernel_sendmsg(sock, &msg, NULL, 0, 0);
-               if (ret == 0)
-                       ret = err;
-               else if (err > 0)
-                       ret += err;
-               if (err != iov.iov_len)
-                       goto out;
-               base = 0;
-       } else
-               base -= len;
-
-       if (pglen == 0)
-               goto copy_tail;
-       if (base >= pglen) {
-               base -= pglen;
-               goto copy_tail;
-       }
-       if (base || xdr->page_base) {
-               pglen -= base;
-               base  += xdr->page_base;
-               ppage += base >> PAGE_CACHE_SHIFT;
-               base &= ~PAGE_CACHE_MASK;
-       }
-
-       sendpage = sock->ops->sendpage ? : sock_no_sendpage;
-       do {
-               int flags = msgflags;
-
-               len = PAGE_CACHE_SIZE;
-               if (base)
-                       len -= base;
-               if (pglen < len)
-                       len = pglen;
-
-               if (pglen != len || xdr->tail[0].iov_len != 0)
-                       flags |= MSG_MORE;
-
-               /* Hmm... We might be dealing with highmem pages */
-               if (PageHighMem(*ppage))
-                       sendpage = sock_no_sendpage;
-               err = sendpage(sock, *ppage, base, len, flags);
-               if (ret == 0)
-                       ret = err;
-               else if (err > 0)
-                       ret += err;
-               if (err != len)
-                       goto out;
-               base = 0;
-               ppage++;
-       } while ((pglen -= len) != 0);
-copy_tail:
-       len = xdr->tail[0].iov_len;
-       if (base < len) {
-               struct kvec iov = {
-                       .iov_base = xdr->tail[0].iov_base + base,
-                       .iov_len  = len - base,
-               };
-               struct msghdr msg = {
-                       .msg_flags   = msgflags,
-               };
-               err = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
-               if (ret == 0)
-                       ret = err;
-               else if (err > 0)
-                       ret += err;
-       }
-out:
-       return ret;
-}
-
 
 /*
  * Helper routines for doing 'memmove' like operations on a struct xdr_buf
index 3c654e06b08455477185df8b20f48f8bc4928f12..6dda3860351fb1502f5ee12d0cc7e00e7aebb060 100644 (file)
  *     one is available. Otherwise, it sleeps on the backlog queue
  *     (xprt_reserve).
  *  -  Next, the caller puts together the RPC message, stuffs it into
- *     the request struct, and calls xprt_call().
- *  -  xprt_call transmits the message and installs the caller on the
- *     socket's wait list. At the same time, it installs a timer that
+ *     the request struct, and calls xprt_transmit().
+ *  -  xprt_transmit sends the message and installs the caller on the
+ *     transport's wait list. At the same time, it installs a timer that
  *     is run after the packet's timeout has expired.
  *  -  When a packet arrives, the data_ready handler walks the list of
- *     pending requests for that socket. If a matching XID is found, the
+ *     pending requests for that transport. If a matching XID is found, the
  *     caller is woken up, and the timer removed.
  *  -  When no reply arrives within the timeout interval, the timer is
  *     fired by the kernel and runs xprt_timer(). It either adjusts the
  *
  *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
  *
- *  TCP callback races fixes (C) 1998 Red Hat Software <alan@redhat.com>
- *  TCP send fixes (C) 1998 Red Hat Software <alan@redhat.com>
- *  TCP NFS related read + write fixes
- *   (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
- *
- *  Rewrite of larges part of the code in order to stabilize TCP stuff.
- *  Fix behaviour when socket buffer is full.
- *   (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
+ *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
  */
 
+#include <linux/module.h>
+
 #include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/capability.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/socket.h>
-#include <linux/in.h>
-#include <linux/net.h>
-#include <linux/mm.h>
-#include <linux/udp.h>
-#include <linux/tcp.h>
-#include <linux/sunrpc/clnt.h>
-#include <linux/file.h>
+#include <linux/interrupt.h>
 #include <linux/workqueue.h>
 #include <linux/random.h>
 
-#include <net/sock.h>
-#include <net/checksum.h>
-#include <net/udp.h>
-#include <net/tcp.h>
+#include <linux/sunrpc/clnt.h>
 
 /*
  * Local variables
 # define RPCDBG_FACILITY       RPCDBG_XPRT
 #endif
 
-#define XPRT_MAX_BACKOFF       (8)
-#define XPRT_IDLE_TIMEOUT      (5*60*HZ)
-#define XPRT_MAX_RESVPORT      (800)
-
 /*
  * Local functions
  */
 static void    xprt_request_init(struct rpc_task *, struct rpc_xprt *);
 static inline void     do_xprt_reserve(struct rpc_task *);
-static void    xprt_disconnect(struct rpc_xprt *);
 static void    xprt_connect_status(struct rpc_task *task);
-static struct rpc_xprt * xprt_setup(int proto, struct sockaddr_in *ap,
-                                               struct rpc_timeout *to);
-static struct socket *xprt_create_socket(struct rpc_xprt *, int, int);
-static void    xprt_bind_socket(struct rpc_xprt *, struct socket *);
 static int      __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
 
-static int     xprt_clear_backlog(struct rpc_xprt *xprt);
-
-#ifdef RPC_DEBUG_DATA
 /*
- * Print the buffer contents (first 128 bytes only--just enough for
- * diropres return).
+ * The transport code maintains an estimate on the maximum number of out-
+ * standing RPC requests, using a smoothed version of the congestion
+ * avoidance implemented in 44BSD. This is basically the Van Jacobson
+ * congestion algorithm: If a retransmit occurs, the congestion window is
+ * halved; otherwise, it is incremented by 1/cwnd when
+ *
+ *     -       a reply is received and
+ *     -       a full number of requests are outstanding and
+ *     -       the congestion window hasn't been updated recently.
  */
-static void
-xprt_pktdump(char *msg, u32 *packet, unsigned int count)
-{
-       u8      *buf = (u8 *) packet;
-       int     j;
-
-       dprintk("RPC:      %s\n", msg);
-       for (j = 0; j < count && j < 128; j += 4) {
-               if (!(j & 31)) {
-                       if (j)
-                               dprintk("\n");
-                       dprintk("0x%04x ", j);
-               }
-               dprintk("%02x%02x%02x%02x ",
-                       buf[j], buf[j+1], buf[j+2], buf[j+3]);
-       }
-       dprintk("\n");
-}
-#else
-static inline void
-xprt_pktdump(char *msg, u32 *packet, unsigned int count)
-{
-       /* NOP */
-}
-#endif
+#define RPC_CWNDSHIFT          (8U)
+#define RPC_CWNDSCALE          (1U << RPC_CWNDSHIFT)
+#define RPC_INITCWND           RPC_CWNDSCALE
+#define RPC_MAXCWND(xprt)      ((xprt)->max_reqs << RPC_CWNDSHIFT)
 
-/*
- * Look up RPC transport given an INET socket
+#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)
+
+/**
+ * xprt_reserve_xprt - serialize write access to transports
+ * @task: task that is requesting access to the transport
+ *
+ * This prevents mixing the payload of separate requests, and prevents
+ * transport connects from colliding with writes.  No congestion control
+ * is provided.
  */
-static inline struct rpc_xprt *
-xprt_from_sock(struct sock *sk)
+int xprt_reserve_xprt(struct rpc_task *task)
 {
-       return (struct rpc_xprt *) sk->sk_user_data;
+       struct rpc_xprt *xprt = task->tk_xprt;
+       struct rpc_rqst *req = task->tk_rqstp;
+
+       if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
+               if (task == xprt->snd_task)
+                       return 1;
+               if (task == NULL)
+                       return 0;
+               goto out_sleep;
+       }
+       xprt->snd_task = task;
+       if (req) {
+               req->rq_bytes_sent = 0;
+               req->rq_ntrans++;
+       }
+       return 1;
+
+out_sleep:
+       dprintk("RPC: %4d failed to lock transport %p\n",
+                       task->tk_pid, xprt);
+       task->tk_timeout = 0;
+       task->tk_status = -EAGAIN;
+       if (req && req->rq_ntrans)
+               rpc_sleep_on(&xprt->resend, task, NULL, NULL);
+       else
+               rpc_sleep_on(&xprt->sending, task, NULL, NULL);
+       return 0;
 }
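
xprt_reserve_xprt() serialises writers with a single XPRT_LOCKED bit: test_and_set_bit() either grants ownership of the transport or the task is put to sleep on the sending/resend queues. The userspace analogue below uses a C11 atomic_flag in place of the kernel bit operations and simply returns 0 where the kernel would sleep.

/* Userspace sketch of the XPRT_LOCKED idea: one atomic bit decides which
 * caller currently owns the transport for writing. */
#include <stdatomic.h>
#include <stdio.h>

struct demo_xprt {
        atomic_flag locked;
        int         owner;      /* analogous to xprt->snd_task */
};

static int demo_reserve_xprt(struct demo_xprt *x, int task_id)
{
        if (atomic_flag_test_and_set(&x->locked)) {
                if (x->owner == task_id)
                        return 1;               /* already ours */
                return 0;                       /* kernel: sleep on sending/resend */
        }
        x->owner = task_id;
        return 1;
}

static void demo_release_xprt(struct demo_xprt *x, int task_id)
{
        if (x->owner == task_id) {
                x->owner = 0;
                atomic_flag_clear(&x->locked);  /* kernel then wakes the next writer */
        }
}

int main(void)
{
        struct demo_xprt x = { .locked = ATOMIC_FLAG_INIT };

        printf("task 1 reserve: %d\n", demo_reserve_xprt(&x, 1));   /* 1 */
        printf("task 2 reserve: %d\n", demo_reserve_xprt(&x, 2));   /* 0 */
        demo_release_xprt(&x, 1);
        printf("task 2 reserve: %d\n", demo_reserve_xprt(&x, 2));   /* 1 */
        return 0;
}
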
 
 /*
- * Serialize write access to sockets, in order to prevent different
- * requests from interfering with each other.
- * Also prevents TCP socket connects from colliding with writes.
+ * xprt_reserve_xprt_cong - serialize write access to transports
+ * @task: task that is requesting access to the transport
+ *
+ * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
+ * integrated into the decision of whether a request is allowed to be
+ * woken up and given access to the transport.
  */
-static int
-__xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
+int xprt_reserve_xprt_cong(struct rpc_task *task)
 {
+       struct rpc_xprt *xprt = task->tk_xprt;
        struct rpc_rqst *req = task->tk_rqstp;
 
-       if (test_and_set_bit(XPRT_LOCKED, &xprt->sockstate)) {
+       if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
                if (task == xprt->snd_task)
                        return 1;
                goto out_sleep;
        }
-       if (xprt->nocong || __xprt_get_cong(xprt, task)) {
+       if (__xprt_get_cong(xprt, task)) {
                xprt->snd_task = task;
                if (req) {
                        req->rq_bytes_sent = 0;
@@ -156,10 +146,10 @@ __xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
                return 1;
        }
        smp_mb__before_clear_bit();
-       clear_bit(XPRT_LOCKED, &xprt->sockstate);
+       clear_bit(XPRT_LOCKED, &xprt->state);
        smp_mb__after_clear_bit();
 out_sleep:
-       dprintk("RPC: %4d failed to lock socket %p\n", task->tk_pid, xprt);
+       dprintk("RPC: %4d failed to lock transport %p\n", task->tk_pid, xprt);
        task->tk_timeout = 0;
        task->tk_status = -EAGAIN;
        if (req && req->rq_ntrans)
@@ -169,26 +159,52 @@ out_sleep:
        return 0;
 }
 
-static inline int
-xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
+static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
 {
        int retval;
 
-       spin_lock_bh(&xprt->sock_lock);
-       retval = __xprt_lock_write(xprt, task);
-       spin_unlock_bh(&xprt->sock_lock);
+       spin_lock_bh(&xprt->transport_lock);
+       retval = xprt->ops->reserve_xprt(task);
+       spin_unlock_bh(&xprt->transport_lock);
        return retval;
 }
 
+static void __xprt_lock_write_next(struct rpc_xprt *xprt)
+{
+       struct rpc_task *task;
+       struct rpc_rqst *req;
 
-static void
-__xprt_lock_write_next(struct rpc_xprt *xprt)
+       if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
+               return;
+
+       task = rpc_wake_up_next(&xprt->resend);
+       if (!task) {
+               task = rpc_wake_up_next(&xprt->sending);
+               if (!task)
+                       goto out_unlock;
+       }
+
+       req = task->tk_rqstp;
+       xprt->snd_task = task;
+       if (req) {
+               req->rq_bytes_sent = 0;
+               req->rq_ntrans++;
+       }
+       return;
+
+out_unlock:
+       smp_mb__before_clear_bit();
+       clear_bit(XPRT_LOCKED, &xprt->state);
+       smp_mb__after_clear_bit();
+}
+
+static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
 {
        struct rpc_task *task;
 
-       if (test_and_set_bit(XPRT_LOCKED, &xprt->sockstate))
+       if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
                return;
-       if (!xprt->nocong && RPCXPRT_CONGESTED(xprt))
+       if (RPCXPRT_CONGESTED(xprt))
                goto out_unlock;
        task = rpc_wake_up_next(&xprt->resend);
        if (!task) {
@@ -196,7 +212,7 @@ __xprt_lock_write_next(struct rpc_xprt *xprt)
                if (!task)
                        goto out_unlock;
        }
-       if (xprt->nocong || __xprt_get_cong(xprt, task)) {
+       if (__xprt_get_cong(xprt, task)) {
                struct rpc_rqst *req = task->tk_rqstp;
                xprt->snd_task = task;
                if (req) {
@@ -207,87 +223,52 @@ __xprt_lock_write_next(struct rpc_xprt *xprt)
        }
 out_unlock:
        smp_mb__before_clear_bit();
-       clear_bit(XPRT_LOCKED, &xprt->sockstate);
+       clear_bit(XPRT_LOCKED, &xprt->state);
        smp_mb__after_clear_bit();
 }
 
-/*
- * Releases the socket for use by other requests.
+/**
+ * xprt_release_xprt - allow other requests to use a transport
+ * @xprt: transport with other tasks potentially waiting
+ * @task: task that is releasing access to the transport
+ *
+ * Note that "task" can be NULL.  No congestion control is provided.
  */
-static void
-__xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
+void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
 {
        if (xprt->snd_task == task) {
                xprt->snd_task = NULL;
                smp_mb__before_clear_bit();
-               clear_bit(XPRT_LOCKED, &xprt->sockstate);
+               clear_bit(XPRT_LOCKED, &xprt->state);
                smp_mb__after_clear_bit();
                __xprt_lock_write_next(xprt);
        }
 }
 
-static inline void
-xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
-{
-       spin_lock_bh(&xprt->sock_lock);
-       __xprt_release_write(xprt, task);
-       spin_unlock_bh(&xprt->sock_lock);
-}
-
-/*
- * Write data to socket.
+/**
+ * xprt_release_xprt_cong - allow other requests to use a transport
+ * @xprt: transport with other tasks potentially waiting
+ * @task: task that is releasing access to the transport
+ *
+ * Note that "task" can be NULL.  Another task is awoken to use the
+ * transport if the transport's congestion window allows it.
  */
-static inline int
-xprt_sendmsg(struct rpc_xprt *xprt, struct rpc_rqst *req)
+void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
 {
-       struct socket   *sock = xprt->sock;
-       struct xdr_buf  *xdr = &req->rq_snd_buf;
-       struct sockaddr *addr = NULL;
-       int addrlen = 0;
-       unsigned int    skip;
-       int             result;
-
-       if (!sock)
-               return -ENOTCONN;
-
-       xprt_pktdump("packet data:",
-                               req->rq_svec->iov_base,
-                               req->rq_svec->iov_len);
-
-       /* For UDP, we need to provide an address */
-       if (!xprt->stream) {
-               addr = (struct sockaddr *) &xprt->addr;
-               addrlen = sizeof(xprt->addr);
+       if (xprt->snd_task == task) {
+               xprt->snd_task = NULL;
+               smp_mb__before_clear_bit();
+               clear_bit(XPRT_LOCKED, &xprt->state);
+               smp_mb__after_clear_bit();
+               __xprt_lock_write_next_cong(xprt);
        }
-       /* Dont repeat bytes */
-       skip = req->rq_bytes_sent;
-
-       clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
-       result = xdr_sendpages(sock, addr, addrlen, xdr, skip, MSG_DONTWAIT);
-
-       dprintk("RPC:      xprt_sendmsg(%d) = %d\n", xdr->len - skip, result);
-
-       if (result >= 0)
-               return result;
+}
 
-       switch (result) {
-       case -ECONNREFUSED:
-               /* When the server has died, an ICMP port unreachable message
-                * prompts ECONNREFUSED.
-                */
-       case -EAGAIN:
-               break;
-       case -ECONNRESET:
-       case -ENOTCONN:
-       case -EPIPE:
-               /* connection broken */
-               if (xprt->stream)
-                       result = -ENOTCONN;
-               break;
-       default:
-               printk(KERN_NOTICE "RPC: sendmsg returned error %d\n", -result);
-       }
-       return result;
+static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
+{
+       spin_lock_bh(&xprt->transport_lock);
+       xprt->ops->release_xprt(xprt, task);
+       spin_unlock_bh(&xprt->transport_lock);
 }
 
 /*
@@ -321,26 +302,40 @@ __xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
                return;
        req->rq_cong = 0;
        xprt->cong -= RPC_CWNDSCALE;
-       __xprt_lock_write_next(xprt);
+       __xprt_lock_write_next_cong(xprt);
 }
 
-/*
- * Adjust RPC congestion window
+/**
+ * xprt_release_rqst_cong - housekeeping when request is complete
+ * @task: RPC request that recently completed
+ *
+ * Useful for transports that require congestion control.
+ */
+void xprt_release_rqst_cong(struct rpc_task *task)
+{
+       __xprt_put_cong(task->tk_xprt, task->tk_rqstp);
+}
+
+/**
+ * xprt_adjust_cwnd - adjust transport congestion window
+ * @task: recently completed RPC request used to adjust window
+ * @result: result code of completed RPC request
+ *
  * We use a time-smoothed congestion estimator to avoid heavy oscillation.
  */
-static void
-xprt_adjust_cwnd(struct rpc_xprt *xprt, int result)
+void xprt_adjust_cwnd(struct rpc_task *task, int result)
 {
-       unsigned long   cwnd;
+       struct rpc_rqst *req = task->tk_rqstp;
+       struct rpc_xprt *xprt = task->tk_xprt;
+       unsigned long cwnd = xprt->cwnd;
 
-       cwnd = xprt->cwnd;
        if (result >= 0 && cwnd <= xprt->cong) {
                /* The (cwnd >> 1) term makes sure
                 * the result gets rounded properly. */
                cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
                if (cwnd > RPC_MAXCWND(xprt))
                        cwnd = RPC_MAXCWND(xprt);
-               __xprt_lock_write_next(xprt);
+               __xprt_lock_write_next_cong(xprt);
        } else if (result == -ETIMEDOUT) {
                cwnd >>= 1;
                if (cwnd < RPC_CWNDSCALE)
@@ -349,11 +344,89 @@ xprt_adjust_cwnd(struct rpc_xprt *xprt, int result)
        dprintk("RPC:      cong %ld, cwnd was %ld, now %ld\n",
                        xprt->cong, xprt->cwnd, cwnd);
        xprt->cwnd = cwnd;
+       __xprt_put_cong(xprt, req);
+}
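
The comment block introduced near the top of this file describes the Van Jacobson style estimator, and xprt_adjust_cwnd() above implements it: grow the window by roughly one slot per cwnd replies (in RPC_CWNDSCALE fixed point) and halve it on a retransmit timeout. The demo below exercises that arithmetic with the constants defined in this commit; it omits the kernel's extra condition that a full window of requests be outstanding before the window grows, and max_reqs is an arbitrary example value.

/* Small demo of the fixed-point congestion window update, using
 * RPC_CWNDSHIFT = 8 as defined in this commit. */
#include <stdio.h>

#define RPC_CWNDSHIFT   (8U)
#define RPC_CWNDSCALE   (1U << RPC_CWNDSHIFT)
#define RPC_INITCWND    RPC_CWNDSCALE
#define RPC_MAXCWND(n)  ((n) << RPC_CWNDSHIFT)

int main(void)
{
        unsigned long cwnd = RPC_INITCWND;
        unsigned long max_reqs = 16;            /* example slot-table size */
        int i;

        /* Additive increase: grows by ~1/cwnd of a slot per reply. */
        for (i = 0; i < 40; i++) {
                cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
                if (cwnd > RPC_MAXCWND(max_reqs))
                        cwnd = RPC_MAXCWND(max_reqs);
        }
        printf("after 40 replies: cwnd = %lu (%lu.%02lu requests)\n",
               cwnd, cwnd >> RPC_CWNDSHIFT,
               (cwnd & (RPC_CWNDSCALE - 1)) * 100 / RPC_CWNDSCALE);

        /* Multiplicative decrease on a retransmit timeout. */
        cwnd >>= 1;
        if (cwnd < RPC_CWNDSCALE)
                cwnd = RPC_CWNDSCALE;
        printf("after a timeout: cwnd = %lu (%lu requests)\n",
               cwnd, cwnd >> RPC_CWNDSHIFT);
        return 0;
}
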
+
+/**
+ * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
+ * @xprt: transport with waiting tasks
+ * @status: result code to plant in each task before waking it
+ *
+ */
+void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
+{
+       if (status < 0)
+               rpc_wake_up_status(&xprt->pending, status);
+       else
+               rpc_wake_up(&xprt->pending);
+}
+
+/**
+ * xprt_wait_for_buffer_space - wait for transport output buffer to clear
+ * @task: task to be put to sleep
+ *
+ */
+void xprt_wait_for_buffer_space(struct rpc_task *task)
+{
+       struct rpc_rqst *req = task->tk_rqstp;
+       struct rpc_xprt *xprt = req->rq_xprt;
+
+       task->tk_timeout = req->rq_timeout;
+       rpc_sleep_on(&xprt->pending, task, NULL, NULL);
+}
+
+/**
+ * xprt_write_space - wake the task waiting for transport output buffer space
+ * @xprt: transport with waiting tasks
+ *
+ * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
+ */
+void xprt_write_space(struct rpc_xprt *xprt)
+{
+       if (unlikely(xprt->shutdown))
+               return;
+
+       spin_lock_bh(&xprt->transport_lock);
+       if (xprt->snd_task) {
+               dprintk("RPC:      write space: waking waiting task on xprt %p\n",
+                               xprt);
+               rpc_wake_up_task(xprt->snd_task);
+       }
+       spin_unlock_bh(&xprt->transport_lock);
+}
+
+/**
+ * xprt_set_retrans_timeout_def - set a request's retransmit timeout
+ * @task: task whose timeout is to be set
+ *
+ * Set a request's retransmit timeout based on the transport's
+ * default timeout parameters.  Used by transports that don't adjust
+ * the retransmit timeout based on round-trip time estimation.
+ */
+void xprt_set_retrans_timeout_def(struct rpc_task *task)
+{
+       task->tk_timeout = task->tk_rqstp->rq_timeout;
 }
 
 /*
- * Reset the major timeout value
+ * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
+ * @task: task whose timeout is to be set
+ * 
+ * Set a request's retransmit timeout using the RTT estimator.
  */
+void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
+{
+       int timer = task->tk_msg.rpc_proc->p_timer;
+       struct rpc_rtt *rtt = task->tk_client->cl_rtt;
+       struct rpc_rqst *req = task->tk_rqstp;
+       unsigned long max_timeout = req->rq_xprt->timeout.to_maxval;
+
+       task->tk_timeout = rpc_calc_rto(rtt, timer);
+       task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
+       if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
+               task->tk_timeout = max_timeout;
+}
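
xprt_set_retrans_timeout_rtt() starts from the RTT estimator's timeout and doubles it once per previous backoff and per retransmission, clamping at the transport's maximum. The demo below shows only the shift-and-clamp step; the base RTO, backoff count and maximum are made-up example numbers standing in for rpc_calc_rto(), rpc_ntimeo() and to_maxval.

/* Demo of the clamped exponential backoff:
 * timeout = rto << (backoffs + retries), capped at the transport maximum. */
#include <stdio.h>

#define HZ 1000UL       /* example tick rate */

int main(void)
{
        unsigned long rto         = HZ / 4;     /* pretend rpc_calc_rto() said 250ms */
        unsigned long backoffs    = 1;          /* pretend rpc_ntimeo() said 1 */
        unsigned long max_timeout = 60 * HZ;    /* e.g. to_maxval */
        unsigned int retries;

        for (retries = 0; retries < 12; retries++) {
                unsigned long timeout = rto << (backoffs + retries);

                if (timeout > max_timeout || timeout == 0)
                        timeout = max_timeout;
                printf("retry %2u -> timeout %lu ticks\n", retries, timeout);
        }
        return 0;
}
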
+
 static void xprt_reset_majortimeo(struct rpc_rqst *req)
 {
        struct rpc_timeout *to = &req->rq_xprt->timeout;
@@ -368,8 +441,10 @@ static void xprt_reset_majortimeo(struct rpc_rqst *req)
        req->rq_majortimeo += jiffies;
 }
 
-/*
- * Adjust timeout values etc for next retransmit
+/**
+ * xprt_adjust_timeout - adjust timeout values for next retransmit
+ * @req: RPC request containing parameters to use for the adjustment
+ *
  */
 int xprt_adjust_timeout(struct rpc_rqst *req)
 {
@@ -391,9 +466,9 @@ int xprt_adjust_timeout(struct rpc_rqst *req)
                req->rq_retries = 0;
                xprt_reset_majortimeo(req);
                /* Reset the RTT counters == "slow start" */
-               spin_lock_bh(&xprt->sock_lock);
+               spin_lock_bh(&xprt->transport_lock);
                rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
-               spin_unlock_bh(&xprt->sock_lock);
+               spin_unlock_bh(&xprt->transport_lock);
                pprintk("RPC: %lu timeout\n", jiffies);
                status = -ETIMEDOUT;
        }
@@ -405,133 +480,52 @@ int xprt_adjust_timeout(struct rpc_rqst *req)
        return status;
 }
 
-/*
- * Close down a transport socket
- */
-static void
-xprt_close(struct rpc_xprt *xprt)
-{
-       struct socket   *sock = xprt->sock;
-       struct sock     *sk = xprt->inet;
-
-       if (!sk)
-               return;
-
-       write_lock_bh(&sk->sk_callback_lock);
-       xprt->inet = NULL;
-       xprt->sock = NULL;
-
-       sk->sk_user_data    = NULL;
-       sk->sk_data_ready   = xprt->old_data_ready;
-       sk->sk_state_change = xprt->old_state_change;
-       sk->sk_write_space  = xprt->old_write_space;
-       write_unlock_bh(&sk->sk_callback_lock);
-
-       sk->sk_no_check  = 0;
-
-       sock_release(sock);
-}
-
-static void
-xprt_socket_autoclose(void *args)
+static void xprt_autoclose(void *args)
 {
        struct rpc_xprt *xprt = (struct rpc_xprt *)args;
 
        xprt_disconnect(xprt);
-       xprt_close(xprt);
+       xprt->ops->close(xprt);
        xprt_release_write(xprt, NULL);
 }
 
-/*
- * Mark a transport as disconnected
+/**
+ * xprt_disconnect - mark a transport as disconnected
+ * @xprt: transport to flag for disconnect
+ *
  */
-static void
-xprt_disconnect(struct rpc_xprt *xprt)
+void xprt_disconnect(struct rpc_xprt *xprt)
 {
        dprintk("RPC:      disconnected transport %p\n", xprt);
-       spin_lock_bh(&xprt->sock_lock);
+       spin_lock_bh(&xprt->transport_lock);
        xprt_clear_connected(xprt);
-       rpc_wake_up_status(&xprt->pending, -ENOTCONN);
-       spin_unlock_bh(&xprt->sock_lock);
+       xprt_wake_pending_tasks(xprt, -ENOTCONN);
+       spin_unlock_bh(&xprt->transport_lock);
 }
 
-/*
- * Used to allow disconnection when we've been idle
- */
 static void
 xprt_init_autodisconnect(unsigned long data)
 {
        struct rpc_xprt *xprt = (struct rpc_xprt *)data;
 
-       spin_lock(&xprt->sock_lock);
+       spin_lock(&xprt->transport_lock);
        if (!list_empty(&xprt->recv) || xprt->shutdown)
                goto out_abort;
-       if (test_and_set_bit(XPRT_LOCKED, &xprt->sockstate))
+       if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
                goto out_abort;
-       spin_unlock(&xprt->sock_lock);
-       /* Let keventd close the socket */
-       if (test_bit(XPRT_CONNECTING, &xprt->sockstate) != 0)
+       spin_unlock(&xprt->transport_lock);
+       if (xprt_connecting(xprt))
                xprt_release_write(xprt, NULL);
        else
                schedule_work(&xprt->task_cleanup);
        return;
 out_abort:
-       spin_unlock(&xprt->sock_lock);
-}
-
-static void xprt_socket_connect(void *args)
-{
-       struct rpc_xprt *xprt = (struct rpc_xprt *)args;
-       struct socket *sock = xprt->sock;
-       int status = -EIO;
-
-       if (xprt->shutdown || xprt->addr.sin_port == 0)
-               goto out;
-
-       /*
-        * Start by resetting any existing state
-        */
-       xprt_close(xprt);
-       sock = xprt_create_socket(xprt, xprt->prot, xprt->resvport);
-       if (sock == NULL) {
-               /* couldn't create socket or bind to reserved port;
-                * this is likely a permanent error, so cause an abort */
-               goto out;
-       }
-       xprt_bind_socket(xprt, sock);
-       xprt_sock_setbufsize(xprt);
-
-       status = 0;
-       if (!xprt->stream)
-               goto out;
-
-       /*
-        * Tell the socket layer to start connecting...
-        */
-       status = sock->ops->connect(sock, (struct sockaddr *) &xprt->addr,
-                       sizeof(xprt->addr), O_NONBLOCK);
-       dprintk("RPC: %p  connect status %d connected %d sock state %d\n",
-                       xprt, -status, xprt_connected(xprt), sock->sk->sk_state);
-       if (status < 0) {
-               switch (status) {
-                       case -EINPROGRESS:
-                       case -EALREADY:
-                               goto out_clear;
-               }
-       }
-out:
-       if (status < 0)
-               rpc_wake_up_status(&xprt->pending, status);
-       else
-               rpc_wake_up(&xprt->pending);
-out_clear:
-       smp_mb__before_clear_bit();
-       clear_bit(XPRT_CONNECTING, &xprt->sockstate);
-       smp_mb__after_clear_bit();
+       spin_unlock(&xprt->transport_lock);
 }
 
-/*
- * Attempt to connect a TCP socket.
+/**
+ * xprt_connect - schedule a transport connect operation
+ * @task: RPC task that is requesting the connect
  *
  */
 void xprt_connect(struct rpc_task *task)
@@ -552,37 +546,19 @@ void xprt_connect(struct rpc_task *task)
        if (!xprt_lock_write(xprt, task))
                return;
        if (xprt_connected(xprt))
-               goto out_write;
+               xprt_release_write(xprt, task);
+       else {
+               if (task->tk_rqstp)
+                       task->tk_rqstp->rq_bytes_sent = 0;
 
-       if (task->tk_rqstp)
-               task->tk_rqstp->rq_bytes_sent = 0;
-
-       task->tk_timeout = RPC_CONNECT_TIMEOUT;
-       rpc_sleep_on(&xprt->pending, task, xprt_connect_status, NULL);
-       if (!test_and_set_bit(XPRT_CONNECTING, &xprt->sockstate)) {
-               /* Note: if we are here due to a dropped connection
-                *       we delay reconnecting by RPC_REESTABLISH_TIMEOUT/HZ
-                *       seconds
-                */
-               if (xprt->sock != NULL)
-                       schedule_delayed_work(&xprt->sock_connect,
-                                       RPC_REESTABLISH_TIMEOUT);
-               else {
-                       schedule_work(&xprt->sock_connect);
-                       if (!RPC_IS_ASYNC(task))
-                               flush_scheduled_work();
-               }
+               task->tk_timeout = xprt->connect_timeout;
+               rpc_sleep_on(&xprt->pending, task, xprt_connect_status, NULL);
+               xprt->ops->connect(task);
        }
        return;
- out_write:
-       xprt_release_write(xprt, task);
 }
 
-/*
- * We arrive here when awoken from waiting on connection establishment.
- */
-static void
-xprt_connect_status(struct rpc_task *task)
+static void xprt_connect_status(struct rpc_task *task)
 {
        struct rpc_xprt *xprt = task->tk_xprt;
 
@@ -592,31 +568,42 @@ xprt_connect_status(struct rpc_task *task)
                return;
        }
 
-       /* if soft mounted, just cause this RPC to fail */
-       if (RPC_IS_SOFT(task))
-               task->tk_status = -EIO;
-
        switch (task->tk_status) {
        case -ECONNREFUSED:
        case -ECONNRESET:
+               dprintk("RPC: %4d xprt_connect_status: server %s refused connection\n",
+                               task->tk_pid, task->tk_client->cl_server);
+               break;
        case -ENOTCONN:
-               return;
+               dprintk("RPC: %4d xprt_connect_status: connection broken\n",
+                               task->tk_pid);
+               break;
        case -ETIMEDOUT:
-               dprintk("RPC: %4d xprt_connect_status: timed out\n",
+               dprintk("RPC: %4d xprt_connect_status: connect attempt timed out\n",
                                task->tk_pid);
                break;
        default:
-               printk(KERN_ERR "RPC: error %d connecting to server %s\n",
-                               -task->tk_status, task->tk_client->cl_server);
+               dprintk("RPC: %4d xprt_connect_status: error %d connecting to server %s\n",
+                               task->tk_pid, -task->tk_status, task->tk_client->cl_server);
+               xprt_release_write(xprt, task);
+               task->tk_status = -EIO;
+               return;
+       }
+
+       /* if soft mounted, just cause this RPC to fail */
+       if (RPC_IS_SOFT(task)) {
+               xprt_release_write(xprt, task);
+               task->tk_status = -EIO;
        }
-       xprt_release_write(xprt, task);
 }
 
-/*
- * Look up the RPC request corresponding to a reply, and then lock it.
+/**
+ * xprt_lookup_rqst - find an RPC request corresponding to an XID
+ * @xprt: transport on which the original request was transmitted
+ * @xid: RPC XID of incoming reply
+ *
  */
-static inline struct rpc_rqst *
-xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid)
+struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid)
 {
        struct list_head *pos;
        struct rpc_rqst *req = NULL;
@@ -631,556 +618,68 @@ xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid)
        return req;
 }
 
-/*
- * Complete reply received.
- * The TCP code relies on us to remove the request from xprt->pending.
- */
-static void
-xprt_complete_rqst(struct rpc_xprt *xprt, struct rpc_rqst *req, int copied)
-{
-       struct rpc_task *task = req->rq_task;
-       struct rpc_clnt *clnt = task->tk_client;
-
-       /* Adjust congestion window */
-       if (!xprt->nocong) {
-               unsigned timer = task->tk_msg.rpc_proc->p_timer;
-               xprt_adjust_cwnd(xprt, copied);
-               __xprt_put_cong(xprt, req);
-               if (timer) {
-                       if (req->rq_ntrans == 1)
-                               rpc_update_rtt(clnt->cl_rtt, timer,
-                                               (long)jiffies - req->rq_xtime);
-                       rpc_set_timeo(clnt->cl_rtt, timer, req->rq_ntrans - 1);
-               }
-       }
-
-#ifdef RPC_PROFILE
-       /* Profile only reads for now */
-       if (copied > 1024) {
-               static unsigned long    nextstat;
-               static unsigned long    pkt_rtt, pkt_len, pkt_cnt;
-
-               pkt_cnt++;
-               pkt_len += req->rq_slen + copied;
-               pkt_rtt += jiffies - req->rq_xtime;
-               if (time_before(nextstat, jiffies)) {
-                       printk("RPC: %lu %ld cwnd\n", jiffies, xprt->cwnd);
-                       printk("RPC: %ld %ld %ld %ld stat\n",
-                                       jiffies, pkt_cnt, pkt_len, pkt_rtt);
-                       pkt_rtt = pkt_len = pkt_cnt = 0;
-                       nextstat = jiffies + 5 * HZ;
-               }
-       }
-#endif
-
-       dprintk("RPC: %4d has input (%d bytes)\n", task->tk_pid, copied);
-       list_del_init(&req->rq_list);
-       req->rq_received = req->rq_private_buf.len = copied;
-
-       /* ... and wake up the process. */
-       rpc_wake_up_task(task);
-       return;
-}
-
-static size_t
-skb_read_bits(skb_reader_t *desc, void *to, size_t len)
-{
-       if (len > desc->count)
-               len = desc->count;
-       if (skb_copy_bits(desc->skb, desc->offset, to, len))
-               return 0;
-       desc->count -= len;
-       desc->offset += len;
-       return len;
-}
-
-static size_t
-skb_read_and_csum_bits(skb_reader_t *desc, void *to, size_t len)
-{
-       unsigned int csum2, pos;
-
-       if (len > desc->count)
-               len = desc->count;
-       pos = desc->offset;
-       csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len, 0);
-       desc->csum = csum_block_add(desc->csum, csum2, pos);
-       desc->count -= len;
-       desc->offset += len;
-       return len;
-}
-
-/*
- * We have set things up such that we perform the checksum of the UDP
- * packet in parallel with the copies into the RPC client iovec.  -DaveM
- */
-int
-csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
-{
-       skb_reader_t desc;
-
-       desc.skb = skb;
-       desc.offset = sizeof(struct udphdr);
-       desc.count = skb->len - desc.offset;
-
-       if (skb->ip_summed == CHECKSUM_UNNECESSARY)
-               goto no_checksum;
-
-       desc.csum = csum_partial(skb->data, desc.offset, skb->csum);
-       if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_and_csum_bits) < 0)
-               return -1;
-       if (desc.offset != skb->len) {
-               unsigned int csum2;
-               csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0);
-               desc.csum = csum_block_add(desc.csum, csum2, desc.offset);
-       }
-       if (desc.count)
-               return -1;
-       if ((unsigned short)csum_fold(desc.csum))
-               return -1;
-       return 0;
-no_checksum:
-       if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_bits) < 0)
-               return -1;
-       if (desc.count)
-               return -1;
-       return 0;
-}
-
-/*
- * Input handler for RPC replies. Called from a bottom half and hence
- * atomic.
- */
-static void
-udp_data_ready(struct sock *sk, int len)
-{
-       struct rpc_task *task;
-       struct rpc_xprt *xprt;
-       struct rpc_rqst *rovr;
-       struct sk_buff  *skb;
-       int err, repsize, copied;
-       u32 _xid, *xp;
-
-       read_lock(&sk->sk_callback_lock);
-       dprintk("RPC:      udp_data_ready...\n");
-       if (!(xprt = xprt_from_sock(sk))) {
-               printk("RPC:      udp_data_ready request not found!\n");
-               goto out;
-       }
-
-       dprintk("RPC:      udp_data_ready client %p\n", xprt);
-
-       if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL)
-               goto out;
-
-       if (xprt->shutdown)
-               goto dropit;
-
-       repsize = skb->len - sizeof(struct udphdr);
-       if (repsize < 4) {
-               printk("RPC: impossible RPC reply size %d!\n", repsize);
-               goto dropit;
-       }
-
-       /* Copy the XID from the skb... */
-       xp = skb_header_pointer(skb, sizeof(struct udphdr),
-                               sizeof(_xid), &_xid);
-       if (xp == NULL)
-               goto dropit;
-
-       /* Look up and lock the request corresponding to the given XID */
-       spin_lock(&xprt->sock_lock);
-       rovr = xprt_lookup_rqst(xprt, *xp);
-       if (!rovr)
-               goto out_unlock;
-       task = rovr->rq_task;
-
-       dprintk("RPC: %4d received reply\n", task->tk_pid);
-
-       if ((copied = rovr->rq_private_buf.buflen) > repsize)
-               copied = repsize;
-
-       /* Suck it into the iovec, verify checksum if not done by hw. */
-       if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb))
-               goto out_unlock;
-
-       /* Something worked... */
-       dst_confirm(skb->dst);
-
-       xprt_complete_rqst(xprt, rovr, copied);
-
- out_unlock:
-       spin_unlock(&xprt->sock_lock);
- dropit:
-       skb_free_datagram(sk, skb);
- out:
-       read_unlock(&sk->sk_callback_lock);
-}
-
-/*
- * Copy from an skb into memory and shrink the skb.
- */
-static inline size_t
-tcp_copy_data(skb_reader_t *desc, void *p, size_t len)
-{
-       if (len > desc->count)
-               len = desc->count;
-       if (skb_copy_bits(desc->skb, desc->offset, p, len)) {
-               dprintk("RPC:      failed to copy %zu bytes from skb. %zu bytes remain\n",
-                               len, desc->count);
-               return 0;
-       }
-       desc->offset += len;
-       desc->count -= len;
-       dprintk("RPC:      copied %zu bytes from skb. %zu bytes remain\n",
-                       len, desc->count);
-       return len;
-}
-
-/*
- * TCP read fragment marker
- */
-static inline void
-tcp_read_fraghdr(struct rpc_xprt *xprt, skb_reader_t *desc)
-{
-       size_t len, used;
-       char *p;
-
-       p = ((char *) &xprt->tcp_recm) + xprt->tcp_offset;
-       len = sizeof(xprt->tcp_recm) - xprt->tcp_offset;
-       used = tcp_copy_data(desc, p, len);
-       xprt->tcp_offset += used;
-       if (used != len)
-               return;
-       xprt->tcp_reclen = ntohl(xprt->tcp_recm);
-       if (xprt->tcp_reclen & 0x80000000)
-               xprt->tcp_flags |= XPRT_LAST_FRAG;
-       else
-               xprt->tcp_flags &= ~XPRT_LAST_FRAG;
-       xprt->tcp_reclen &= 0x7fffffff;
-       xprt->tcp_flags &= ~XPRT_COPY_RECM;
-       xprt->tcp_offset = 0;
-       /* Sanity check of the record length */
-       if (xprt->tcp_reclen < 4) {
-               printk(KERN_ERR "RPC: Invalid TCP record fragment length\n");
-               xprt_disconnect(xprt);
-       }
-       dprintk("RPC:      reading TCP record fragment of length %d\n",
-                       xprt->tcp_reclen);
-}
-
-static void
-tcp_check_recm(struct rpc_xprt *xprt)
-{
-       dprintk("RPC:      xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u, tcp_flags = %lx\n",
-                       xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen, xprt->tcp_flags);
-       if (xprt->tcp_offset == xprt->tcp_reclen) {
-               xprt->tcp_flags |= XPRT_COPY_RECM;
-               xprt->tcp_offset = 0;
-               if (xprt->tcp_flags & XPRT_LAST_FRAG) {
-                       xprt->tcp_flags &= ~XPRT_COPY_DATA;
-                       xprt->tcp_flags |= XPRT_COPY_XID;
-                       xprt->tcp_copied = 0;
-               }
-       }
-}
-
-/*
- * TCP read xid
- */
-static inline void
-tcp_read_xid(struct rpc_xprt *xprt, skb_reader_t *desc)
-{
-       size_t len, used;
-       char *p;
-
-       len = sizeof(xprt->tcp_xid) - xprt->tcp_offset;
-       dprintk("RPC:      reading XID (%Zu bytes)\n", len);
-       p = ((char *) &xprt->tcp_xid) + xprt->tcp_offset;
-       used = tcp_copy_data(desc, p, len);
-       xprt->tcp_offset += used;
-       if (used != len)
-               return;
-       xprt->tcp_flags &= ~XPRT_COPY_XID;
-       xprt->tcp_flags |= XPRT_COPY_DATA;
-       xprt->tcp_copied = 4;
-       dprintk("RPC:      reading reply for XID %08x\n",
-                                               ntohl(xprt->tcp_xid));
-       tcp_check_recm(xprt);
-}
-
-/*
- * TCP read and complete request
- */
-static inline void
-tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
-{
-       struct rpc_rqst *req;
-       struct xdr_buf *rcvbuf;
-       size_t len;
-       ssize_t r;
-
-       /* Find and lock the request corresponding to this xid */
-       spin_lock(&xprt->sock_lock);
-       req = xprt_lookup_rqst(xprt, xprt->tcp_xid);
-       if (!req) {
-               xprt->tcp_flags &= ~XPRT_COPY_DATA;
-               dprintk("RPC:      XID %08x request not found!\n",
-                               ntohl(xprt->tcp_xid));
-               spin_unlock(&xprt->sock_lock);
-               return;
-       }
-
-       rcvbuf = &req->rq_private_buf;
-       len = desc->count;
-       if (len > xprt->tcp_reclen - xprt->tcp_offset) {
-               skb_reader_t my_desc;
-
-               len = xprt->tcp_reclen - xprt->tcp_offset;
-               memcpy(&my_desc, desc, sizeof(my_desc));
-               my_desc.count = len;
-               r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
-                                         &my_desc, tcp_copy_data);
-               desc->count -= r;
-               desc->offset += r;
-       } else
-               r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
-                                         desc, tcp_copy_data);
-
-       if (r > 0) {
-               xprt->tcp_copied += r;
-               xprt->tcp_offset += r;
-       }
-       if (r != len) {
-               /* Error when copying to the receive buffer,
-                * usually because we weren't able to allocate
-                * additional buffer pages. All we can do now
-                * is turn off XPRT_COPY_DATA, so the request
-                * will not receive any additional updates,
-                * and time out.
-                * Any remaining data from this record will
-                * be discarded.
-                */
-               xprt->tcp_flags &= ~XPRT_COPY_DATA;
-               dprintk("RPC:      XID %08x truncated request\n",
-                               ntohl(xprt->tcp_xid));
-               dprintk("RPC:      xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
-                               xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen);
-               goto out;
-       }
-
-       dprintk("RPC:      XID %08x read %Zd bytes\n",
-                       ntohl(xprt->tcp_xid), r);
-       dprintk("RPC:      xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
-                       xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen);
-
-       if (xprt->tcp_copied == req->rq_private_buf.buflen)
-               xprt->tcp_flags &= ~XPRT_COPY_DATA;
-       else if (xprt->tcp_offset == xprt->tcp_reclen) {
-               if (xprt->tcp_flags & XPRT_LAST_FRAG)
-                       xprt->tcp_flags &= ~XPRT_COPY_DATA;
-       }
-
-out:
-       if (!(xprt->tcp_flags & XPRT_COPY_DATA)) {
-               dprintk("RPC: %4d received reply complete\n",
-                               req->rq_task->tk_pid);
-               xprt_complete_rqst(xprt, req, xprt->tcp_copied);
-       }
-       spin_unlock(&xprt->sock_lock);
-       tcp_check_recm(xprt);
-}
-
-/*
- * TCP discard extra bytes from a short read
- */
-static inline void
-tcp_read_discard(struct rpc_xprt *xprt, skb_reader_t *desc)
-{
-       size_t len;
-
-       len = xprt->tcp_reclen - xprt->tcp_offset;
-       if (len > desc->count)
-               len = desc->count;
-       desc->count -= len;
-       desc->offset += len;
-       xprt->tcp_offset += len;
-       dprintk("RPC:      discarded %Zu bytes\n", len);
-       tcp_check_recm(xprt);
-}
-
-/*
- * TCP record receive routine
- * We first have to grab the record marker, then the XID, then the data.
+/**
+ * xprt_update_rtt - update an RPC client's RTT state after receiving a reply
+ * @task: RPC request that recently completed
+ *
  */
-static int
-tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
-               unsigned int offset, size_t len)
-{
-       struct rpc_xprt *xprt = rd_desc->arg.data;
-       skb_reader_t desc = {
-               .skb    = skb,
-               .offset = offset,
-               .count  = len,
-               .csum   = 0
-               };
-
-       dprintk("RPC:      tcp_data_recv\n");
-       do {
-               /* Read in a new fragment marker if necessary */
-               /* Can we ever really expect to get completely empty fragments? */
-               if (xprt->tcp_flags & XPRT_COPY_RECM) {
-                       tcp_read_fraghdr(xprt, &desc);
-                       continue;
-               }
-               /* Read in the xid if necessary */
-               if (xprt->tcp_flags & XPRT_COPY_XID) {
-                       tcp_read_xid(xprt, &desc);
-                       continue;
-               }
-               /* Read in the request data */
-               if (xprt->tcp_flags & XPRT_COPY_DATA) {
-                       tcp_read_request(xprt, &desc);
-                       continue;
-               }
-               /* Skip over any trailing bytes on short reads */
-               tcp_read_discard(xprt, &desc);
-       } while (desc.count);
-       dprintk("RPC:      tcp_data_recv done\n");
-       return len - desc.count;
-}
-
-static void tcp_data_ready(struct sock *sk, int bytes)
+void xprt_update_rtt(struct rpc_task *task)
 {
-       struct rpc_xprt *xprt;
-       read_descriptor_t rd_desc;
-
-       read_lock(&sk->sk_callback_lock);
-       dprintk("RPC:      tcp_data_ready...\n");
-       if (!(xprt = xprt_from_sock(sk))) {
-               printk("RPC:      tcp_data_ready socket info not found!\n");
-               goto out;
-       }
-       if (xprt->shutdown)
-               goto out;
-
-       /* We use rd_desc to pass struct xprt to tcp_data_recv */
-       rd_desc.arg.data = xprt;
-       rd_desc.count = 65536;
-       tcp_read_sock(sk, &rd_desc, tcp_data_recv);
-out:
-       read_unlock(&sk->sk_callback_lock);
-}
-
-static void
-tcp_state_change(struct sock *sk)
-{
-       struct rpc_xprt *xprt;
+       struct rpc_rqst *req = task->tk_rqstp;
+       struct rpc_rtt *rtt = task->tk_client->cl_rtt;
+       unsigned timer = task->tk_msg.rpc_proc->p_timer;
 
-       read_lock(&sk->sk_callback_lock);
-       if (!(xprt = xprt_from_sock(sk)))
-               goto out;
-       dprintk("RPC:      tcp_state_change client %p...\n", xprt);
-       dprintk("RPC:      state %x conn %d dead %d zapped %d\n",
-                               sk->sk_state, xprt_connected(xprt),
-                               sock_flag(sk, SOCK_DEAD),
-                               sock_flag(sk, SOCK_ZAPPED));
-
-       switch (sk->sk_state) {
-       case TCP_ESTABLISHED:
-               spin_lock_bh(&xprt->sock_lock);
-               if (!xprt_test_and_set_connected(xprt)) {
-                       /* Reset TCP record info */
-                       xprt->tcp_offset = 0;
-                       xprt->tcp_reclen = 0;
-                       xprt->tcp_copied = 0;
-                       xprt->tcp_flags = XPRT_COPY_RECM | XPRT_COPY_XID;
-                       rpc_wake_up(&xprt->pending);
-               }
-               spin_unlock_bh(&xprt->sock_lock);
-               break;
-       case TCP_SYN_SENT:
-       case TCP_SYN_RECV:
-               break;
-       default:
-               xprt_disconnect(xprt);
-               break;
+       if (timer) {
+               if (req->rq_ntrans == 1)
+                       rpc_update_rtt(rtt, timer,
+                                       (long)jiffies - req->rq_xtime);
+               rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
        }
- out:
-       read_unlock(&sk->sk_callback_lock);
 }
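The rq_ntrans check above means only requests that have never been retransmitted feed the RTT estimator (the usual Karn's-algorithm precaution), while every completed request still updates the per-procedure backoff. A minimal sketch of the effect, assuming an rpc_proc entry with a non-zero p_timer:

	/* First transmission, reply arrives 12 jiffies later:              */
	/*   rq_ntrans == 1  ->  rpc_update_rtt(rtt, timer, 12)             */
	/*                       rpc_set_timeo(rtt, timer, 0)               */
	/* Retransmitted request: no RTT sample, backoff bookkeeping only:  */
	/*   rq_ntrans == 2  ->  rpc_set_timeo(rtt, timer, 1)               */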
 
-/*
- * Called when more output buffer space is available for this socket.
- * We try not to wake our writers until they can make "significant"
- * progress, otherwise we'll waste resources thrashing sock_sendmsg
- * with a bunch of small requests.
+/**
+ * xprt_complete_rqst - called when reply processing is complete
+ * @task: RPC request that recently completed
+ * @copied: actual number of bytes received from the transport
+ *
+ * Caller holds transport lock.
  */
-static void
-xprt_write_space(struct sock *sk)
+void xprt_complete_rqst(struct rpc_task *task, int copied)
 {
-       struct rpc_xprt *xprt;
-       struct socket   *sock;
-
-       read_lock(&sk->sk_callback_lock);
-       if (!(xprt = xprt_from_sock(sk)) || !(sock = sk->sk_socket))
-               goto out;
-       if (xprt->shutdown)
-               goto out;
-
-       /* Wait until we have enough socket memory */
-       if (xprt->stream) {
-               /* from net/core/stream.c:sk_stream_write_space */
-               if (sk_stream_wspace(sk) < sk_stream_min_wspace(sk))
-                       goto out;
-       } else {
-               /* from net/core/sock.c:sock_def_write_space */
-               if (!sock_writeable(sk))
-                       goto out;
-       }
+       struct rpc_rqst *req = task->tk_rqstp;
 
-       if (!test_and_clear_bit(SOCK_NOSPACE, &sock->flags))
-               goto out;
+       dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
+                       task->tk_pid, ntohl(req->rq_xid), copied);
 
-       spin_lock_bh(&xprt->sock_lock);
-       if (xprt->snd_task)
-               rpc_wake_up_task(xprt->snd_task);
-       spin_unlock_bh(&xprt->sock_lock);
-out:
-       read_unlock(&sk->sk_callback_lock);
+       list_del_init(&req->rq_list);
+       req->rq_received = req->rq_private_buf.len = copied;
+       rpc_wake_up_task(task);
 }
 
-/*
- * RPC receive timeout handler.
- */
-static void
-xprt_timer(struct rpc_task *task)
+static void xprt_timer(struct rpc_task *task)
 {
-       struct rpc_rqst *req = task->tk_rqstp;
+       struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;
 
-       spin_lock(&xprt->sock_lock);
-       if (req->rq_received)
-               goto out;
-
-       xprt_adjust_cwnd(req->rq_xprt, -ETIMEDOUT);
-       __xprt_put_cong(xprt, req);
+       dprintk("RPC: %4d xprt_timer\n", task->tk_pid);
 
-       dprintk("RPC: %4d xprt_timer (%s request)\n",
-               task->tk_pid, req ? "pending" : "backlogged");
-
-       task->tk_status  = -ETIMEDOUT;
-out:
+       spin_lock(&xprt->transport_lock);
+       if (!req->rq_received) {
+               if (xprt->ops->timer)
+                       xprt->ops->timer(task);
+               task->tk_status = -ETIMEDOUT;
+       }
        task->tk_timeout = 0;
        rpc_wake_up_task(task);
-       spin_unlock(&xprt->sock_lock);
+       spin_unlock(&xprt->transport_lock);
 }
 
-/*
- * Place the actual RPC call.
- * We have to copy the iovec because sendmsg fiddles with its contents.
+/**
+ * xprt_prepare_transmit - reserve the transport before sending a request
+ * @task: RPC task about to send a request
+ *
  */
-int
-xprt_prepare_transmit(struct rpc_task *task)
+int xprt_prepare_transmit(struct rpc_task *task)
 {
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;
@@ -1191,12 +690,12 @@ xprt_prepare_transmit(struct rpc_task *task)
        if (xprt->shutdown)
                return -EIO;
 
-       spin_lock_bh(&xprt->sock_lock);
+       spin_lock_bh(&xprt->transport_lock);
        if (req->rq_received && !req->rq_bytes_sent) {
                err = req->rq_received;
                goto out_unlock;
        }
-       if (!__xprt_lock_write(xprt, task)) {
+       if (!xprt->ops->reserve_xprt(task)) {
                err = -EAGAIN;
                goto out_unlock;
        }
@@ -1206,39 +705,42 @@ xprt_prepare_transmit(struct rpc_task *task)
                goto out_unlock;
        }
 out_unlock:
-       spin_unlock_bh(&xprt->sock_lock);
+       spin_unlock_bh(&xprt->transport_lock);
        return err;
 }
 
 void
-xprt_transmit(struct rpc_task *task)
+xprt_abort_transmit(struct rpc_task *task)
+{
+       struct rpc_xprt *xprt = task->tk_xprt;
+
+       xprt_release_write(xprt, task);
+}
+
+/**
+ * xprt_transmit - send an RPC request on a transport
+ * @task: controlling RPC task
+ *
+ * We have to copy the iovec because sendmsg fiddles with its contents.
+ */
+void xprt_transmit(struct rpc_task *task)
 {
-       struct rpc_clnt *clnt = task->tk_client;
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;
-       int status, retry = 0;
-
+       int status;
 
        dprintk("RPC: %4d xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);
 
-       /* set up everything as needed. */
-       /* Write the record marker */
-       if (xprt->stream) {
-               u32     *marker = req->rq_svec[0].iov_base;
-
-               *marker = htonl(0x80000000|(req->rq_slen-sizeof(*marker)));
-       }
-
        smp_rmb();
        if (!req->rq_received) {
                if (list_empty(&req->rq_list)) {
-                       spin_lock_bh(&xprt->sock_lock);
+                       spin_lock_bh(&xprt->transport_lock);
                        /* Update the softirq receive buffer */
                        memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
                                        sizeof(req->rq_private_buf));
                        /* Add request to the receive list */
                        list_add_tail(&req->rq_list, &xprt->recv);
-                       spin_unlock_bh(&xprt->sock_lock);
+                       spin_unlock_bh(&xprt->transport_lock);
                        xprt_reset_majortimeo(req);
                        /* Turn off autodisconnect */
                        del_singleshot_timer_sync(&xprt->timer);
@@ -1246,40 +748,19 @@ xprt_transmit(struct rpc_task *task)
        } else if (!req->rq_bytes_sent)
                return;
 
-       /* Continue transmitting the packet/record. We must be careful
-        * to cope with writespace callbacks arriving _after_ we have
-        * called xprt_sendmsg().
-        */
-       while (1) {
-               req->rq_xtime = jiffies;
-               status = xprt_sendmsg(xprt, req);
-
-               if (status < 0)
-                       break;
-
-               if (xprt->stream) {
-                       req->rq_bytes_sent += status;
-
-                       /* If we've sent the entire packet, immediately
-                        * reset the count of bytes sent. */
-                       if (req->rq_bytes_sent >= req->rq_slen) {
-                               req->rq_bytes_sent = 0;
-                               goto out_receive;
-                       }
-               } else {
-                       if (status >= req->rq_slen)
-                               goto out_receive;
-                       status = -EAGAIN;
-                       break;
-               }
-
-               dprintk("RPC: %4d xmit incomplete (%d left of %d)\n",
-                               task->tk_pid, req->rq_slen - req->rq_bytes_sent,
-                               req->rq_slen);
-
-               status = -EAGAIN;
-               if (retry++ > 50)
-                       break;
+       status = xprt->ops->send_request(task);
+       if (status == 0) {
+               dprintk("RPC: %4d xmit complete\n", task->tk_pid);
+               spin_lock_bh(&xprt->transport_lock);
+               xprt->ops->set_retrans_timeout(task);
+               /* Don't race with disconnect */
+               if (!xprt_connected(xprt))
+                       task->tk_status = -ENOTCONN;
+               else if (!req->rq_received)
+                       rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer);
+               xprt->ops->release_xprt(xprt, task);
+               spin_unlock_bh(&xprt->transport_lock);
+               return;
        }
 
        /* Note: at this point, task->tk_sleeping has not yet been set,
@@ -1289,60 +770,19 @@ xprt_transmit(struct rpc_task *task)
        task->tk_status = status;
 
        switch (status) {
-       case -EAGAIN:
-               if (test_bit(SOCK_ASYNC_NOSPACE, &xprt->sock->flags)) {
-                       /* Protect against races with xprt_write_space */
-                       spin_lock_bh(&xprt->sock_lock);
-                       /* Don't race with disconnect */
-                       if (!xprt_connected(xprt))
-                               task->tk_status = -ENOTCONN;
-                       else if (test_bit(SOCK_NOSPACE, &xprt->sock->flags)) {
-                               task->tk_timeout = req->rq_timeout;
-                               rpc_sleep_on(&xprt->pending, task, NULL, NULL);
-                       }
-                       spin_unlock_bh(&xprt->sock_lock);
-                       return;
-               }
-               /* Keep holding the socket if it is blocked */
-               rpc_delay(task, HZ>>4);
-               return;
        case -ECONNREFUSED:
-               task->tk_timeout = RPC_REESTABLISH_TIMEOUT;
                rpc_sleep_on(&xprt->sending, task, NULL, NULL);
+       case -EAGAIN:
        case -ENOTCONN:
                return;
        default:
-               if (xprt->stream)
-                       xprt_disconnect(xprt);
+               break;
        }
        xprt_release_write(xprt, task);
        return;
- out_receive:
-       dprintk("RPC: %4d xmit complete\n", task->tk_pid);
-       /* Set the task's receive timeout value */
-       spin_lock_bh(&xprt->sock_lock);
-       if (!xprt->nocong) {
-               int timer = task->tk_msg.rpc_proc->p_timer;
-               task->tk_timeout = rpc_calc_rto(clnt->cl_rtt, timer);
-               task->tk_timeout <<= rpc_ntimeo(clnt->cl_rtt, timer) + req->rq_retries;
-               if (task->tk_timeout > xprt->timeout.to_maxval || task->tk_timeout == 0)
-                       task->tk_timeout = xprt->timeout.to_maxval;
-       } else
-               task->tk_timeout = req->rq_timeout;
-       /* Don't race with disconnect */
-       if (!xprt_connected(xprt))
-               task->tk_status = -ENOTCONN;
-       else if (!req->rq_received)
-               rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer);
-       __xprt_release_write(xprt, task);
-       spin_unlock_bh(&xprt->sock_lock);
 }
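The xprt->ops calls used above (reserve_xprt, release_xprt, connect, send_request, set_retrans_timeout, timer, release_request, destroy) dispatch through a per-transport method table. A sketch of what such a table could look like for the UDP socket transport, assuming it is declared as struct rpc_xprt_ops and that the helpers marked below exist under these names:

	static struct rpc_xprt_ops xs_udp_ops = {
		.reserve_xprt		= xprt_reserve_xprt_cong,	/* assumed helper */
		.release_xprt		= xprt_release_xprt_cong,	/* assumed helper */
		.connect		= xs_udp_connect,		/* assumed helper */
		.send_request		= xs_udp_send_request,		/* defined in xprtsock.c below */
		.set_retrans_timeout	= xprt_set_retrans_timeout_rtt,	/* assumed helper */
		.timer			= xs_udp_timer,			/* assumed helper */
		.release_request	= xprt_release_rqst_cong,	/* assumed helper */
		.destroy		= xs_destroy,			/* defined in xprtsock.c below */
	};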
 
-/*
- * Reserve an RPC call slot.
- */
-static inline void
-do_xprt_reserve(struct rpc_task *task)
+static inline void do_xprt_reserve(struct rpc_task *task)
 {
        struct rpc_xprt *xprt = task->tk_xprt;
 
@@ -1362,22 +802,25 @@ do_xprt_reserve(struct rpc_task *task)
        rpc_sleep_on(&xprt->backlog, task, NULL, NULL);
 }
 
-void
-xprt_reserve(struct rpc_task *task)
+/**
+ * xprt_reserve - allocate an RPC request slot
+ * @task: RPC task requesting a slot allocation
+ *
+ * If no more slots are available, place the task on the transport's
+ * backlog queue.
+ */
+void xprt_reserve(struct rpc_task *task)
 {
        struct rpc_xprt *xprt = task->tk_xprt;
 
        task->tk_status = -EIO;
        if (!xprt->shutdown) {
-               spin_lock(&xprt->xprt_lock);
+               spin_lock(&xprt->reserve_lock);
                do_xprt_reserve(task);
-               spin_unlock(&xprt->xprt_lock);
+               spin_unlock(&xprt->reserve_lock);
        }
 }
 
-/*
- * Allocate a 'unique' XID
- */
 static inline u32 xprt_alloc_xid(struct rpc_xprt *xprt)
 {
        return xprt->xid++;
@@ -1388,11 +831,7 @@ static inline void xprt_init_xid(struct rpc_xprt *xprt)
        get_random_bytes(&xprt->xid, sizeof(xprt->xid));
 }
 
-/*
- * Initialize RPC request
- */
-static void
-xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
+static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
 {
        struct rpc_rqst *req = task->tk_rqstp;
 
@@ -1400,128 +839,104 @@ xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
        req->rq_task    = task;
        req->rq_xprt    = xprt;
        req->rq_xid     = xprt_alloc_xid(xprt);
+       req->rq_release_snd_buf = NULL;
        dprintk("RPC: %4d reserved req %p xid %08x\n", task->tk_pid,
                        req, ntohl(req->rq_xid));
 }
 
-/*
- * Release an RPC call slot
+/**
+ * xprt_release - release an RPC request slot
+ * @task: task which is finished with the slot
+ *
  */
-void
-xprt_release(struct rpc_task *task)
+void xprt_release(struct rpc_task *task)
 {
        struct rpc_xprt *xprt = task->tk_xprt;
        struct rpc_rqst *req;
 
        if (!(req = task->tk_rqstp))
                return;
-       spin_lock_bh(&xprt->sock_lock);
-       __xprt_release_write(xprt, task);
-       __xprt_put_cong(xprt, req);
+       spin_lock_bh(&xprt->transport_lock);
+       xprt->ops->release_xprt(xprt, task);
+       if (xprt->ops->release_request)
+               xprt->ops->release_request(task);
        if (!list_empty(&req->rq_list))
                list_del(&req->rq_list);
        xprt->last_used = jiffies;
        if (list_empty(&xprt->recv) && !xprt->shutdown)
-               mod_timer(&xprt->timer, xprt->last_used + XPRT_IDLE_TIMEOUT);
-       spin_unlock_bh(&xprt->sock_lock);
+               mod_timer(&xprt->timer,
+                               xprt->last_used + xprt->idle_timeout);
+       spin_unlock_bh(&xprt->transport_lock);
        task->tk_rqstp = NULL;
+       if (req->rq_release_snd_buf)
+               req->rq_release_snd_buf(req);
        memset(req, 0, sizeof(*req));   /* mark unused */
 
        dprintk("RPC: %4d release request %p\n", task->tk_pid, req);
 
-       spin_lock(&xprt->xprt_lock);
+       spin_lock(&xprt->reserve_lock);
        list_add(&req->rq_list, &xprt->free);
-       xprt_clear_backlog(xprt);
-       spin_unlock(&xprt->xprt_lock);
-}
-
-/*
- * Set default timeout parameters
- */
-static void
-xprt_default_timeout(struct rpc_timeout *to, int proto)
-{
-       if (proto == IPPROTO_UDP)
-               xprt_set_timeout(to, 5,  5 * HZ);
-       else
-               xprt_set_timeout(to, 5, 60 * HZ);
+       rpc_wake_up_next(&xprt->backlog);
+       spin_unlock(&xprt->reserve_lock);
 }
 
-/*
- * Set constant timeout
+/**
+ * xprt_set_timeout - set constant RPC timeout
+ * @to: RPC timeout parameters to set up
+ * @retr: number of retries
+ * @incr: amount of increase after each retry
+ *
  */
-void
-xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr)
+void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr)
 {
        to->to_initval   = to->to_increment = incr;
-       to->to_maxval    = incr * retr;
+       to->to_maxval    = to->to_initval + (incr * retr);
        to->to_retries   = retr;
        to->to_exponential = 0;
 }
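A quick worked example of the arithmetic above, assuming retr = 5 and incr = 5 * HZ:

	struct rpc_timeout to;

	xprt_set_timeout(&to, 5, 5 * HZ);
	/* to.to_initval   == 5 * HZ    first timeout fires after 5 seconds    */
	/* to.to_increment == 5 * HZ    each retry waits 5 seconds longer      */
	/* to.to_maxval    == 30 * HZ   to_initval + 5 retries * 5 * HZ        */
	/* to.to_retries   == 5                                                 */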
 
-unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
-unsigned int xprt_tcp_slot_table_entries = RPC_DEF_SLOT_TABLE;
-
-/*
- * Initialize an RPC client
- */
-static struct rpc_xprt *
-xprt_setup(int proto, struct sockaddr_in *ap, struct rpc_timeout *to)
+static struct rpc_xprt *xprt_setup(int proto, struct sockaddr_in *ap, struct rpc_timeout *to)
 {
+       int result;
        struct rpc_xprt *xprt;
-       unsigned int entries;
-       size_t slot_table_size;
        struct rpc_rqst *req;
 
-       dprintk("RPC:      setting up %s transport...\n",
-                               proto == IPPROTO_UDP? "UDP" : "TCP");
-
-       entries = (proto == IPPROTO_TCP)?
-               xprt_tcp_slot_table_entries : xprt_udp_slot_table_entries;
-
        if ((xprt = kmalloc(sizeof(struct rpc_xprt), GFP_KERNEL)) == NULL)
                return ERR_PTR(-ENOMEM);
        memset(xprt, 0, sizeof(*xprt)); /* Nnnngh! */
-       xprt->max_reqs = entries;
-       slot_table_size = entries * sizeof(xprt->slot[0]);
-       xprt->slot = kmalloc(slot_table_size, GFP_KERNEL);
-       if (xprt->slot == NULL) {
-               kfree(xprt);
-               return ERR_PTR(-ENOMEM);
-       }
-       memset(xprt->slot, 0, slot_table_size);
 
        xprt->addr = *ap;
-       xprt->prot = proto;
-       xprt->stream = (proto == IPPROTO_TCP)? 1 : 0;
-       if (xprt->stream) {
-               xprt->cwnd = RPC_MAXCWND(xprt);
-               xprt->nocong = 1;
-               xprt->max_payload = (1U << 31) - 1;
-       } else {
-               xprt->cwnd = RPC_INITCWND;
-               xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
+
+       switch (proto) {
+       case IPPROTO_UDP:
+               result = xs_setup_udp(xprt, to);
+               break;
+       case IPPROTO_TCP:
+               result = xs_setup_tcp(xprt, to);
+               break;
+       default:
+               printk(KERN_ERR "RPC: unrecognized transport protocol: %d\n",
+                               proto);
+               result = -EIO;
+               break;
+       }
+       if (result) {
+               kfree(xprt);
+               return ERR_PTR(result);
        }
-       spin_lock_init(&xprt->sock_lock);
-       spin_lock_init(&xprt->xprt_lock);
-       init_waitqueue_head(&xprt->cong_wait);
+
+       spin_lock_init(&xprt->transport_lock);
+       spin_lock_init(&xprt->reserve_lock);
 
        INIT_LIST_HEAD(&xprt->free);
        INIT_LIST_HEAD(&xprt->recv);
-       INIT_WORK(&xprt->sock_connect, xprt_socket_connect, xprt);
-       INIT_WORK(&xprt->task_cleanup, xprt_socket_autoclose, xprt);
+       INIT_WORK(&xprt->task_cleanup, xprt_autoclose, xprt);
        init_timer(&xprt->timer);
        xprt->timer.function = xprt_init_autodisconnect;
        xprt->timer.data = (unsigned long) xprt;
        xprt->last_used = jiffies;
-       xprt->port = XPRT_MAX_RESVPORT;
-
-       /* Set timeout parameters */
-       if (to) {
-               xprt->timeout = *to;
-       } else
-               xprt_default_timeout(&xprt->timeout, xprt->prot);
+       xprt->cwnd = RPC_INITCWND;
 
        rpc_init_wait_queue(&xprt->pending, "xprt_pending");
        rpc_init_wait_queue(&xprt->sending, "xprt_sending");
@@ -1529,139 +944,25 @@ xprt_setup(int proto, struct sockaddr_in *ap, struct rpc_timeout *to)
        rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");
 
        /* initialize free list */
-       for (req = &xprt->slot[entries-1]; req >= &xprt->slot[0]; req--)
+       for (req = &xprt->slot[xprt->max_reqs-1]; req >= &xprt->slot[0]; req--)
                list_add(&req->rq_list, &xprt->free);
 
        xprt_init_xid(xprt);
 
-       /* Check whether we want to use a reserved port */
-       xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0;
-
        dprintk("RPC:      created transport %p with %u slots\n", xprt,
                        xprt->max_reqs);
        
        return xprt;
 }
 
-/*
- * Bind to a reserved port
- */
-static inline int xprt_bindresvport(struct rpc_xprt *xprt, struct socket *sock)
-{
-       struct sockaddr_in myaddr = {
-               .sin_family = AF_INET,
-       };
-       int             err, port;
-
-       /* Were we already bound to a given port? Try to reuse it */
-       port = xprt->port;
-       do {
-               myaddr.sin_port = htons(port);
-               err = sock->ops->bind(sock, (struct sockaddr *) &myaddr,
-                                               sizeof(myaddr));
-               if (err == 0) {
-                       xprt->port = port;
-                       return 0;
-               }
-               if (--port == 0)
-                       port = XPRT_MAX_RESVPORT;
-       } while (err == -EADDRINUSE && port != xprt->port);
-
-       printk("RPC: Can't bind to reserved port (%d).\n", -err);
-       return err;
-}
-
-static void
-xprt_bind_socket(struct rpc_xprt *xprt, struct socket *sock)
-{
-       struct sock     *sk = sock->sk;
-
-       if (xprt->inet)
-               return;
-
-       write_lock_bh(&sk->sk_callback_lock);
-       sk->sk_user_data = xprt;
-       xprt->old_data_ready = sk->sk_data_ready;
-       xprt->old_state_change = sk->sk_state_change;
-       xprt->old_write_space = sk->sk_write_space;
-       if (xprt->prot == IPPROTO_UDP) {
-               sk->sk_data_ready = udp_data_ready;
-               sk->sk_no_check = UDP_CSUM_NORCV;
-               xprt_set_connected(xprt);
-       } else {
-               tcp_sk(sk)->nonagle = 1;        /* disable Nagle's algorithm */
-               sk->sk_data_ready = tcp_data_ready;
-               sk->sk_state_change = tcp_state_change;
-               xprt_clear_connected(xprt);
-       }
-       sk->sk_write_space = xprt_write_space;
-
-       /* Reset to new socket */
-       xprt->sock = sock;
-       xprt->inet = sk;
-       write_unlock_bh(&sk->sk_callback_lock);
-
-       return;
-}
-
-/*
- * Set socket buffer length
- */
-void
-xprt_sock_setbufsize(struct rpc_xprt *xprt)
-{
-       struct sock *sk = xprt->inet;
-
-       if (xprt->stream)
-               return;
-       if (xprt->rcvsize) {
-               sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
-               sk->sk_rcvbuf = xprt->rcvsize * xprt->max_reqs *  2;
-       }
-       if (xprt->sndsize) {
-               sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
-               sk->sk_sndbuf = xprt->sndsize * xprt->max_reqs * 2;
-               sk->sk_write_space(sk);
-       }
-}
-
-/*
- * Datastream sockets are created here, but xprt_connect will create
- * and connect stream sockets.
- */
-static struct socket * xprt_create_socket(struct rpc_xprt *xprt, int proto, int resvport)
-{
-       struct socket   *sock;
-       int             type, err;
-
-       dprintk("RPC:      xprt_create_socket(%s %d)\n",
-                          (proto == IPPROTO_UDP)? "udp" : "tcp", proto);
-
-       type = (proto == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM;
-
-       if ((err = sock_create_kern(PF_INET, type, proto, &sock)) < 0) {
-               printk("RPC: can't create socket (%d).\n", -err);
-               return NULL;
-       }
-
-       /* If the caller has the capability, bind to a reserved port */
-       if (resvport && xprt_bindresvport(xprt, sock) < 0) {
-               printk("RPC: can't bind to reserved port.\n");
-               goto failed;
-       }
-
-       return sock;
-
-failed:
-       sock_release(sock);
-       return NULL;
-}
-
-/*
- * Create an RPC client transport given the protocol and peer address.
+/**
+ * xprt_create_proto - create an RPC client transport
+ * @proto: requested transport protocol
+ * @sap: remote peer's address
+ * @to: timeout parameters for new transport
+ *
  */
-struct rpc_xprt *
-xprt_create_proto(int proto, struct sockaddr_in *sap, struct rpc_timeout *to)
+struct rpc_xprt *xprt_create_proto(int proto, struct sockaddr_in *sap, struct rpc_timeout *to)
 {
        struct rpc_xprt *xprt;
 
@@ -1673,46 +974,26 @@ xprt_create_proto(int proto, struct sockaddr_in *sap, struct rpc_timeout *to)
        return xprt;
 }
 
-/*
- * Prepare for transport shutdown.
- */
-static void
-xprt_shutdown(struct rpc_xprt *xprt)
+static void xprt_shutdown(struct rpc_xprt *xprt)
 {
        xprt->shutdown = 1;
        rpc_wake_up(&xprt->sending);
        rpc_wake_up(&xprt->resend);
-       rpc_wake_up(&xprt->pending);
+       xprt_wake_pending_tasks(xprt, -EIO);
        rpc_wake_up(&xprt->backlog);
-       wake_up(&xprt->cong_wait);
        del_timer_sync(&xprt->timer);
-
-       /* synchronously wait for connect worker to finish */
-       cancel_delayed_work(&xprt->sock_connect);
-       flush_scheduled_work();
 }
 
-/*
- * Clear the xprt backlog queue
- */
-static int
-xprt_clear_backlog(struct rpc_xprt *xprt) {
-       rpc_wake_up_next(&xprt->backlog);
-       wake_up(&xprt->cong_wait);
-       return 1;
-}
-
-/*
- * Destroy an RPC transport, killing off all requests.
+/**
+ * xprt_destroy - destroy an RPC transport, killing off all requests.
+ * @xprt: transport to destroy
+ *
  */
-int
-xprt_destroy(struct rpc_xprt *xprt)
+int xprt_destroy(struct rpc_xprt *xprt)
 {
        dprintk("RPC:      destroying transport %p\n", xprt);
        xprt_shutdown(xprt);
-       xprt_disconnect(xprt);
-       xprt_close(xprt);
-       kfree(xprt->slot);
+       xprt->ops->destroy(xprt);
        kfree(xprt);
 
        return 0;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
new file mode 100644 (file)
index 0000000..2e15292
--- /dev/null
@@ -0,0 +1,1252 @@
+/*
+ * linux/net/sunrpc/xprtsock.c
+ *
+ * Client-side transport implementation for sockets.
+ *
+ * TCP callback races fixes (C) 1998 Red Hat Software <alan@redhat.com>
+ * TCP send fixes (C) 1998 Red Hat Software <alan@redhat.com>
+ * TCP NFS related read + write fixes
+ *  (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
+ *
+ * Rewrite of large parts of the code in order to stabilize TCP stuff.
+ * Fix behaviour when socket buffer is full.
+ *  (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
+ *
+ * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/capability.h>
+#include <linux/sched.h>
+#include <linux/pagemap.h>
+#include <linux/errno.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/net.h>
+#include <linux/mm.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/file.h>
+
+#include <net/sock.h>
+#include <net/checksum.h>
+#include <net/udp.h>
+#include <net/tcp.h>
+
+/*
+ * How many times to try sending a request on a socket before waiting
+ * for the socket buffer to clear.
+ */
+#define XS_SENDMSG_RETRY       (10U)
+
+/*
+ * Time out for an RPC UDP socket connect.  UDP socket connects are
+ * synchronous, but we set a timeout anyway in case of resource
+ * exhaustion on the local host.
+ */
+#define XS_UDP_CONN_TO         (5U * HZ)
+
+/*
+ * Wait duration for an RPC TCP connection to be established.  Solaris
+ * NFS over TCP uses 60 seconds, for example, which is in line with how
+ * long a server takes to reboot.
+ */
+#define XS_TCP_CONN_TO         (60U * HZ)
+
+/*
+ * Wait duration for a reply from the RPC portmapper.
+ */
+#define XS_BIND_TO             (60U * HZ)
+
+/*
+ * Delay if a UDP socket connect error occurs.  This is most likely some
+ * kind of resource problem on the local host.
+ */
+#define XS_UDP_REEST_TO                (2U * HZ)
+
+/*
+ * The reestablish timeout allows clients to delay for a bit before attempting
+ * to reconnect to a server that just dropped our connection.
+ *
+ * We implement an exponential backoff when trying to reestablish a TCP
+ * transport connection with the server.  Some servers like to drop a TCP
+ * connection when they are overworked, so we start with a short timeout and
+ * increase over time if the server is down or not responding.
+ */
+#define XS_TCP_INIT_REEST_TO   (3U * HZ)
+#define XS_TCP_MAX_REEST_TO    (5U * 60 * HZ)
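A minimal sketch of the backoff these two bounds describe: double the reconnect delay after each failed attempt and clamp it between them. The reestablish_timeout field name is an assumption made for illustration only:

	static void example_tcp_reconnect_backoff(struct rpc_xprt *xprt)
	{
		/* assumed field holding the current reconnect delay in jiffies */
		xprt->reestablish_timeout <<= 1;
		if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO)
			xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
		if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
			xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
	}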
+
+/*
+ * TCP idle timeout; client drops the transport socket if it is idle
+ * for this long.  Note that we also timeout UDP sockets to prevent
+ * holding port numbers when there is no RPC traffic.
+ */
+#define XS_IDLE_DISC_TO                (5U * 60 * HZ)
+
+#ifdef RPC_DEBUG
+# undef  RPC_DEBUG_DATA
+# define RPCDBG_FACILITY       RPCDBG_TRANS
+#endif
+
+#ifdef RPC_DEBUG_DATA
+static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
+{
+       u8 *buf = (u8 *) packet;
+       int j;
+
+       dprintk("RPC:      %s\n", msg);
+       for (j = 0; j < count && j < 128; j += 4) {
+               if (!(j & 31)) {
+                       if (j)
+                               dprintk("\n");
+                       dprintk("0x%04x ", j);
+               }
+               dprintk("%02x%02x%02x%02x ",
+                       buf[j], buf[j+1], buf[j+2], buf[j+3]);
+       }
+       dprintk("\n");
+}
+#else
+static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
+{
+       /* NOP */
+}
+#endif
+
+#define XS_SENDMSG_FLAGS       (MSG_DONTWAIT | MSG_NOSIGNAL)
+
+static inline int xs_send_head(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, unsigned int len)
+{
+       struct kvec iov = {
+               .iov_base       = xdr->head[0].iov_base + base,
+               .iov_len        = len - base,
+       };
+       struct msghdr msg = {
+               .msg_name       = addr,
+               .msg_namelen    = addrlen,
+               .msg_flags      = XS_SENDMSG_FLAGS,
+       };
+
+       if (xdr->len > len)
+               msg.msg_flags |= MSG_MORE;
+
+       if (likely(iov.iov_len))
+               return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
+       return kernel_sendmsg(sock, &msg, NULL, 0, 0);
+}
+
+static int xs_send_tail(struct socket *sock, struct xdr_buf *xdr, unsigned int base, unsigned int len)
+{
+       struct kvec iov = {
+               .iov_base       = xdr->tail[0].iov_base + base,
+               .iov_len        = len - base,
+       };
+       struct msghdr msg = {
+               .msg_flags      = XS_SENDMSG_FLAGS,
+       };
+
+       return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
+}
+
+/**
+ * xs_sendpages - write pages directly to a socket
+ * @sock: socket to send on
+ * @addr: UDP only -- address of destination
+ * @addrlen: UDP only -- length of destination address
+ * @xdr: buffer containing this request
+ * @base: starting position in the buffer
+ *
+ */
+static inline int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base)
+{
+       struct page **ppage = xdr->pages;
+       unsigned int len, pglen = xdr->page_len;
+       int err, ret = 0;
+       ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
+
+       if (unlikely(!sock))
+               return -ENOTCONN;
+
+       clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
+
+       len = xdr->head[0].iov_len;
+       if (base < len || (addr != NULL && base == 0)) {
+               err = xs_send_head(sock, addr, addrlen, xdr, base, len);
+               if (ret == 0)
+                       ret = err;
+               else if (err > 0)
+                       ret += err;
+               if (err != (len - base))
+                       goto out;
+               base = 0;
+       } else
+               base -= len;
+
+       if (unlikely(pglen == 0))
+               goto copy_tail;
+       if (unlikely(base >= pglen)) {
+               base -= pglen;
+               goto copy_tail;
+       }
+       if (base || xdr->page_base) {
+               pglen -= base;
+               base += xdr->page_base;
+               ppage += base >> PAGE_CACHE_SHIFT;
+               base &= ~PAGE_CACHE_MASK;
+       }
+
+       sendpage = sock->ops->sendpage ? : sock_no_sendpage;
+       do {
+               int flags = XS_SENDMSG_FLAGS;
+
+               len = PAGE_CACHE_SIZE;
+               if (base)
+                       len -= base;
+               if (pglen < len)
+                       len = pglen;
+
+               if (pglen != len || xdr->tail[0].iov_len != 0)
+                       flags |= MSG_MORE;
+
+               /* Hmm... We might be dealing with highmem pages */
+               if (PageHighMem(*ppage))
+                       sendpage = sock_no_sendpage;
+               err = sendpage(sock, *ppage, base, len, flags);
+               if (ret == 0)
+                       ret = err;
+               else if (err > 0)
+                       ret += err;
+               if (err != len)
+                       goto out;
+               base = 0;
+               ppage++;
+       } while ((pglen -= len) != 0);
+copy_tail:
+       len = xdr->tail[0].iov_len;
+       if (base < len) {
+               err = xs_send_tail(sock, xdr, base, len);
+               if (ret == 0)
+                       ret = err;
+               else if (err > 0)
+                       ret += err;
+       }
+out:
+       return ret;
+}
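A worked example of the base-offset handling above, assuming an xdr_buf with a 100-byte head, 4096 bytes of page data (page_base == 0) and a 20-byte tail:

	/* base == 0:    send head, then pages, then tail in one pass.        */
	/* base == 150:  150 >= 100, so the head is skipped and base becomes  */
	/*               50; sending resumes 50 bytes into the page data and  */
	/*               then falls through to the tail.                      */
	/* base == 4200: head and pages are both skipped (4200 - 100 - 4096   */
	/*               == 4), so only the last 16 bytes of the tail go out. */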
+
+/**
+ * xs_nospace - place task on wait queue if transmit was incomplete
+ * @task: task to put to sleep
+ *
+ */
+static void xs_nospace(struct rpc_task *task)
+{
+       struct rpc_rqst *req = task->tk_rqstp;
+       struct rpc_xprt *xprt = req->rq_xprt;
+
+       dprintk("RPC: %4d xmit incomplete (%u left of %u)\n",
+                       task->tk_pid, req->rq_slen - req->rq_bytes_sent,
+                       req->rq_slen);
+
+       if (test_bit(SOCK_ASYNC_NOSPACE, &xprt->sock->flags)) {
+               /* Protect against races with write_space */
+               spin_lock_bh(&xprt->transport_lock);
+
+               /* Don't race with disconnect */
+               if (!xprt_connected(xprt))
+                       task->tk_status = -ENOTCONN;
+               else if (test_bit(SOCK_NOSPACE, &xprt->sock->flags))
+                       xprt_wait_for_buffer_space(task);
+
+               spin_unlock_bh(&xprt->transport_lock);
+       } else
+               /* Keep holding the socket if it is blocked */
+               rpc_delay(task, HZ>>4);
+}
+
+/**
+ * xs_udp_send_request - write an RPC request to a UDP socket
+ * @task: address of RPC task that manages the state of an RPC request
+ *
+ * Return values:
+ *        0:   The request has been sent
+ *   EAGAIN:   The socket was blocked, please call again later to
+ *             complete the request
+ * ENOTCONN:   Caller needs to invoke connect logic then call again
+ *    other:   Some other error occurred, the request was not sent
+ */
+static int xs_udp_send_request(struct rpc_task *task)
+{
+       struct rpc_rqst *req = task->tk_rqstp;
+       struct rpc_xprt *xprt = req->rq_xprt;
+       struct xdr_buf *xdr = &req->rq_snd_buf;
+       int status;
+
+       xs_pktdump("packet data:",
+                               req->rq_svec->iov_base,
+                               req->rq_svec->iov_len);
+
+       req->rq_xtime = jiffies;
+       status = xs_sendpages(xprt->sock, (struct sockaddr *) &xprt->addr,
+                               sizeof(xprt->addr), xdr, req->rq_bytes_sent);
+
+       dprintk("RPC:      xs_udp_send_request(%u) = %d\n",
+                       xdr->len - req->rq_bytes_sent, status);
+
+       if (likely(status >= (int) req->rq_slen))
+               return 0;
+
+       /* Still some bytes left; set up for a retry later. */
+       if (status > 0)
+               status = -EAGAIN;
+
+       switch (status) {
+       case -ENETUNREACH:
+       case -EPIPE:
+       case -ECONNREFUSED:
+               /* When the server has died, an ICMP port unreachable message
+                * prompts ECONNREFUSED. */
+               break;
+       case -EAGAIN:
+               xs_nospace(task);
+               break;
+       default:
+               dprintk("RPC:      sendmsg returned unrecognized error %d\n",
+                       -status);
+               break;
+       }
+
+       return status;
+}
+
+static inline void xs_encode_tcp_record_marker(struct xdr_buf *buf)
+{
+       u32 reclen = buf->len - sizeof(rpc_fraghdr);
+       rpc_fraghdr *base = buf->head[0].iov_base;
+       *base = htonl(RPC_LAST_STREAM_FRAGMENT | reclen);
+}
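For example, a 100-byte send buffer (whose head already reserves the 4 bytes for the marker, as the TCP send path below expects) is framed as a single, final record:

	/* reclen = 100 - sizeof(rpc_fraghdr) = 96, so the marker written    */
	/* into the stream is htonl(RPC_LAST_STREAM_FRAGMENT | 96),          */
	/* i.e. htonl(0x80000060).  The receive path undoes this: the top    */
	/* bit sets XPRT_LAST_FRAG and the low 31 bits give the length.      */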
+
+/**
+ * xs_tcp_send_request - write an RPC request to a TCP socket
+ * @task: address of RPC task that manages the state of an RPC request
+ *
+ * Return values:
+ *        0:   The request has been sent
+ *   EAGAIN:   The socket was blocked, please call again later to
+ *             complete the request
+ * ENOTCONN:   Caller needs to invoke connect logic then call again
+ *    other:   Some other error occurred, the request was not sent
+ *
+ * XXX: In the case of soft timeouts, should we eventually give up
+ *     if sendmsg is not able to make progress?
+ */
+static int xs_tcp_send_request(struct rpc_task *task)
+{
+       struct rpc_rqst *req = task->tk_rqstp;
+       struct rpc_xprt *xprt = req->rq_xprt;
+       struct xdr_buf *xdr = &req->rq_snd_buf;
+       int status, retry = 0;
+
+       xs_encode_tcp_record_marker(&req->rq_snd_buf);
+
+       xs_pktdump("packet data:",
+                               req->rq_svec->iov_base,
+                               req->rq_svec->iov_len);
+
+       /* Continue transmitting the packet/record. We must be careful
+        * to cope with writespace callbacks arriving _after_ we have
+        * called sendmsg(). */
+       while (1) {
+               req->rq_xtime = jiffies;
+               status = xs_sendpages(xprt->sock, NULL, 0, xdr,
+                                               req->rq_bytes_sent);
+
+               dprintk("RPC:      xs_tcp_send_request(%u) = %d\n",
+                               xdr->len - req->rq_bytes_sent, status);
+
+               if (unlikely(status < 0))
+                       break;
+
+               /* If we've sent the entire packet, immediately
+                * reset the count of bytes sent. */
+               req->rq_bytes_sent += status;
+               if (likely(req->rq_bytes_sent >= req->rq_slen)) {
+                       req->rq_bytes_sent = 0;
+                       return 0;
+               }
+
+               status = -EAGAIN;
+               if (retry++ > XS_SENDMSG_RETRY)
+                       break;
+       }
+
+       switch (status) {
+       case -EAGAIN:
+               xs_nospace(task);
+               break;
+       case -ECONNREFUSED:
+       case -ECONNRESET:
+       case -ENOTCONN:
+       case -EPIPE:
+               status = -ENOTCONN;
+               break;
+       default:
+               dprintk("RPC:      sendmsg returned unrecognized error %d\n",
+                       -status);
+               xprt_disconnect(xprt);
+               break;
+       }
+
+       return status;
+}
+
+/**
+ * xs_close - close a socket
+ * @xprt: transport
+ *
+ * This is used when all requests are complete; i.e., no DRC state remains
+ * on the server we want to save.
+ */
+static void xs_close(struct rpc_xprt *xprt)
+{
+       struct socket *sock = xprt->sock;
+       struct sock *sk = xprt->inet;
+
+       if (!sk)
+               return;
+
+       dprintk("RPC:      xs_close xprt %p\n", xprt);
+
+       write_lock_bh(&sk->sk_callback_lock);
+       xprt->inet = NULL;
+       xprt->sock = NULL;
+
+       sk->sk_user_data = NULL;
+       sk->sk_data_ready = xprt->old_data_ready;
+       sk->sk_state_change = xprt->old_state_change;
+       sk->sk_write_space = xprt->old_write_space;
+       write_unlock_bh(&sk->sk_callback_lock);
+
+       sk->sk_no_check = 0;
+
+       sock_release(sock);
+}
+
+/**
+ * xs_destroy - prepare to shutdown a transport
+ * @xprt: doomed transport
+ *
+ */
+static void xs_destroy(struct rpc_xprt *xprt)
+{
+       dprintk("RPC:      xs_destroy xprt %p\n", xprt);
+
+       cancel_delayed_work(&xprt->connect_worker);
+       flush_scheduled_work();
+
+       xprt_disconnect(xprt);
+       xs_close(xprt);
+       kfree(xprt->slot);
+}
+
+static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
+{
+       return (struct rpc_xprt *) sk->sk_user_data;
+}
+
+/**
+ * xs_udp_data_ready - "data ready" callback for UDP sockets
+ * @sk: socket with data to read
+ * @len: how much data to read
+ *
+ */
+static void xs_udp_data_ready(struct sock *sk, int len)
+{
+       struct rpc_task *task;
+       struct rpc_xprt *xprt;
+       struct rpc_rqst *rovr;
+       struct sk_buff *skb;
+       int err, repsize, copied;
+       u32 _xid, *xp;
+
+       read_lock(&sk->sk_callback_lock);
+       dprintk("RPC:      xs_udp_data_ready...\n");
+       if (!(xprt = xprt_from_sock(sk)))
+               goto out;
+
+       if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL)
+               goto out;
+
+       if (xprt->shutdown)
+               goto dropit;
+
+       repsize = skb->len - sizeof(struct udphdr);
+       if (repsize < 4) {
+               dprintk("RPC:      impossible RPC reply size %d!\n", repsize);
+               goto dropit;
+       }
+
+       /* Copy the XID from the skb... */
+       xp = skb_header_pointer(skb, sizeof(struct udphdr),
+                               sizeof(_xid), &_xid);
+       if (xp == NULL)
+               goto dropit;
+
+       /* Look up and lock the request corresponding to the given XID */
+       spin_lock(&xprt->transport_lock);
+       rovr = xprt_lookup_rqst(xprt, *xp);
+       if (!rovr)
+               goto out_unlock;
+       task = rovr->rq_task;
+
+       if ((copied = rovr->rq_private_buf.buflen) > repsize)
+               copied = repsize;
+
+       /* Suck it into the iovec, verify checksum if not done by hw. */
+       if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb))
+               goto out_unlock;
+
+       /* Something worked... */
+       dst_confirm(skb->dst);
+
+       xprt_adjust_cwnd(task, copied);
+       xprt_update_rtt(task);
+       xprt_complete_rqst(task, copied);
+
+ out_unlock:
+       spin_unlock(&xprt->transport_lock);
+ dropit:
+       skb_free_datagram(sk, skb);
+ out:
+       read_unlock(&sk->sk_callback_lock);
+}
+
+static inline size_t xs_tcp_copy_data(skb_reader_t *desc, void *p, size_t len)
+{
+       if (len > desc->count)
+               len = desc->count;
+       if (skb_copy_bits(desc->skb, desc->offset, p, len)) {
+               dprintk("RPC:      failed to copy %zu bytes from skb. %zu bytes remain\n",
+                               len, desc->count);
+               return 0;
+       }
+       desc->offset += len;
+       desc->count -= len;
+       dprintk("RPC:      copied %zu bytes from skb. %zu bytes remain\n",
+                       len, desc->count);
+       return len;
+}
+
+static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, skb_reader_t *desc)
+{
+       size_t len, used;
+       char *p;
+
+       p = ((char *) &xprt->tcp_recm) + xprt->tcp_offset;
+       len = sizeof(xprt->tcp_recm) - xprt->tcp_offset;
+       used = xs_tcp_copy_data(desc, p, len);
+       xprt->tcp_offset += used;
+       if (used != len)
+               return;
+
+       xprt->tcp_reclen = ntohl(xprt->tcp_recm);
+       if (xprt->tcp_reclen & RPC_LAST_STREAM_FRAGMENT)
+               xprt->tcp_flags |= XPRT_LAST_FRAG;
+       else
+               xprt->tcp_flags &= ~XPRT_LAST_FRAG;
+       xprt->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK;
+
+       xprt->tcp_flags &= ~XPRT_COPY_RECM;
+       xprt->tcp_offset = 0;
+
+       /* Sanity check of the record length */
+       if (unlikely(xprt->tcp_reclen < 4)) {
+               dprintk("RPC:      invalid TCP record fragment length\n");
+               xprt_disconnect(xprt);
+               return;
+       }
+       dprintk("RPC:      reading TCP record fragment of length %d\n",
+                       xprt->tcp_reclen);
+}
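
For readers less familiar with RPC record marking over TCP (RFC 1831, section 10), the fragment header parsed above is a single 4-byte big-endian word: the highest-order bit flags the last fragment of a record, and the remaining 31 bits carry the fragment length, which is what the RPC_LAST_STREAM_FRAGMENT and RPC_FRAGMENT_SIZE_MASK tests extract. A minimal standalone sketch of the same decode, with the two mask values written out locally as assumptions rather than taken from the kernel headers:

    /* Illustrative sketch only -- not part of the patch above. It mirrors
     * what xs_tcp_read_fraghdr() does once the 4-byte marker is complete.
     * EX_LAST_FRAGMENT and EX_FRAGMENT_SIZE are stand-ins assumed to match
     * the kernel's RPC_LAST_STREAM_FRAGMENT and RPC_FRAGMENT_SIZE_MASK.
     */
    #include <stdint.h>
    #include <arpa/inet.h>

    #define EX_LAST_FRAGMENT   0x80000000U
    #define EX_FRAGMENT_SIZE   0x7fffffffU

    static void decode_record_marker(uint32_t marker_be, int *last_frag,
                                     uint32_t *frag_len)
    {
            uint32_t m = ntohl(marker_be);   /* marker arrives in network order */

            *last_frag = (m & EX_LAST_FRAGMENT) != 0;
            *frag_len  = m & EX_FRAGMENT_SIZE;
    }
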
+
+static void xs_tcp_check_recm(struct rpc_xprt *xprt)
+{
+       dprintk("RPC:      xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u, tcp_flags = %lx\n",
+                       xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen, xprt->tcp_flags);
+       if (xprt->tcp_offset == xprt->tcp_reclen) {
+               xprt->tcp_flags |= XPRT_COPY_RECM;
+               xprt->tcp_offset = 0;
+               if (xprt->tcp_flags & XPRT_LAST_FRAG) {
+                       xprt->tcp_flags &= ~XPRT_COPY_DATA;
+                       xprt->tcp_flags |= XPRT_COPY_XID;
+                       xprt->tcp_copied = 0;
+               }
+       }
+}
+
+static inline void xs_tcp_read_xid(struct rpc_xprt *xprt, skb_reader_t *desc)
+{
+       size_t len, used;
+       char *p;
+
+       len = sizeof(xprt->tcp_xid) - xprt->tcp_offset;
+       dprintk("RPC:      reading XID (%Zu bytes)\n", len);
+       p = ((char *) &xprt->tcp_xid) + xprt->tcp_offset;
+       used = xs_tcp_copy_data(desc, p, len);
+       xprt->tcp_offset += used;
+       if (used != len)
+               return;
+       xprt->tcp_flags &= ~XPRT_COPY_XID;
+       xprt->tcp_flags |= XPRT_COPY_DATA;
+       xprt->tcp_copied = 4;
+       dprintk("RPC:      reading reply for XID %08x\n",
+                                               ntohl(xprt->tcp_xid));
+       xs_tcp_check_recm(xprt);
+}
+
+static inline void xs_tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
+{
+       struct rpc_rqst *req;
+       struct xdr_buf *rcvbuf;
+       size_t len;
+       ssize_t r;
+
+       /* Find and lock the request corresponding to this xid */
+       spin_lock(&xprt->transport_lock);
+       req = xprt_lookup_rqst(xprt, xprt->tcp_xid);
+       if (!req) {
+               xprt->tcp_flags &= ~XPRT_COPY_DATA;
+               dprintk("RPC:      XID %08x request not found!\n",
+                               ntohl(xprt->tcp_xid));
+               spin_unlock(&xprt->transport_lock);
+               return;
+       }
+
+       rcvbuf = &req->rq_private_buf;
+       len = desc->count;
+       if (len > xprt->tcp_reclen - xprt->tcp_offset) {
+               skb_reader_t my_desc;
+
+               len = xprt->tcp_reclen - xprt->tcp_offset;
+               memcpy(&my_desc, desc, sizeof(my_desc));
+               my_desc.count = len;
+               r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
+                                         &my_desc, xs_tcp_copy_data);
+               desc->count -= r;
+               desc->offset += r;
+       } else
+               r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
+                                         desc, xs_tcp_copy_data);
+
+       if (r > 0) {
+               xprt->tcp_copied += r;
+               xprt->tcp_offset += r;
+       }
+       if (r != len) {
+               /* Error when copying to the receive buffer,
+                * usually because we weren't able to allocate
+                * additional buffer pages. All we can do now
+                * is turn off XPRT_COPY_DATA, so the request
+                * will not receive any additional updates,
+                * and time out.
+                * Any remaining data from this record will
+                * be discarded.
+                */
+               xprt->tcp_flags &= ~XPRT_COPY_DATA;
+               dprintk("RPC:      XID %08x truncated request\n",
+                               ntohl(xprt->tcp_xid));
+               dprintk("RPC:      xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
+                               xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen);
+               goto out;
+       }
+
+       dprintk("RPC:      XID %08x read %Zd bytes\n",
+                       ntohl(xprt->tcp_xid), r);
+       dprintk("RPC:      xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
+                       xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen);
+
+       if (xprt->tcp_copied == req->rq_private_buf.buflen)
+               xprt->tcp_flags &= ~XPRT_COPY_DATA;
+       else if (xprt->tcp_offset == xprt->tcp_reclen) {
+               if (xprt->tcp_flags & XPRT_LAST_FRAG)
+                       xprt->tcp_flags &= ~XPRT_COPY_DATA;
+       }
+
+out:
+       if (!(xprt->tcp_flags & XPRT_COPY_DATA))
+               xprt_complete_rqst(req->rq_task, xprt->tcp_copied);
+       spin_unlock(&xprt->transport_lock);
+       xs_tcp_check_recm(xprt);
+}
+
+static inline void xs_tcp_read_discard(struct rpc_xprt *xprt, skb_reader_t *desc)
+{
+       size_t len;
+
+       len = xprt->tcp_reclen - xprt->tcp_offset;
+       if (len > desc->count)
+               len = desc->count;
+       desc->count -= len;
+       desc->offset += len;
+       xprt->tcp_offset += len;
+       dprintk("RPC:      discarded %Zu bytes\n", len);
+       xs_tcp_check_recm(xprt);
+}
+
+static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len)
+{
+       struct rpc_xprt *xprt = rd_desc->arg.data;
+       skb_reader_t desc = {
+               .skb    = skb,
+               .offset = offset,
+               .count  = len,
+               .csum   = 0
+       };
+
+       dprintk("RPC:      xs_tcp_data_recv started\n");
+       do {
+               /* Read in a new fragment marker if necessary */
+               /* Can we ever really expect to get completely empty fragments? */
+               if (xprt->tcp_flags & XPRT_COPY_RECM) {
+                       xs_tcp_read_fraghdr(xprt, &desc);
+                       continue;
+               }
+               /* Read in the xid if necessary */
+               if (xprt->tcp_flags & XPRT_COPY_XID) {
+                       xs_tcp_read_xid(xprt, &desc);
+                       continue;
+               }
+               /* Read in the request data */
+               if (xprt->tcp_flags & XPRT_COPY_DATA) {
+                       xs_tcp_read_request(xprt, &desc);
+                       continue;
+               }
+               /* Skip over any trailing bytes on short reads */
+               xs_tcp_read_discard(xprt, &desc);
+       } while (desc.count);
+       dprintk("RPC:      xs_tcp_data_recv done\n");
+       return len - desc.count;
+}
+
+/**
+ * xs_tcp_data_ready - "data ready" callback for TCP sockets
+ * @sk: socket with data to read
+ * @bytes: how much data to read
+ *
+ */
+static void xs_tcp_data_ready(struct sock *sk, int bytes)
+{
+       struct rpc_xprt *xprt;
+       read_descriptor_t rd_desc;
+
+       read_lock(&sk->sk_callback_lock);
+       dprintk("RPC:      xs_tcp_data_ready...\n");
+       if (!(xprt = xprt_from_sock(sk)))
+               goto out;
+       if (xprt->shutdown)
+               goto out;
+
+       /* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
+       rd_desc.arg.data = xprt;
+       rd_desc.count = 65536;
+       tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
+out:
+       read_unlock(&sk->sk_callback_lock);
+}
+
+/**
+ * xs_tcp_state_change - callback to handle TCP socket state changes
+ * @sk: socket whose state has changed
+ *
+ */
+static void xs_tcp_state_change(struct sock *sk)
+{
+       struct rpc_xprt *xprt;
+
+       read_lock(&sk->sk_callback_lock);
+       if (!(xprt = xprt_from_sock(sk)))
+               goto out;
+       dprintk("RPC:      xs_tcp_state_change client %p...\n", xprt);
+       dprintk("RPC:      state %x conn %d dead %d zapped %d\n",
+                               sk->sk_state, xprt_connected(xprt),
+                               sock_flag(sk, SOCK_DEAD),
+                               sock_flag(sk, SOCK_ZAPPED));
+
+       switch (sk->sk_state) {
+       case TCP_ESTABLISHED:
+               spin_lock_bh(&xprt->transport_lock);
+               if (!xprt_test_and_set_connected(xprt)) {
+                       /* Reset TCP record info */
+                       xprt->tcp_offset = 0;
+                       xprt->tcp_reclen = 0;
+                       xprt->tcp_copied = 0;
+                       xprt->tcp_flags = XPRT_COPY_RECM | XPRT_COPY_XID;
+                       xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
+                       xprt_wake_pending_tasks(xprt, 0);
+               }
+               spin_unlock_bh(&xprt->transport_lock);
+               break;
+       case TCP_SYN_SENT:
+       case TCP_SYN_RECV:
+               break;
+       default:
+               xprt_disconnect(xprt);
+               break;
+       }
+ out:
+       read_unlock(&sk->sk_callback_lock);
+}
+
+/**
+ * xs_udp_write_space - callback invoked when socket buffer space
+ *                             becomes available
+ * @sk: socket whose state has changed
+ *
+ * Called when more output buffer space is available for this socket.
+ * We try not to wake our writers until they can make "significant"
+ * progress; otherwise we'll waste resources thrashing kernel_sendmsg
+ * with a bunch of small requests.
+ */
+static void xs_udp_write_space(struct sock *sk)
+{
+       read_lock(&sk->sk_callback_lock);
+
+       /* from net/core/sock.c:sock_def_write_space */
+       if (sock_writeable(sk)) {
+               struct socket *sock;
+               struct rpc_xprt *xprt;
+
+               if (unlikely(!(sock = sk->sk_socket)))
+                       goto out;
+               if (unlikely(!(xprt = xprt_from_sock(sk))))
+                       goto out;
+               if (unlikely(!test_and_clear_bit(SOCK_NOSPACE, &sock->flags)))
+                       goto out;
+
+               xprt_write_space(xprt);
+       }
+
+ out:
+       read_unlock(&sk->sk_callback_lock);
+}
+
+/**
+ * xs_tcp_write_space - callback invoked when socket buffer space
+ *                             becomes available
+ * @sk: socket whose state has changed
+ *
+ * Called when more output buffer space is available for this socket.
+ * We try not to wake our writers until they can make "significant"
+ * progress; otherwise we'll waste resources thrashing kernel_sendmsg
+ * with a bunch of small requests.
+ */
+static void xs_tcp_write_space(struct sock *sk)
+{
+       read_lock(&sk->sk_callback_lock);
+
+       /* from net/core/stream.c:sk_stream_write_space */
+       if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
+               struct socket *sock;
+               struct rpc_xprt *xprt;
+
+               if (unlikely(!(sock = sk->sk_socket)))
+                       goto out;
+               if (unlikely(!(xprt = xprt_from_sock(sk))))
+                       goto out;
+               if (unlikely(!test_and_clear_bit(SOCK_NOSPACE, &sock->flags)))
+                       goto out;
+
+               xprt_write_space(xprt);
+       }
+
+ out:
+       read_unlock(&sk->sk_callback_lock);
+}
+
+static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
+{
+       struct sock *sk = xprt->inet;
+
+       if (xprt->rcvsize) {
+               sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
+               sk->sk_rcvbuf = xprt->rcvsize * xprt->max_reqs *  2;
+       }
+       if (xprt->sndsize) {
+               sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
+               sk->sk_sndbuf = xprt->sndsize * xprt->max_reqs * 2;
+               sk->sk_write_space(sk);
+       }
+}
+
+/**
+ * xs_udp_set_buffer_size - set send and receive limits
+ * @xprt: generic transport
+ * @sndsize: requested size of send buffer, in bytes
+ * @rcvsize: requested size of receive buffer, in bytes
+ *
+ * Set socket send and receive buffer size limits.
+ */
+static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize)
+{
+       xprt->sndsize = 0;
+       if (sndsize)
+               xprt->sndsize = sndsize + 1024;
+       xprt->rcvsize = 0;
+       if (rcvsize)
+               xprt->rcvsize = rcvsize + 1024;
+
+       xs_udp_do_set_buffer_size(xprt);
+}
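
As a worked illustration of the sizing above (the input numbers are assumptions, not values taken from this patch): a caller passing sndsize = 4096 with max_reqs = 16 would leave xprt->sndsize at 4096 + 1024 = 5120, and xs_udp_do_set_buffer_size() would then set sk_sndbuf to 5120 * 16 * 2 = 163840 bytes; the receive limit is derived from rcvsize in exactly the same way.
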
+
+/**
+ * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
+ * @task: task that timed out
+ *
+ * Adjust the congestion window after a retransmit timeout has occurred.
+ */
+static void xs_udp_timer(struct rpc_task *task)
+{
+       xprt_adjust_cwnd(task, -ETIMEDOUT);
+}
+
+static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock)
+{
+       struct sockaddr_in myaddr = {
+               .sin_family = AF_INET,
+       };
+       int err;
+       unsigned short port = xprt->port;
+
+       do {
+               myaddr.sin_port = htons(port);
+               err = sock->ops->bind(sock, (struct sockaddr *) &myaddr,
+                                               sizeof(myaddr));
+               if (err == 0) {
+                       xprt->port = port;
+                       dprintk("RPC:      xs_bindresvport bound to port %u\n",
+                                       port);
+                       return 0;
+               }
+               if (port <= xprt_min_resvport)
+                       port = xprt_max_resvport;
+               else
+                       port--;
+       } while (err == -EADDRINUSE && port != xprt->port);
+
+       dprintk("RPC:      can't bind to reserved port (%d).\n", -err);
+       return err;
+}
+
+/**
+ * xs_udp_connect_worker - set up a UDP socket
+ * @args: RPC transport to connect
+ *
+ * Invoked from a work queue.
+ */
+static void xs_udp_connect_worker(void *args)
+{
+       struct rpc_xprt *xprt = (struct rpc_xprt *) args;
+       struct socket *sock = xprt->sock;
+       int err, status = -EIO;
+
+       if (xprt->shutdown || xprt->addr.sin_port == 0)
+               goto out;
+
+       dprintk("RPC:      xs_udp_connect_worker for xprt %p\n", xprt);
+
+       /* Start by resetting any existing state */
+       xs_close(xprt);
+
+       if ((err = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock)) < 0) {
+               dprintk("RPC:      can't create UDP transport socket (%d).\n", -err);
+               goto out;
+       }
+
+       if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) {
+               sock_release(sock);
+               goto out;
+       }
+
+       if (!xprt->inet) {
+               struct sock *sk = sock->sk;
+
+               write_lock_bh(&sk->sk_callback_lock);
+
+               sk->sk_user_data = xprt;
+               xprt->old_data_ready = sk->sk_data_ready;
+               xprt->old_state_change = sk->sk_state_change;
+               xprt->old_write_space = sk->sk_write_space;
+               sk->sk_data_ready = xs_udp_data_ready;
+               sk->sk_write_space = xs_udp_write_space;
+               sk->sk_no_check = UDP_CSUM_NORCV;
+
+               xprt_set_connected(xprt);
+
+               /* Reset to new socket */
+               xprt->sock = sock;
+               xprt->inet = sk;
+
+               write_unlock_bh(&sk->sk_callback_lock);
+       }
+       xs_udp_do_set_buffer_size(xprt);
+       status = 0;
+out:
+       xprt_wake_pending_tasks(xprt, status);
+       xprt_clear_connecting(xprt);
+}
+
+/*
+ * We need to preserve the port number so the reply cache on the server can
+ * find our cached RPC replies when we get around to reconnecting.
+ */
+static void xs_tcp_reuse_connection(struct rpc_xprt *xprt)
+{
+       int result;
+       struct socket *sock = xprt->sock;
+       struct sockaddr any;
+
+       dprintk("RPC:      disconnecting xprt %p to reuse port\n", xprt);
+
+       /*
+        * Disconnect the transport socket by doing a connect operation
+        * with AF_UNSPEC.  This should return immediately...
+        */
+       memset(&any, 0, sizeof(any));
+       any.sa_family = AF_UNSPEC;
+       result = sock->ops->connect(sock, &any, sizeof(any), 0);
+       if (result)
+               dprintk("RPC:      AF_UNSPEC connect return code %d\n",
+                               result);
+}
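
Connecting a socket to an AF_UNSPEC address is the usual way to dissolve its association without releasing the local binding, which is why the bound port survives and the server's duplicate reply cache can still match the next connection, as the comment above explains. A hedged userspace sketch of the same idiom (the function name is invented for illustration):

    /* Illustrative sketch only. Dissolves a socket's association by
     * connecting to AF_UNSPEC, as xs_tcp_reuse_connection() does on the
     * kernel side, so a later connect() reuses the same local port.
     */
    #include <string.h>
    #include <sys/socket.h>

    static int dissolve_association(int fd)
    {
            struct sockaddr any;

            memset(&any, 0, sizeof(any));
            any.sa_family = AF_UNSPEC;
            return connect(fd, &any, sizeof(any));
    }
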
+
+/**
+ * xs_tcp_connect_worker - connect a TCP socket to a remote endpoint
+ * @args: RPC transport to connect
+ *
+ * Invoked from a work queue.
+ */
+static void xs_tcp_connect_worker(void *args)
+{
+       struct rpc_xprt *xprt = (struct rpc_xprt *)args;
+       struct socket *sock = xprt->sock;
+       int err, status = -EIO;
+
+       if (xprt->shutdown || xprt->addr.sin_port == 0)
+               goto out;
+
+       dprintk("RPC:      xs_tcp_connect_worker for xprt %p\n", xprt);
+
+       if (!xprt->sock) {
+               /* start from scratch */
+               if ((err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock)) < 0) {
+                       dprintk("RPC:      can't create TCP transport socket (%d).\n", -err);
+                       goto out;
+               }
+
+               if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) {
+                       sock_release(sock);
+                       goto out;
+               }
+       } else
+               /* "close" the socket, preserving the local port */
+               xs_tcp_reuse_connection(xprt);
+
+       if (!xprt->inet) {
+               struct sock *sk = sock->sk;
+
+               write_lock_bh(&sk->sk_callback_lock);
+
+               sk->sk_user_data = xprt;
+               xprt->old_data_ready = sk->sk_data_ready;
+               xprt->old_state_change = sk->sk_state_change;
+               xprt->old_write_space = sk->sk_write_space;
+               sk->sk_data_ready = xs_tcp_data_ready;
+               sk->sk_state_change = xs_tcp_state_change;
+               sk->sk_write_space = xs_tcp_write_space;
+
+               /* socket options */
+               sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
+               sock_reset_flag(sk, SOCK_LINGER);
+               tcp_sk(sk)->linger2 = 0;
+               tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
+
+               xprt_clear_connected(xprt);
+
+               /* Reset to new socket */
+               xprt->sock = sock;
+               xprt->inet = sk;
+
+               write_unlock_bh(&sk->sk_callback_lock);
+       }
+
+       /* Tell the socket layer to start connecting... */
+       status = sock->ops->connect(sock, (struct sockaddr *) &xprt->addr,
+                       sizeof(xprt->addr), O_NONBLOCK);
+       dprintk("RPC: %p  connect status %d connected %d sock state %d\n",
+                       xprt, -status, xprt_connected(xprt), sock->sk->sk_state);
+       if (status < 0) {
+               switch (status) {
+                       case -EINPROGRESS:
+                       case -EALREADY:
+                               goto out_clear;
+                       case -ECONNREFUSED:
+                       case -ECONNRESET:
+                               /* retry with existing socket, after a delay */
+                               break;
+                       default:
+                               /* get rid of existing socket, and retry */
+                               xs_close(xprt);
+                               break;
+               }
+       }
+out:
+       xprt_wake_pending_tasks(xprt, status);
+out_clear:
+       xprt_clear_connecting(xprt);
+}
+
+/**
+ * xs_connect - connect a socket to a remote endpoint
+ * @task: address of RPC task that manages state of connect request
+ *
+ * TCP: If the remote end dropped the connection, delay reconnecting.
+ *
+ * UDP socket connects are synchronous, but we use a work queue anyway
+ * to guarantee that even unprivileged user processes can set up a
+ * socket on a privileged port.
+ *
+ * If a UDP socket connect fails, the delay behavior here prevents
+ * retry floods (hard mounts).
+ */
+static void xs_connect(struct rpc_task *task)
+{
+       struct rpc_xprt *xprt = task->tk_xprt;
+
+       if (xprt_test_and_set_connecting(xprt))
+               return;
+
+       if (xprt->sock != NULL) {
+               dprintk("RPC:      xs_connect delayed xprt %p for %lu seconds\n",
+                               xprt, xprt->reestablish_timeout / HZ);
+               schedule_delayed_work(&xprt->connect_worker,
+                                       xprt->reestablish_timeout);
+               xprt->reestablish_timeout <<= 1;
+               if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO)
+                       xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
+       } else {
+               dprintk("RPC:      xs_connect scheduled xprt %p\n", xprt);
+               schedule_work(&xprt->connect_worker);
+
+               /* flush_scheduled_work can sleep... */
+               if (!RPC_IS_ASYNC(task))
+                       flush_scheduled_work();
+       }
+}
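
As a hedged illustration of the reconnect backoff above: assuming XS_TCP_INIT_REEST_TO and XS_TCP_MAX_REEST_TO work out to roughly 3 and 300 seconds (their definitions appear earlier in this file and are not quoted here), a transport that repeatedly loses its connection would delay successive reconnects by about 3, 6, 12, 24, ... seconds, with the delay clamped once the doubled value exceeds the maximum; a successful TCP_ESTABLISHED transition resets the delay in xs_tcp_state_change().
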
+
+static struct rpc_xprt_ops xs_udp_ops = {
+       .set_buffer_size        = xs_udp_set_buffer_size,
+       .reserve_xprt           = xprt_reserve_xprt_cong,
+       .release_xprt           = xprt_release_xprt_cong,
+       .connect                = xs_connect,
+       .send_request           = xs_udp_send_request,
+       .set_retrans_timeout    = xprt_set_retrans_timeout_rtt,
+       .timer                  = xs_udp_timer,
+       .release_request        = xprt_release_rqst_cong,
+       .close                  = xs_close,
+       .destroy                = xs_destroy,
+};
+
+static struct rpc_xprt_ops xs_tcp_ops = {
+       .reserve_xprt           = xprt_reserve_xprt,
+       .release_xprt           = xprt_release_xprt,
+       .connect                = xs_connect,
+       .send_request           = xs_tcp_send_request,
+       .set_retrans_timeout    = xprt_set_retrans_timeout_def,
+       .close                  = xs_close,
+       .destroy                = xs_destroy,
+};
+
+/**
+ * xs_setup_udp - Set up transport to use a UDP socket
+ * @xprt: transport to set up
+ * @to:   timeout parameters
+ *
+ */
+int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to)
+{
+       size_t slot_table_size;
+
+       dprintk("RPC:      setting up udp-ipv4 transport...\n");
+
+       xprt->max_reqs = xprt_udp_slot_table_entries;
+       slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]);
+       xprt->slot = kmalloc(slot_table_size, GFP_KERNEL);
+       if (xprt->slot == NULL)
+               return -ENOMEM;
+       memset(xprt->slot, 0, slot_table_size);
+
+       xprt->prot = IPPROTO_UDP;
+       xprt->port = xprt_max_resvport;
+       xprt->tsh_size = 0;
+       xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0;
+       /* XXX: header size can vary due to auth type, IPv6, etc. */
+       xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
+
+       INIT_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt);
+       xprt->bind_timeout = XS_BIND_TO;
+       xprt->connect_timeout = XS_UDP_CONN_TO;
+       xprt->reestablish_timeout = XS_UDP_REEST_TO;
+       xprt->idle_timeout = XS_IDLE_DISC_TO;
+
+       xprt->ops = &xs_udp_ops;
+
+       if (to)
+               xprt->timeout = *to;
+       else
+               xprt_set_timeout(&xprt->timeout, 5, 5 * HZ);
+
+       return 0;
+}
+
+/**
+ * xs_setup_tcp - Set up transport to use a TCP socket
+ * @xprt: transport to set up
+ * @to: timeout parameters
+ *
+ */
+int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to)
+{
+       size_t slot_table_size;
+
+       dprintk("RPC:      setting up tcp-ipv4 transport...\n");
+
+       xprt->max_reqs = xprt_tcp_slot_table_entries;
+       slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]);
+       xprt->slot = kmalloc(slot_table_size, GFP_KERNEL);
+       if (xprt->slot == NULL)
+               return -ENOMEM;
+       memset(xprt->slot, 0, slot_table_size);
+
+       xprt->prot = IPPROTO_TCP;
+       xprt->port = xprt_max_resvport;
+       xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
+       xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0;
+       xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
+
+       INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt);
+       xprt->bind_timeout = XS_BIND_TO;
+       xprt->connect_timeout = XS_TCP_CONN_TO;
+       xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
+       xprt->idle_timeout = XS_IDLE_DISC_TO;
+
+       xprt->ops = &xs_tcp_ops;
+
+       if (to)
+               xprt->timeout = *to;
+       else
+               xprt_set_timeout(&xprt->timeout, 2, 60 * HZ);
+
+       return 0;
+}
index cbb0ba34a60092a90f717440f110c06b30e63289..0db9e57013fdfaf738b608d688ea44b1d5e7cfe6 100644 (file)
@@ -1192,46 +1192,6 @@ int xfrm_bundle_ok(struct xfrm_dst *first, struct flowi *fl, int family)
 
 EXPORT_SYMBOL(xfrm_bundle_ok);
 
-/* Well... that's _TASK_. We need to scan through transformation
- * list and figure out what mss tcp should generate in order to
- * final datagram fit to mtu. Mama mia... :-)
- *
- * Apparently, some easy way exists, but we used to choose the most
- * bizarre ones. :-) So, raising Kalashnikov... tra-ta-ta.
- *
- * Consider this function as something like dark humour. :-)
- */
-static int xfrm_get_mss(struct dst_entry *dst, u32 mtu)
-{
-       int res = mtu - dst->header_len;
-
-       for (;;) {
-               struct dst_entry *d = dst;
-               int m = res;
-
-               do {
-                       struct xfrm_state *x = d->xfrm;
-                       if (x) {
-                               spin_lock_bh(&x->lock);
-                               if (x->km.state == XFRM_STATE_VALID &&
-                                   x->type && x->type->get_max_size)
-                                       m = x->type->get_max_size(d->xfrm, m);
-                               else
-                                       m += x->props.header_len;
-                               spin_unlock_bh(&x->lock);
-                       }
-               } while ((d = d->child) != NULL);
-
-               if (m <= mtu)
-                       break;
-               res -= (m - mtu);
-               if (res < 88)
-                       return mtu;
-       }
-
-       return res + dst->header_len;
-}
-
 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
 {
        int err = 0;
@@ -1252,8 +1212,6 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
                        dst_ops->negative_advice = xfrm_negative_advice;
                if (likely(dst_ops->link_failure == NULL))
                        dst_ops->link_failure = xfrm_link_failure;
-               if (likely(dst_ops->get_mss == NULL))
-                       dst_ops->get_mss = xfrm_get_mss;
                if (likely(afinfo->garbage_collect == NULL))
                        afinfo->garbage_collect = __xfrm_garbage_collect;
                xfrm_policy_afinfo[afinfo->family] = afinfo;
@@ -1281,7 +1239,6 @@ int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
                        dst_ops->check = NULL;
                        dst_ops->negative_advice = NULL;
                        dst_ops->link_failure = NULL;
-                       dst_ops->get_mss = NULL;
                        afinfo->garbage_collect = NULL;
                }
        }
index 9d206c282cf193a9867ff3b2c0078a683588a066..8b9a4747417d00bbf4c6b9050f59f7ca4eb24951 100644 (file)
@@ -1026,6 +1026,12 @@ void xfrm_state_delete_tunnel(struct xfrm_state *x)
 }
 EXPORT_SYMBOL(xfrm_state_delete_tunnel);
 
+/*
+ * This function is NOT optimal.  For example, with ESP it will give an
+ * MTU that's usually two bytes short of being optimal.  However, it will
+ * usually give an answer that's a multiple of 4 provided the input is
+ * also a multiple of 4.
+ */
 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
 {
        int res = mtu;
index 9623a61dfc763df8511c6d515aac2705454ce64c..3d34f3de7e82d06e646b1e79f1f1b18f377ec1d8 100644 (file)
@@ -768,7 +768,7 @@ static int dummy_socket_getpeersec(struct socket *sock, char __user *optval,
        return -ENOPROTOOPT;
 }
 
-static inline int dummy_sk_alloc_security (struct sock *sk, int family, int priority)
+static inline int dummy_sk_alloc_security (struct sock *sk, int family, gfp_t priority)
 {
        return 0;
 }
index b13be15165f57d527ea6e334bf1ca8fe9606b6b8..447a1e0f48cb1c9bdfa520f874a76b3877b054bb 100644 (file)
@@ -262,7 +262,7 @@ static void superblock_free_security(struct super_block *sb)
 }
 
 #ifdef CONFIG_SECURITY_NETWORK
-static int sk_alloc_security(struct sock *sk, int family, int priority)
+static int sk_alloc_security(struct sock *sk, int family, gfp_t priority)
 {
        struct sk_security_struct *ssec;
 
@@ -3380,7 +3380,7 @@ out:
        return err;
 }
 
-static int selinux_sk_alloc_security(struct sock *sk, int family, int priority)
+static int selinux_sk_alloc_security(struct sock *sk, int family, gfp_t priority)
 {
        return sk_alloc_security(sk, family, priority);
 }
index b2d5db20ec8cfb9e70639eb59e726f9bade0c74d..559ead6367da0c0f8996b6d6b4fbadb2370cdef7 100644 (file)
@@ -20,6 +20,7 @@
 
 #include <asm/io.h>
 #include <asm/irq.h>
+#include <asm/sizes.h>
 #include <asm/hardware/amba.h>
 
 #include <sound/driver.h>
index e72cec77f0db8ebc7bf911afd5e8e48eac8bd979..129abab5ce98a42fcd3a96fc587abe0da6e31ec9 100644 (file)
@@ -190,7 +190,7 @@ static void unmark_pages(struct page *page, int order)
  *
  * Returns a pointer to the buffer, or NULL if not enough memory.
  */
-void *snd_malloc_pages(size_t size, unsigned int gfp_flags)
+void *snd_malloc_pages(size_t size, gfp_t gfp_flags)
 {
        int pg;
        void *res;
@@ -235,7 +235,7 @@ static void *snd_malloc_dev_pages(struct device *dev, size_t size, dma_addr_t *d
 {
        int pg;
        void *res;
-       unsigned int gfp_flags;
+       gfp_t gfp_flags;
 
        snd_assert(size > 0, return NULL);
        snd_assert(dma != NULL, return NULL);
index 207c2c54bf1de91cbc5e69648098289e89f62f47..0e4df8826eedf6128a3f0591ad72559205e83261 100644 (file)
@@ -51,7 +51,7 @@ static int snd_seq_gf1_copy_wave_from_stream(snd_gf1_ops_t *ops,
        gf1_wave_t *wp, *prev;
        gf1_xwave_t xp;
        int err;
-       unsigned int gfp_mask;
+       gfp_t gfp_mask;
        unsigned int real_size;
        
        gfp_mask = atomic ? GFP_ATOMIC : GFP_KERNEL;
@@ -144,7 +144,8 @@ static int snd_seq_gf1_put(void *private_data, snd_seq_kinstr_t *instr,
        snd_gf1_ops_t *ops = (snd_gf1_ops_t *)private_data;
        gf1_instrument_t *ip;
        gf1_xinstrument_t ix;
-       int err, gfp_mask;
+       int err;
+       gfp_t gfp_mask;
 
        if (cmd != SNDRV_SEQ_INSTR_PUT_CMD_CREATE)
                return -EINVAL;
index 67c24c8e8e7be4eb3d3a7848d5fef3963f88e741..7c19fbbc5d0ffeaa34b28c2d918196b4be738bcb 100644 (file)
@@ -129,7 +129,7 @@ static int snd_seq_iwffff_copy_wave_from_stream(snd_iwffff_ops_t *ops,
        iwffff_wave_t *wp, *prev;
        iwffff_xwave_t xp;
        int err;
-       unsigned int gfp_mask;
+       gfp_t gfp_mask;
        unsigned int real_size;
        
        gfp_mask = atomic ? GFP_ATOMIC : GFP_KERNEL;
@@ -236,7 +236,7 @@ static int snd_seq_iwffff_put(void *private_data, snd_seq_kinstr_t *instr,
        iwffff_layer_t *lp, *prev_lp;
        iwffff_xlayer_t lx;
        int err;
-       unsigned int gfp_mask;
+       gfp_t gfp_mask;
 
        if (cmd != SNDRV_SEQ_INSTR_PUT_CMD_CREATE)
                return -EINVAL;
index 6183d21510345c9cb9f1c427235ffcf6724d0c2c..17ab94e760737242187450416dab8b6225ca5a32 100644 (file)
@@ -57,7 +57,8 @@ static int snd_seq_simple_put(void *private_data, snd_seq_kinstr_t *instr,
        snd_simple_ops_t *ops = (snd_simple_ops_t *)private_data;
        simple_instrument_t *ip;
        simple_xinstrument_t ix;
-       int err, gfp_mask;
+       int err;
+       gfp_t gfp_mask;
        unsigned int real_size;
 
        if (cmd != SNDRV_SEQ_INSTR_PUT_CMD_CREATE)
index 9a2f50f0b184e58c03239e30928075a97d9c34ca..222014cafc1a9050c5670b28e55a2ebc5a1c91c9 100644 (file)
@@ -116,7 +116,7 @@ typedef struct {
     const char *name;
     const char *name2;
     struct module *owner;
-    void *(*dma_alloc)(unsigned int, int);
+    void *(*dma_alloc)(unsigned int, gfp_t);
     void (*dma_free)(void *, unsigned int);
     int (*irqinit)(void);
 #ifdef MODULE
index 8daaf87664ba1b10ee542aa3af768daebb3ab433..59eb53f893184a22a26a2fa52b2bca055179c758 100644 (file)
@@ -114,7 +114,7 @@ static ssize_t ata_ctx_u16le(const u_char *userPtr, size_t userCount,
 /*** Low level stuff *********************************************************/
 
 
-static void *AtaAlloc(unsigned int size, int flags);
+static void *AtaAlloc(unsigned int size, gfp_t flags);
 static void AtaFree(void *, unsigned int size);
 static int AtaIrqInit(void);
 #ifdef MODULE
@@ -810,7 +810,7 @@ static TRANS transFalconExpanding = {
  * Atari (TT/Falcon)
  */
 
-static void *AtaAlloc(unsigned int size, int flags)
+static void *AtaAlloc(unsigned int size, gfp_t flags)
 {
        return atari_stram_alloc(size, "dmasound");
 }
index 2ceb46f1d40f9dfd14c99eb04ab2de157e0885c3..b2bf8bac842d1a1384d32845536b20160b2a9ce0 100644 (file)
@@ -271,7 +271,7 @@ int expand_read_bal;        /* Balance factor for expanding reads (not volume!) */
 
 /*** Low level stuff *********************************************************/
 
-static void *PMacAlloc(unsigned int size, int flags);
+static void *PMacAlloc(unsigned int size, gfp_t flags);
 static void PMacFree(void *ptr, unsigned int size);
 static int PMacIrqInit(void);
 #ifdef MODULE
@@ -614,7 +614,7 @@ tas_init_frame_rates(unsigned int *prop, unsigned int l)
 /*
  * PCI PowerMac, with AWACS, Screamer, Burgundy, DACA or Tumbler and DBDMA.
  */
-static void *PMacAlloc(unsigned int size, int flags)
+static void *PMacAlloc(unsigned int size, gfp_t flags)
 {
        return kmalloc(size, flags);
 }
index 558db5311e0625b8d437a8d18d01cb444fe662d7..d59f60b2641096119a898f67319812aac988b6e4 100644 (file)
@@ -69,7 +69,7 @@ static int write_sq_block_size_half, write_sq_block_size_quarter;
 /*** Low level stuff *********************************************************/
 
 
-static void *AmiAlloc(unsigned int size, int flags);
+static void *AmiAlloc(unsigned int size, gfp_t flags);
 static void AmiFree(void *obj, unsigned int size);
 static int AmiIrqInit(void);
 #ifdef MODULE
@@ -317,7 +317,7 @@ static inline void StopDMA(void)
        enable_heartbeat();
 }
 
-static void *AmiAlloc(unsigned int size, int flags)
+static void *AmiAlloc(unsigned int size, gfp_t flags)
 {
        return amiga_chip_alloc((long)size, "dmasound [Paula]");
 }
index 92c25a0174db4d5b439ab186faf03473ef58d679..1ddaa6284b08f745d800fffaf428bd41450d486a 100644 (file)
@@ -36,7 +36,7 @@ static int expand_data;       /* Data for expanding */
 /*** Low level stuff *********************************************************/
 
 
-static void *Q40Alloc(unsigned int size, int flags);
+static void *Q40Alloc(unsigned int size, gfp_t flags);
 static void Q40Free(void *, unsigned int);
 static int Q40IrqInit(void);
 #ifdef MODULE
@@ -358,7 +358,7 @@ static TRANS transQ40Compressing = {
 
 /*** Low level stuff *********************************************************/
 
-static void *Q40Alloc(unsigned int size, int flags)
+static void *Q40Alloc(unsigned int size, gfp_t flags)
 {
          return kmalloc(size, flags); /* change to vmalloc */
 }
index e0d0365453b346bb13117df9d7a6356d643f64a3..f1a2e2c2e02fa8c1b679695fb20d4d5e892c936b 100644 (file)
@@ -163,7 +163,7 @@ static const uint8_t snd_usbmidi_cin_length[] = {
 /*
  * Submits the URB, with error handling.
  */
-static int snd_usbmidi_submit_urb(struct urb* urb, int flags)
+static int snd_usbmidi_submit_urb(struct urb* urb, gfp_t flags)
 {
        int err = usb_submit_urb(urb, flags);
        if (err < 0 && err != -ENODEV)